| Column | Type | Min–max length / distinct values |
|---|---|---|
| commit | string | 40–40 |
| old_file | string | 4–118 |
| new_file | string | 4–118 |
| old_contents | string | 0–2.94k |
| new_contents | string | 1–4.43k |
| subject | string | 15–444 |
| message | string | 16–3.45k |
| lang | string | 1 class |
| license | string | 13 classes |
| repos | string | 5–43.2k |
| prompt | string | 17–4.58k |
| response | string | 1–4.43k |
| prompt_tagged | string | 58–4.62k |
| response_tagged | string | 1–4.43k |
| text | string | 132–7.29k |
| text_tagged | string | 173–7.33k |
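A minimal sketch of loading and inspecting a dataset with this schema via the Hugging Face `datasets` library; the repository ID used below is a placeholder assumption, not taken from this card. The records that follow the example are sample rows.

```python
# Minimal sketch: load a dataset with the schema above.
# Assumption: "example-org/commit-message-corpus" is a placeholder repo ID.
from datasets import load_dataset

ds = load_dataset("example-org/commit-message-corpus", split="train")

print(ds.column_names)       # e.g. ['commit', 'old_file', ..., 'text_tagged']
row = ds[0]
print(row["subject"])        # short commit subject
print(row["message"])        # full commit message
print(row["new_contents"])   # file contents after the commit
```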
ce54b2e002a46f3abecccbd7cdd591c1ec4c7074
|
beetsplug/embedcoverart.py
|
beetsplug/embedcoverart.py
|
from beets.plugins import BeetsPlugin
from beets import mediafile
import os, logging
from email.mime.image import MIMEImage
log = logging.getLogger('beets')
log.addHandler(logging.StreamHandler())
class EmbedAlbumartPlugin(BeetsPlugin):
'''Allows albumart to be embedded into the actual files'''
def __init__(self):
self.register_listener('loaded', self.loaded)
self.register_listener('album_imported', self.album_imported)
def configure(self, config):
pass
def loaded(self):
pass
def album_imported(self, album):
albumart = album.artpath
ALLOWED_MIMES = ('jpeg','png')
if albumart:
albumart_raw = open(albumart, 'rb').read()
img = MIMEImage(albumart_raw)
mime_img = img.get_content_subtype()
if mime_img in ALLOWED_MIMES:
mime_type = 'image/%s' % mime_img
for item in album.items():
f = mediafile.MediaFile(item)
if "mp3" in item.type:
f.albumart_mime = mime_type
f.albumart_data = albumart_raw
f.save()
|
Add initial version of the embed coverart plugin.
|
Add initial version of the embed coverart plugin.
This plugin allows users to embed the cover into the audio file.
Probably still has a few bugs, but it should work in most cases right
now.
|
Python
|
mit
|
google-code-export/beets,google-code-export/beets,google-code-export/beets
|
Add initial version of the embed coverart plugin.
This plugin allows users to embed the cover into the audio file.
Probably still has a few bugs, but it should work in most cases right
now.
|
from beets.plugins import BeetsPlugin
from beets import mediafile
import os, logging
from email.mime.image import MIMEImage
log = logging.getLogger('beets')
log.addHandler(logging.StreamHandler())
class EmbedAlbumartPlugin(BeetsPlugin):
'''Allows albumart to be embedded into the actual files'''
def __init__(self):
self.register_listener('loaded', self.loaded)
self.register_listener('album_imported', self.album_imported)
def configure(self, config):
pass
def loaded(self):
pass
def album_imported(self, album):
albumart = album.artpath
ALLOWED_MIMES = ('jpeg','png')
if albumart:
albumart_raw = open(albumart, 'rb').read()
img = MIMEImage(albumart_raw)
mime_img = img.get_content_subtype()
if mime_img in ALLOWED_MIMES:
mime_type = 'image/%s' % mime_img
for item in album.items():
f = mediafile.MediaFile(item)
if "mp3" in item.type:
f.albumart_mime = mime_type
f.albumart_data = albumart_raw
f.save()
|
<commit_before><commit_msg>Add initial version of the embed coverart plugin.
This plugin allows users to embed the cover into the audio file.
Probably still has a few bugs, but it should work in most cases right
now.<commit_after>
|
from beets.plugins import BeetsPlugin
from beets import mediafile
import os, logging
from email.mime.image import MIMEImage
log = logging.getLogger('beets')
log.addHandler(logging.StreamHandler())
class EmbedAlbumartPlugin(BeetsPlugin):
'''Allows albumart to be embedded into the actual files'''
def __init__(self):
self.register_listener('loaded', self.loaded)
self.register_listener('album_imported', self.album_imported)
def configure(self, config):
pass
def loaded(self):
pass
def album_imported(self, album):
albumart = album.artpath
ALLOWED_MIMES = ('jpeg','png')
if albumart:
albumart_raw = open(albumart, 'rb').read()
img = MIMEImage(albumart_raw)
mime_img = img.get_content_subtype()
if mime_img in ALLOWED_MIMES:
mime_type = 'image/%s' % mime_img
for item in album.items():
f = mediafile.MediaFile(item)
if "mp3" in item.type:
f.albumart_mime = mime_type
f.albumart_data = albumart_raw
f.save()
|
Add initial version of the embed coverart plugin.
This plugin allows users to embed the cover into the audio file.
Probably still has a few bugs, but it should work in most cases right
now.from beets.plugins import BeetsPlugin
from beets import mediafile
import os, logging
from email.mime.image import MIMEImage
log = logging.getLogger('beets')
log.addHandler(logging.StreamHandler())
class EmbedAlbumartPlugin(BeetsPlugin):
'''Allows albumart to be embedded into the actual files'''
def __init__(self):
self.register_listener('loaded', self.loaded)
self.register_listener('album_imported', self.album_imported)
def configure(self, config):
pass
def loaded(self):
pass
def album_imported(self, album):
albumart = album.artpath
ALLOWED_MIMES = ('jpeg','png')
if albumart:
albumart_raw = open(albumart, 'rb').read()
img = MIMEImage(albumart_raw)
mime_img = img.get_content_subtype()
if mime_img in ALLOWED_MIMES:
mime_type = 'image/%s' % mime_img
for item in album.items():
f = mediafile.MediaFile(item)
if "mp3" in item.type:
f.albumart_mime = mime_type
f.albumart_data = albumart_raw
f.save()
|
<commit_before><commit_msg>Add initial version of the embed coverart plugin.
This plugin allows users to embed the cover into the audio file.
Probably still has a few bugs, but it should work in most cases right
now.<commit_after>from beets.plugins import BeetsPlugin
from beets import mediafile
import os, logging
from email.mime.image import MIMEImage
log = logging.getLogger('beets')
log.addHandler(logging.StreamHandler())
class EmbedAlbumartPlugin(BeetsPlugin):
'''Allows albumart to be embedded into the actual files'''
def __init__(self):
self.register_listener('loaded', self.loaded)
self.register_listener('album_imported', self.album_imported)
def configure(self, config):
pass
def loaded(self):
pass
def album_imported(self, album):
albumart = album.artpath
ALLOWED_MIMES = ('jpeg','png')
if albumart:
albumart_raw = open(albumart, 'rb').read()
img = MIMEImage(albumart_raw)
mime_img = img.get_content_subtype()
if mime_img in ALLOWED_MIMES:
mime_type = 'image/%s' % mime_img
for item in album.items():
f = mediafile.MediaFile(item)
if "mp3" in item.type:
f.albumart_mime = mime_type
f.albumart_data = albumart_raw
f.save()
|
|
c62dfb75477f5a641a2a9fd2de16d96cc2638e5f
|
migrations/versions/ebcc92fc4d27_remove_tag_table.py
|
migrations/versions/ebcc92fc4d27_remove_tag_table.py
|
"""Remove tag table
Revision ID: ebcc92fc4d27
Revises: 444c69da7c45
Create Date: 2017-05-08 01:01:48.865909
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ebcc92fc4d27'
down_revision = '444c69da7c45'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tag')
op.drop_table('tag_association')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('tag_association',
sa.Column('bucketlist_item_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('tag_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['bucketlist_item_id'], ['bucketlist_item.id'], name='tag_association_bucketlist_item_id_fkey'),
sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], name='tag_association_tag_id_fkey')
)
op.create_table('tag',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('name', sa.VARCHAR(length=20), autoincrement=False, nullable=False),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name='tag_user_id_fkey'),
sa.PrimaryKeyConstraint('id', name='tag_pkey')
)
# ### end Alembic commands ###
|
Remove tags field from user Model
|
[Chore] Migrate: Remove tags field from user Model
|
Python
|
mit
|
andela-wcyn/bucketlist
|
[Chore] Migrate: Remove tags field from user Model
|
"""Remove tag table
Revision ID: ebcc92fc4d27
Revises: 444c69da7c45
Create Date: 2017-05-08 01:01:48.865909
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ebcc92fc4d27'
down_revision = '444c69da7c45'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tag')
op.drop_table('tag_association')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('tag_association',
sa.Column('bucketlist_item_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('tag_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['bucketlist_item_id'], ['bucketlist_item.id'], name='tag_association_bucketlist_item_id_fkey'),
sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], name='tag_association_tag_id_fkey')
)
op.create_table('tag',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('name', sa.VARCHAR(length=20), autoincrement=False, nullable=False),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name='tag_user_id_fkey'),
sa.PrimaryKeyConstraint('id', name='tag_pkey')
)
# ### end Alembic commands ###
|
<commit_before><commit_msg>[Chore] Migrate: Remove tags field from user Model<commit_after>
|
"""Remove tag table
Revision ID: ebcc92fc4d27
Revises: 444c69da7c45
Create Date: 2017-05-08 01:01:48.865909
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ebcc92fc4d27'
down_revision = '444c69da7c45'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tag')
op.drop_table('tag_association')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('tag_association',
sa.Column('bucketlist_item_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('tag_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['bucketlist_item_id'], ['bucketlist_item.id'], name='tag_association_bucketlist_item_id_fkey'),
sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], name='tag_association_tag_id_fkey')
)
op.create_table('tag',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('name', sa.VARCHAR(length=20), autoincrement=False, nullable=False),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name='tag_user_id_fkey'),
sa.PrimaryKeyConstraint('id', name='tag_pkey')
)
# ### end Alembic commands ###
|
[Chore] Migrate: Remove tags field from user Model"""Remove tag table
Revision ID: ebcc92fc4d27
Revises: 444c69da7c45
Create Date: 2017-05-08 01:01:48.865909
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ebcc92fc4d27'
down_revision = '444c69da7c45'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tag')
op.drop_table('tag_association')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('tag_association',
sa.Column('bucketlist_item_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('tag_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['bucketlist_item_id'], ['bucketlist_item.id'], name='tag_association_bucketlist_item_id_fkey'),
sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], name='tag_association_tag_id_fkey')
)
op.create_table('tag',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('name', sa.VARCHAR(length=20), autoincrement=False, nullable=False),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name='tag_user_id_fkey'),
sa.PrimaryKeyConstraint('id', name='tag_pkey')
)
# ### end Alembic commands ###
|
<commit_before><commit_msg>[Chore] Migrate: Remove tags field from user Model<commit_after>"""Remove tag table
Revision ID: ebcc92fc4d27
Revises: 444c69da7c45
Create Date: 2017-05-08 01:01:48.865909
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ebcc92fc4d27'
down_revision = '444c69da7c45'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tag')
op.drop_table('tag_association')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('tag_association',
sa.Column('bucketlist_item_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('tag_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['bucketlist_item_id'], ['bucketlist_item.id'], name='tag_association_bucketlist_item_id_fkey'),
sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], name='tag_association_tag_id_fkey')
)
op.create_table('tag',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('name', sa.VARCHAR(length=20), autoincrement=False, nullable=False),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name='tag_user_id_fkey'),
sa.PrimaryKeyConstraint('id', name='tag_pkey')
)
# ### end Alembic commands ###
|
|
299882e55ea82d8c442b51b243caf63707befe4c
|
xoinvader/tests/test_utils.py
|
xoinvader/tests/test_utils.py
|
import sys
import unittest
import pprint
from xoinvader.utils import create_logger
from xoinvader.utils import InfiniteList
from xoinvader.utils import Point
class TestUtils(unittest.TestCase):
def test_create_logger(self):
logger = create_logger("test", "test.log")
self.assertTrue(logger)
def test_point_operations(self):
ax, ay, bx, by = 10, 10, 20, 20
a = Point(ax, ay)
b = Point(bx, by)
self.assertEqual(a.x, ax)
self.assertEqual(a.y, ay)
self.assertEqual(b.x, bx)
self.assertEqual(b.y, by)
self.assertEqual(a.__repr__(), "Point(x={}, y={})".format(a.x, a.y))
self.assertEqual(a + b, Point(ax + bx, ay + by))
a.x = bx
a.y = by
self.assertEqual(a.x, bx)
self.assertEqual(a.y, by)
b.x = -bx
b.y = -by
self.assertEqual(a + b, Point(0, 0))
self.assertEqual(a + Point(-50, -50), Point(-30, -30))
def test_infinite_list_operations(self):
# Test empty InfiniteList behaviour
inf_list = InfiniteList()
for func in [inf_list.current, inf_list.next, inf_list.prev]:
self.assertRaises(IndexError, func)
# Test one element behaviour
data = "test1"
inf_list = InfiniteList([data])
self.assertEqual(len(inf_list), 1)
self.assertEqual(inf_list[0], data)
self.assertEqual(inf_list.current(), data)
self.assertEqual(inf_list.next(), data)
self.assertEqual(inf_list.prev(), data)
# Test many elements behaviour
|
Add tests for utils module.
|
Add tests for utils module.
|
Python
|
mit
|
pkulev/xoinvader,pankshok/xoinvader
|
Add tests for utils module.
|
import sys
import unittest
import pprint
from xoinvader.utils import create_logger
from xoinvader.utils import InfiniteList
from xoinvader.utils import Point
class TestUtils(unittest.TestCase):
def test_create_logger(self):
logger = create_logger("test", "test.log")
self.assertTrue(logger)
def test_point_operations(self):
ax, ay, bx, by = 10, 10, 20, 20
a = Point(ax, ay)
b = Point(bx, by)
self.assertEqual(a.x, ax)
self.assertEqual(a.y, ay)
self.assertEqual(b.x, bx)
self.assertEqual(b.y, by)
self.assertEqual(a.__repr__(), "Point(x={}, y={})".format(a.x, a.y))
self.assertEqual(a + b, Point(ax + bx, ay + by))
a.x = bx
a.y = by
self.assertEqual(a.x, bx)
self.assertEqual(a.y, by)
b.x = -bx
b.y = -by
self.assertEqual(a + b, Point(0, 0))
self.assertEqual(a + Point(-50, -50), Point(-30, -30))
def test_infinite_list_operations(self):
# Test empty InfiniteList behaviour
inf_list = InfiniteList()
for func in [inf_list.current, inf_list.next, inf_list.prev]:
self.assertRaises(IndexError, func)
# Test one element behaviour
data = "test1"
inf_list = InfiniteList([data])
self.assertEqual(len(inf_list), 1)
self.assertEqual(inf_list[0], data)
self.assertEqual(inf_list.current(), data)
self.assertEqual(inf_list.next(), data)
self.assertEqual(inf_list.prev(), data)
# Test many elements behaviour
|
<commit_before><commit_msg>Add tests for utils module.<commit_after>
|
import sys
import unittest
import pprint
from xoinvader.utils import create_logger
from xoinvader.utils import InfiniteList
from xoinvader.utils import Point
class TestUtils(unittest.TestCase):
def test_create_logger(self):
logger = create_logger("test", "test.log")
self.assertTrue(logger)
def test_point_operations(self):
ax, ay, bx, by = 10, 10, 20, 20
a = Point(ax, ay)
b = Point(bx, by)
self.assertEqual(a.x, ax)
self.assertEqual(a.y, ay)
self.assertEqual(b.x, bx)
self.assertEqual(b.y, by)
self.assertEqual(a.__repr__(), "Point(x={}, y={})".format(a.x, a.y))
self.assertEqual(a + b, Point(ax + bx, ay + by))
a.x = bx
a.y = by
self.assertEqual(a.x, bx)
self.assertEqual(a.y, by)
b.x = -bx
b.y = -by
self.assertEqual(a + b, Point(0, 0))
self.assertEqual(a + Point(-50, -50), Point(-30, -30))
def test_infinite_list_operations(self):
# Test empty InfiniteList behaviour
inf_list = InfiniteList()
for func in [inf_list.current, inf_list.next, inf_list.prev]:
self.assertRaises(IndexError, func)
# Test one element behaviour
data = "test1"
inf_list = InfiniteList([data])
self.assertEqual(len(inf_list), 1)
self.assertEqual(inf_list[0], data)
self.assertEqual(inf_list.current(), data)
self.assertEqual(inf_list.next(), data)
self.assertEqual(inf_list.prev(), data)
# Test many elements behaviour
|
Add tests for utils module.import sys
import unittest
import pprint
from xoinvader.utils import create_logger
from xoinvader.utils import InfiniteList
from xoinvader.utils import Point
class TestUtils(unittest.TestCase):
def test_create_logger(self):
logger = create_logger("test", "test.log")
self.assertTrue(logger)
def test_point_operations(self):
ax, ay, bx, by = 10, 10, 20, 20
a = Point(ax, ay)
b = Point(bx, by)
self.assertEqual(a.x, ax)
self.assertEqual(a.y, ay)
self.assertEqual(b.x, bx)
self.assertEqual(b.y, by)
self.assertEqual(a.__repr__(), "Point(x={}, y={})".format(a.x, a.y))
self.assertEqual(a + b, Point(ax + bx, ay + by))
a.x = bx
a.y = by
self.assertEqual(a.x, bx)
self.assertEqual(a.y, by)
b.x = -bx
b.y = -by
self.assertEqual(a + b, Point(0, 0))
self.assertEqual(a + Point(-50, -50), Point(-30, -30))
def test_infinite_list_operations(self):
# Test empty InfiniteList behaviour
inf_list = InfiniteList()
for func in [inf_list.current, inf_list.next, inf_list.prev]:
self.assertRaises(IndexError, func)
# Test one element behaviour
data = "test1"
inf_list = InfiniteList([data])
self.assertEqual(len(inf_list), 1)
self.assertEqual(inf_list[0], data)
self.assertEqual(inf_list.current(), data)
self.assertEqual(inf_list.next(), data)
self.assertEqual(inf_list.prev(), data)
# Test many elements behaviour
|
<commit_before><commit_msg>Add tests for utils module.<commit_after>import sys
import unittest
import pprint
from xoinvader.utils import create_logger
from xoinvader.utils import InfiniteList
from xoinvader.utils import Point
class TestUtils(unittest.TestCase):
def test_create_logger(self):
logger = create_logger("test", "test.log")
self.assertTrue(logger)
def test_point_operations(self):
ax, ay, bx, by = 10, 10, 20, 20
a = Point(ax, ay)
b = Point(bx, by)
self.assertEqual(a.x, ax)
self.assertEqual(a.y, ay)
self.assertEqual(b.x, bx)
self.assertEqual(b.y, by)
self.assertEqual(a.__repr__(), "Point(x={}, y={})".format(a.x, a.y))
self.assertEqual(a + b, Point(ax + bx, ay + by))
a.x = bx
a.y = by
self.assertEqual(a.x, bx)
self.assertEqual(a.y, by)
b.x = -bx
b.y = -by
self.assertEqual(a + b, Point(0, 0))
self.assertEqual(a + Point(-50, -50), Point(-30, -30))
def test_infinite_list_operations(self):
# Test empty InfiniteList behaviour
inf_list = InfiniteList()
for func in [inf_list.current, inf_list.next, inf_list.prev]:
self.assertRaises(IndexError, func)
# Test one element behaviour
data = "test1"
inf_list = InfiniteList([data])
self.assertEqual(len(inf_list), 1)
self.assertEqual(inf_list[0], data)
self.assertEqual(inf_list.current(), data)
self.assertEqual(inf_list.next(), data)
self.assertEqual(inf_list.prev(), data)
# Test many elements behaviour
|
|
9ac9f94f3b1409ae3a47c8a9e890f578a69b020f
|
test/requests/test_forgot_password.py
|
test/requests/test_forgot_password.py
|
import requests
from wqflask import user_manager
from utility.elasticsearch_tools import get_user_by_unique_column
from parameterized import parameterized
from parametrized_test import ParametrizedTest
password_reset_link = ''
forgot_password_page = None
class TestForgotPassword(ParametrizedTest):
def setUp(self):
super(TestForgotPassword, self).setUp()
self.forgot_password_url = self.gn2_url+"/n/forgot_password_submit"
def send_email(to_addr, msg, fromaddr="no-reply@genenetwork.org"):
print("CALLING: send_email_mock()")
email_data = {
"to_addr": to_addr
, "msg": msg
, "fromaddr": from_addr}
data = {
"es_connection": self.es,
"email_address": "test@user.com",
"full_name": "Test User",
"organization": "Test Organisation",
"password": "test_password",
"password_confirm": "test_password"
}
user_manager.basic_info = lambda : { "basic_info": "basic" }
user_manager.RegisterUser(data)
def testWithoutEmail(self):
data = {"email_address": ""}
error_notification = '<div class="alert alert-danger">You MUST provide an email</div>'
result = requests.post(self.forgot_password_url, data=data)
self.assertEqual(result.url, self.gn2_url+"/n/forgot_password")
self.assertTrue(
result.content.find(error_notification) >= 0
, "Error message should be displayed but was not")
def testWithNonExistingEmail(self):
# Monkey patching doesn't work, so simply test that getting by email
# returns the correct data
user = get_user_by_unique_column(self.es, "email_address", "non-existent@domain.com")
self.assertTrue(user is None, "Should not find non-existent user")
def testWithExistingEmail(self):
# Monkey patching doesn't work, so simply test that getting by email
# returns the correct data
user = get_user_by_unique_column(self.es, "email_address", "test@user.com")
self.assertTrue(user is not None, "Should find user")
|
Add tests for Forgot Password feature
|
Add tests for Forgot Password feature
* Add tests to ensure the "Forgot Password" feature works appropriately.
|
Python
|
agpl-3.0
|
zsloan/genenetwork2,pjotrp/genenetwork2,DannyArends/genenetwork2,DannyArends/genenetwork2,DannyArends/genenetwork2,zsloan/genenetwork2,DannyArends/genenetwork2,pjotrp/genenetwork2,pjotrp/genenetwork2,DannyArends/genenetwork2,genenetwork/genenetwork2,pjotrp/genenetwork2,zsloan/genenetwork2,zsloan/genenetwork2,genenetwork/genenetwork2,pjotrp/genenetwork2,DannyArends/genenetwork2,genenetwork/genenetwork2,genenetwork/genenetwork2
|
Add tests for Forgot Password feature
* Add tests to ensure the "Forgot Password" feature works appropriately.
|
import requests
from wqflask import user_manager
from utility.elasticsearch_tools import get_user_by_unique_column
from parameterized import parameterized
from parametrized_test import ParametrizedTest
password_reset_link = ''
forgot_password_page = None
class TestForgotPassword(ParametrizedTest):
def setUp(self):
super(TestForgotPassword, self).setUp()
self.forgot_password_url = self.gn2_url+"/n/forgot_password_submit"
def send_email(to_addr, msg, fromaddr="no-reply@genenetwork.org"):
print("CALLING: send_email_mock()")
email_data = {
"to_addr": to_addr
, "msg": msg
, "fromaddr": from_addr}
data = {
"es_connection": self.es,
"email_address": "test@user.com",
"full_name": "Test User",
"organization": "Test Organisation",
"password": "test_password",
"password_confirm": "test_password"
}
user_manager.basic_info = lambda : { "basic_info": "basic" }
user_manager.RegisterUser(data)
def testWithoutEmail(self):
data = {"email_address": ""}
error_notification = '<div class="alert alert-danger">You MUST provide an email</div>'
result = requests.post(self.forgot_password_url, data=data)
self.assertEqual(result.url, self.gn2_url+"/n/forgot_password")
self.assertTrue(
result.content.find(error_notification) >= 0
, "Error message should be displayed but was not")
def testWithNonExistingEmail(self):
# Monkey patching doesn't work, so simply test that getting by email
# returns the correct data
user = get_user_by_unique_column(self.es, "email_address", "non-existent@domain.com")
self.assertTrue(user is None, "Should not find non-existent user")
def testWithExistingEmail(self):
# Monkey patching doesn't work, so simply test that getting by email
# returns the correct data
user = get_user_by_unique_column(self.es, "email_address", "test@user.com")
self.assertTrue(user is not None, "Should find user")
|
<commit_before><commit_msg>Add tests for Forgot Password feature
* Add tests to ensure the "Forgot Password" feature works appropriately.<commit_after>
|
import requests
from wqflask import user_manager
from utility.elasticsearch_tools import get_user_by_unique_column
from parameterized import parameterized
from parametrized_test import ParametrizedTest
password_reset_link = ''
forgot_password_page = None
class TestForgotPassword(ParametrizedTest):
def setUp(self):
super(TestForgotPassword, self).setUp()
self.forgot_password_url = self.gn2_url+"/n/forgot_password_submit"
def send_email(to_addr, msg, fromaddr="no-reply@genenetwork.org"):
print("CALLING: send_email_mock()")
email_data = {
"to_addr": to_addr
, "msg": msg
, "fromaddr": from_addr}
data = {
"es_connection": self.es,
"email_address": "test@user.com",
"full_name": "Test User",
"organization": "Test Organisation",
"password": "test_password",
"password_confirm": "test_password"
}
user_manager.basic_info = lambda : { "basic_info": "basic" }
user_manager.RegisterUser(data)
def testWithoutEmail(self):
data = {"email_address": ""}
error_notification = '<div class="alert alert-danger">You MUST provide an email</div>'
result = requests.post(self.forgot_password_url, data=data)
self.assertEqual(result.url, self.gn2_url+"/n/forgot_password")
self.assertTrue(
result.content.find(error_notification) >= 0
, "Error message should be displayed but was not")
def testWithNonExistingEmail(self):
# Monkey patching doesn't work, so simply test that getting by email
# returns the correct data
user = get_user_by_unique_column(self.es, "email_address", "non-existent@domain.com")
self.assertTrue(user is None, "Should not find non-existent user")
def testWithExistingEmail(self):
# Monkey patching doesn't work, so simply test that getting by email
# returns the correct data
user = get_user_by_unique_column(self.es, "email_address", "test@user.com")
self.assertTrue(user is not None, "Should find user")
|
Add tests for Forgot Password feature
* Add tests to ensure the "Forgot Password" feature works appropriately.import requests
from wqflask import user_manager
from utility.elasticsearch_tools import get_user_by_unique_column
from parameterized import parameterized
from parametrized_test import ParametrizedTest
password_reset_link = ''
forgot_password_page = None
class TestForgotPassword(ParametrizedTest):
def setUp(self):
super(TestForgotPassword, self).setUp()
self.forgot_password_url = self.gn2_url+"/n/forgot_password_submit"
def send_email(to_addr, msg, fromaddr="no-reply@genenetwork.org"):
print("CALLING: send_email_mock()")
email_data = {
"to_addr": to_addr
, "msg": msg
, "fromaddr": from_addr}
data = {
"es_connection": self.es,
"email_address": "test@user.com",
"full_name": "Test User",
"organization": "Test Organisation",
"password": "test_password",
"password_confirm": "test_password"
}
user_manager.basic_info = lambda : { "basic_info": "basic" }
user_manager.RegisterUser(data)
def testWithoutEmail(self):
data = {"email_address": ""}
error_notification = '<div class="alert alert-danger">You MUST provide an email</div>'
result = requests.post(self.forgot_password_url, data=data)
self.assertEqual(result.url, self.gn2_url+"/n/forgot_password")
self.assertTrue(
result.content.find(error_notification) >= 0
, "Error message should be displayed but was not")
def testWithNonExistingEmail(self):
# Monkey patching doesn't work, so simply test that getting by email
# returns the correct data
user = get_user_by_unique_column(self.es, "email_address", "non-existent@domain.com")
self.assertTrue(user is None, "Should not find non-existent user")
def testWithExistingEmail(self):
# Monkey patching doesn't work, so simply test that getting by email
# returns the correct data
user = get_user_by_unique_column(self.es, "email_address", "test@user.com")
self.assertTrue(user is not None, "Should find user")
|
<commit_before><commit_msg>Add tests for Forgot Password feature
* Add tests to ensure the "Forgot Password" feature works appropriately.<commit_after>import requests
from wqflask import user_manager
from utility.elasticsearch_tools import get_user_by_unique_column
from parameterized import parameterized
from parametrized_test import ParametrizedTest
password_reset_link = ''
forgot_password_page = None
class TestForgotPassword(ParametrizedTest):
def setUp(self):
super(TestForgotPassword, self).setUp()
self.forgot_password_url = self.gn2_url+"/n/forgot_password_submit"
def send_email(to_addr, msg, fromaddr="no-reply@genenetwork.org"):
print("CALLING: send_email_mock()")
email_data = {
"to_addr": to_addr
, "msg": msg
, "fromaddr": from_addr}
data = {
"es_connection": self.es,
"email_address": "test@user.com",
"full_name": "Test User",
"organization": "Test Organisation",
"password": "test_password",
"password_confirm": "test_password"
}
user_manager.basic_info = lambda : { "basic_info": "basic" }
user_manager.RegisterUser(data)
def testWithoutEmail(self):
data = {"email_address": ""}
error_notification = '<div class="alert alert-danger">You MUST provide an email</div>'
result = requests.post(self.forgot_password_url, data=data)
self.assertEqual(result.url, self.gn2_url+"/n/forgot_password")
self.assertTrue(
result.content.find(error_notification) >= 0
, "Error message should be displayed but was not")
def testWithNonExistingEmail(self):
# Monkey patching doesn't work, so simply test that getting by email
# returns the correct data
user = get_user_by_unique_column(self.es, "email_address", "non-existent@domain.com")
self.assertTrue(user is None, "Should not find non-existent user")
def testWithExistingEmail(self):
# Monkey patching doesn't work, so simply test that getting by email
# returns the correct data
user = get_user_by_unique_column(self.es, "email_address", "test@user.com")
self.assertTrue(user is not None, "Should find user")
|
|
42b50470458d91cd0882f927ed83ba267ab78764
|
zinnia_wymeditor/__init__.py
|
zinnia_wymeditor/__init__.py
|
"""WYMeditor for Django-blog-zinnia"""
__version__ = '1.0'
__license__ = 'BSD License'
__author__ = 'Fantomas42'
__email__ = 'fantomas42@gmail.com'
__url__ = 'https://github.com/django-blog-zinnia/zinnia-wysiwyg-wymeditor'
|
Create zinnia_wymeditor module with its metadata
|
Create zinnia_wymeditor module with its metadata
|
Python
|
bsd-3-clause
|
layar/zinnia-wysiwyg-wymeditor,django-blog-zinnia/zinnia-wysiwyg-wymeditor,django-blog-zinnia/zinnia-wysiwyg-wymeditor,layar/zinnia-wysiwyg-wymeditor,django-blog-zinnia/zinnia-wysiwyg-wymeditor,layar/zinnia-wysiwyg-wymeditor,django-blog-zinnia/zinnia-wysiwyg-wymeditor,layar/zinnia-wysiwyg-wymeditor
|
Create zinnia_wymeditor module with its metadata
|
"""WYMeditor for Django-blog-zinnia"""
__version__ = '1.0'
__license__ = 'BSD License'
__author__ = 'Fantomas42'
__email__ = 'fantomas42@gmail.com'
__url__ = 'https://github.com/django-blog-zinnia/zinnia-wysiwyg-wymeditor'
|
<commit_before><commit_msg>Create zinnia_wymeditor module with its metadata<commit_after>
|
"""WYMeditor for Django-blog-zinnia"""
__version__ = '1.0'
__license__ = 'BSD License'
__author__ = 'Fantomas42'
__email__ = 'fantomas42@gmail.com'
__url__ = 'https://github.com/django-blog-zinnia/zinnia-wysiwyg-wymeditor'
|
Create zinnia_wymeditor module with its metadata"""WYMeditor for Django-blog-zinnia"""
__version__ = '1.0'
__license__ = 'BSD License'
__author__ = 'Fantomas42'
__email__ = 'fantomas42@gmail.com'
__url__ = 'https://github.com/django-blog-zinnia/zinnia-wysiwyg-wymeditor'
|
<commit_before><commit_msg>Create zinnia_wymeditor module with its metadata<commit_after>"""WYMeditor for Django-blog-zinnia"""
__version__ = '1.0'
__license__ = 'BSD License'
__author__ = 'Fantomas42'
__email__ = 'fantomas42@gmail.com'
__url__ = 'https://github.com/django-blog-zinnia/zinnia-wysiwyg-wymeditor'
|
|
435cc2548b38d92f8ffdc4bee8845f5a58d655ce
|
ocrsite/ocrlab/management/commands/runfilescript.py
|
ocrsite/ocrlab/management/commands/runfilescript.py
|
"""
Run a script that takes a file input/output.
"""
import os
import sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.utils import simplejson as json
from ocrlab import models, stages, nodes
from nodetree import script, registry
class Command(BaseCommand):
args = "<scriptfile1> <infile> <outfile>"
help = "Run a script on a given file, saving the output."
option_list = BaseCommand.option_list + (
# TODO: Add options...
)
def handle(self, *args, **options):
if len(args) != 3:
raise CommandError("Usage: %s" % self.help)
scriptfile, infile, outfile = args
try:
with open(scriptfile, "r") as f:
nodes = json.load(f)
except Exception:
raise CommandError("Invalid script file: %s" % scriptfile)
if nodes is None:
raise CommandError("No nodes found in script: %s" % scriptfile)
s = script.Script(nodes)
input = s.get_nodes_by_attr("stage", stages.INPUT)[0]
input.set_param("path", infile)
term = s.get_terminals()[0]
sys.stderr.write("Rendering to %s\n" % outfile)
os.environ["NODETREE_WRITE_FILEOUT"] = "1"
out = s.add_node("util.FileOut", "Output",
params=[("path", os.path.abspath(outfile))])
out.set_input(0, term)
out.eval()
|
Add a management command that runs a given script taking an input file and an output file as args.
|
Add a management command that runs a given script taking an input file and an output file as args.
|
Python
|
mit
|
mikesname/python-ocrlab
|
Add a management command that runs a given script taking an input file and an output file as args.
|
"""
Run a script that takes a file input/output.
"""
import os
import sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.utils import simplejson as json
from ocrlab import models, stages, nodes
from nodetree import script, registry
class Command(BaseCommand):
args = "<scriptfile1> <infile> <outfile>"
help = "Run a script on a given file, saving the output."
option_list = BaseCommand.option_list + (
# TODO: Add options...
)
def handle(self, *args, **options):
if len(args) != 3:
raise CommandError("Usage: %s" % self.help)
scriptfile, infile, outfile = args
try:
with open(scriptfile, "r") as f:
nodes = json.load(f)
except Exception:
raise CommandError("Invalid script file: %s" % scriptfile)
if nodes is None:
raise CommandError("No nodes found in script: %s" % scriptfile)
s = script.Script(nodes)
input = s.get_nodes_by_attr("stage", stages.INPUT)[0]
input.set_param("path", infile)
term = s.get_terminals()[0]
sys.stderr.write("Rendering to %s\n" % outfile)
os.environ["NODETREE_WRITE_FILEOUT"] = "1"
out = s.add_node("util.FileOut", "Output",
params=[("path", os.path.abspath(outfile))])
out.set_input(0, term)
out.eval()
|
<commit_before><commit_msg>Add a management command that runs a given script taking an input file and an output file as args.<commit_after>
|
"""
Run a script that takes a file input/output.
"""
import os
import sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.utils import simplejson as json
from ocrlab import models, stages, nodes
from nodetree import script, registry
class Command(BaseCommand):
args = "<scriptfile1> <infile> <outfile>"
help = "Run a script on a given file, saving the output."
option_list = BaseCommand.option_list + (
# TODO: Add options...
)
def handle(self, *args, **options):
if len(args) != 3:
raise CommandError("Usage: %s" % self.help)
scriptfile, infile, outfile = args
try:
with open(scriptfile, "r") as f:
nodes = json.load(f)
except Exception:
raise CommandError("Invalid script file: %s" % scriptfile)
if nodes is None:
raise CommandError("No nodes found in script: %s" % scriptfile)
s = script.Script(nodes)
input = s.get_nodes_by_attr("stage", stages.INPUT)[0]
input.set_param("path", infile)
term = s.get_terminals()[0]
sys.stderr.write("Rendering to %s\n" % outfile)
os.environ["NODETREE_WRITE_FILEOUT"] = "1"
out = s.add_node("util.FileOut", "Output",
params=[("path", os.path.abspath(outfile))])
out.set_input(0, term)
out.eval()
|
Add a management command that runs a given script taking an input file and an output file as args."""
Run a script that takes a file input/output.
"""
import os
import sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.utils import simplejson as json
from ocrlab import models, stages, nodes
from nodetree import script, registry
class Command(BaseCommand):
args = "<scriptfile1> <infile> <outfile>"
help = "Run a script on a given file, saving the output."
option_list = BaseCommand.option_list + (
# TODO: Add options...
)
def handle(self, *args, **options):
if len(args) != 3:
raise CommandError("Usage: %s" % self.help)
scriptfile, infile, outfile = args
try:
with open(scriptfile, "r") as f:
nodes = json.load(f)
except Exception:
raise CommandError("Invalid script file: %s" % scriptfile)
if nodes is None:
raise CommandError("No nodes found in script: %s" % scriptfile)
s = script.Script(nodes)
input = s.get_nodes_by_attr("stage", stages.INPUT)[0]
input.set_param("path", infile)
term = s.get_terminals()[0]
sys.stderr.write("Rendering to %s\n" % outfile)
os.environ["NODETREE_WRITE_FILEOUT"] = "1"
out = s.add_node("util.FileOut", "Output",
params=[("path", os.path.abspath(outfile))])
out.set_input(0, term)
out.eval()
|
<commit_before><commit_msg>Add a management command that runs a given script taking an input file and an output file as args.<commit_after>"""
Run a script that takes a file input/output.
"""
import os
import sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.utils import simplejson as json
from ocrlab import models, stages, nodes
from nodetree import script, registry
class Command(BaseCommand):
args = "<scriptfile1> <infile> <outfile>"
help = "Run a script on a given file, saving the output."
option_list = BaseCommand.option_list + (
# TODO: Add options...
)
def handle(self, *args, **options):
if len(args) != 3:
raise CommandError("Usage: %s" % self.help)
scriptfile, infile, outfile = args
try:
with open(scriptfile, "r") as f:
nodes = json.load(f)
except Exception:
raise CommandError("Invalid script file: %s" % scriptfile)
if nodes is None:
raise CommandError("No nodes found in script: %s" % scriptfile)
s = script.Script(nodes)
input = s.get_nodes_by_attr("stage", stages.INPUT)[0]
input.set_param("path", infile)
term = s.get_terminals()[0]
sys.stderr.write("Rendering to %s\n" % outfile)
os.environ["NODETREE_WRITE_FILEOUT"] = "1"
out = s.add_node("util.FileOut", "Output",
params=[("path", os.path.abspath(outfile))])
out.set_input(0, term)
out.eval()
|
|
862753353a09400d0d99960ef2cd6d70fb9b4a7e
|
tests/top_destinations_tests.py
|
tests/top_destinations_tests.py
|
import unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicTopDestinations(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_fn_request(self):
res = self.sds.top_destinations('YYZ', theme='beach',
destination_type='INTERNATIONAL',
region='North America')
self.assertIsNotNone(res)
def test_basic_request(self):
options = {
'origin': 'YYZ',
'destinationtype': 'DOMESTIC',
'lookbackweeks': 2,
'topdestinations': 20
}
res = self.sds.top_destinations_opts(options)
self.assertIsNotNone(res)
def test_no_authorization(self):
sds = sabre_dev_studio.SabreDevStudio()
with self.assertRaises(sabre_exceptions.NotAuthorizedError):
resp = sds.instaflights({})
if __name__ == '__main__':
unittest.main()
|
Add tests for top destinations
|
Add tests for top destinations
|
Python
|
mit
|
Jamil/sabre_dev_studio
|
Add tests for top destinations
|
import unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicTopDestinations(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_fn_request(self):
res = self.sds.top_destinations('YYZ', theme='beach',
destination_type='INTERNATIONAL',
region='North America')
self.assertIsNotNone(res)
def test_basic_request(self):
options = {
'origin': 'YYZ',
'destinationtype': 'DOMESTIC',
'lookbackweeks': 2,
'topdestinations': 20
}
res = self.sds.top_destinations_opts(options)
self.assertIsNotNone(res)
def test_no_authorization(self):
sds = sabre_dev_studio.SabreDevStudio()
with self.assertRaises(sabre_exceptions.NotAuthorizedError):
resp = sds.instaflights({})
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for top destinations<commit_after>
|
import unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicTopDestinations(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_fn_request(self):
res = self.sds.top_destinations('YYZ', theme='beach',
destination_type='INTERNATIONAL',
region='North America')
self.assertIsNotNone(res)
def test_basic_request(self):
options = {
'origin': 'YYZ',
'destinationtype': 'DOMESTIC',
'lookbackweeks': 2,
'topdestinations': 20
}
res = self.sds.top_destinations_opts(options)
self.assertIsNotNone(res)
def test_no_authorization(self):
sds = sabre_dev_studio.SabreDevStudio()
with self.assertRaises(sabre_exceptions.NotAuthorizedError):
resp = sds.instaflights({})
if __name__ == '__main__':
unittest.main()
|
Add tests for top destinationsimport unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicTopDestinations(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_fn_request(self):
res = self.sds.top_destinations('YYZ', theme='beach',
destination_type='INTERNATIONAL',
region='North America')
self.assertIsNotNone(res)
def test_basic_request(self):
options = {
'origin': 'YYZ',
'destinationtype': 'DOMESTIC',
'lookbackweeks': 2,
'topdestinations': 20
}
res = self.sds.top_destinations_opts(options)
self.assertIsNotNone(res)
def test_no_authorization(self):
sds = sabre_dev_studio.SabreDevStudio()
with self.assertRaises(sabre_exceptions.NotAuthorizedError):
resp = sds.instaflights({})
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for top destinations<commit_after>import unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicTopDestinations(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_fn_request(self):
res = self.sds.top_destinations('YYZ', theme='beach',
destination_type='INTERNATIONAL',
region='North America')
self.assertIsNotNone(res)
def test_basic_request(self):
options = {
'origin': 'YYZ',
'destinationtype': 'DOMESTIC',
'lookbackweeks': 2,
'topdestinations': 20
}
res = self.sds.top_destinations_opts(options)
self.assertIsNotNone(res)
def test_no_authorization(self):
sds = sabre_dev_studio.SabreDevStudio()
with self.assertRaises(sabre_exceptions.NotAuthorizedError):
resp = sds.instaflights({})
if __name__ == '__main__':
unittest.main()
|
|
6825c853892c263a3c193490ee1f620b303aa79d
|
translate_labels.py
|
translate_labels.py
|
"""Find known body parts in sentences with predicted label 'Lichaamsdeel'.
Extended body parts are saved to new text files.
Usage: python classify_body_parts.py <json file with body part mapping> <dir
with input texts> <dir for output texts>
"""
import os
import codecs
import argparse
import json
from count_labels import load_data
import pandas as pd
from emotools.heem_utils import heem_labels_en
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the directory where the input text '
'files can be found.')
parser.add_argument('output_dir', help='the directory where the output '
'files should be written.')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
text_files = [t for t in os.listdir(input_dir) if t.endswith('.txt')]
for text_file in text_files:
in_file = os.path.join(input_dir, text_file)
x_data, y_data = load_data(in_file)
labels = [y.split('_') for y in y_data]
#new_labels = []
out_file = os.path.join(output_dir, text_file)
#print out_file
with codecs.open(out_file, 'wb', 'utf-8') as f:
for i in range(len(labels)):
ls = labels[i]
#new_labels.append([heem_labels_en.get(l, 'None') for l in ls])
new_labels = [heem_labels_en.get(l, 'None') for l in ls]
#print ls, new_labels
f.write(u'{}\t{}\n'.format(x_data[i].decode('utf-8'),
'_'.join(new_labels)))
|
Add script to translate labels
|
Add script to translate labels
|
Python
|
apache-2.0
|
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
|
Add script to translate labels
|
"""Find known body parts in sentences with predicted label 'Lichaamsdeel'.
Extended body parts are saved to new text files.
Usage: python classify_body_parts.py <json file with body part mapping> <dir
with input texts> <dir for output texts>
"""
import os
import codecs
import argparse
import json
from count_labels import load_data
import pandas as pd
from emotools.heem_utils import heem_labels_en
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the directory where the input text '
'files can be found.')
parser.add_argument('output_dir', help='the directory where the output '
'files should be written.')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
text_files = [t for t in os.listdir(input_dir) if t.endswith('.txt')]
for text_file in text_files:
in_file = os.path.join(input_dir, text_file)
x_data, y_data = load_data(in_file)
labels = [y.split('_') for y in y_data]
#new_labels = []
out_file = os.path.join(output_dir, text_file)
#print out_file
with codecs.open(out_file, 'wb', 'utf-8') as f:
for i in range(len(labels)):
ls = labels[i]
#new_labels.append([heem_labels_en.get(l, 'None') for l in ls])
new_labels = [heem_labels_en.get(l, 'None') for l in ls]
#print ls, new_labels
f.write(u'{}\t{}\n'.format(x_data[i].decode('utf-8'),
'_'.join(new_labels)))
|
<commit_before><commit_msg>Add script to translate labels<commit_after>
|
"""Find known body parts in sentences with predicted label 'Lichaamsdeel'.
Extended body parts are saved to new text files.
Usage: python classify_body_parts.py <json file with body part mapping> <dir
with input texts> <dir for output texts>
"""
import os
import codecs
import argparse
import json
from count_labels import load_data
import pandas as pd
from emotools.heem_utils import heem_labels_en
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the directory where the input text '
'files can be found.')
parser.add_argument('output_dir', help='the directory where the output '
'files should be written.')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
text_files = [t for t in os.listdir(input_dir) if t.endswith('.txt')]
for text_file in text_files:
in_file = os.path.join(input_dir, text_file)
x_data, y_data = load_data(in_file)
labels = [y.split('_') for y in y_data]
#new_labels = []
out_file = os.path.join(output_dir, text_file)
#print out_file
with codecs.open(out_file, 'wb', 'utf-8') as f:
for i in range(len(labels)):
ls = labels[i]
#new_labels.append([heem_labels_en.get(l, 'None') for l in ls])
new_labels = [heem_labels_en.get(l, 'None') for l in ls]
#print ls, new_labels
f.write(u'{}\t{}\n'.format(x_data[i].decode('utf-8'),
'_'.join(new_labels)))
|
Add script to translate labels"""Find known body parts in sentences with predicted label 'Lichaamsdeel'.
Extended body parts are saved to new text files.
Usage: python classify_body_parts.py <json file with body part mapping> <dir
with input texts> <dir for output texts>
"""
import os
import codecs
import argparse
import json
from count_labels import load_data
import pandas as pd
from emotools.heem_utils import heem_labels_en
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the directory where the input text '
'files can be found.')
parser.add_argument('output_dir', help='the directory where the output '
'files should be written.')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
text_files = [t for t in os.listdir(input_dir) if t.endswith('.txt')]
for text_file in text_files:
in_file = os.path.join(input_dir, text_file)
x_data, y_data = load_data(in_file)
labels = [y.split('_') for y in y_data]
#new_labels = []
out_file = os.path.join(output_dir, text_file)
#print out_file
with codecs.open(out_file, 'wb', 'utf-8') as f:
for i in range(len(labels)):
ls = labels[i]
#new_labels.append([heem_labels_en.get(l, 'None') for l in ls])
new_labels = [heem_labels_en.get(l, 'None') for l in ls]
#print ls, new_labels
f.write(u'{}\t{}\n'.format(x_data[i].decode('utf-8'),
'_'.join(new_labels)))
|
<commit_before><commit_msg>Add script to translate labels<commit_after>"""Translate the HEEM labels in labeled text files to English.
Sentences with translated labels are saved to new text files.
Usage: python translate_labels.py <dir with input texts> <dir for output texts>
"""
import os
import codecs
import argparse
import json
from count_labels import load_data
import pandas as pd
from emotools.heem_utils import heem_labels_en
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the directory where the input text '
'files can be found.')
parser.add_argument('output_dir', help='the directory where the output '
'files should be written.')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
text_files = [t for t in os.listdir(input_dir) if t.endswith('.txt')]
for text_file in text_files:
in_file = os.path.join(input_dir, text_file)
x_data, y_data = load_data(in_file)
labels = [y.split('_') for y in y_data]
#new_labels = []
out_file = os.path.join(output_dir, text_file)
#print out_file
with codecs.open(out_file, 'wb', 'utf-8') as f:
for i in range(len(labels)):
ls = labels[i]
#new_labels.append([heem_labels_en.get(l, 'None') for l in ls])
new_labels = [heem_labels_en.get(l, 'None') for l in ls]
#print ls, new_labels
f.write(u'{}\t{}\n'.format(x_data[i].decode('utf-8'),
'_'.join(new_labels)))
|
|
14bc31730b783b1a6649c97a51d1ef9ae2053b42
|
tests/asttools/test_compiler.py
|
tests/asttools/test_compiler.py
|
"""Test suite for asttools.compiler."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import pytest
from pycc.asttools import parse
from pycc.asttools import compiler
source = """
x = True
for y in range(10):
pass
"""
@pytest.fixture
def node():
"""Get as AST node from the source."""
return parse.parse(source)
def test_bytecode_compiler(node):
"""Ensure that bytecode can be generated without errors."""
compiler.ByteCodeCompiler()(node)
def test_source_code_compiler(node):
"""Ensure that source code can be generated without errors."""
compiler.SourceCodeCompiler()(node)
|
Add simple tests for asttools.compiler module
|
Add simple tests for asttools.compiler module
These tests don't do anything except ensure that exceptions aren't
thrown during compiling.
Signed-off-by: Kevin Conway <3473c1f185ca03eadc40ad288d84425b54fd7d57@gmail.com>
|
Python
|
apache-2.0
|
kevinconway/pycc,kevinconway/pycc
|
Add simple tests for asttools.compiler module
These tests don't do anything except ensure that exceptions aren't
thrown during compiling.
Signed-off-by: Kevin Conway <3473c1f185ca03eadc40ad288d84425b54fd7d57@gmail.com>
|
"""Test suite for asttools.compiler."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import pytest
from pycc.asttools import parse
from pycc.asttools import compiler
source = """
x = True
for y in range(10):
pass
"""
@pytest.fixture
def node():
"""Get as AST node from the source."""
return parse.parse(source)
def test_bytecode_compiler(node):
"""Ensure that bytecode can be generated without errors."""
compiler.ByteCodeCompiler()(node)
def test_source_code_compiler(node):
"""Ensure that source code can be generated without errors."""
compiler.SourceCodeCompiler()(node)
|
<commit_before><commit_msg>Add simple tests for asttools.compiler module
These tests don't do anything except ensure that exceptions aren't
thrown during compiling.
Signed-off-by: Kevin Conway <3473c1f185ca03eadc40ad288d84425b54fd7d57@gmail.com><commit_after>
|
"""Test suite for asttools.compiler."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import pytest
from pycc.asttools import parse
from pycc.asttools import compiler
source = """
x = True
for y in range(10):
pass
"""
@pytest.fixture
def node():
"""Get as AST node from the source."""
return parse.parse(source)
def test_bytecode_compiler(node):
"""Ensure that bytecode can be generated without errors."""
compiler.ByteCodeCompiler()(node)
def test_source_code_compiler(node):
"""Ensure that source code can be generated without errors."""
compiler.SourceCodeCompiler()(node)
|
Add simple tests for asttools.compiler module
These tests don't do anything except ensure that exceptions aren't
thrown during compiling.
Signed-off-by: Kevin Conway <3473c1f185ca03eadc40ad288d84425b54fd7d57@gmail.com>"""Test suite for asttools.compiler."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import pytest
from pycc.asttools import parse
from pycc.asttools import compiler
source = """
x = True
for y in range(10):
pass
"""
@pytest.fixture
def node():
"""Get as AST node from the source."""
return parse.parse(source)
def test_bytecode_compiler(node):
"""Ensure that bytecode can be generated without errors."""
compiler.ByteCodeCompiler()(node)
def test_source_code_compiler(node):
"""Ensure that source code can be generated without errors."""
compiler.SourceCodeCompiler()(node)
|
<commit_before><commit_msg>Add simple tests for asttools.compiler module
These tests don't do anything except ensure that exceptions aren't
thrown during compiling.
Signed-off-by: Kevin Conway <3473c1f185ca03eadc40ad288d84425b54fd7d57@gmail.com><commit_after>"""Test suite for asttools.compiler."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import pytest
from pycc.asttools import parse
from pycc.asttools import compiler
source = """
x = True
for y in range(10):
pass
"""
@pytest.fixture
def node():
"""Get as AST node from the source."""
return parse.parse(source)
def test_bytecode_compiler(node):
"""Ensure that bytecode can be generated without errors."""
compiler.ByteCodeCompiler()(node)
def test_source_code_compiler(node):
"""Ensure that source code can be generated without errors."""
compiler.SourceCodeCompiler()(node)
|
|
b7274e91bc2fe6a8059ce7e65e02c20d000322b1
|
tests/thread/mutate_instance.py
|
tests/thread/mutate_instance.py
|
# test concurrent mutating access to a shared user instance
#
# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
import _thread
# the shared user class and instance
class User:
def __init__(self):
self.a = 'A'
self.b = 'B'
self.c = 'C'
user = User()
# main thread function
def th(n, lo, hi):
for repeat in range(n):
for i in range(lo, hi):
setattr(user, 'attr_%u' % i, repeat + i)
assert getattr(user, 'attr_%u' % i) == repeat + i
with lock:
global n_finished
n_finished += 1
lock = _thread.allocate_lock()
n_repeat = 30
n_range = 300
n_thread = 4
n_finished = 0
# spawn threads
for i in range(n_thread):
_thread.start_new_thread(th, (n_repeat, i * n_range, (i + 1) * n_range))
# busy wait for threads to finish
while n_finished < n_thread:
pass
# check user instance has correct contents
print(user.a, user.b, user.c)
for i in range(n_thread * n_range):
assert getattr(user, 'attr_%u' % i) == n_repeat - 1 + i
|
Add test for concurrent mutating of user instance.
|
tests/thread: Add test for concurrent mutating of user instance.
|
Python
|
mit
|
mhoffma/micropython,ryannathans/micropython,deshipu/micropython,HenrikSolver/micropython,jmarcelino/pycom-micropython,adafruit/micropython,bvernoux/micropython,TDAbboud/micropython,oopy/micropython,emfcamp/micropython,swegener/micropython,Timmenem/micropython,bvernoux/micropython,alex-robbins/micropython,lowRISC/micropython,ganshun666/micropython,cwyark/micropython,HenrikSolver/micropython,MrSurly/micropython,redbear/micropython,pramasoul/micropython,blazewicz/micropython,dxxb/micropython,deshipu/micropython,swegener/micropython,matthewelse/micropython,tralamazza/micropython,dmazzella/micropython,matthewelse/micropython,blazewicz/micropython,dxxb/micropython,alex-robbins/micropython,adafruit/circuitpython,ganshun666/micropython,HenrikSolver/micropython,MrSurly/micropython,PappaPeppar/micropython,mhoffma/micropython,AriZuu/micropython,Peetz0r/micropython-esp32,ryannathans/micropython,alex-march/micropython,pramasoul/micropython,emfcamp/micropython,AriZuu/micropython,matthewelse/micropython,adafruit/circuitpython,PappaPeppar/micropython,lowRISC/micropython,kerneltask/micropython,emfcamp/micropython,kerneltask/micropython,micropython/micropython-esp32,tuc-osg/micropython,pfalcon/micropython,infinnovation/micropython,torwag/micropython,alex-march/micropython,SHA2017-badge/micropython-esp32,alex-march/micropython,tuc-osg/micropython,tralamazza/micropython,SHA2017-badge/micropython-esp32,bvernoux/micropython,micropython/micropython-esp32,henriknelson/micropython,chrisdearman/micropython,toolmacher/micropython,AriZuu/micropython,selste/micropython,ryannathans/micropython,redbear/micropython,tobbad/micropython,oopy/micropython,redbear/micropython,jmarcelino/pycom-micropython,tralamazza/micropython,cwyark/micropython,ganshun666/micropython,AriZuu/micropython,adafruit/circuitpython,deshipu/micropython,selste/micropython,HenrikSolver/micropython,PappaPeppar/micropython,dxxb/micropython,jmarcelino/pycom-micropython,Peetz0r/micropython-esp32,turbinenreiter/micropython,trezor/micropython,pozetroninc/micropython,puuu/micropython,pozetroninc/micropython,torwag/micropython,SHA2017-badge/micropython-esp32,adafruit/micropython,pozetroninc/micropython,toolmacher/micropython,turbinenreiter/micropython,adafruit/micropython,TDAbboud/micropython,torwag/micropython,dmazzella/micropython,infinnovation/micropython,micropython/micropython-esp32,trezor/micropython,trezor/micropython,ganshun666/micropython,hiway/micropython,hiway/micropython,hiway/micropython,pozetroninc/micropython,alex-march/micropython,pfalcon/micropython,MrSurly/micropython-esp32,henriknelson/micropython,pramasoul/micropython,Timmenem/micropython,tobbad/micropython,puuu/micropython,selste/micropython,MrSurly/micropython-esp32,lowRISC/micropython,TDAbboud/micropython,chrisdearman/micropython,adafruit/circuitpython,matthewelse/micropython,deshipu/micropython,pozetroninc/micropython,hosaka/micropython,dmazzella/micropython,blazewicz/micropython,puuu/micropython,TDAbboud/micropython,kerneltask/micropython,turbinenreiter/micropython,selste/micropython,matthewelse/micropython,selste/micropython,tobbad/micropython,henriknelson/micropython,oopy/micropython,micropython/micropython-esp32,pramasoul/micropython,oopy/micropython,turbinenreiter/micropython,PappaPeppar/micropython,TDAbboud/micropython,tuc-osg/micropython,kerneltask/micropython,hiway/micropython,turbinenreiter/micropython,alex-robbins/micropython,pfalcon/micropython,redbear/micropython,HenrikSolver/micropython,henriknelson/micropython,infinnovation/micropython,swegener/micropython,SHA2017-badge/mic
ropython-esp32,tuc-osg/micropython,AriZuu/micropython,Timmenem/micropython,dmazzella/micropython,adafruit/circuitpython,hosaka/micropython,tobbad/micropython,mhoffma/micropython,MrSurly/micropython-esp32,ganshun666/micropython,chrisdearman/micropython,adafruit/micropython,hosaka/micropython,dxxb/micropython,SHA2017-badge/micropython-esp32,toolmacher/micropython,swegener/micropython,mhoffma/micropython,MrSurly/micropython-esp32,henriknelson/micropython,MrSurly/micropython,cwyark/micropython,blazewicz/micropython,Timmenem/micropython,puuu/micropython,cwyark/micropython,Peetz0r/micropython-esp32,jmarcelino/pycom-micropython,puuu/micropython,ryannathans/micropython,bvernoux/micropython,emfcamp/micropython,kerneltask/micropython,trezor/micropython,mhoffma/micropython,blazewicz/micropython,micropython/micropython-esp32,lowRISC/micropython,matthewelse/micropython,pfalcon/micropython,torwag/micropython,Peetz0r/micropython-esp32,MrSurly/micropython-esp32,alex-march/micropython,infinnovation/micropython,chrisdearman/micropython,cwyark/micropython,adafruit/circuitpython,emfcamp/micropython,ryannathans/micropython,Peetz0r/micropython-esp32,dxxb/micropython,swegener/micropython,alex-robbins/micropython,infinnovation/micropython,adafruit/micropython,trezor/micropython,alex-robbins/micropython,hiway/micropython,pfalcon/micropython,oopy/micropython,tralamazza/micropython,jmarcelino/pycom-micropython,tobbad/micropython,bvernoux/micropython,chrisdearman/micropython,tuc-osg/micropython,hosaka/micropython,MrSurly/micropython,torwag/micropython,Timmenem/micropython,MrSurly/micropython,toolmacher/micropython,deshipu/micropython,toolmacher/micropython,PappaPeppar/micropython,pramasoul/micropython,lowRISC/micropython,redbear/micropython,hosaka/micropython
|
tests/thread: Add test for concurrent mutating of user instance.
|
# test concurrent mutating access to a shared user instance
#
# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
import _thread
# the shared user class and instance
class User:
def __init__(self):
self.a = 'A'
self.b = 'B'
self.c = 'C'
user = User()
# main thread function
def th(n, lo, hi):
for repeat in range(n):
for i in range(lo, hi):
setattr(user, 'attr_%u' % i, repeat + i)
assert getattr(user, 'attr_%u' % i) == repeat + i
with lock:
global n_finished
n_finished += 1
lock = _thread.allocate_lock()
n_repeat = 30
n_range = 300
n_thread = 4
n_finished = 0
# spawn threads
for i in range(n_thread):
_thread.start_new_thread(th, (n_repeat, i * n_range, (i + 1) * n_range))
# busy wait for threads to finish
while n_finished < n_thread:
pass
# check user instance has correct contents
print(user.a, user.b, user.c)
for i in range(n_thread * n_range):
assert getattr(user, 'attr_%u' % i) == n_repeat - 1 + i
|
<commit_before><commit_msg>tests/thread: Add test for concurrent mutating of user instance.<commit_after>
|
# test concurrent mutating access to a shared user instance
#
# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
import _thread
# the shared user class and instance
class User:
def __init__(self):
self.a = 'A'
self.b = 'B'
self.c = 'C'
user = User()
# main thread function
def th(n, lo, hi):
for repeat in range(n):
for i in range(lo, hi):
setattr(user, 'attr_%u' % i, repeat + i)
assert getattr(user, 'attr_%u' % i) == repeat + i
with lock:
global n_finished
n_finished += 1
lock = _thread.allocate_lock()
n_repeat = 30
n_range = 300
n_thread = 4
n_finished = 0
# spawn threads
for i in range(n_thread):
_thread.start_new_thread(th, (n_repeat, i * n_range, (i + 1) * n_range))
# busy wait for threads to finish
while n_finished < n_thread:
pass
# check user instance has correct contents
print(user.a, user.b, user.c)
for i in range(n_thread * n_range):
assert getattr(user, 'attr_%u' % i) == n_repeat - 1 + i
|
tests/thread: Add test for concurrent mutating of user instance.# test concurrent mutating access to a shared user instance
#
# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
import _thread
# the shared user class and instance
class User:
def __init__(self):
self.a = 'A'
self.b = 'B'
self.c = 'C'
user = User()
# main thread function
def th(n, lo, hi):
for repeat in range(n):
for i in range(lo, hi):
setattr(user, 'attr_%u' % i, repeat + i)
assert getattr(user, 'attr_%u' % i) == repeat + i
with lock:
global n_finished
n_finished += 1
lock = _thread.allocate_lock()
n_repeat = 30
n_range = 300
n_thread = 4
n_finished = 0
# spawn threads
for i in range(n_thread):
_thread.start_new_thread(th, (n_repeat, i * n_range, (i + 1) * n_range))
# busy wait for threads to finish
while n_finished < n_thread:
pass
# check user instance has correct contents
print(user.a, user.b, user.c)
for i in range(n_thread * n_range):
assert getattr(user, 'attr_%u' % i) == n_repeat - 1 + i
|
<commit_before><commit_msg>tests/thread: Add test for concurrent mutating of user instance.<commit_after># test concurrent mutating access to a shared user instance
#
# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
import _thread
# the shared user class and instance
class User:
def __init__(self):
self.a = 'A'
self.b = 'B'
self.c = 'C'
user = User()
# main thread function
def th(n, lo, hi):
for repeat in range(n):
for i in range(lo, hi):
setattr(user, 'attr_%u' % i, repeat + i)
assert getattr(user, 'attr_%u' % i) == repeat + i
with lock:
global n_finished
n_finished += 1
lock = _thread.allocate_lock()
n_repeat = 30
n_range = 300
n_thread = 4
n_finished = 0
# spawn threads
for i in range(n_thread):
_thread.start_new_thread(th, (n_repeat, i * n_range, (i + 1) * n_range))
# busy wait for threads to finish
while n_finished < n_thread:
pass
# check user instance has correct contents
print(user.a, user.b, user.c)
for i in range(n_thread * n_range):
assert getattr(user, 'attr_%u' % i) == n_repeat - 1 + i
|
|
05084740393180994ea0b330c7f91a148a5d7d86
|
util/checkZscale.py
|
util/checkZscale.py
|
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
sys.path += [os.path.abspath('../django')]
import OCP.settings
os.environ['DJANGO_SETTINGS_MODULE'] = 'OCP.settings'
from django.conf import settings
import ocpcarest
def main():
parser = argparse.ArgumentParser( description='Check the zscale for the dataset')
parser.add_argument('dataset', action="store", help='Dataset name for the project')
result = parser.parse_args()
[ db, proj, projdb ] = ocpcarest.loadDBProj ( result.dataset )
( xcubedim, ycubedim, zcubedim ) = proj.datasetcfg.cubedim [ 0 ]
for res in proj.datasetcfg.resolutions:
print "Resolution ", res, "Zscale ", proj.datasetcfg.zscale[res], "Dims ", proj.datasetcfg.cubedim[res]
if __name__ == "__main__":
main()
|
Check the zscale and cubedims for a given database
|
[util] Check the zscale and cubedims for a given database
|
Python
|
apache-2.0
|
openconnectome/open-connectome,openconnectome/open-connectome,neurodata/ndstore,openconnectome/open-connectome,openconnectome/open-connectome,neurodata/ndstore,neurodata/ndstore,openconnectome/open-connectome,neurodata/ndstore,openconnectome/open-connectome
|
[util] Check the zscale and cubedims for a given database
|
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
sys.path += [os.path.abspath('../django')]
import OCP.settings
os.environ['DJANGO_SETTINGS_MODULE'] = 'OCP.settings'
from django.conf import settings
import ocpcarest
def main():
parser = argparse.ArgumentParser( description='Check the zscale for the dataset')
parser.add_argument('dataset', action="store", help='Dataset name for the project')
result = parser.parse_args()
[ db, proj, projdb ] = ocpcarest.loadDBProj ( result.dataset )
( xcubedim, ycubedim, zcubedim ) = proj.datasetcfg.cubedim [ 0 ]
for res in proj.datasetcfg.resolutions:
print "Resolution ", res, "Zscale ", proj.datasetcfg.zscale[res], "Dims ", proj.datasetcfg.cubedim[res]
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>[util] Check the zscale and cubedims for a given database<commit_after>
|
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
sys.path += [os.path.abspath('../django')]
import OCP.settings
os.environ['DJANGO_SETTINGS_MODULE'] = 'OCP.settings'
from django.conf import settings
import ocpcarest
def main():
parser = argparse.ArgumentParser( description='Check the zscale for the dataset')
parser.add_argument('dataset', action="store", help='Dataset name for the project')
result = parser.parse_args()
[ db, proj, projdb ] = ocpcarest.loadDBProj ( result.dataset )
( xcubedim, ycubedim, zcubedim ) = proj.datasetcfg.cubedim [ 0 ]
for res in proj.datasetcfg.resolutions:
print "Resolution ", res, "Zscale ", proj.datasetcfg.zscale[res], "Dims ", proj.datasetcfg.cubedim[res]
if __name__ == "__main__":
main()
|
[util] Check the zscale and cubedims for a given database# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
sys.path += [os.path.abspath('../django')]
import OCP.settings
os.environ['DJANGO_SETTINGS_MODULE'] = 'OCP.settings'
from django.conf import settings
import ocpcarest
def main():
parser = argparse.ArgumentParser( description='Check the zscale for the dataset')
parser.add_argument('dataset', action="store", help='Dataset name for the project')
result = parser.parse_args()
[ db, proj, projdb ] = ocpcarest.loadDBProj ( result.dataset )
( xcubedim, ycubedim, zcubedim ) = proj.datasetcfg.cubedim [ 0 ]
for res in proj.datasetcfg.resolutions:
print "Resolution ", res, "Zscale ", proj.datasetcfg.zscale[res], "Dims ", proj.datasetcfg.cubedim[res]
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>[util] Check the zscale and cubedims for a given database<commit_after># Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
sys.path += [os.path.abspath('../django')]
import OCP.settings
os.environ['DJANGO_SETTINGS_MODULE'] = 'OCP.settings'
from django.conf import settings
import ocpcarest
def main():
parser = argparse.ArgumentParser( description='Check the zscale for the dataset')
parser.add_argument('dataset', action="store", help='Dataset name for the project')
result = parser.parse_args()
[ db, proj, projdb ] = ocpcarest.loadDBProj ( result.dataset )
( xcubedim, ycubedim, zcubedim ) = proj.datasetcfg.cubedim [ 0 ]
for res in proj.datasetcfg.resolutions:
print "Resolution ", res, "Zscale ", proj.datasetcfg.zscale[res], "Dims ", proj.datasetcfg.cubedim[res]
if __name__ == "__main__":
main()
|
|
59f40f5b7683d60264f5c38a5e0da7810fa67fd8
|
cross-multinomial/cross_multinomial_predict.py
|
cross-multinomial/cross_multinomial_predict.py
|
import sys
import numpy as np
import numpy.random as npr
from calculate_phist import read_counts
from calculate_phist import normalize_haplotypes
from scipy.special import gammaln
import matplotlib.pyplot as plt
def log_factorial(n):
return gammaln(n+1)
def log_multinomial(xs, ps):
n = np.sum(xs)
log_prob = log_factorial(n) - np.sum(log_factorial(xs)) + np.sum(xs * np.log(ps + 0.0000000000001))
return log_prob
def locus_prob(locus_obs_counts, locus_freq):
log_prob = 0.0
n_pop = locus_obs_counts.shape[0]
for p1 in xrange(n_pop):
for p2 in xrange(n_pop):
log_prob += log_multinomial(locus_obs_counts[p1], locus_freq[p2])
return log_prob
def probability(observed_counts):
observed_frequencies = normalize_haplotypes(observed_counts)
n_loci = observed_counts.shape[0]
locus_probabilities = np.zeros(n_loci)
for locus in xrange(n_loci):
prob = locus_prob(observed_counts[locus, :, :], observed_frequencies[locus, :, :])
locus_probabilities[locus] = prob
return locus_probabilities
def main(occur_fl, output_fl):
observed_counts = read_counts(occur_fl)
print observed_counts.shape
locus_log_probs = probability(observed_counts)
    sortable = [(locus_log_probs[i], i) for i in range(len(locus_log_probs))]
sortable.sort()
fl = open(output_fl, "w")
for log_prob, i in sortable:
fl.write("%s %s\n" % (i, log_prob))
fl.close()
if __name__ == "__main__":
occur_fl = sys.argv[1]
output_fl = sys.argv[2]
main(occur_fl, output_fl)
|
Add prediction script for cross multinomial
|
Add prediction script for cross multinomial
|
Python
|
apache-2.0
|
rnowling/pop-gen-models
|
Add prediction script for cross multinomial
|
import sys
import numpy as np
import numpy.random as npr
from calculate_phist import read_counts
from calculate_phist import normalize_haplotypes
from scipy.special import gammaln
import matplotlib.pyplot as plt
def log_factorial(n):
return gammaln(n+1)
def log_multinomial(xs, ps):
n = np.sum(xs)
log_prob = log_factorial(n) - np.sum(log_factorial(xs)) + np.sum(xs * np.log(ps + 0.0000000000001))
return log_prob
def locus_prob(locus_obs_counts, locus_freq):
log_prob = 0.0
n_pop = locus_obs_counts.shape[0]
for p1 in xrange(n_pop):
for p2 in xrange(n_pop):
log_prob += log_multinomial(locus_obs_counts[p1], locus_freq[p2])
return log_prob
def probability(observed_counts):
observed_frequencies = normalize_haplotypes(observed_counts)
n_loci = observed_counts.shape[0]
locus_probabilities = np.zeros(n_loci)
for locus in xrange(n_loci):
prob = locus_prob(observed_counts[locus, :, :], observed_frequencies[locus, :, :])
locus_probabilities[locus] = prob
return locus_probabilities
def main(occur_fl, output_fl):
observed_counts = read_counts(occur_fl)
print observed_counts.shape
locus_log_probs = probability(observed_counts)
    sortable = [(locus_log_probs[i], i) for i in range(len(locus_log_probs))]
sortable.sort()
fl = open(output_fl, "w")
for log_prob, i in sortable:
fl.write("%s %s\n" % (i, log_prob))
fl.close()
if __name__ == "__main__":
occur_fl = sys.argv[1]
output_fl = sys.argv[2]
main(occur_fl, output_fl)
|
<commit_before><commit_msg>Add prediction script for cross multinomial<commit_after>
|
import sys
import numpy as np
import numpy.random as npr
from calculate_phist import read_counts
from calculate_phist import normalize_haplotypes
from scipy.special import gammaln
import matplotlib.pyplot as plt
def log_factorial(n):
return gammaln(n+1)
def log_multinomial(xs, ps):
n = np.sum(xs)
log_prob = log_factorial(n) - np.sum(log_factorial(xs)) + np.sum(xs * np.log(ps + 0.0000000000001))
return log_prob
def locus_prob(locus_obs_counts, locus_freq):
log_prob = 0.0
n_pop = locus_obs_counts.shape[0]
for p1 in xrange(n_pop):
for p2 in xrange(n_pop):
log_prob += log_multinomial(locus_obs_counts[p1], locus_freq[p2])
return log_prob
def probability(observed_counts):
observed_frequencies = normalize_haplotypes(observed_counts)
n_loci = observed_counts.shape[0]
locus_probabilities = np.zeros(n_loci)
for locus in xrange(n_loci):
prob = locus_prob(observed_counts[locus, :, :], observed_frequencies[locus, :, :])
locus_probabilities[locus] = prob
return locus_probabilities
def main(occur_fl, output_fl):
observed_counts = read_counts(occur_fl)
print observed_counts.shape
locus_log_probs = probability(observed_counts)
    sortable = [(locus_log_probs[i], i) for i in range(len(locus_log_probs))]
sortable.sort()
fl = open(output_fl, "w")
for log_prob, i in sortable:
fl.write("%s %s\n" % (i, log_prob))
fl.close()
if __name__ == "__main__":
occur_fl = sys.argv[1]
output_fl = sys.argv[2]
main(occur_fl, output_fl)
|
Add prediction script for cross multinomialimport sys
import numpy as np
import numpy.random as npr
from calculate_phist import read_counts
from calculate_phist import normalize_haplotypes
from scipy.special import gammaln
import matplotlib.pyplot as plt
def log_factorial(n):
return gammaln(n+1)
def log_multinomial(xs, ps):
n = np.sum(xs)
log_prob = log_factorial(n) - np.sum(log_factorial(xs)) + np.sum(xs * np.log(ps + 0.0000000000001))
return log_prob
def locus_prob(locus_obs_counts, locus_freq):
log_prob = 0.0
n_pop = locus_obs_counts.shape[0]
for p1 in xrange(n_pop):
for p2 in xrange(n_pop):
log_prob += log_multinomial(locus_obs_counts[p1], locus_freq[p2])
return log_prob
def probability(observed_counts):
observed_frequencies = normalize_haplotypes(observed_counts)
n_loci = observed_counts.shape[0]
locus_probabilities = np.zeros(n_loci)
for locus in xrange(n_loci):
prob = locus_prob(observed_counts[locus, :, :], observed_frequencies[locus, :, :])
locus_probabilities[locus] = prob
return locus_probabilities
def main(occur_fl, output_fl):
observed_counts = read_counts(occur_fl)
print observed_counts.shape
locus_log_probs = probability(observed_counts)
    sortable = [(locus_log_probs[i], i) for i in range(len(locus_log_probs))]
sortable.sort()
fl = open(output_fl, "w")
for log_prob, i in sortable:
fl.write("%s %s\n" % (i, log_prob))
fl.close()
if __name__ == "__main__":
occur_fl = sys.argv[1]
output_fl = sys.argv[2]
main(occur_fl, output_fl)
|
<commit_before><commit_msg>Add prediction script for cross multinomial<commit_after>import sys
import numpy as np
import numpy.random as npr
from calculate_phist import read_counts
from calculate_phist import normalize_haplotypes
from scipy.special import gammaln
import matplotlib.pyplot as plt
def log_factorial(n):
return gammaln(n+1)
def log_multinomial(xs, ps):
n = np.sum(xs)
log_prob = log_factorial(n) - np.sum(log_factorial(xs)) + np.sum(xs * np.log(ps + 0.0000000000001))
return log_prob
def locus_prob(locus_obs_counts, locus_freq):
log_prob = 0.0
n_pop = locus_obs_counts.shape[0]
for p1 in xrange(n_pop):
for p2 in xrange(n_pop):
log_prob += log_multinomial(locus_obs_counts[p1], locus_freq[p2])
return log_prob
def probability(observed_counts):
observed_frequencies = normalize_haplotypes(observed_counts)
n_loci = observed_counts.shape[0]
locus_probabilities = np.zeros(n_loci)
for locus in xrange(n_loci):
prob = locus_prob(observed_counts[locus, :, :], observed_frequencies[locus, :, :])
locus_probabilities[locus] = prob
return locus_probabilities
def main(occur_fl, output_fl):
observed_counts = read_counts(occur_fl)
print observed_counts.shape
locus_log_probs = probability(observed_counts)
    sortable = [(locus_log_probs[i], i) for i in range(len(locus_log_probs))]
sortable.sort()
fl = open(output_fl, "w")
for log_prob, i in sortable:
fl.write("%s %s\n" % (i, log_prob))
fl.close()
if __name__ == "__main__":
occur_fl = sys.argv[1]
output_fl = sys.argv[2]
main(occur_fl, output_fl)
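Note on the script above: `log_multinomial` is the standard multinomial log-pmf written with `gammaln` (the tiny constant added to `ps` only guards against log(0)). The sketch below is one way to sanity-check the formula; it assumes a SciPy version new enough to ship `scipy.stats.multinomial` (0.19+) and is not part of the original script.
import numpy as np
from scipy.special import gammaln
from scipy.stats import multinomial  # assumes SciPy >= 0.19

def log_multinomial(xs, ps):
    # Same identity as the script: log n! - sum(log x_i!) + sum(x_i * log p_i)
    n = np.sum(xs)
    return gammaln(n + 1) - np.sum(gammaln(xs + 1)) + np.sum(xs * np.log(ps))

xs = np.array([3, 2, 5])
ps = np.array([0.2, 0.3, 0.5])
assert np.isclose(log_multinomial(xs, ps), multinomial.logpmf(xs, n=xs.sum(), p=ps))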
|
|
0ffeb772e52c2af555c5fe65f4763059edb31148
|
tensor2tensor/models/research/transformer_vae_test.py
|
tensor2tensor/models/research/transformer_vae_test.py
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensor2tensor.models.research.transformer_vae."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.data_generators import problem_hparams
from tensor2tensor.models.research import transformer_vae
import tensorflow as tf
class TransformerVaeTest(tf.test.TestCase):
def testTransformerAEOnDVQ(self):
batch_size = 3
input_length = 5
target_length = 16
vocab_size = 9
hparams = transformer_vae.transformer_ae_small()
hparams.bottleneck_kind = "dvq"
hparams.dp_strength = 0
p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size)
hparams.problems = [p_hparams]
inputs = -1 + np.random.random_integers(
vocab_size, size=(batch_size, input_length, 1, 1))
targets = -1 + np.random.random_integers(
vocab_size, size=(batch_size, target_length, 1, 1))
features = {
"inputs": tf.constant(inputs, dtype=tf.int32),
"targets": tf.constant(targets, dtype=tf.int32),
"target_space_id": tf.constant(1, dtype=tf.int32),
}
tf.train.create_global_step()
model = transformer_vae.TransformerAE(hparams, tf.estimator.ModeKeys.TRAIN,
p_hparams)
logits, _ = model(features)
with self.test_session() as session:
session.run(tf.global_variables_initializer())
logits_val = session.run(logits)
self.assertEqual(logits_val.shape,
(batch_size, target_length, 1, 1, vocab_size))
if __name__ == "__main__":
tf.test.main()
|
Add a test for transformer_vae
|
Add a test for transformer_vae
PiperOrigin-RevId: 190462534
|
Python
|
apache-2.0
|
tensorflow/tensor2tensor,tensorflow/tensor2tensor,vthorsteinsson/tensor2tensor,tensorflow/tensor2tensor,vthorsteinsson/tensor2tensor,tensorflow/tensor2tensor,tensorflow/tensor2tensor,vthorsteinsson/tensor2tensor,vthorsteinsson/tensor2tensor
|
Add a test for transformer_vae
PiperOrigin-RevId: 190462534
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensor2tensor.models.research.transformer_vae."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.data_generators import problem_hparams
from tensor2tensor.models.research import transformer_vae
import tensorflow as tf
class TransformerVaeTest(tf.test.TestCase):
def testTransformerAEOnDVQ(self):
batch_size = 3
input_length = 5
target_length = 16
vocab_size = 9
hparams = transformer_vae.transformer_ae_small()
hparams.bottleneck_kind = "dvq"
hparams.dp_strength = 0
p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size)
hparams.problems = [p_hparams]
inputs = -1 + np.random.random_integers(
vocab_size, size=(batch_size, input_length, 1, 1))
targets = -1 + np.random.random_integers(
vocab_size, size=(batch_size, target_length, 1, 1))
features = {
"inputs": tf.constant(inputs, dtype=tf.int32),
"targets": tf.constant(targets, dtype=tf.int32),
"target_space_id": tf.constant(1, dtype=tf.int32),
}
tf.train.create_global_step()
model = transformer_vae.TransformerAE(hparams, tf.estimator.ModeKeys.TRAIN,
p_hparams)
logits, _ = model(features)
with self.test_session() as session:
session.run(tf.global_variables_initializer())
logits_val = session.run(logits)
self.assertEqual(logits_val.shape,
(batch_size, target_length, 1, 1, vocab_size))
if __name__ == "__main__":
tf.test.main()
|
<commit_before><commit_msg>Add a test for transformer_vae
PiperOrigin-RevId: 190462534<commit_after>
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensor2tensor.models.research.transformer_vae."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.data_generators import problem_hparams
from tensor2tensor.models.research import transformer_vae
import tensorflow as tf
class TransformerVaeTest(tf.test.TestCase):
def testTransformerAEOnDVQ(self):
batch_size = 3
input_length = 5
target_length = 16
vocab_size = 9
hparams = transformer_vae.transformer_ae_small()
hparams.bottleneck_kind = "dvq"
hparams.dp_strength = 0
p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size)
hparams.problems = [p_hparams]
inputs = -1 + np.random.random_integers(
vocab_size, size=(batch_size, input_length, 1, 1))
targets = -1 + np.random.random_integers(
vocab_size, size=(batch_size, target_length, 1, 1))
features = {
"inputs": tf.constant(inputs, dtype=tf.int32),
"targets": tf.constant(targets, dtype=tf.int32),
"target_space_id": tf.constant(1, dtype=tf.int32),
}
tf.train.create_global_step()
model = transformer_vae.TransformerAE(hparams, tf.estimator.ModeKeys.TRAIN,
p_hparams)
logits, _ = model(features)
with self.test_session() as session:
session.run(tf.global_variables_initializer())
logits_val = session.run(logits)
self.assertEqual(logits_val.shape,
(batch_size, target_length, 1, 1, vocab_size))
if __name__ == "__main__":
tf.test.main()
|
Add a test for transformer_vae
PiperOrigin-RevId: 190462534# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensor2tensor.models.research.transformer_vae."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.data_generators import problem_hparams
from tensor2tensor.models.research import transformer_vae
import tensorflow as tf
class TransformerVaeTest(tf.test.TestCase):
def testTransformerAEOnDVQ(self):
batch_size = 3
input_length = 5
target_length = 16
vocab_size = 9
hparams = transformer_vae.transformer_ae_small()
hparams.bottleneck_kind = "dvq"
hparams.dp_strength = 0
p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size)
hparams.problems = [p_hparams]
inputs = -1 + np.random.random_integers(
vocab_size, size=(batch_size, input_length, 1, 1))
targets = -1 + np.random.random_integers(
vocab_size, size=(batch_size, target_length, 1, 1))
features = {
"inputs": tf.constant(inputs, dtype=tf.int32),
"targets": tf.constant(targets, dtype=tf.int32),
"target_space_id": tf.constant(1, dtype=tf.int32),
}
tf.train.create_global_step()
model = transformer_vae.TransformerAE(hparams, tf.estimator.ModeKeys.TRAIN,
p_hparams)
logits, _ = model(features)
with self.test_session() as session:
session.run(tf.global_variables_initializer())
logits_val = session.run(logits)
self.assertEqual(logits_val.shape,
(batch_size, target_length, 1, 1, vocab_size))
if __name__ == "__main__":
tf.test.main()
|
<commit_before><commit_msg>Add a test for transformer_vae
PiperOrigin-RevId: 190462534<commit_after># coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensor2tensor.models.research.transformer_vae."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.data_generators import problem_hparams
from tensor2tensor.models.research import transformer_vae
import tensorflow as tf
class TransformerVaeTest(tf.test.TestCase):
def testTransformerAEOnDVQ(self):
batch_size = 3
input_length = 5
target_length = 16
vocab_size = 9
hparams = transformer_vae.transformer_ae_small()
hparams.bottleneck_kind = "dvq"
hparams.dp_strength = 0
p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size)
hparams.problems = [p_hparams]
inputs = -1 + np.random.random_integers(
vocab_size, size=(batch_size, input_length, 1, 1))
targets = -1 + np.random.random_integers(
vocab_size, size=(batch_size, target_length, 1, 1))
features = {
"inputs": tf.constant(inputs, dtype=tf.int32),
"targets": tf.constant(targets, dtype=tf.int32),
"target_space_id": tf.constant(1, dtype=tf.int32),
}
tf.train.create_global_step()
model = transformer_vae.TransformerAE(hparams, tf.estimator.ModeKeys.TRAIN,
p_hparams)
logits, _ = model(features)
with self.test_session() as session:
session.run(tf.global_variables_initializer())
logits_val = session.run(logits)
self.assertEqual(logits_val.shape,
(batch_size, target_length, 1, 1, vocab_size))
if __name__ == "__main__":
tf.test.main()
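Note on the test above: `np.random.random_integers(vocab_size, size=...)` samples from the closed range [1, vocab_size], so the `-1 +` prefix shifts token ids into [0, vocab_size - 1]. In NumPy releases where `random_integers` is deprecated or removed, the sketch below draws equivalent ids with `randint` (exclusive upper bound); it is an illustrative substitute, not a change to the test.
import numpy as np

vocab_size = 9
batch_size, input_length = 3, 5

# Equivalent to "-1 + np.random.random_integers(vocab_size, size=shape)":
# randint's upper bound is exclusive, so this yields ids in [0, vocab_size - 1].
inputs = np.random.randint(0, vocab_size, size=(batch_size, input_length, 1, 1))

assert inputs.min() >= 0 and inputs.max() < vocab_size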
|
|
3ab745087ef563e30f86fdeed09b12868095f85d
|
tests/test_classifierbase.py
|
tests/test_classifierbase.py
|
from itertools import izip, cycle
from unittest import TestCase
class TestClassifierBase(TestCase):
def _training(self, classification_type, classifier, alphabet,
reversed_alphabet):
reversed_alphabet = reversed(alphabet)
messagePairs = izip(alphabet, reversed_alphabet)
for message, is_spam in izip(messagePairs, cycle((True, False))):
classifier.train(classification_type, message, is_spam)
|
Add a base class for certain types of tests
|
Add a base class for certain types of tests
This is so training data can be reused across classes.
In fact nonsensefilter's test have been using this for quite some time
but this was not committed. Oops!
|
Python
|
mpl-2.0
|
mozilla/spicedham,mozilla/spicedham
|
Add a base class for certain types of tests
This is so training data can be reused across classes.
In fact nonsensefilter's tests have been using this for quite some time
but this was not committed. Oops!
|
from itertools import izip, cycle
from unittest import TestCase
class TestClassifierBase(TestCase):
def _training(self, classification_type, classifier, alphabet,
reversed_alphabet):
reversed_alphabet = reversed(alphabet)
messagePairs = izip(alphabet, reversed_alphabet)
for message, is_spam in izip(messagePairs, cycle((True, False))):
classifier.train(classification_type, message, is_spam)
|
<commit_before><commit_msg>Add a base class for certain types of tests
This is so training data can be reused across classes.
In fact nonsensefilter's tests have been using this for quite some time
but this was not committed. Oops!<commit_after>
|
from itertools import izip, cycle
from unittest import TestCase
class TestClassifierBase(TestCase):
def _training(self, classification_type, classifier, alphabet,
reversed_alphabet):
reversed_alphabet = reversed(alphabet)
messagePairs = izip(alphabet, reversed_alphabet)
for message, is_spam in izip(messagePairs, cycle((True, False))):
classifier.train(classification_type, message, is_spam)
|
Add a base class for certain types of tests
This is so training data can be reused across classes.
In fact nonsensefilter's tests have been using this for quite some time
but this was not committed. Oops!from itertools import izip, cycle
from unittest import TestCase
class TestClassifierBase(TestCase):
def _training(self, classification_type, classifier, alphabet,
reversed_alphabet):
reversed_alphabet = reversed(alphabet)
messagePairs = izip(alphabet, reversed_alphabet)
for message, is_spam in izip(messagePairs, cycle((True, False))):
classifier.train(classification_type, message, is_spam)
|
<commit_before><commit_msg>Add a base class for certain types of tests
This is so training data can be reused across classes.
In fact nonsensefilter's tests have been using this for quite some time
but this was not committed. Oops!<commit_after>from itertools import izip, cycle
from unittest import TestCase
class TestClassifierBase(TestCase):
def _training(self, classification_type, classifier, alphabet,
reversed_alphabet):
reversed_alphabet = reversed(alphabet)
messagePairs = izip(alphabet, reversed_alphabet)
for message, is_spam in izip(messagePairs, cycle((True, False))):
classifier.train(classification_type, message, is_spam)
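Note on the pairing logic above: each training "message" is a `(letter, reversed_letter)` tuple and the spam label simply alternates via `cycle((True, False))`. A small Python 3 sketch (plain `zip` standing in for the Python 2-only `itertools.izip`) shows what the loop iterates over:
from itertools import cycle

alphabet = ['a', 'b', 'c', 'd']
message_pairs = zip(alphabet, reversed(alphabet))
for message, is_spam in zip(message_pairs, cycle((True, False))):
    print(message, is_spam)
# ('a', 'd') True
# ('b', 'c') False
# ('c', 'b') True
# ('d', 'a') False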
|
|
1e3d9f01d25d89ac6167226a24e03b79d0c34a3c
|
test/trainer_test.py
|
test/trainer_test.py
|
import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, *args, **kwargs):
self.exp.add_trainer(*args, **kwargs)
trainer = self.exp.train(self.images, self.images)
costs0 = next(trainer)
costs1 = next(trainer)
assert costs1['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_hf(self):
self.assert_progress('hf')
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
|
Add basic "does this even run" test for trainers.
|
Add basic "does this even run" test for trainers.
|
Python
|
mit
|
lmjohns3/theanets,chrinide/theanets,devdoer/theanets
|
Add basic "does this even run" test for trainers.
|
import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, *args, **kwargs):
self.exp.add_trainer(*args, **kwargs)
trainer = self.exp.train(self.images, self.images)
costs0 = next(trainer)
costs1 = next(trainer)
assert costs1['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_hf(self):
self.assert_progress('hf')
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
|
<commit_before><commit_msg>Add basic "does this even run" test for trainers.<commit_after>
|
import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, *args, **kwargs):
self.exp.add_trainer(*args, **kwargs)
trainer = self.exp.train(self.images, self.images)
costs0 = next(trainer)
costs1 = next(trainer)
assert costs1['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_hf(self):
self.assert_progress('hf')
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
|
Add basic "does this even run" test for trainers.import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, *args, **kwargs):
self.exp.add_trainer(*args, **kwargs)
trainer = self.exp.train(self.images, self.images)
costs0 = next(trainer)
costs1 = next(trainer)
assert costs1['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_hf(self):
self.assert_progress('hf')
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
|
<commit_before><commit_msg>Add basic "does this even run" test for trainers.<commit_after>import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, *args, **kwargs):
self.exp.add_trainer(*args, **kwargs)
trainer = self.exp.train(self.images, self.images)
costs0 = next(trainer)
costs1 = next(trainer)
assert costs1['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_hf(self):
self.assert_progress('hf')
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
|
|
916053366034f857ad431bff069121e56c4e027b
|
pylearn2/scripts/tests/test_show_examples.py
|
pylearn2/scripts/tests/test_show_examples.py
|
"""
Tests for the show_examples.py script
"""
import os
from pylearn2.testing.skip import skip_if_no_matplotlib, skip_if_no_data
from pylearn2.scripts.show_examples import show_examples
def test_show_examples():
"""
Create a YAML file of the MNIST dataset and show examples
"""
skip_if_no_matplotlib()
skip_if_no_data()
with open('temp.yaml', 'w') as f:
f.write("""
!obj:pylearn2.datasets.mnist.MNIST {
which_set: 'train'
}
""")
show_examples('temp.yaml', 28, 28, out='garbage.png')
os.remove('temp.yaml')
|
Add unit test for show_examples.py
|
Add unit test for show_examples.py
|
Python
|
bsd-3-clause
|
cosmoharrigan/pylearn2,skearnes/pylearn2,matrogers/pylearn2,ashhher3/pylearn2,w1kke/pylearn2,JesseLivezey/plankton,CIFASIS/pylearn2,lancezlin/pylearn2,alexjc/pylearn2,jamessergeant/pylearn2,theoryno3/pylearn2,ashhher3/pylearn2,Refefer/pylearn2,lamblin/pylearn2,alexjc/pylearn2,nouiz/pylearn2,fishcorn/pylearn2,msingh172/pylearn2,jamessergeant/pylearn2,fyffyt/pylearn2,Refefer/pylearn2,goodfeli/pylearn2,cosmoharrigan/pylearn2,KennethPierce/pylearnk,jamessergeant/pylearn2,TNick/pylearn2,kose-y/pylearn2,skearnes/pylearn2,ddboline/pylearn2,jeremyfix/pylearn2,goodfeli/pylearn2,jeremyfix/pylearn2,se4u/pylearn2,matrogers/pylearn2,ashhher3/pylearn2,KennethPierce/pylearnk,msingh172/pylearn2,lamblin/pylearn2,mclaughlin6464/pylearn2,jamessergeant/pylearn2,goodfeli/pylearn2,pkainz/pylearn2,JesseLivezey/pylearn2,abergeron/pylearn2,shiquanwang/pylearn2,se4u/pylearn2,woozzu/pylearn2,lisa-lab/pylearn2,fulmicoton/pylearn2,se4u/pylearn2,caidongyun/pylearn2,hantek/pylearn2,JesseLivezey/pylearn2,abergeron/pylearn2,pombredanne/pylearn2,chrish42/pylearn,junbochen/pylearn2,hyqneuron/pylearn2-maxsom,lisa-lab/pylearn2,ddboline/pylearn2,lancezlin/pylearn2,lunyang/pylearn2,lunyang/pylearn2,alexjc/pylearn2,fyffyt/pylearn2,fulmicoton/pylearn2,chrish42/pylearn,pkainz/pylearn2,hantek/pylearn2,CIFASIS/pylearn2,shiquanwang/pylearn2,caidongyun/pylearn2,theoryno3/pylearn2,nouiz/pylearn2,lamblin/pylearn2,ddboline/pylearn2,pkainz/pylearn2,fishcorn/pylearn2,lunyang/pylearn2,alexjc/pylearn2,TNick/pylearn2,bartvm/pylearn2,hyqneuron/pylearn2-maxsom,cosmoharrigan/pylearn2,nouiz/pylearn2,pombredanne/pylearn2,chrish42/pylearn,jeremyfix/pylearn2,mkraemer67/pylearn2,bartvm/pylearn2,pkainz/pylearn2,hyqneuron/pylearn2-maxsom,JesseLivezey/pylearn2,chrish42/pylearn,bartvm/pylearn2,bartvm/pylearn2,daemonmaker/pylearn2,ddboline/pylearn2,mclaughlin6464/pylearn2,JesseLivezey/plankton,matrogers/pylearn2,fyffyt/pylearn2,pombredanne/pylearn2,JesseLivezey/plankton,skearnes/pylearn2,JesseLivezey/plankton,KennethPierce/pylearnk,sandeepkbhat/pylearn2,w1kke/pylearn2,kastnerkyle/pylearn2,fulmicoton/pylearn2,junbochen/pylearn2,skearnes/pylearn2,lamblin/pylearn2,fishcorn/pylearn2,aalmah/pylearn2,theoryno3/pylearn2,sandeepkbhat/pylearn2,woozzu/pylearn2,Refefer/pylearn2,matrogers/pylearn2,aalmah/pylearn2,w1kke/pylearn2,daemonmaker/pylearn2,kose-y/pylearn2,Refefer/pylearn2,daemonmaker/pylearn2,mkraemer67/pylearn2,pombredanne/pylearn2,fulmicoton/pylearn2,CIFASIS/pylearn2,mclaughlin6464/pylearn2,lancezlin/pylearn2,hyqneuron/pylearn2-maxsom,daemonmaker/pylearn2,fishcorn/pylearn2,caidongyun/pylearn2,msingh172/pylearn2,KennethPierce/pylearnk,lancezlin/pylearn2,ashhher3/pylearn2,jeremyfix/pylearn2,mclaughlin6464/pylearn2,hantek/pylearn2,abergeron/pylearn2,kastnerkyle/pylearn2,junbochen/pylearn2,shiquanwang/pylearn2,junbochen/pylearn2,msingh172/pylearn2,shiquanwang/pylearn2,lisa-lab/pylearn2,CIFASIS/pylearn2,aalmah/pylearn2,fyffyt/pylearn2,kose-y/pylearn2,abergeron/pylearn2,kastnerkyle/pylearn2,hantek/pylearn2,TNick/pylearn2,woozzu/pylearn2,mkraemer67/pylearn2,goodfeli/pylearn2,woozzu/pylearn2,theoryno3/pylearn2,kastnerkyle/pylearn2,kose-y/pylearn2,TNick/pylearn2,mkraemer67/pylearn2,w1kke/pylearn2,sandeepkbhat/pylearn2,lisa-lab/pylearn2,nouiz/pylearn2,lunyang/pylearn2,JesseLivezey/pylearn2,caidongyun/pylearn2,se4u/pylearn2,aalmah/pylearn2,sandeepkbhat/pylearn2,cosmoharrigan/pylearn2
|
Add unit test for show_examples.py
|
"""
Tests for the show_examples.py script
"""
import os
from pylearn2.testing.skip import skip_if_no_matplotlib, skip_if_no_data
from pylearn2.scripts.show_examples import show_examples
def test_show_examples():
"""
Create a YAML file of the MNIST dataset and show examples
"""
skip_if_no_matplotlib()
skip_if_no_data()
with open('temp.yaml', 'w') as f:
f.write("""
!obj:pylearn2.datasets.mnist.MNIST {
which_set: 'train'
}
""")
show_examples('temp.yaml', 28, 28, out='garbage.png')
os.remove('temp.yaml')
|
<commit_before><commit_msg>Add unit test for show_examples.py<commit_after>
|
"""
Tests for the show_examples.py script
"""
import os
from pylearn2.testing.skip import skip_if_no_matplotlib, skip_if_no_data
from pylearn2.scripts.show_examples import show_examples
def test_show_examples():
"""
Create a YAML file of the MNIST dataset and show examples
"""
skip_if_no_matplotlib()
skip_if_no_data()
with open('temp.yaml', 'w') as f:
f.write("""
!obj:pylearn2.datasets.mnist.MNIST {
which_set: 'train'
}
""")
show_examples('temp.yaml', 28, 28, out='garbage.png')
os.remove('temp.yaml')
|
Add unit test for show_examples.py"""
Tests for the show_examples.py script
"""
import os
from pylearn2.testing.skip import skip_if_no_matplotlib, skip_if_no_data
from pylearn2.scripts.show_examples import show_examples
def test_show_examples():
"""
Create a YAML file of the MNIST dataset and show examples
"""
skip_if_no_matplotlib()
skip_if_no_data()
with open('temp.yaml', 'w') as f:
f.write("""
!obj:pylearn2.datasets.mnist.MNIST {
which_set: 'train'
}
""")
show_examples('temp.yaml', 28, 28, out='garbage.png')
os.remove('temp.yaml')
|
<commit_before><commit_msg>Add unit test for show_examples.py<commit_after>"""
Tests for the show_examples.py script
"""
import os
from pylearn2.testing.skip import skip_if_no_matplotlib, skip_if_no_data
from pylearn2.scripts.show_examples import show_examples
def test_show_examples():
"""
Create a YAML file of the MNIST dataset and show examples
"""
skip_if_no_matplotlib()
skip_if_no_data()
with open('temp.yaml', 'w') as f:
f.write("""
!obj:pylearn2.datasets.mnist.MNIST {
which_set: 'train'
}
""")
show_examples('temp.yaml', 28, 28, out='garbage.png')
os.remove('temp.yaml')
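Note on the temp-file handling above: if `show_examples` raises, the trailing `os.remove('temp.yaml')` never runs and the file is left behind. A sketch of the same setup with the standard `tempfile` module and a try/finally guarantees cleanup; `show_examples` here is the same pylearn2 function imported in the test, and the sketch is illustrative rather than a change to the test itself.
import os
import tempfile

from pylearn2.scripts.show_examples import show_examples

yaml_src = """
!obj:pylearn2.datasets.mnist.MNIST {
    which_set: 'train'
}
"""

# Write the YAML to a named temporary file and clean it up even on failure.
fd, path = tempfile.mkstemp(suffix='.yaml')
try:
    with os.fdopen(fd, 'w') as f:
        f.write(yaml_src)
    show_examples(path, 28, 28, out='garbage.png')
finally:
    os.remove(path)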
|
|
9dcb5ee0738e339f3d17876d5470b6627393862c
|
tensorflow_cloud/python/tests/integration/on_script_call_test.py
|
tensorflow_cloud/python/tests/integration/on_script_call_test.py
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for calling tfc.run on a script with keras."""
import os
import sys
from typing import Text
from unittest import mock
import tensorflow as tf
import tensorflow_cloud as tfc
# Following are the env variables available in test infrastructure:
#
# The staging bucket to use for cloudbuild as well as save the model and data.
# TEST_BUCKET = os.environ['TEST_BUCKET']
#
# The project id to use to run tests.
# PROJECT_ID = os.environ['PROJECT_ID']
#
# The GCP region in which the end-to-end test is run.
# REGION = os.environ['REGION']
#
# Unique ID for this build, can be used as a label for AI Platform training job.
# BUILD_ID = os.environ['BUILD_ID']
class TensorflowCloudOnScriptTest(tf.test.TestCase):
def setUp(self):
super(TensorflowCloudOnScriptTest, self).setUp()
# To keep track of content that needs to be deleted in teardown clean up
self.test_folders = []
self.test_data_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../testdata/"
)
def tearDown(self):
super(TensorflowCloudOnScriptTest, self).tearDown()
# Clean up any temporary file or folder created during testing.
for folder in self.test_folders:
self.delete_dir(folder)
def delete_dir(self, path: Text) -> None:
"""Deletes a directory if exists."""
if tf.io.gfile.isdir(path):
tf.io.gfile.rmtree(path)
@mock.patch.object(sys, "exit", autospec=True)
def test_MWMS_on_script(self, mock_exit):
tfc.run(
entry_point=os.path.join(
self.test_data_path, "mnist_example_using_ctl.py"
),
distribution_strategy=None,
worker_count=1,
requirements_txt=os.path.join(
self.test_data_path, "requirements.txt"),
)
mock_exit.assert_called_once_with(0)
if __name__ == "__main__":
tf.test.main()
|
Add details to integration test as a template
|
Add details to integration test as a template
|
Python
|
apache-2.0
|
tensorflow/cloud,tensorflow/cloud
|
Add details to integration test as a template
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for calling tfc.run on a script with keras."""
import os
import sys
from typing import Text
from unittest import mock
import tensorflow as tf
import tensorflow_cloud as tfc
# Following are the env variables available in the test infrastructure:
#
# The staging bucket to use for cloudbuild as well as save the model and data.
# TEST_BUCKET = os.environ['TEST_BUCKET']
#
# The project id to use to run tests.
# PROJECT_ID = os.environ['PROJECT_ID']
#
# The GCP region in which the end-to-end test is run.
# REGION = os.environ['REGION']
#
# Unique ID for this build, can be used as a label for AI Platform training job.
# BUILD_ID = os.environ['BUILD_ID']
class TensorflowCloudOnScriptTest(tf.test.TestCase):
def setUp(self):
super(TensorflowCloudOnScriptTest, self).setUp()
# To keep track of content that needs to be deleted in teardown clean up
self.test_folders = []
self.test_data_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../testdata/"
)
def tearDown(self):
super(TensorflowCloudOnScriptTest, self).tearDown()
# Clean up any temporary file or folder created during testing.
for folder in self.test_folders:
self.delete_dir(folder)
def delete_dir(self, path: Text) -> None:
"""Deletes a directory if exists."""
if tf.io.gfile.isdir(path):
tf.io.gfile.rmtree(path)
@mock.patch.object(sys, "exit", autospec=True)
def test_MWMS_on_script(self, mock_exit):
tfc.run(
entry_point=os.path.join(
self.test_data_path, "mnist_example_using_ctl.py"
),
distribution_strategy=None,
worker_count=1,
requirements_txt=os.path.join(
self.test_data_path, "requirements.txt"),
)
mock_exit.assert_called_once_with(0)
if __name__ == "__main__":
tf.test.main()
|
<commit_before><commit_msg>Add details to integration test as a template<commit_after>
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for calling tfc.run on a script with keras."""
import os
import sys
from typing import Text
from unittest import mock
import tensorflow as tf
import tensorflow_cloud as tfc
# Following are the env variables available in the test infrastructure:
#
# The staging bucket to use for cloudbuild as well as save the model and data.
# TEST_BUCKET = os.environ['TEST_BUCKET']
#
# The project id to use to run tests.
# PROJECT_ID = os.environ['PROJECT_ID']
#
# The GCP region in which the end-to-end test is run.
# REGION = os.environ['REGION']
#
# Unique ID for this build, can be used as a label for AI Platform training job.
# BUILD_ID = os.environ['BUILD_ID']
class TensorflowCloudOnScriptTest(tf.test.TestCase):
def setUp(self):
super(TensorflowCloudOnScriptTest, self).setUp()
# To keep track of content that needs to be deleted in teardown clean up
self.test_folders = []
self.test_data_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../testdata/"
)
def tearDown(self):
super(TensorflowCloudOnScriptTest, self).tearDown()
# Clean up any temporary file or folder created during testing.
for folder in self.test_folders:
self.delete_dir(folder)
def delete_dir(self, path: Text) -> None:
"""Deletes a directory if exists."""
if tf.io.gfile.isdir(path):
tf.io.gfile.rmtree(path)
@mock.patch.object(sys, "exit", autospec=True)
def test_MWMS_on_script(self, mock_exit):
tfc.run(
entry_point=os.path.join(
self.test_data_path, "mnist_example_using_ctl.py"
),
distribution_strategy=None,
worker_count=1,
requirements_txt=os.path.join(
self.test_data_path, "requirements.txt"),
)
mock_exit.assert_called_once_with(0)
if __name__ == "__main__":
tf.test.main()
|
Add details to integration test as a template# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for calling tfc.run on a script with keras."""
import os
import sys
from typing import Text
from unittest import mock
import tensorflow as tf
import tensorflow_cloud as tfc
# Following are the env variables available in the test infrastructure:
#
# The staging bucket to use for cloudbuild as well as save the model and data.
# TEST_BUCKET = os.environ['TEST_BUCKET']
#
# The project id to use to run tests.
# PROJECT_ID = os.environ['PROJECT_ID']
#
# The GCP region in which the end-to-end test is run.
# REGION = os.environ['REGION']
#
# Unique ID for this build, can be used as a label for AI Platform training job.
# BUILD_ID = os.environ['BUILD_ID']
class TensorflowCloudOnScriptTest(tf.test.TestCase):
def setUp(self):
super(TensorflowCloudOnScriptTest, self).setUp()
# To keep track of content that needs to be deleted in teardown clean up
self.test_folders = []
self.test_data_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../testdata/"
)
def tearDown(self):
super(TensorflowCloudOnScriptTest, self).tearDown()
# Clean up any temporary file or folder created during testing.
for folder in self.test_folders:
self.delete_dir(folder)
def delete_dir(self, path: Text) -> None:
"""Deletes a directory if exists."""
if tf.io.gfile.isdir(path):
tf.io.gfile.rmtree(path)
@mock.patch.object(sys, "exit", autospec=True)
def test_MWMS_on_script(self, mock_exit):
tfc.run(
entry_point=os.path.join(
self.test_data_path, "mnist_example_using_ctl.py"
),
distribution_strategy=None,
worker_count=1,
requirements_txt=os.path.join(
self.test_data_path, "requirements.txt"),
)
mock_exit.assert_called_once_with(0)
if __name__ == "__main__":
tf.test.main()
|
<commit_before><commit_msg>Add details to integration test as a template<commit_after># Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for calling tfc.run on a script with keras."""
import os
import sys
from typing import Text
from unittest import mock
import tensorflow as tf
import tensorflow_cloud as tfc
# Following are the env variables available in the test infrastructure:
#
# The staging bucket to use for cloudbuild as well as save the model and data.
# TEST_BUCKET = os.environ['TEST_BUCKET']
#
# The project id to use to run tests.
# PROJECT_ID = os.environ['PROJECT_ID']
#
# The GCP region in which the end-to-end test is run.
# REGION = os.environ['REGION']
#
# Unique ID for this build, can be used as a label for AI Platform training job.
# BUILD_ID = os.environ['BUILD_ID']
class TensorflowCloudOnScriptTest(tf.test.TestCase):
def setUp(self):
super(TensorflowCloudOnScriptTest, self).setUp()
# To keep track of content that needs to be deleted in teardown clean up
self.test_folders = []
self.test_data_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../testdata/"
)
def tearDown(self):
super(TensorflowCloudOnScriptTest, self).tearDown()
# Clean up any temporary file or folder created during testing.
for folder in self.test_folders:
self.delete_dir(folder)
def delete_dir(self, path: Text) -> None:
"""Deletes a directory if exists."""
if tf.io.gfile.isdir(path):
tf.io.gfile.rmtree(path)
@mock.patch.object(sys, "exit", autospec=True)
def test_MWMS_on_script(self, mock_exit):
tfc.run(
entry_point=os.path.join(
self.test_data_path, "mnist_example_using_ctl.py"
),
distribution_strategy=None,
worker_count=1,
requirements_txt=os.path.join(
self.test_data_path, "requirements.txt"),
)
mock_exit.assert_called_once_with(0)
if __name__ == "__main__":
tf.test.main()
|
|
e09dc29716f65b5e36436538df211363c9d874ab
|
vm_server/send/compile_proto.py
|
vm_server/send/compile_proto.py
|
#!/usr/bin/python
"""Script to create the protofile
It compiles the proto definition and
then creates the proto file from the text specified
in query1.txt, query2.txt etc
"""
import logging
import os
import sys
def compile_proto():
"""Executes commands to compile a proto file"""
logging.debug("Compile proto")
os.system("protoc --python_out=.\\proto\\ .\\proto\\Request.proto")
def create_proto():
"""Executes commands to create a proto file"""
logging.debug("Creating proto file from" + sys.argv[1])
os.system("python .\\proto\\create_proto.py .\\proto\\" + sys.argv[1])
if __name__ == "__main__":
logging.basicConfig(filename = "response.log", level = logging.DEBUG)
if len(sys.argv) != 2:
print("Usage:", sys.argv[0], "QUERY_TEXT_FILE")
logging.debug("Usage:", sys.argv[0], "QUERY_TEXT_FILE")
sys.exit(-1)
compile_proto()
create_proto()
|
Add code to compile proto
|
Add code to compile proto
|
Python
|
apache-2.0
|
googleinterns/automated-windows-vms,googleinterns/automated-windows-vms
|
Add code to compile proto
|
#!/usr/bin/python
"""Script to create the protofile
It compiles the proto definition and
then creates the proto file from the text specified
in query1.txt, query2.txt etc
"""
import logging
import os
import sys
def compile_proto():
"""Executes commands to compile a proto file"""
logging.debug("Compile proto")
os.system("protoc --python_out=.\\proto\\ .\\proto\\Request.proto")
def create_proto():
"""Executes commands to create a proto file"""
logging.debug("Creating proto file from" + sys.argv[1])
os.system("python .\\proto\\create_proto.py .\\proto\\" + sys.argv[1])
if __name__ == "__main__":
logging.basicConfig(filename = "response.log", level = logging.DEBUG)
if len(sys.argv) != 2:
print("Usage:", sys.argv[0], "QUERY_TEXT_FILE")
logging.debug("Usage:", sys.argv[0], "QUERY_TEXT_FILE")
sys.exit(-1)
compile_proto()
create_proto()
|
<commit_before><commit_msg>Add code to compile proto<commit_after>
|
#!/usr/bin/python
"""Script to create the protofile
It compiles the proto definition and
then creates the proto file from the text specified
in query1.txt, query2.txt etc
"""
import logging
import os
import sys
def compile_proto():
"""Executes commands to compile a proto file"""
logging.debug("Compile proto")
os.system("protoc --python_out=.\\proto\\ .\\proto\\Request.proto")
def create_proto():
"""Executes commands to create a proto file"""
logging.debug("Creating proto file from" + sys.argv[1])
os.system("python .\\proto\\create_proto.py .\\proto\\" + sys.argv[1])
if __name__ == "__main__":
logging.basicConfig(filename = "response.log", level = logging.DEBUG)
if len(sys.argv) != 2:
print("Usage:", sys.argv[0], "QUERY_TEXT_FILE")
logging.debug("Usage:", sys.argv[0], "QUERY_TEXT_FILE")
sys.exit(-1)
compile_proto()
create_proto()
|
Add code to compile proto#!/usr/bin/python
"""Script to create the protofile
It compiles the proto definition and
then creates the proto file from the text specified
in query1.txt, query2.txt etc
"""
import logging
import os
import sys
def compile_proto():
"""Executes commands to compile a proto file"""
logging.debug("Compile proto")
os.system("protoc --python_out=.\\proto\\ .\\proto\\Request.proto")
def create_proto():
"""Executes commands to create a proto file"""
logging.debug("Creating proto file from" + sys.argv[1])
os.system("python .\\proto\\create_proto.py .\\proto\\" + sys.argv[1])
if __name__ == "__main__":
logging.basicConfig(filename = "response.log", level = logging.DEBUG)
if len(sys.argv) != 2:
print("Usage:", sys.argv[0], "QUERY_TEXT_FILE")
logging.debug("Usage:", sys.argv[0], "QUERY_TEXT_FILE")
sys.exit(-1)
compile_proto()
create_proto()
|
<commit_before><commit_msg>Add code to compile proto<commit_after>#!/usr/bin/python
"""Script to create the protofile
It compiles the proto definition and
then creates the proto file from the text specified
in query1.txt, query2.txt etc
"""
import logging
import os
import sys
def compile_proto():
"""Executes commands to compile a proto file"""
logging.debug("Compile proto")
os.system("protoc --python_out=.\\proto\\ .\\proto\\Request.proto")
def create_proto():
"""Executes commands to create a proto file"""
logging.debug("Creating proto file from" + sys.argv[1])
os.system("python .\\proto\\create_proto.py .\\proto\\" + sys.argv[1])
if __name__ == "__main__":
logging.basicConfig(filename = "response.log", level = logging.DEBUG)
if len(sys.argv) != 2:
print("Usage:", sys.argv[0], "QUERY_TEXT_FILE")
logging.debug("Usage:", sys.argv[0], "QUERY_TEXT_FILE")
sys.exit(-1)
compile_proto()
create_proto()
|
|
3cc76b052ea7a49ad50247bff218f34056470f31
|
accelerator/migrations/0033_migrate_gender_data.py
|
accelerator/migrations/0033_migrate_gender_data.py
|
# Generated by Django 2.2.10 on 2021-01-22 12:13
from django.db import migrations
# gender identity
GENDER_MALE = "Male"
GENDER_FEMALE = "Female"
GENDER_PREFER_TO_SELF_DESCRIBE = "I Prefer To Self-describe"
GENDER_PREFER_NOT_TO_SAY = "I Prefer Not To Say"
# gender
MALE_CHOICE = "m"
FEMALE_CHOICE = "f"
OTHER_CHOICE = "o"
PREFER_NOT_TO_STATE_CHOICE = "p"
gender_map = {
MALE_CHOICE: GENDER_MALE,
FEMALE_CHOICE: GENDER_FEMALE,
OTHER_CHOICE: GENDER_PREFER_TO_SELF_DESCRIBE,
PREFER_NOT_TO_STATE_CHOICE: GENDER_PREFER_NOT_TO_SAY,
}
def get_gender_choice_obj_dict(apps):
GenderChoices = apps.get_model("accelerator", "GenderChoices")
return {
gender_choice.name: gender_choice
for gender_choice in GenderChoices.objects.all()
}
def add_gender_identity_data(model, gender_choices):
ThroughModel = model.gender_identity.through
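    # Django names the auto-created through table's FK columns after the lowercased
    # model names (e.g. "entrepreneurprofile", "genderchoices"), hence the dynamic
    # kwarg built from model.__name__ below.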
for gender, gender_identity in gender_map.items():
gender_choice = gender_choices[gender_identity]
profiles = model.objects.filter(gender=gender)
ThroughModel.objects.bulk_create([
ThroughModel(**{
f"{model.__name__.lower()}": profile,
"genderchoices": gender_choices[gender_identity]
})
for profile in profiles if not profile.gender_identity.filter(
pk=gender_choice.pk).exists()
], ignore_conflicts=True)
def migrate_gender_data_to_gender_identity(apps, schema_editor):
gender_choices = get_gender_choice_obj_dict(apps)
MemberProfile = apps.get_model("accelerator", "MemberProfile")
ExpertProfile = apps.get_model("accelerator", "ExpertProfile")
EntrepreneurProfile = apps.get_model("accelerator", "EntrepreneurProfile")
for model in [EntrepreneurProfile, ExpertProfile, MemberProfile]:
add_gender_identity_data(model, gender_choices)
class Migration(migrations.Migration):
dependencies = [
("accelerator", "0032_add_ethno_racial_identity_data"),
]
operations = [
migrations.RunPython(
migrate_gender_data_to_gender_identity,
)
]
|
Merge remote-tracking branch 'origin' into AC-8354
|
[AC-8354] Merge remote-tracking branch 'origin' into AC-8354
|
Python
|
mit
|
masschallenge/django-accelerator,masschallenge/django-accelerator
|
[AC-8354] Merge remote-tracking branch 'origin' into AC-8354
|
# Generated by Django 2.2.10 on 2021-01-22 12:13
from django.db import migrations
# gender identity
GENDER_MALE = "Male"
GENDER_FEMALE = "Female"
GENDER_PREFER_TO_SELF_DESCRIBE = "I Prefer To Self-describe"
GENDER_PREFER_NOT_TO_SAY = "I Prefer Not To Say"
# gender
MALE_CHOICE = "m"
FEMALE_CHOICE = "f"
OTHER_CHOICE = "o"
PREFER_NOT_TO_STATE_CHOICE = "p"
gender_map = {
MALE_CHOICE: GENDER_MALE,
FEMALE_CHOICE: GENDER_FEMALE,
OTHER_CHOICE: GENDER_PREFER_TO_SELF_DESCRIBE,
PREFER_NOT_TO_STATE_CHOICE: GENDER_PREFER_NOT_TO_SAY,
}
def get_gender_choice_obj_dict(apps):
GenderChoices = apps.get_model("accelerator", "GenderChoices")
return {
gender_choice.name: gender_choice
for gender_choice in GenderChoices.objects.all()
}
def add_gender_identity_data(model, gender_choices):
ThroughModel = model.gender_identity.through
for gender, gender_identity in gender_map.items():
gender_choice = gender_choices[gender_identity]
profiles = model.objects.filter(gender=gender)
ThroughModel.objects.bulk_create([
ThroughModel(**{
f"{model.__name__.lower()}": profile,
"genderchoices": gender_choices[gender_identity]
})
for profile in profiles if not profile.gender_identity.filter(
pk=gender_choice.pk).exists()
], ignore_conflicts=True)
def migrate_gender_data_to_gender_identity(apps, schema_editor):
gender_choices = get_gender_choice_obj_dict(apps)
MemberProfile = apps.get_model("accelerator", "MemberProfile")
ExpertProfile = apps.get_model("accelerator", "ExpertProfile")
EntrepreneurProfile = apps.get_model("accelerator", "EntrepreneurProfile")
for model in [EntrepreneurProfile, ExpertProfile, MemberProfile]:
add_gender_identity_data(model, gender_choices)
class Migration(migrations.Migration):
dependencies = [
("accelerator", "0032_add_ethno_racial_identity_data"),
]
operations = [
migrations.RunPython(
migrate_gender_data_to_gender_identity,
)
]
|
<commit_before><commit_msg>[AC-8354] Merge remote-tracking branch 'origin' into AC-8354<commit_after>
|
# Generated by Django 2.2.10 on 2021-01-22 12:13
from django.db import migrations
# gender identity
GENDER_MALE = "Male"
GENDER_FEMALE = "Female"
GENDER_PREFER_TO_SELF_DESCRIBE = "I Prefer To Self-describe"
GENDER_PREFER_NOT_TO_SAY = "I Prefer Not To Say"
# gender
MALE_CHOICE = "m"
FEMALE_CHOICE = "f"
OTHER_CHOICE = "o"
PREFER_NOT_TO_STATE_CHOICE = "p"
gender_map = {
MALE_CHOICE: GENDER_MALE,
FEMALE_CHOICE: GENDER_FEMALE,
OTHER_CHOICE: GENDER_PREFER_TO_SELF_DESCRIBE,
PREFER_NOT_TO_STATE_CHOICE: GENDER_PREFER_NOT_TO_SAY,
}
def get_gender_choice_obj_dict(apps):
GenderChoices = apps.get_model("accelerator", "GenderChoices")
return {
gender_choice.name: gender_choice
for gender_choice in GenderChoices.objects.all()
}
def add_gender_identity_data(model, gender_choices):
ThroughModel = model.gender_identity.through
for gender, gender_identity in gender_map.items():
gender_choice = gender_choices[gender_identity]
profiles = model.objects.filter(gender=gender)
ThroughModel.objects.bulk_create([
ThroughModel(**{
f"{model.__name__.lower()}": profile,
"genderchoices": gender_choices[gender_identity]
})
for profile in profiles if not profile.gender_identity.filter(
pk=gender_choice.pk).exists()
], ignore_conflicts=True)
def migrate_gender_data_to_gender_identity(apps, schema_editor):
gender_choices = get_gender_choice_obj_dict(apps)
MemberProfile = apps.get_model("accelerator", "MemberProfile")
ExpertProfile = apps.get_model("accelerator", "ExpertProfile")
EntrepreneurProfile = apps.get_model("accelerator", "EntrepreneurProfile")
for model in [EntrepreneurProfile, ExpertProfile, MemberProfile]:
add_gender_identity_data(model, gender_choices)
class Migration(migrations.Migration):
dependencies = [
("accelerator", "0032_add_ethno_racial_identity_data"),
]
operations = [
migrations.RunPython(
migrate_gender_data_to_gender_identity,
)
]
|
[AC-8354] Merge remote-tracking branch 'origin' into AC-8354# Generated by Django 2.2.10 on 2021-01-22 12:13
from django.db import migrations
# gender identity
GENDER_MALE = "Male"
GENDER_FEMALE = "Female"
GENDER_PREFER_TO_SELF_DESCRIBE = "I Prefer To Self-describe"
GENDER_PREFER_NOT_TO_SAY = "I Prefer Not To Say"
# gender
MALE_CHOICE = "m"
FEMALE_CHOICE = "f"
OTHER_CHOICE = "o"
PREFER_NOT_TO_STATE_CHOICE = "p"
gender_map = {
MALE_CHOICE: GENDER_MALE,
FEMALE_CHOICE: GENDER_FEMALE,
OTHER_CHOICE: GENDER_PREFER_TO_SELF_DESCRIBE,
PREFER_NOT_TO_STATE_CHOICE: GENDER_PREFER_NOT_TO_SAY,
}
def get_gender_choice_obj_dict(apps):
GenderChoices = apps.get_model("accelerator", "GenderChoices")
return {
gender_choice.name: gender_choice
for gender_choice in GenderChoices.objects.all()
}
def add_gender_identity_data(model, gender_choices):
ThroughModel = model.gender_identity.through
for gender, gender_identity in gender_map.items():
gender_choice = gender_choices[gender_identity]
profiles = model.objects.filter(gender=gender)
ThroughModel.objects.bulk_create([
ThroughModel(**{
f"{model.__name__.lower()}": profile,
"genderchoices": gender_choices[gender_identity]
})
for profile in profiles if not profile.gender_identity.filter(
pk=gender_choice.pk).exists()
], ignore_conflicts=True)
def migrate_gender_data_to_gender_identity(apps, schema_editor):
gender_choices = get_gender_choice_obj_dict(apps)
MemberProfile = apps.get_model("accelerator", "MemberProfile")
ExpertProfile = apps.get_model("accelerator", "ExpertProfile")
EntrepreneurProfile = apps.get_model("accelerator", "EntrepreneurProfile")
for model in [EntrepreneurProfile, ExpertProfile, MemberProfile]:
add_gender_identity_data(model, gender_choices)
class Migration(migrations.Migration):
dependencies = [
("accelerator", "0032_add_ethno_racial_identity_data"),
]
operations = [
migrations.RunPython(
migrate_gender_data_to_gender_identity,
)
]
|
<commit_before><commit_msg>[AC-8354] Merge remote-tracking branch 'origin' into AC-8354<commit_after># Generated by Django 2.2.10 on 2021-01-22 12:13
from django.db import migrations
# gender identity
GENDER_MALE = "Male"
GENDER_FEMALE = "Female"
GENDER_PREFER_TO_SELF_DESCRIBE = "I Prefer To Self-describe"
GENDER_PREFER_NOT_TO_SAY = "I Prefer Not To Say"
# gender
MALE_CHOICE = "m"
FEMALE_CHOICE = "f"
OTHER_CHOICE = "o"
PREFER_NOT_TO_STATE_CHOICE = "p"
gender_map = {
MALE_CHOICE: GENDER_MALE,
FEMALE_CHOICE: GENDER_FEMALE,
OTHER_CHOICE: GENDER_PREFER_TO_SELF_DESCRIBE,
PREFER_NOT_TO_STATE_CHOICE: GENDER_PREFER_NOT_TO_SAY,
}
def get_gender_choice_obj_dict(apps):
GenderChoices = apps.get_model("accelerator", "GenderChoices")
return {
gender_choice.name: gender_choice
for gender_choice in GenderChoices.objects.all()
}
def add_gender_identity_data(model, gender_choices):
ThroughModel = model.gender_identity.through
for gender, gender_identity in gender_map.items():
gender_choice = gender_choices[gender_identity]
profiles = model.objects.filter(gender=gender)
ThroughModel.objects.bulk_create([
ThroughModel(**{
f"{model.__name__.lower()}": profile,
"genderchoices": gender_choices[gender_identity]
})
for profile in profiles if not profile.gender_identity.filter(
pk=gender_choice.pk).exists()
], ignore_conflicts=True)
def migrate_gender_data_to_gender_identity(apps, schema_editor):
gender_choices = get_gender_choice_obj_dict(apps)
MemberProfile = apps.get_model("accelerator", "MemberProfile")
ExpertProfile = apps.get_model("accelerator", "ExpertProfile")
EntrepreneurProfile = apps.get_model("accelerator", "EntrepreneurProfile")
for model in [EntrepreneurProfile, ExpertProfile, MemberProfile]:
add_gender_identity_data(model, gender_choices)
class Migration(migrations.Migration):
dependencies = [
("accelerator", "0032_add_ethno_racial_identity_data"),
]
operations = [
migrations.RunPython(
migrate_gender_data_to_gender_identity,
)
]
|
|
8c47266c9bf8f91e03a0770befced1eef443b055
|
problem4.py
|
problem4.py
|
"""
Given: Positive integers n>=40 and k<=5.
Return: The total number of rabbit pairs that will be present after n months if we begin with 1 pair and in
each generation, every pair of reproduction-age rabbits produces a litter of k rabbit pairs
(instead of only 1 pair).
"""
# Based on a Fibonacci-like sequence in which the pairs from 2 months ago each produce k new pairs
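# The mutable default _cache is intentional: it persists across calls and memoizes results.
# Recurrence: F(n) = F(n-1) + k * F(n-2) with F(1) = F(2) = 1, e.g. k = 3 gives F(5) = 19.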
def rabbits(n, k, _cache={1: 1, 2: 1}):
if n not in _cache:
_cache[n] = rabbits(n - 1, k) + rabbits(n - 2, k) * k
return _cache[n]
if __name__ == '__main__':
print rabbits(29, 5)
|
Add solution to Rabbits and Recurrence Relations
|
Add solution to Rabbits and Recurrence Relations
|
Python
|
mit
|
MichaelAquilina/rosalind-solutions
|
Add solution to Rabbits and Recurrence Relations
|
"""
Given: Positive integers n>=40 and k<=5.
Return: The total number of rabbit pairs that will be present after n months if we begin with 1 pair and in
each generation, every pair of reproduction-age rabbits produces a litter of k rabbit pairs
(instead of only 1 pair).
"""
# Based on a Fibonacci-like sequence in which the pairs from 2 months ago each produce k new pairs
def rabbits(n, k, _cache={1: 1, 2: 1}):
if n not in _cache:
_cache[n] = rabbits(n - 1, k) + rabbits(n - 2, k) * k
return _cache[n]
if __name__ == '__main__':
print rabbits(29, 5)
|
<commit_before><commit_msg>Add solution to Rabbits and Recurrence Relations<commit_after>
|
"""
Given: Positive integers n>=40 and k<=5.
Return: The total number of rabbit pairs that will be present after n months if we begin with 1 pair and in
each generation, every pair of reproduction-age rabbits produces a litter of k rabbit pairs
(instead of only 1 pair).
"""
# Based on a Fibonacci-like sequence in which the pairs from 2 months ago each produce k new pairs
def rabbits(n, k, _cache={1: 1, 2: 1}):
if n not in _cache:
_cache[n] = rabbits(n - 1, k) + rabbits(n - 2, k) * k
return _cache[n]
if __name__ == '__main__':
print rabbits(29, 5)
|
Add solution to Rabbits and Recurrence Relations"""
Given: Positive integers n>=40 and k<=5.
Return: The total number of rabbit pairs that will be present after n months if we begin with 1 pair and in
each generation, every pair of reproduction-age rabbits produces a litter of k rabbit pairs
(instead of only 1 pair).
"""
# Based on a Fibonacci-like sequence in which the pairs from 2 months ago each produce k new pairs
def rabbits(n, k, _cache={1: 1, 2: 1}):
if n not in _cache:
_cache[n] = rabbits(n - 1, k) + rabbits(n - 2, k) * k
return _cache[n]
if __name__ == '__main__':
print rabbits(29, 5)
|
<commit_before><commit_msg>Add solution to Rabbits and Recurrence Relations<commit_after>"""
Given: Positive integers n>=40 and k<=5.
Return: The total number of rabbit pairs that will be present after n months if we begin with 1 pair and in
each generation, every pair of reproduction-age rabbits produces a litter of k rabbit pairs
(instead of only 1 pair).
"""
# Based on a Fibonacci-like sequence in which the pairs from 2 months ago each produce k new pairs
def rabbits(n, k, _cache={1: 1, 2: 1}):
if n not in _cache:
_cache[n] = rabbits(n - 1, k) + rabbits(n - 2, k) * k
return _cache[n]
if __name__ == '__main__':
print rabbits(29, 5)
|
|
32a1ccc5856fd58e841a3960dbe17ceb029eb8d8
|
examples/test_parse_bool.py
|
examples/test_parse_bool.py
|
def test_parse_bool(wish):
parse_bool = wish
assert not parse_bool('false')
assert not parse_bool('FALSE')
assert not parse_bool('0')
assert parse_bool('true')
assert parse_bool('TRUE')
assert parse_bool('1')
|
Add test file use in the docs.
|
Add test file use in the docs.
|
Python
|
mit
|
alexamici/pytest-wish,alexamici/pytest-nodev,nodev-io/pytest-nodev
|
Add test file use in the docs.
|
def test_parse_bool(wish):
parse_bool = wish
assert not parse_bool('false')
assert not parse_bool('FALSE')
assert not parse_bool('0')
assert parse_bool('true')
assert parse_bool('TRUE')
assert parse_bool('1')
|
<commit_before><commit_msg>Add test file use in the docs.<commit_after>
|
def test_parse_bool(wish):
parse_bool = wish
assert not parse_bool('false')
assert not parse_bool('FALSE')
assert not parse_bool('0')
assert parse_bool('true')
assert parse_bool('TRUE')
assert parse_bool('1')
|
Add test file use in the docs.def test_parse_bool(wish):
parse_bool = wish
assert not parse_bool('false')
assert not parse_bool('FALSE')
assert not parse_bool('0')
assert parse_bool('true')
assert parse_bool('TRUE')
assert parse_bool('1')
|
<commit_before><commit_msg>Add test file use in the docs.<commit_after>def test_parse_bool(wish):
parse_bool = wish
assert not parse_bool('false')
assert not parse_bool('FALSE')
assert not parse_bool('0')
assert parse_bool('true')
assert parse_bool('TRUE')
assert parse_bool('1')
|
|
bc551c0d46c63ed688184e45e3211e29a30214d7
|
dbaas/workflow/steps/util/resize/check_database_status.py
|
dbaas/workflow/steps/util/resize/check_database_status.py
|
# -*- coding: utf-8 -*-
import logging
from ...util.base import BaseStep
LOG = logging.getLogger(__name__)
class CheckDatabaseStatus(BaseStep):
def __unicode__(self):
return "Checking database status..."
def do(self, workflow_dict):
try:
if not 'database' in workflow_dict:
return False
if not 'databaseinfra' in workflow_dict:
workflow_dict['databaseinfra'] = workflow_dict['database'].databaseinfra
LOG.info("Getting driver class")
driver = workflow_dict['databaseinfra'].get_driver()
if driver.check_status():
LOG.info("Database is ok...")
workflow_dict['database'].status=1
workflow_dict['database'].save()
return True
return False
except Exception,e:
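            # The exception is only logged; the method then falls through and implicitly
            # returns None (falsy), which is effectively the same as returning False.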
LOG.info("Error: {}".format(e))
pass
def undo(self, workflow_dict):
LOG.info("Nothing to do here...")
return True
|
Add check db status step
|
Add check db status step
|
Python
|
bsd-3-clause
|
globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service
|
Add check db status step
|
# -*- coding: utf-8 -*-
import logging
from ...util.base import BaseStep
LOG = logging.getLogger(__name__)
class CheckDatabaseStatus(BaseStep):
def __unicode__(self):
return "Checking database status..."
def do(self, workflow_dict):
try:
if not 'database' in workflow_dict:
return False
if not 'databaseinfra' in workflow_dict:
workflow_dict['databaseinfra'] = workflow_dict['database'].databaseinfra
LOG.info("Getting driver class")
driver = workflow_dict['databaseinfra'].get_driver()
if driver.check_status():
LOG.info("Database is ok...")
workflow_dict['database'].status=1
workflow_dict['database'].save()
return True
return False
except Exception,e:
LOG.info("Error: {}".format(e))
pass
def undo(self, workflow_dict):
LOG.info("Nothing to do here...")
return True
|
<commit_before><commit_msg>Add check db status step<commit_after>
|
# -*- coding: utf-8 -*-
import logging
from ...util.base import BaseStep
LOG = logging.getLogger(__name__)
class CheckDatabaseStatus(BaseStep):
def __unicode__(self):
return "Checking database status..."
def do(self, workflow_dict):
try:
if not 'database' in workflow_dict:
return False
if not 'databaseinfra' in workflow_dict:
workflow_dict['databaseinfra'] = workflow_dict['database'].databaseinfra
LOG.info("Getting driver class")
driver = workflow_dict['databaseinfra'].get_driver()
if driver.check_status():
LOG.info("Database is ok...")
workflow_dict['database'].status=1
workflow_dict['database'].save()
return True
return False
except Exception,e:
LOG.info("Error: {}".format(e))
pass
def undo(self, workflow_dict):
LOG.info("Nothing to do here...")
return True
|
Add check db status step# -*- coding: utf-8 -*-
import logging
from ...util.base import BaseStep
LOG = logging.getLogger(__name__)
class CheckDatabaseStatus(BaseStep):
def __unicode__(self):
return "Checking database status..."
def do(self, workflow_dict):
try:
if not 'database' in workflow_dict:
return False
if not 'databaseinfra' in workflow_dict:
workflow_dict['databaseinfra'] = workflow_dict['database'].databaseinfra
LOG.info("Getting driver class")
driver = workflow_dict['databaseinfra'].get_driver()
if driver.check_status():
LOG.info("Database is ok...")
workflow_dict['database'].status=1
workflow_dict['database'].save()
return True
return False
except Exception,e:
LOG.info("Error: {}".format(e))
pass
def undo(self, workflow_dict):
LOG.info("Nothing to do here...")
return True
|
<commit_before><commit_msg>Add check db status step<commit_after># -*- coding: utf-8 -*-
import logging
from ...util.base import BaseStep
LOG = logging.getLogger(__name__)
class CheckDatabaseStatus(BaseStep):
def __unicode__(self):
return "Checking database status..."
def do(self, workflow_dict):
try:
if not 'database' in workflow_dict:
return False
if not 'databaseinfra' in workflow_dict:
workflow_dict['databaseinfra'] = workflow_dict['database'].databaseinfra
LOG.info("Getting driver class")
driver = workflow_dict['databaseinfra'].get_driver()
if driver.check_status():
LOG.info("Database is ok...")
workflow_dict['database'].status=1
workflow_dict['database'].save()
return True
return False
except Exception,e:
LOG.info("Error: {}".format(e))
pass
def undo(self, workflow_dict):
LOG.info("Nothing to do here...")
return True
|
|
2399f02a08f06aa120aae351aad7a4402aca8eb4
|
climate_data/migrations/0021_auto_20170619_2053.py
|
climate_data/migrations/0021_auto_20170619_2053.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-19 20:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('climate_data', '0020_annotation'),
]
operations = [
migrations.RemoveField(
model_name='sensor',
name='data_type',
),
migrations.AddField(
model_name='stationsensorlink',
name='data_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='climate_data.DataType'),
),
]
|
Move data type field from sensor model to station-sensor link model to make sensors more 'global'.
|
Move data type field from sensor model to station-sensor link model to make sensors more 'global'.
|
Python
|
apache-2.0
|
qubs/climate-data-api,qubs/data-centre,qubs/climate-data-api,qubs/data-centre
|
Move data type field from sensor model to station-sensor link model to make sensors more 'global'.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-19 20:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('climate_data', '0020_annotation'),
]
operations = [
migrations.RemoveField(
model_name='sensor',
name='data_type',
),
migrations.AddField(
model_name='stationsensorlink',
name='data_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='climate_data.DataType'),
),
]
|
<commit_before><commit_msg>Move data type field from sensor model to station-sensor link model to make sensors more 'global'.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-19 20:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('climate_data', '0020_annotation'),
]
operations = [
migrations.RemoveField(
model_name='sensor',
name='data_type',
),
migrations.AddField(
model_name='stationsensorlink',
name='data_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='climate_data.DataType'),
),
]
|
Move data type field from sensor model to station-sensor link model to make sensors more 'global'.# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-19 20:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('climate_data', '0020_annotation'),
]
operations = [
migrations.RemoveField(
model_name='sensor',
name='data_type',
),
migrations.AddField(
model_name='stationsensorlink',
name='data_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='climate_data.DataType'),
),
]
|
<commit_before><commit_msg>Move data type field from sensor model to station-sensor link model to make sensors more 'global'.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-19 20:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('climate_data', '0020_annotation'),
]
operations = [
migrations.RemoveField(
model_name='sensor',
name='data_type',
),
migrations.AddField(
model_name='stationsensorlink',
name='data_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='climate_data.DataType'),
),
]
|
|
9ee332f6f0af3d632860581971446f9edf4f74be
|
changetext/WIKIXML2TW.py
|
changetext/WIKIXML2TW.py
|
def WIKIXML2TW(inputfilename, outputfilename):
"Convert Wikimedia XML dump to TiddlyWiki import file"
inputfile = open(inputfilename, "r")
xmlinput = unicode(inputfile.read(), errors='ignore')
outputfilemenu = open(outputfilename + '.menu', "w")
outputfile = open(outputfilename, "w")
outputfile.write('<html><head></head><body><div id="storeArea">'.encode('utf-8'))
startpos = 0
while startpos > -1:
titleString = ''
textString = ''
startpos = xmlinput.find('<title>', startpos)
if startpos > -1:
titleString = xmlinput[startpos+len('<title>'):xmlinput.find('</title>', startpos)]
if startpos > -1:
startpos = xmlinput.find('</title>', startpos)
if startpos > -1:
startpos = xmlinput.find('<text xml:space="preserve">', startpos)
if startpos > -1:
textString = xmlinput[startpos+len('<text xml:space="preserve">'):xmlinput.find('</text>', startpos)]
if startpos > -1:
startpos = xmlinput.find('</text>', startpos)
if titleString != '':
originalTitleString = titleString
titleString = titleString.replace(' ', '_')
outputfile.write('<div title="'.encode('utf-8'))
outputfile.write(titleString.encode('utf-8'))
            outputfilemenu.write('[[' + originalTitleString.encode('utf-8') + '|' + titleString.encode('utf-8') + ']]\n')
outputfile.write('" creator="YourName" modifier="YourName" created="201309161317" modified="201309161321" tags="MediaWikiFormat" changecount="1"><pre>'.encode('utf-8'))
outputfile.write(textString.encode('utf-8'))
outputfile.write('</pre></div>\n'.encode('utf-8'))
outputfile.write('</div></body></html>'.encode('utf-8'))
outputfile.close()
outputfilemenu.close()
return;
|
Convert wiki XML to Tiddlywiki import format
|
Convert wiki XML to Tiddlywiki import format
|
Python
|
mit
|
cottley/moruga
|
Convert wiki XML to Tiddlywiki import format
|
def WIKIXML2TW(inputfilename, outputfilename):
"Convert Wikimedia XML dump to TiddlyWiki import file"
inputfile = open(inputfilename, "r")
xmlinput = unicode(inputfile.read(), errors='ignore')
outputfilemenu = open(outputfilename + '.menu', "w")
outputfile = open(outputfilename, "w")
outputfile.write('<html><head></head><body><div id="storeArea">'.encode('utf-8'))
startpos = 0
while startpos > -1:
titleString = ''
textString = ''
startpos = xmlinput.find('<title>', startpos)
if startpos > -1:
titleString = xmlinput[startpos+len('<title>'):xmlinput.find('</title>', startpos)]
if startpos > -1:
startpos = xmlinput.find('</title>', startpos)
if startpos > -1:
startpos = xmlinput.find('<text xml:space="preserve">', startpos)
if startpos > -1:
textString = xmlinput[startpos+len('<text xml:space="preserve">'):xmlinput.find('</text>', startpos)]
if startpos > -1:
startpos = xmlinput.find('</text>', startpos)
if titleString != '':
originalTitleString = titleString
titleString = titleString.replace(' ', '_')
outputfile.write('<div title="'.encode('utf-8'))
outputfile.write(titleString.encode('utf-8'))
            outputfilemenu.write('[[' + originalTitleString.encode('utf-8') + '|' + titleString.encode('utf-8') + ']]\n')
outputfile.write('" creator="YourName" modifier="YourName" created="201309161317" modified="201309161321" tags="MediaWikiFormat" changecount="1"><pre>'.encode('utf-8'))
outputfile.write(textString.encode('utf-8'))
outputfile.write('</pre></div>\n'.encode('utf-8'))
outputfile.write('</div></body></html>'.encode('utf-8'))
outputfile.close()
outputfilemenu.close()
return;
|
<commit_before><commit_msg>Convert wiki XML to Tiddlywiki import format<commit_after>
|
def WIKIXML2TW(inputfilename, outputfilename):
"Convert Wikimedia XML dump to TiddlyWiki import file"
inputfile = open(inputfilename, "r")
xmlinput = unicode(inputfile.read(), errors='ignore')
outputfilemenu = open(outputfilename + '.menu', "w")
outputfile = open(outputfilename, "w")
outputfile.write('<html><head></head><body><div id="storeArea">'.encode('utf-8'))
startpos = 0
while startpos > -1:
titleString = ''
textString = ''
startpos = xmlinput.find('<title>', startpos)
if startpos > -1:
titleString = xmlinput[startpos+len('<title>'):xmlinput.find('</title>', startpos)]
if startpos > -1:
startpos = xmlinput.find('</title>', startpos)
if startpos > -1:
startpos = xmlinput.find('<text xml:space="preserve">', startpos)
if startpos > -1:
textString = xmlinput[startpos+len('<text xml:space="preserve">'):xmlinput.find('</text>', startpos)]
if startpos > -1:
startpos = xmlinput.find('</text>', startpos)
if titleString != '':
originalTitleString = titleString
titleString = titleString.replace(' ', '_')
outputfile.write('<div title="'.encode('utf-8'))
outputfile.write(titleString.encode('utf-8'))
            outputfilemenu.write('[[' + originalTitleString.encode('utf-8') + '|' + titleString.encode('utf-8') + ']]\n')
outputfile.write('" creator="YourName" modifier="YourName" created="201309161317" modified="201309161321" tags="MediaWikiFormat" changecount="1"><pre>'.encode('utf-8'))
outputfile.write(textString.encode('utf-8'))
outputfile.write('</pre></div>\n'.encode('utf-8'))
outputfile.write('</div></body></html>'.encode('utf-8'))
outputfile.close()
outputfilemenu.close()
return;
|
Convert wiki XML to Tiddlywiki import format
def WIKIXML2TW(inputfilename, outputfilename):
"Convert Wikimedia XML dump to TiddlyWiki import file"
inputfile = open(inputfilename, "r")
xmlinput = unicode(inputfile.read(), errors='ignore')
outputfilemenu = open(outputfilename + '.menu', "w")
outputfile = open(outputfilename, "w")
outputfile.write('<html><head></head><body><div id="storeArea">'.encode('utf-8'))
startpos = 0
while startpos > -1:
titleString = ''
textString = ''
startpos = xmlinput.find('<title>', startpos)
if startpos > -1:
titleString = xmlinput[startpos+len('<title>'):xmlinput.find('</title>', startpos)]
if startpos > -1:
startpos = xmlinput.find('</title>', startpos)
if startpos > -1:
startpos = xmlinput.find('<text xml:space="preserve">', startpos)
if startpos > -1:
textString = xmlinput[startpos+len('<text xml:space="preserve">'):xmlinput.find('</text>', startpos)]
if startpos > -1:
startpos = xmlinput.find('</text>', startpos)
if titleString != '':
originalTitleString = titleString
titleString = titleString.replace(' ', '_')
outputfile.write('<div title="'.encode('utf-8'))
outputfile.write(titleString.encode('utf-8'))
            outputfilemenu.write('[[' + originalTitleString.encode('utf-8') + '|' + titleString.encode('utf-8') + ']]\n')
outputfile.write('" creator="YourName" modifier="YourName" created="201309161317" modified="201309161321" tags="MediaWikiFormat" changecount="1"><pre>'.encode('utf-8'))
outputfile.write(textString.encode('utf-8'))
outputfile.write('</pre></div>\n'.encode('utf-8'))
outputfile.write('</div></body></html>'.encode('utf-8'))
outputfile.close()
outputfilemenu.close()
return;
|
<commit_before><commit_msg>Convert wiki XML to Tiddlywiki import format<commit_after>
def WIKIXML2TW(inputfilename, outputfilename):
"Convert Wikimedia XML dump to TiddlyWiki import file"
inputfile = open(inputfilename, "r")
xmlinput = unicode(inputfile.read(), errors='ignore')
outputfilemenu = open(outputfilename + '.menu', "w")
outputfile = open(outputfilename, "w")
outputfile.write('<html><head></head><body><div id="storeArea">'.encode('utf-8'))
startpos = 0
while startpos > -1:
titleString = ''
textString = ''
startpos = xmlinput.find('<title>', startpos)
if startpos > -1:
titleString = xmlinput[startpos+len('<title>'):xmlinput.find('</title>', startpos)]
if startpos > -1:
startpos = xmlinput.find('</title>', startpos)
if startpos > -1:
startpos = xmlinput.find('<text xml:space="preserve">', startpos)
if startpos > -1:
textString = xmlinput[startpos+len('<text xml:space="preserve">'):xmlinput.find('</text>', startpos)]
if startpos > -1:
startpos = xmlinput.find('</text>', startpos)
if titleString != '':
originalTitleString = titleString
titleString = titleString.replace(' ', '_')
outputfile.write('<div title="'.encode('utf-8'))
outputfile.write(titleString.encode('utf-8'))
            outputfilemenu.write('[[' + originalTitleString.encode('utf-8') + '|' + titleString.encode('utf-8') + ']]\n')
outputfile.write('" creator="YourName" modifier="YourName" created="201309161317" modified="201309161321" tags="MediaWikiFormat" changecount="1"><pre>'.encode('utf-8'))
outputfile.write(textString.encode('utf-8'))
outputfile.write('</pre></div>\n'.encode('utf-8'))
outputfile.write('</div></body></html>'.encode('utf-8'))
outputfile.close()
outputfilemenu.close()
return;
|
|
2f508798b0a5f78ef6c04302e6983e4b824a2df0
|
exercises/chapter_02/exercise_02_10/exercise_02_10.py
|
exercises/chapter_02/exercise_02_10/exercise_02_10.py
|
# 2-10 Adding Comments
# Below follows the code from exercise 2-3 and 2-4 again but now with (more) comments
# 2-3 Personal Message
name = "Henrik" # variable for storing user name
# concatenate the user name with some strings to form a personal message
print("Hello " + name + ", lets learn some Python today!") #
# 2-4 Name Cases
name = "henrik samuelsson"
print(name.lower()) # change all letters to lower case and print
print(name.upper()) # change all letters to upper case and print
print(name.title()) # change the first letter of the names to upper case and print
|
Add solution to exercise 2-10.
|
Add solution to exercise 2-10.
|
Python
|
mit
|
HenrikSamuelsson/python-crash-course
|
Add solution to exercise 2-10.
|
# 2-10 Adding Comments
# Below follows the code from exercise 2-3 and 2-4 again but now with (more) comments
# 2-3 Personal Message
name = "Henrik" # variable for storing user name
# concatenate the user name with some strings to form a personal message
print("Hello " + name + ", lets learn some Python today!") #
# 2-4 Name Cases
name = "henrik samuelsson"
print(name.lower()) # change all letters to lower case and print
print(name.upper()) # change all letters to upper case and print
print(name.title()) # change the first letter of the names to upper case and print
|
<commit_before><commit_msg>Add solution to exercise 2-10.<commit_after>
|
# 2-10 Adding Comments
# Below follows the code from exercise 2-3 and 2-4 again but now with (more) comments
# 2-3 Personal Message
name = "Henrik" # variable for storing user name
# concatenate the user name with some strings to form a personal message
print("Hello " + name + ", lets learn some Python today!") #
# 2-4 Name Cases
name = "henrik samuelsson"
print(name.lower()) # change all letters to lower case and print
print(name.upper()) # change all letters to upper case and print
print(name.title()) # change the first letter of the names to upper case and print
|
Add solution to exercise 2-10.# 2-10 Adding Comments
# Below follows the code from exercise 2-3 and 2-4 again but now with (more) comments
# 2-3 Personal Message
name = "Henrik" # variable for storing user name
# concatenate the user name with some strings to form a personal message
print("Hello " + name + ", lets learn some Python today!") #
# 2-4 Name Cases
name = "henrik samuelsson"
print(name.lower()) # change all letters to lower case and print
print(name.upper()) # change all letters to upper case and print
print(name.title()) # change the first letter of the names to upper case and print
|
<commit_before><commit_msg>Add solution to exercise 2-10.<commit_after># 2-10 Adding Comments
# Below follows the code from exercise 2-3 and 2-4 again but now with (more) comments
# 2-3 Personal Message
name = "Henrik" # variable for storing user name
# concatenate the user name with some strings to form a personal message
print("Hello " + name + ", lets learn some Python today!") #
# 2-4 Name Cases
name = "henrik samuelsson"
print(name.lower()) # change all letters to lower case and print
print(name.upper()) # change all letters to upper case and print
print(name.title()) # change the first letter of the names to upper case and print
|
|
ac8c5a878c8ef05c139ce3d86378b3c9ca4845d3
|
tools/bbox_post_process.py
|
tools/bbox_post_process.py
|
#!/usr/bin/env python
import init
import caffe
import numpy as np
import argparse
import cPickle
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('net_def')
parser.add_argument('net_param')
parser.add_argument('save_file')
parser.add_argument('--bbox_means', default='bbox_means.pkl')
parser.add_argument('--bbox_stds', default='bbox_stds.pkl')
args = parser.parse_args()
net = caffe.Net(args.net_def, args.net_param, caffe.TEST)
with open(args.bbox_means, 'rb') as f:
bbox_means = cPickle.load(f)
with open(args.bbox_stds, 'rb') as f:
bbox_stds = cPickle.load(f)
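    # Fold the training-time target normalization back into the bbox_pred layer:
    # scaling the weights by the stds and shifting the bias by the means lets the
    # deployed net emit un-normalized box regression targets directly.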
net.params['bbox_pred'][0].data[...] = net.params['bbox_pred'][0].data * bbox_stds[:, np.newaxis]
net.params['bbox_pred'][1].data[...] = net.params['bbox_pred'][1].data * bbox_stds + bbox_means
print "Saved to {}.".format(args.save_file)
net.save(args.save_file)
|
Add a script for process trained models using bbox means and stds.
|
Add a script for process trained models using bbox means and stds.
|
Python
|
mit
|
myfavouritekk/TPN
|
Add a script for process trained models using bbox means and stds.
|
#!/usr/bin/env python
import init
import caffe
import numpy as np
import argparse
import cPickle
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('net_def')
parser.add_argument('net_param')
parser.add_argument('save_file')
parser.add_argument('--bbox_means', default='bbox_means.pkl')
parser.add_argument('--bbox_stds', default='bbox_stds.pkl')
args = parser.parse_args()
net = caffe.Net(args.net_def, args.net_param, caffe.TEST)
with open(args.bbox_means, 'rb') as f:
bbox_means = cPickle.load(f)
with open(args.bbox_stds, 'rb') as f:
bbox_stds = cPickle.load(f)
net.params['bbox_pred'][0].data[...] = net.params['bbox_pred'][0].data * bbox_stds[:, np.newaxis]
net.params['bbox_pred'][1].data[...] = net.params['bbox_pred'][1].data * bbox_stds + bbox_means
print "Saved to {}.".format(args.save_file)
net.save(args.save_file)
|
<commit_before><commit_msg>Add a script for process trained models using bbox means and stds.<commit_after>
|
#!/usr/bin/env python
import init
import caffe
import numpy as np
import argparse
import cPickle
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('net_def')
parser.add_argument('net_param')
parser.add_argument('save_file')
parser.add_argument('--bbox_means', default='bbox_means.pkl')
parser.add_argument('--bbox_stds', default='bbox_stds.pkl')
args = parser.parse_args()
net = caffe.Net(args.net_def, args.net_param, caffe.TEST)
with open(args.bbox_means, 'rb') as f:
bbox_means = cPickle.load(f)
with open(args.bbox_stds, 'rb') as f:
bbox_stds = cPickle.load(f)
net.params['bbox_pred'][0].data[...] = net.params['bbox_pred'][0].data * bbox_stds[:, np.newaxis]
net.params['bbox_pred'][1].data[...] = net.params['bbox_pred'][1].data * bbox_stds + bbox_means
print "Saved to {}.".format(args.save_file)
net.save(args.save_file)
|
Add a script for process trained models using bbox means and stds.#!/usr/bin/env python
import init
import caffe
import numpy as np
import argparse
import cPickle
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('net_def')
parser.add_argument('net_param')
parser.add_argument('save_file')
parser.add_argument('--bbox_means', default='bbox_means.pkl')
parser.add_argument('--bbox_stds', default='bbox_stds.pkl')
args = parser.parse_args()
net = caffe.Net(args.net_def, args.net_param, caffe.TEST)
with open(args.bbox_means, 'rb') as f:
bbox_means = cPickle.load(f)
with open(args.bbox_stds, 'rb') as f:
bbox_stds = cPickle.load(f)
net.params['bbox_pred'][0].data[...] = net.params['bbox_pred'][0].data * bbox_stds[:, np.newaxis]
net.params['bbox_pred'][1].data[...] = net.params['bbox_pred'][1].data * bbox_stds + bbox_means
print "Saved to {}.".format(args.save_file)
net.save(args.save_file)
|
<commit_before><commit_msg>Add a script for process trained models using bbox means and stds.<commit_after>#!/usr/bin/env python
import init
import caffe
import numpy as np
import argparse
import cPickle
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('net_def')
parser.add_argument('net_param')
parser.add_argument('save_file')
parser.add_argument('--bbox_means', default='bbox_means.pkl')
parser.add_argument('--bbox_stds', default='bbox_stds.pkl')
args = parser.parse_args()
net = caffe.Net(args.net_def, args.net_param, caffe.TEST)
with open(args.bbox_means, 'rb') as f:
bbox_means = cPickle.load(f)
with open(args.bbox_stds, 'rb') as f:
bbox_stds = cPickle.load(f)
net.params['bbox_pred'][0].data[...] = net.params['bbox_pred'][0].data * bbox_stds[:, np.newaxis]
net.params['bbox_pred'][1].data[...] = net.params['bbox_pred'][1].data * bbox_stds + bbox_means
print "Saved to {}.".format(args.save_file)
net.save(args.save_file)
|
|
e00936dda07b53c22f66897d7e487d12374172e7
|
corehq/apps/cleanup/management/commands/get_doc_domains.py
|
corehq/apps/cleanup/management/commands/get_doc_domains.py
|
import os
from optparse import make_option
from django.core.management.base import BaseCommand
from couchdbkit.exceptions import ResourceNotFound
from dimagi.utils.couch.database import get_db
# possible expansion: allow this to accept doc ids on the command line
# for use like `cat doc_ids.txt | ./manage.py get_doc_domains `xargs echo`
class Command(BaseCommand):
help = "Takes a file with one doc id per line and outputs their domains"
args = '<filename>'
option_list = BaseCommand.option_list + (
make_option('--full', action='store_true', dest='full', default=False,
help = "Output a full list of doc ids, followed by their domain"),
)
def handle(self, *args, **options):
self.full = options.get('full')
if not args:
return "You must pass in a file name"
filename = args[0]
if not os.path.exists(filename):
return "File %s not found" % filename
with open(filename) as file:
doc_ids = file.readlines()
self.domains = set()
self.db = get_db()
for id in doc_ids:
self.handle_doc(id.strip())
def handle_doc(self, id):
try:
doc = self.db.get(id)
except ResourceNotFound:
doc = {}
domain = doc.get('domain', None)
if self.full:
print "{0:<40}{1}".format(id, domain)
elif domain and domain not in self.domains:
self.domains.add(domain)
print domain
|
Add management command to get domains from doc ids
|
Add management command to get domains from doc ids
|
Python
|
bsd-3-clause
|
gmimano/commcaretest,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,gmimano/commcaretest,dimagi/commcare-hq,SEL-Columbia/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,SEL-Columbia/commcare-hq,puttarajubr/commcare-hq,gmimano/commcaretest,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq
|
Add management command to get domains from doc ids
|
import os
from optparse import make_option
from django.core.management.base import BaseCommand
from couchdbkit.exceptions import ResourceNotFound
from dimagi.utils.couch.database import get_db
# possible expansion: allow this to accept doc ids on the command line
# for use like `cat doc_ids.txt | ./manage.py get_doc_domains `xargs echo`
class Command(BaseCommand):
help = "Takes a file with one doc id per line and outputs their domains"
args = '<filename>'
option_list = BaseCommand.option_list + (
make_option('--full', action='store_true', dest='full', default=False,
help = "Output a full list of doc ids, followed by their domain"),
)
def handle(self, *args, **options):
self.full = options.get('full')
if not args:
return "You must pass in a file name"
filename = args[0]
if not os.path.exists(filename):
return "File %s not found" % filename
with open(filename) as file:
doc_ids = file.readlines()
self.domains = set()
self.db = get_db()
for id in doc_ids:
self.handle_doc(id.strip())
def handle_doc(self, id):
try:
doc = self.db.get(id)
except ResourceNotFound:
doc = {}
domain = doc.get('domain', None)
if self.full:
print "{0:<40}{1}".format(id, domain)
elif domain and domain not in self.domains:
self.domains.add(domain)
print domain
|
<commit_before><commit_msg>Add management command to get domains from doc ids<commit_after>
|
import os
from optparse import make_option
from django.core.management.base import BaseCommand
from couchdbkit.exceptions import ResourceNotFound
from dimagi.utils.couch.database import get_db
# possible expansion: allow this to accept doc ids on the command line
# for use like `cat doc_ids.txt | ./manage.py get_doc_domains `xargs echo`
class Command(BaseCommand):
help = "Takes a file with one doc id per line and outputs their domains"
args = '<filename>'
option_list = BaseCommand.option_list + (
make_option('--full', action='store_true', dest='full', default=False,
help = "Output a full list of doc ids, followed by their domain"),
)
def handle(self, *args, **options):
self.full = options.get('full')
if not args:
return "You must pass in a file name"
filename = args[0]
if not os.path.exists(filename):
return "File %s not found" % filename
with open(filename) as file:
doc_ids = file.readlines()
self.domains = set()
self.db = get_db()
for id in doc_ids:
self.handle_doc(id.strip())
def handle_doc(self, id):
try:
doc = self.db.get(id)
except ResourceNotFound:
doc = {}
domain = doc.get('domain', None)
if self.full:
print "{0:<40}{1}".format(id, domain)
elif domain and domain not in self.domains:
self.domains.add(domain)
print domain
|
Add management command to get domains from doc idsimport os
from optparse import make_option
from django.core.management.base import BaseCommand
from couchdbkit.exceptions import ResourceNotFound
from dimagi.utils.couch.database import get_db
# possible expansion: allow this to accept doc ids on the command line
# for use like `cat doc_ids.txt | ./manage.py get_doc_domains `xargs echo`
class Command(BaseCommand):
help = "Takes a file with one doc id per line and outputs their domains"
args = '<filename>'
option_list = BaseCommand.option_list + (
make_option('--full', action='store_true', dest='full', default=False,
help = "Output a full list of doc ids, followed by their domain"),
)
def handle(self, *args, **options):
self.full = options.get('full')
if not args:
return "You must pass in a file name"
filename = args[0]
if not os.path.exists(filename):
return "File %s not found" % filename
with open(filename) as file:
doc_ids = file.readlines()
self.domains = set()
self.db = get_db()
for id in doc_ids:
self.handle_doc(id.strip())
def handle_doc(self, id):
try:
doc = self.db.get(id)
except ResourceNotFound:
doc = {}
domain = doc.get('domain', None)
if self.full:
print "{0:<40}{1}".format(id, domain)
elif domain and domain not in self.domains:
self.domains.add(domain)
print domain
|
<commit_before><commit_msg>Add management command to get domains from doc ids<commit_after>import os
from optparse import make_option
from django.core.management.base import BaseCommand
from couchdbkit.exceptions import ResourceNotFound
from dimagi.utils.couch.database import get_db
# possible expansion: allow this to accept doc ids on the command line
# for use like `cat doc_ids.txt | ./manage.py get_doc_domains `xargs echo`
class Command(BaseCommand):
help = "Takes a file with one doc id per line and outputs their domains"
args = '<filename>'
option_list = BaseCommand.option_list + (
make_option('--full', action='store_true', dest='full', default=False,
help = "Output a full list of doc ids, followed by their domain"),
)
def handle(self, *args, **options):
self.full = options.get('full')
if not args:
return "You must pass in a file name"
filename = args[0]
if not os.path.exists(filename):
return "File %s not found" % filename
with open(filename) as file:
doc_ids = file.readlines()
self.domains = set()
self.db = get_db()
for id in doc_ids:
self.handle_doc(id.strip())
def handle_doc(self, id):
try:
doc = self.db.get(id)
except ResourceNotFound:
doc = {}
domain = doc.get('domain', None)
if self.full:
print "{0:<40}{1}".format(id, domain)
elif domain and domain not in self.domains:
self.domains.add(domain)
print domain
|
|
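For readers who want to try the new command, a short, hedged driver is sketched below; it is not part of the commit. The file name doc_ids.txt and the ids written into it are invented, and a configured Django project with this command installed is assumed.

# Illustrative driver only; 'doc_ids.txt' and the ids in it are made up.
from django.core.management import call_command

with open('doc_ids.txt', 'w') as f:
    f.write('doc-id-1\ndoc-id-2\n')

# Plain mode: prints each distinct domain once.
call_command('get_doc_domains', 'doc_ids.txt')

# --full mode: prints every doc id followed by its domain.
call_command('get_doc_domains', 'doc_ids.txt', full=True)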
24f6ebb4218f3fc1858c6100c78c41c9744b55ec
|
migrations/versions/0186_rename_is_active_columns.py
|
migrations/versions/0186_rename_is_active_columns.py
|
"""
Revision ID: 0186_rename_is_active_columns
Revises: 0185_add_is_active_to_reply_tos
Create Date: 2018-04-27 16:35:41.824981
"""
from alembic import op
import sqlalchemy as sa
revision = '0186_rename_is_active_columns'
down_revision = '0185_add_is_active_to_reply_tos'
def upgrade():
op.alter_column('service_email_reply_to', 'is_active', new_column_name='archived', server_default=sa.false())
op.alter_column('service_letter_contacts', 'is_active', new_column_name='archived', server_default=sa.false())
op.alter_column('service_sms_senders', 'is_active', new_column_name='archived', server_default=sa.false())
op.execute("Update service_email_reply_to set archived = False")
op.execute("Update service_letter_contacts set archived = False")
op.execute("Update service_sms_senders set archived = False")
def downgrade():
op.alter_column('service_email_reply_to', 'archived', new_column_name='is_active', server_default=sa.true())
op.alter_column('service_letter_contacts', 'archived', new_column_name='is_active', server_default=sa.true())
op.alter_column('service_sms_senders', 'archived', new_column_name='is_active', server_default=sa.true())
op.execute("Update service_email_reply_to set is_active = True")
op.execute("Update service_letter_contacts set is_active = True")
op.execute("Update service_sms_senders set is_active = True")
|
Change 'is_active' column names to 'archived'
|
Change 'is_active' column names to 'archived'
Changed the name of the `is_active` columns to `archived` in these
tables:
* `service_email_reply_to`
* `service_sms_senders`
* `service_letter_contacts`
These columns were not being used yet, but because we describe Templates as
'archived', this keeps the names consistent.
The default value of these columns now needs to be False, not True.
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Change 'is_active' column names to 'archived'
Changed the name of the `is_active` columns to `archived` in these
tables:
* `service_email_reply_to`
* `service_sms_senders`
* `service_letter_contacts`
These columns were not being used yet, but because we describe Templates as
'archived', this keeps the names consistent.
The default value of these columns now needs to be False, not True.
|
"""
Revision ID: 0186_rename_is_active_columns
Revises: 0185_add_is_active_to_reply_tos
Create Date: 2018-04-27 16:35:41.824981
"""
from alembic import op
import sqlalchemy as sa
revision = '0186_rename_is_active_columns'
down_revision = '0185_add_is_active_to_reply_tos'
def upgrade():
op.alter_column('service_email_reply_to', 'is_active', new_column_name='archived', server_default=sa.false())
op.alter_column('service_letter_contacts', 'is_active', new_column_name='archived', server_default=sa.false())
op.alter_column('service_sms_senders', 'is_active', new_column_name='archived', server_default=sa.false())
op.execute("Update service_email_reply_to set archived = False")
op.execute("Update service_letter_contacts set archived = False")
op.execute("Update service_sms_senders set archived = False")
def downgrade():
op.alter_column('service_email_reply_to', 'archived', new_column_name='is_active', server_default=sa.true())
op.alter_column('service_letter_contacts', 'archived', new_column_name='is_active', server_default=sa.true())
op.alter_column('service_sms_senders', 'archived', new_column_name='is_active', server_default=sa.true())
op.execute("Update service_email_reply_to set is_active = True")
op.execute("Update service_letter_contacts set is_active = True")
op.execute("Update service_sms_senders set is_active = True")
|
<commit_before><commit_msg>Change 'is_active' column names to 'archived'
Changed the name of the `is_active` columns to `archived` in these
tables:
* `service_email_reply_to`
* `service_sms_senders`
* `service_letter_contacts`
These columns were not being used yet, but because we describe Templates as
'archived', this keeps the names consistent.
The default value of these columns now needs to be False, not True.<commit_after>
|
"""
Revision ID: 0186_rename_is_active_columns
Revises: 0185_add_is_active_to_reply_tos
Create Date: 2018-04-27 16:35:41.824981
"""
from alembic import op
import sqlalchemy as sa
revision = '0186_rename_is_active_columns'
down_revision = '0185_add_is_active_to_reply_tos'
def upgrade():
op.alter_column('service_email_reply_to', 'is_active', new_column_name='archived', server_default=sa.false())
op.alter_column('service_letter_contacts', 'is_active', new_column_name='archived', server_default=sa.false())
op.alter_column('service_sms_senders', 'is_active', new_column_name='archived', server_default=sa.false())
op.execute("Update service_email_reply_to set archived = False")
op.execute("Update service_letter_contacts set archived = False")
op.execute("Update service_sms_senders set archived = False")
def downgrade():
op.alter_column('service_email_reply_to', 'archived', new_column_name='is_active', server_default=sa.true())
op.alter_column('service_letter_contacts', 'archived', new_column_name='is_active', server_default=sa.true())
op.alter_column('service_sms_senders', 'archived', new_column_name='is_active', server_default=sa.true())
op.execute("Update service_email_reply_to set is_active = True")
op.execute("Update service_letter_contacts set is_active = True")
op.execute("Update service_sms_senders set is_active = True")
|
Change 'is_active' column names to 'archived'
Changed the name of the `is_active` columns to `archived` in these
tables:
* `service_email_reply_to`
* `service_sms_senders`
* `service_letter_contacts`
These columns were not being used yet, but because we describe Templates as
'archived', this keeps the names consistent.
The default value of these columns now needs to be False, not True."""
Revision ID: 0186_rename_is_active_columns
Revises: 0185_add_is_active_to_reply_tos
Create Date: 2018-04-27 16:35:41.824981
"""
from alembic import op
import sqlalchemy as sa
revision = '0186_rename_is_active_columns'
down_revision = '0185_add_is_active_to_reply_tos'
def upgrade():
op.alter_column('service_email_reply_to', 'is_active', new_column_name='archived', server_default=sa.false())
op.alter_column('service_letter_contacts', 'is_active', new_column_name='archived', server_default=sa.false())
op.alter_column('service_sms_senders', 'is_active', new_column_name='archived', server_default=sa.false())
op.execute("Update service_email_reply_to set archived = False")
op.execute("Update service_letter_contacts set archived = False")
op.execute("Update service_sms_senders set archived = False")
def downgrade():
op.alter_column('service_email_reply_to', 'archived', new_column_name='is_active', server_default=sa.true())
op.alter_column('service_letter_contacts', 'archived', new_column_name='is_active', server_default=sa.true())
op.alter_column('service_sms_senders', 'archived', new_column_name='is_active', server_default=sa.true())
op.execute("Update service_email_reply_to set is_active = True")
op.execute("Update service_letter_contacts set is_active = True")
op.execute("Update service_sms_senders set is_active = True")
|
<commit_before><commit_msg>Change 'is_active' column names to 'archived'
Changed the name of the `is_active` columns to `archived` in these
tables:
* `service_email_reply_to`
* `service_sms_senders`
* `service_letter_contacts`
These columns were not being used yet, but because we describe Templates as
'archived', this keeps the names consistent.
The default value of these columns now needs to be False, not True.<commit_after>"""
Revision ID: 0186_rename_is_active_columns
Revises: 0185_add_is_active_to_reply_tos
Create Date: 2018-04-27 16:35:41.824981
"""
from alembic import op
import sqlalchemy as sa
revision = '0186_rename_is_active_columns'
down_revision = '0185_add_is_active_to_reply_tos'
def upgrade():
op.alter_column('service_email_reply_to', 'is_active', new_column_name='archived', server_default=sa.false())
op.alter_column('service_letter_contacts', 'is_active', new_column_name='archived', server_default=sa.false())
op.alter_column('service_sms_senders', 'is_active', new_column_name='archived', server_default=sa.false())
op.execute("Update service_email_reply_to set archived = False")
op.execute("Update service_letter_contacts set archived = False")
op.execute("Update service_sms_senders set archived = False")
def downgrade():
op.alter_column('service_email_reply_to', 'archived', new_column_name='is_active', server_default=sa.true())
op.alter_column('service_letter_contacts', 'archived', new_column_name='is_active', server_default=sa.true())
op.alter_column('service_sms_senders', 'archived', new_column_name='is_active', server_default=sa.true())
op.execute("Update service_email_reply_to set is_active = True")
op.execute("Update service_letter_contacts set is_active = True")
op.execute("Update service_sms_senders set is_active = True")
|
|
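To see what the rename means for querying code, a minimal sketch follows; the table definition is invented for illustration and is not the real Notifications API model. The point is only that the flag's sense flips: archived defaults to False, and "active" rows are the ones that are not archived.

# Hypothetical table mirroring one of the renamed columns (SQLAlchemy 1.x select() style;
# on SQLAlchemy 2.0 use sa.select(service_sms_senders) instead of a list).
import sqlalchemy as sa

metadata = sa.MetaData()
service_sms_senders = sa.Table(
    'service_sms_senders', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('archived', sa.Boolean, nullable=False, server_default=sa.false()),
)

# After the migration, "active" senders are selected by negating the flag.
active_senders = sa.select([service_sms_senders]).where(
    service_sms_senders.c.archived == sa.false())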
2774139f64f83e5b173a5acb67f986648b52dd6f
|
error-propagation/error-propagation.py
|
error-propagation/error-propagation.py
|
#!/usr/bin/env python3
import numpy
import random
random.seed(54864218)
def f(a, b):
coefficients = numpy.array([[2*a + b, a + b], [a - b, a - 2*b]])
inv_coefficients = numpy.linalg.inv(coefficients)
vars = numpy.array([2.5306, 10.1])
elements = numpy.matmul(inv_coefficients, vars)
return elements[0] / elements[1]
N = 1000
input_a_mean = 3
input_a_stdev = 2
input_b_mean = 6
input_b_stdev = 1
result_sample = []
for _ in range(N):
a = random.gauss(input_a_mean, input_a_stdev)
b = random.gauss(input_b_mean, input_b_stdev)
result_sample.append(f(a, b))
result_mean = numpy.mean(result_sample)
result_stdev = numpy.std(result_sample)
print('Resulting value from %d samples: %f +- %f' % (N, result_mean, result_stdev))
|
Add a basic error propagation example.
|
Add a basic error propagation example.
|
Python
|
mpl-2.0
|
DanielBrookRoberge/MonteCarloExamples
|
Add a basic error propagation example.
|
#!/usr/bin/env python3
import numpy
import random
random.seed(54864218)
def f(a, b):
coefficients = numpy.array([[2*a + b, a + b], [a - b, a - 2*b]])
inv_coefficients = numpy.linalg.inv(coefficients)
vars = numpy.array([2.5306, 10.1])
elements = numpy.matmul(inv_coefficients, vars)
return elements[0] / elements[1]
N = 1000
input_a_mean = 3
input_a_stdev = 2
input_b_mean = 6
input_b_stdev = 1
result_sample = []
for _ in range(N):
a = random.gauss(input_a_mean, input_a_stdev)
b = random.gauss(input_b_mean, input_b_stdev)
result_sample.append(f(a, b))
result_mean = numpy.mean(result_sample)
result_stdev = numpy.std(result_sample)
print('Resulting value from %d samples: %f +- %f' % (N, result_mean, result_stdev))
|
<commit_before><commit_msg>Add a basic error propagation example.<commit_after>
|
#!/usr/bin/env python3
import numpy
import random
random.seed(54864218)
def f(a, b):
coefficients = numpy.array([[2*a + b, a + b], [a - b, a - 2*b]])
inv_coefficients = numpy.linalg.inv(coefficients)
vars = numpy.array([2.5306, 10.1])
elements = numpy.matmul(inv_coefficients, vars)
return elements[0] / elements[1]
N = 1000
input_a_mean = 3
input_a_stdev = 2
input_b_mean = 6
input_b_stdev = 1
result_sample = []
for _ in range(N):
a = random.gauss(input_a_mean, input_a_stdev)
b = random.gauss(input_b_mean, input_b_stdev)
result_sample.append(f(a, b))
result_mean = numpy.mean(result_sample)
result_stdev = numpy.std(result_sample)
print('Resulting value from %d samples: %f +- %f' % (N, result_mean, result_stdev))
|
Add a basic error propagation example.#!/usr/bin/env python3
import numpy
import random
random.seed(54864218)
def f(a, b):
coefficients = numpy.array([[2*a + b, a + b], [a - b, a - 2*b]])
inv_coefficients = numpy.linalg.inv(coefficients)
vars = numpy.array([2.5306, 10.1])
elements = numpy.matmul(inv_coefficients, vars)
return elements[0] / elements[1]
N = 1000
input_a_mean = 3
input_a_stdev = 2
input_b_mean = 6
input_b_stdev = 1
result_sample = []
for _ in range(N):
a = random.gauss(input_a_mean, input_a_stdev)
b = random.gauss(input_b_mean, input_b_stdev)
result_sample.append(f(a, b))
result_mean = numpy.mean(result_sample)
result_stdev = numpy.std(result_sample)
print('Resulting value from %d samples: %f +- %f' % (N, result_mean, result_stdev))
|
<commit_before><commit_msg>Add a basic error propagation example.<commit_after>#!/usr/bin/env python3
import numpy
import random
random.seed(54864218)
def f(a, b):
coefficients = numpy.array([[2*a + b, a + b], [a - b, a - 2*b]])
inv_coefficients = numpy.linalg.inv(coefficients)
vars = numpy.array([2.5306, 10.1])
elements = numpy.matmul(inv_coefficients, vars)
return elements[0] / elements[1]
N = 1000
input_a_mean = 3
input_a_stdev = 2
input_b_mean = 6
input_b_stdev = 1
result_sample = []
for _ in range(N):
a = random.gauss(input_a_mean, input_a_stdev)
b = random.gauss(input_b_mean, input_b_stdev)
result_sample.append(f(a, b))
result_mean = numpy.mean(result_sample)
result_stdev = numpy.std(result_sample)
print('Resulting value from %d samples: %f +- %f' % (N, result_mean, result_stdev))
|
|
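The per-sample loop above is easy to read but slow for large N. Below is a vectorised sketch of the same sampling; it is a rewrite rather than part of the original example, and it assumes NumPy 1.17+ for default_rng. Because the random streams differ, its mean and spread agree with the loop version only up to Monte-Carlo noise.

import numpy as np

rng = np.random.default_rng(54864218)
N = 1000
a = rng.normal(3, 2, size=N)   # input_a_mean, input_a_stdev
b = rng.normal(6, 1, size=N)   # input_b_mean, input_b_stdev

# Build the N coefficient matrices [[2a+b, a+b], [a-b, a-2b]] and solve them in one call.
coefficients = np.stack([np.stack([2*a + b, a + b], axis=-1),
                         np.stack([a - b, a - 2*b], axis=-1)], axis=-2)
rhs = np.tile(np.array([[2.5306], [10.1]]), (N, 1, 1))      # shape (N, 2, 1)
elements = np.linalg.solve(coefficients, rhs)[:, :, 0]      # shape (N, 2)
ratio = elements[:, 0] / elements[:, 1]
print('Resulting value from %d samples: %f +- %f' % (N, ratio.mean(), ratio.std()))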
22c668a74682769fd6c41709630047e07f6d8915
|
example/NaCl/NaCl-gv.py
|
example/NaCl/NaCl-gv.py
|
import numpy as np
from phonopy import Phonopy
from phonopy.interface.vasp import read_vasp
from phonopy.file_IO import parse_FORCE_SETS, parse_BORN
import matplotlib.pyplot as plt
unitcell = read_vasp("POSCAR")
phonon = Phonopy(unitcell,
[[2, 0, 0],
[0, 2, 0],
[0, 0, 2]],
primitive_matrix=[[0, 0.5, 0.5],
[0.5, 0, 0.5],
[0.5, 0.5, 0]])
force_sets = parse_FORCE_SETS()
phonon.set_displacement_dataset(force_sets)
phonon.produce_force_constants()
primitive = phonon.get_primitive()
nac_params = parse_BORN(primitive, filename="BORN")
phonon.set_nac_params(nac_params)
phonon.set_group_velocity()
phonon.set_mesh([31, 31, 31])
qpoints, weights, frequencies, _ = phonon.get_mesh()
group_velocity = phonon.get_group_velocity()
gv_norm = np.sqrt((group_velocity ** 2).sum(axis=2))
for i, (f, g) in enumerate(zip(frequencies.T, gv_norm.T)):
plt.plot(f, g, 'o', label=('band%d' % (i + 1)))
plt.legend()
plt.xlabel("Frequency (THz)")
plt.ylabel("|group-velocity| (THz.A)")
plt.show()
|
Add an API example for NaCl
|
Add an API example for NaCl
|
Python
|
bsd-3-clause
|
atztogo/phonopy,atztogo/phonopy,atztogo/phonopy,atztogo/phonopy
|
Add an API example for NaCl
|
import numpy as np
from phonopy import Phonopy
from phonopy.interface.vasp import read_vasp
from phonopy.file_IO import parse_FORCE_SETS, parse_BORN
import matplotlib.pyplot as plt
unitcell = read_vasp("POSCAR")
phonon = Phonopy(unitcell,
[[2, 0, 0],
[0, 2, 0],
[0, 0, 2]],
primitive_matrix=[[0, 0.5, 0.5],
[0.5, 0, 0.5],
[0.5, 0.5, 0]])
force_sets = parse_FORCE_SETS()
phonon.set_displacement_dataset(force_sets)
phonon.produce_force_constants()
primitive = phonon.get_primitive()
nac_params = parse_BORN(primitive, filename="BORN")
phonon.set_nac_params(nac_params)
phonon.set_group_velocity()
phonon.set_mesh([31, 31, 31])
qpoints, weights, frequencies, _ = phonon.get_mesh()
group_velocity = phonon.get_group_velocity()
gv_norm = np.sqrt((group_velocity ** 2).sum(axis=2))
for i, (f, g) in enumerate(zip(frequencies.T, gv_norm.T)):
plt.plot(f, g, 'o', label=('band%d' % (i + 1)))
plt.legend()
plt.xlabel("Frequency (THz)")
plt.ylabel("|group-velocity| (THz.A)")
plt.show()
|
<commit_before><commit_msg>Add an API example for NaCl<commit_after>
|
import numpy as np
from phonopy import Phonopy
from phonopy.interface.vasp import read_vasp
from phonopy.file_IO import parse_FORCE_SETS, parse_BORN
import matplotlib.pyplot as plt
unitcell = read_vasp("POSCAR")
phonon = Phonopy(unitcell,
[[2, 0, 0],
[0, 2, 0],
[0, 0, 2]],
primitive_matrix=[[0, 0.5, 0.5],
[0.5, 0, 0.5],
[0.5, 0.5, 0]])
force_sets = parse_FORCE_SETS()
phonon.set_displacement_dataset(force_sets)
phonon.produce_force_constants()
primitive = phonon.get_primitive()
nac_params = parse_BORN(primitive, filename="BORN")
phonon.set_nac_params(nac_params)
phonon.set_group_velocity()
phonon.set_mesh([31, 31, 31])
qpoints, weights, frequencies, _ = phonon.get_mesh()
group_velocity = phonon.get_group_velocity()
gv_norm = np.sqrt((group_velocity ** 2).sum(axis=2))
for i, (f, g) in enumerate(zip(frequencies.T, gv_norm.T)):
plt.plot(f, g, 'o', label=('band%d' % (i + 1)))
plt.legend()
plt.xlabel("Frequency (THz)")
plt.ylabel("|group-velocity| (THz.A)")
plt.show()
|
Add an API example for NaClimport numpy as np
from phonopy import Phonopy
from phonopy.interface.vasp import read_vasp
from phonopy.file_IO import parse_FORCE_SETS, parse_BORN
import matplotlib.pyplot as plt
unitcell = read_vasp("POSCAR")
phonon = Phonopy(unitcell,
[[2, 0, 0],
[0, 2, 0],
[0, 0, 2]],
primitive_matrix=[[0, 0.5, 0.5],
[0.5, 0, 0.5],
[0.5, 0.5, 0]])
force_sets = parse_FORCE_SETS()
phonon.set_displacement_dataset(force_sets)
phonon.produce_force_constants()
primitive = phonon.get_primitive()
nac_params = parse_BORN(primitive, filename="BORN")
phonon.set_nac_params(nac_params)
phonon.set_group_velocity()
phonon.set_mesh([31, 31, 31])
qpoints, weights, frequencies, _ = phonon.get_mesh()
group_velocity = phonon.get_group_velocity()
gv_norm = np.sqrt((group_velocity ** 2).sum(axis=2))
for i, (f, g) in enumerate(zip(frequencies.T, gv_norm.T)):
plt.plot(f, g, 'o', label=('band%d' % (i + 1)))
plt.legend()
plt.xlabel("Frequency (THz)")
plt.ylabel("|group-velocity| (THz.A)")
plt.show()
|
<commit_before><commit_msg>Add an API example for NaCl<commit_after>import numpy as np
from phonopy import Phonopy
from phonopy.interface.vasp import read_vasp
from phonopy.file_IO import parse_FORCE_SETS, parse_BORN
import matplotlib.pyplot as plt
unitcell = read_vasp("POSCAR")
phonon = Phonopy(unitcell,
[[2, 0, 0],
[0, 2, 0],
[0, 0, 2]],
primitive_matrix=[[0, 0.5, 0.5],
[0.5, 0, 0.5],
[0.5, 0.5, 0]])
force_sets = parse_FORCE_SETS()
phonon.set_displacement_dataset(force_sets)
phonon.produce_force_constants()
primitive = phonon.get_primitive()
nac_params = parse_BORN(primitive, filename="BORN")
phonon.set_nac_params(nac_params)
phonon.set_group_velocity()
phonon.set_mesh([31, 31, 31])
qpoints, weights, frequencies, _ = phonon.get_mesh()
group_velocity = phonon.get_group_velocity()
gv_norm = np.sqrt((group_velocity ** 2).sum(axis=2))
for i, (f, g) in enumerate(zip(frequencies.T, gv_norm.T)):
plt.plot(f, g, 'o', label=('band%d' % (i + 1)))
plt.legend()
plt.xlabel("Frequency (THz)")
plt.ylabel("|group-velocity| (THz.A)")
plt.show()
|
|
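As a small follow-up to the scatter plot (an addition, not part of the phonopy example), the same arrays can be reduced to a weight-averaged |group velocity| per band. The snippet is meant to be appended to the end of the script above, since it reuses gv_norm and weights and makes no further phonopy calls.

import numpy as np

w = np.asarray(weights, dtype=float)                   # one weight per q-point
mean_gv = (gv_norm * w[:, None]).sum(axis=0) / w.sum()
for i, v in enumerate(mean_gv):
    print("band%d: <|group-velocity|> = %.3f THz.A" % (i + 1, v))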
1c951a5aa3ff604177087e847faaeca1570bd898
|
experiments/keras_tensorflow/kuza55/original_examples/kuza55_blog_example.py
|
experiments/keras_tensorflow/kuza55/original_examples/kuza55_blog_example.py
|
# Multi-GPU Keras training.
# Captured from https://github.com/kuza55/keras-extras and
# https://medium.com/@kuza55/transparent-multi-gpu-training-on-tensorflow-with-keras-8b0016fd9012.
from keras.layers import merge
from keras.layers.core import Lambda
from keras.models import Model
import tensorflow as tf
def make_parallel(model, gpu_count):
def get_slice(data, idx, parts):
shape = tf.shape(data)
size = tf.concat([ shape[:1] // parts, shape[1:] ],axis=0)
stride = tf.concat([ shape[:1] // parts, shape[1:]*0 ],axis=0)
start = stride * idx
return tf.slice(data, start, size)
outputs_all = []
for i in range(len(model.outputs)):
outputs_all.append([])
#Place a copy of the model on each GPU, each getting a slice of the batch
for i in range(gpu_count):
with tf.device('/gpu:%d' % i):
with tf.name_scope('tower_%d' % i) as scope:
inputs = []
#Slice each input into a piece for processing on this GPU
for x in model.inputs:
input_shape = tuple(x.get_shape().as_list())[1:]
slice_n = Lambda(get_slice, output_shape=input_shape, arguments={'idx':i,'parts':gpu_count})(x)
inputs.append(slice_n)
outputs = model(inputs)
if not isinstance(outputs, list):
outputs = [outputs]
#Save all the outputs for merging back together later
for l in range(len(outputs)):
outputs_all[l].append(outputs[l])
# merge outputs on CPU
with tf.device('/cpu:0'):
merged = []
for outputs in outputs_all:
merged.append(merge(outputs, mode='concat', concat_axis=0))
return Model(input=model.inputs, output=merged)
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(4000, input_dim=8000, activation='tanh'))
model.add(Dense(2000, input_dim=8000, activation='relu'))
model.add(Dense(500, activation='relu'))
model.add(Dense(300, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
print (model.summary())
model = make_parallel(model, 4)
optimizer = keras.optimizers.Adam(lr=0.0001)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
x = np.random.rand(131072, 8000)
y = np.random.randint(0, 2, (131072, 1))
model.fit(x, y, batch_size=2048*4)
|
Add an example from kuza55.
|
Add an example from kuza55.
|
Python
|
mit
|
rossumai/keras-multi-gpu,rossumai/keras-multi-gpu
|
Add an example from kuza55.
|
# Multi-GPU Keras training.
# Captured from https://github.com/kuza55/keras-extras and
# https://medium.com/@kuza55/transparent-multi-gpu-training-on-tensorflow-with-keras-8b0016fd9012.
from keras.layers import merge
from keras.layers.core import Lambda
from keras.models import Model
import tensorflow as tf
def make_parallel(model, gpu_count):
def get_slice(data, idx, parts):
shape = tf.shape(data)
size = tf.concat([ shape[:1] // parts, shape[1:] ],axis=0)
stride = tf.concat([ shape[:1] // parts, shape[1:]*0 ],axis=0)
start = stride * idx
return tf.slice(data, start, size)
outputs_all = []
for i in range(len(model.outputs)):
outputs_all.append([])
#Place a copy of the model on each GPU, each getting a slice of the batch
for i in range(gpu_count):
with tf.device('/gpu:%d' % i):
with tf.name_scope('tower_%d' % i) as scope:
inputs = []
#Slice each input into a piece for processing on this GPU
for x in model.inputs:
input_shape = tuple(x.get_shape().as_list())[1:]
slice_n = Lambda(get_slice, output_shape=input_shape, arguments={'idx':i,'parts':gpu_count})(x)
inputs.append(slice_n)
outputs = model(inputs)
if not isinstance(outputs, list):
outputs = [outputs]
#Save all the outputs for merging back together later
for l in range(len(outputs)):
outputs_all[l].append(outputs[l])
# merge outputs on CPU
with tf.device('/cpu:0'):
merged = []
for outputs in outputs_all:
merged.append(merge(outputs, mode='concat', concat_axis=0))
return Model(input=model.inputs, output=merged)
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(4000, input_dim=8000, activation='tanh'))
model.add(Dense(2000, input_dim=8000, activation='relu'))
model.add(Dense(500, activation='relu'))
model.add(Dense(300, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
print (model.summary())
model = make_parallel(model, 4)
optimizer = keras.optimizers.Adam(lr=0.0001)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
x = np.random.rand(131072, 8000)
y = np.random.randint(0, 2, (131072, 1))
model.fit(x, y, batch_size=2048*4)
|
<commit_before><commit_msg>Add an example from kuza55.<commit_after>
|
# Multi-GPU Keras training.
# Captured from https://github.com/kuza55/keras-extras and
# https://medium.com/@kuza55/transparent-multi-gpu-training-on-tensorflow-with-keras-8b0016fd9012.
from keras.layers import merge
from keras.layers.core import Lambda
from keras.models import Model
import tensorflow as tf
def make_parallel(model, gpu_count):
def get_slice(data, idx, parts):
shape = tf.shape(data)
size = tf.concat([ shape[:1] // parts, shape[1:] ],axis=0)
stride = tf.concat([ shape[:1] // parts, shape[1:]*0 ],axis=0)
start = stride * idx
return tf.slice(data, start, size)
outputs_all = []
for i in range(len(model.outputs)):
outputs_all.append([])
#Place a copy of the model on each GPU, each getting a slice of the batch
for i in range(gpu_count):
with tf.device('/gpu:%d' % i):
with tf.name_scope('tower_%d' % i) as scope:
inputs = []
#Slice each input into a piece for processing on this GPU
for x in model.inputs:
input_shape = tuple(x.get_shape().as_list())[1:]
slice_n = Lambda(get_slice, output_shape=input_shape, arguments={'idx':i,'parts':gpu_count})(x)
inputs.append(slice_n)
outputs = model(inputs)
if not isinstance(outputs, list):
outputs = [outputs]
#Save all the outputs for merging back together later
for l in range(len(outputs)):
outputs_all[l].append(outputs[l])
# merge outputs on CPU
with tf.device('/cpu:0'):
merged = []
for outputs in outputs_all:
merged.append(merge(outputs, mode='concat', concat_axis=0))
return Model(input=model.inputs, output=merged)
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(4000, input_dim=8000, activation='tanh'))
model.add(Dense(2000, input_dim=8000, activation='relu'))
model.add(Dense(500, activation='relu'))
model.add(Dense(300, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
print (model.summary())
model = make_parallel(model, 4)
optimizer = keras.optimizers.Adam(lr=0.0001)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
x = np.random.rand(131072, 8000)
y = np.random.randint(0, 2, (131072, 1))
model.fit(x, y, batch_size=2048*4)
|
Add an example from kuza55.# Multi-GPU Keras training.
# Captured from https://github.com/kuza55/keras-extras and
# https://medium.com/@kuza55/transparent-multi-gpu-training-on-tensorflow-with-keras-8b0016fd9012.
from keras.layers import merge
from keras.layers.core import Lambda
from keras.models import Model
import tensorflow as tf
def make_parallel(model, gpu_count):
def get_slice(data, idx, parts):
shape = tf.shape(data)
size = tf.concat([ shape[:1] // parts, shape[1:] ],axis=0)
stride = tf.concat([ shape[:1] // parts, shape[1:]*0 ],axis=0)
start = stride * idx
return tf.slice(data, start, size)
outputs_all = []
for i in range(len(model.outputs)):
outputs_all.append([])
#Place a copy of the model on each GPU, each getting a slice of the batch
for i in range(gpu_count):
with tf.device('/gpu:%d' % i):
with tf.name_scope('tower_%d' % i) as scope:
inputs = []
#Slice each input into a piece for processing on this GPU
for x in model.inputs:
input_shape = tuple(x.get_shape().as_list())[1:]
slice_n = Lambda(get_slice, output_shape=input_shape, arguments={'idx':i,'parts':gpu_count})(x)
inputs.append(slice_n)
outputs = model(inputs)
if not isinstance(outputs, list):
outputs = [outputs]
#Save all the outputs for merging back together later
for l in range(len(outputs)):
outputs_all[l].append(outputs[l])
# merge outputs on CPU
with tf.device('/cpu:0'):
merged = []
for outputs in outputs_all:
merged.append(merge(outputs, mode='concat', concat_axis=0))
return Model(input=model.inputs, output=merged)
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(4000, input_dim=8000, activation='tanh'))
model.add(Dense(2000, input_dim=8000, activation='relu'))
model.add(Dense(500, activation='relu'))
model.add(Dense(300, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
print (model.summary())
model = make_parallel(model, 4)
optimizer = keras.optimizers.Adam(lr=0.0001)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
x = np.random.rand(131072, 8000)
y = np.random.randint(0, 2, (131072, 1))
model.fit(x, y, batch_size=2048*4)
|
<commit_before><commit_msg>Add an example from kuza55.<commit_after># Multi-GPU Keras training.
# Captured from https://github.com/kuza55/keras-extras and
# https://medium.com/@kuza55/transparent-multi-gpu-training-on-tensorflow-with-keras-8b0016fd9012.
from keras.layers import merge
from keras.layers.core import Lambda
from keras.models import Model
import tensorflow as tf
def make_parallel(model, gpu_count):
def get_slice(data, idx, parts):
shape = tf.shape(data)
size = tf.concat([ shape[:1] // parts, shape[1:] ],axis=0)
stride = tf.concat([ shape[:1] // parts, shape[1:]*0 ],axis=0)
start = stride * idx
return tf.slice(data, start, size)
outputs_all = []
for i in range(len(model.outputs)):
outputs_all.append([])
#Place a copy of the model on each GPU, each getting a slice of the batch
for i in range(gpu_count):
with tf.device('/gpu:%d' % i):
with tf.name_scope('tower_%d' % i) as scope:
inputs = []
#Slice each input into a piece for processing on this GPU
for x in model.inputs:
input_shape = tuple(x.get_shape().as_list())[1:]
slice_n = Lambda(get_slice, output_shape=input_shape, arguments={'idx':i,'parts':gpu_count})(x)
inputs.append(slice_n)
outputs = model(inputs)
if not isinstance(outputs, list):
outputs = [outputs]
#Save all the outputs for merging back together later
for l in range(len(outputs)):
outputs_all[l].append(outputs[l])
# merge outputs on CPU
with tf.device('/cpu:0'):
merged = []
for outputs in outputs_all:
merged.append(merge(outputs, mode='concat', concat_axis=0))
return Model(input=model.inputs, output=merged)
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(4000, input_dim=8000, activation='tanh'))
model.add(Dense(2000, input_dim=8000, activation='relu'))
model.add(Dense(500, activation='relu'))
model.add(Dense(300, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
print (model.summary())
model = make_parallel(model, 4)
optimizer = keras.optimizers.Adam(lr=0.0001)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
x = np.random.rand(131072, 8000)
y = np.random.randint(0, 2, (131072, 1))
model.fit(x, y, batch_size=2048*4)
|
|
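For context only, and not part of the captured gist: on current TensorFlow the hand-rolled make_parallel is normally replaced by tf.distribute, which shards each batch across the visible GPUs for you. A hedged TF 2.x sketch of a comparable setup:

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()          # one replica per visible GPU
with strategy.scope():
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(4000, input_dim=8000, activation='tanh'),
        tf.keras.layers.Dense(500, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid'),
    ])
    model.compile(loss='binary_crossentropy',
                  optimizer=tf.keras.optimizers.Adam(1e-4),
                  metrics=['accuracy'])
# model.fit(x, y, batch_size=2048 * 4) then splits each global batch across the replicas.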
e6cf9cb9d27523fd72242f6ea137d14bff5f2039
|
interface/plugin/farmanager/02title/__init__.py
|
interface/plugin/farmanager/02title/__init__.py
|
"""
Gets plugin info from global fields
Low-level Far Manager API is here:
* https://api.farmanager.com/en/exported_functions/getglobalinfow.html
"""
__title__ = "02fields"
__author__ = "anatoly techtonik <techtonik@gmail.com>"
__license__ = "Public Domain"
# --- utility functions ---
import hashlib
def getuuid(data):
"""Generate UUID from `data` string"""
if type(data) != bytes:
data = data.encode('utf-8')
h = hashlib.sha256(data).hexdigest()[:32].upper()
for i, pos in enumerate([8, 12, 16, 20]):
h = h[:i+pos] + '-' + h[i+pos:]
return h
# --- plugin interface
def GetGlobalInfoW(info):
""" Called by Far Manager, plugin needs to fill the info """
info["Title"] = __title__ # should be set and non-empty
info["Author"] = __author__ # should be set and non-empty
# first string from file comment
desc = __doc__.strip().splitlines()[0]
info["Description"] = desc # should be set
info["Guid"] = getuuid(info["Title"])
def GetPluginInfoW(info):
""" Called by Far Manager to add item into Plugin commands menu (F11) """
info["MenuString"] = __title__
info["Guid"] = getuuid(info["MenuString"])
def OpenW(info):
print("[open] " + __file__)
|
Add 02title/ plugin that gets own info from global fields
|
Add 02title/ plugin that gets own info from global fields
|
Python
|
unlicense
|
techtonik/discovery,techtonik/discovery,techtonik/discovery
|
Add 02title/ plugin that gets own info from global fields
|
"""
Gets plugin info from global fields
Low-level Far Manager API is here:
* https://api.farmanager.com/en/exported_functions/getglobalinfow.html
"""
__title__ = "02fields"
__author__ = "anatoly techtonik <techtonik@gmail.com>"
__license__ = "Public Domain"
# --- utility functions ---
import hashlib
def getuuid(data):
"""Generate UUID from `data` string"""
if type(data) != bytes:
data = data.encode('utf-8')
h = hashlib.sha256(data).hexdigest()[:32].upper()
for i, pos in enumerate([8, 12, 16, 20]):
h = h[:i+pos] + '-' + h[i+pos:]
return h
# --- plugin interface
def GetGlobalInfoW(info):
""" Called by Far Manager, plugin needs to fill the info """
info["Title"] = __title__ # should be set and non-empty
info["Author"] = __author__ # should be set and non-empty
# first string from file comment
desc = __doc__.strip().splitlines()[0]
info["Description"] = desc # should be set
info["Guid"] = getuuid(info["Title"])
def GetPluginInfoW(info):
""" Called by Far Manager to add item into Plugin commands menu (F11) """
info["MenuString"] = __title__
info["Guid"] = getuuid(info["MenuString"])
def OpenW(info):
print("[open] " + __file__)
|
<commit_before><commit_msg>Add 02title/ plugin that gets own info from global fields<commit_after>
|
"""
Gets plugin info from global fields
Low-level Far Manager API is here:
* https://api.farmanager.com/en/exported_functions/getglobalinfow.html
"""
__title__ = "02fields"
__author__ = "anatoly techtonik <techtonik@gmail.com>"
__license__ = "Public Domain"
# --- utility functions ---
import hashlib
def getuuid(data):
"""Generate UUID from `data` string"""
if type(data) != bytes:
data = data.encode('utf-8')
h = hashlib.sha256(data).hexdigest()[:32].upper()
for i, pos in enumerate([8, 12, 16, 20]):
h = h[:i+pos] + '-' + h[i+pos:]
return h
# --- plugin interface
def GetGlobalInfoW(info):
""" Called by Far Manager, plugin needs to fill the info """
info["Title"] = __title__ # should be set and non-empty
info["Author"] = __author__ # should be set and non-empty
# first string from file comment
desc = __doc__.strip().splitlines()[0]
info["Description"] = desc # should be set
info["Guid"] = getuuid(info["Title"])
def GetPluginInfoW(info):
""" Called by Far Manager to add item into Plugin commands menu (F11) """
info["MenuString"] = __title__
info["Guid"] = getuuid(info["MenuString"])
def OpenW(info):
print("[open] " + __file__)
|
Add 02title/ plugin that gets own info from global fields"""
Gets plugin info from global fields
Low-level Far Manager API is here:
* https://api.farmanager.com/en/exported_functions/getglobalinfow.html
"""
__title__ = "02fields"
__author__ = "anatoly techtonik <techtonik@gmail.com>"
__license__ = "Public Domain"
# --- utility functions ---
import hashlib
def getuuid(data):
"""Generate UUID from `data` string"""
if type(data) != bytes:
data = data.encode('utf-8')
h = hashlib.sha256(data).hexdigest()[:32].upper()
for i, pos in enumerate([8, 12, 16, 20]):
h = h[:i+pos] + '-' + h[i+pos:]
return h
# --- plugin interface
def GetGlobalInfoW(info):
""" Called by Far Manager, plugin needs to fill the info """
info["Title"] = __title__ # should be set and non-empty
info["Author"] = __author__ # should be set and non-empty
# first string from file comment
desc = __doc__.strip().splitlines()[0]
info["Description"] = desc # should be set
info["Guid"] = getuuid(info["Title"])
def GetPluginInfoW(info):
""" Called by Far Manager to add item into Plugin commands menu (F11) """
info["MenuString"] = __title__
info["Guid"] = getuuid(info["MenuString"])
def OpenW(info):
print("[open] " + __file__)
|
<commit_before><commit_msg>Add 02title/ plugin that gets own info from global fields<commit_after>"""
Gets plugin info from global fields
Low-level Far Manager API is here:
* https://api.farmanager.com/en/exported_functions/getglobalinfow.html
"""
__title__ = "02fields"
__author__ = "anatoly techtonik <techtonik@gmail.com>"
__license__ = "Public Domain"
# --- utility functions ---
import hashlib
def getuuid(data):
"""Generate UUID from `data` string"""
if type(data) != bytes:
data = data.encode('utf-8')
h = hashlib.sha256(data).hexdigest()[:32].upper()
for i, pos in enumerate([8, 12, 16, 20]):
h = h[:i+pos] + '-' + h[i+pos:]
return h
# --- plugin interface
def GetGlobalInfoW(info):
""" Called by Far Manager, plugin needs to fill the info """
info["Title"] = __title__ # should be set and non-empty
info["Author"] = __author__ # should be set and non-empty
# first string from file comment
desc = __doc__.strip().splitlines()[0]
info["Description"] = desc # should be set
info["Guid"] = getuuid(info["Title"])
def GetPluginInfoW(info):
""" Called by Far Manager to add item into Plugin commands menu (F11) """
info["MenuString"] = __title__
info["Guid"] = getuuid(info["MenuString"])
def OpenW(info):
print("[open] " + __file__)
|
|
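The getuuid() helper above is a deterministic, name-based hash formatted like a GUID rather than a random UUID. A stand-alone sanity check is sketched below; it re-creates the function inline so it runs outside Far Manager, and it asserts only the 8-4-4-4-12 layout, not any particular hash value.

import hashlib
import uuid

def getuuid(data):
    if type(data) != bytes:
        data = data.encode('utf-8')
    h = hashlib.sha256(data).hexdigest()[:32].upper()
    for i, pos in enumerate([8, 12, 16, 20]):
        h = h[:i+pos] + '-' + h[i+pos:]
    return h

guid = getuuid("02fields")
uuid.UUID(guid)                      # raises ValueError if the layout is wrong
assert len(guid) == 36 and guid.count('-') == 4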
2bfe7b5324ef79cc60b81ffc392bd2dd1b7b2bc5
|
src/tests/templateedit.py
|
src/tests/templateedit.py
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from oabot.main import *
class TemplateEditTests(unittest.TestCase):
def propose_change(self, text, page_name='Test page'):
wikicode = mwparserfromhell.parse(text)
for template in wikicode.filter_templates():
edit = TemplateEdit(template, page_name)
edit.propose_change()
return edit
def test_add_arxiv(self):
edit = self.propose_change("""
{{Cite journal|last=Prpić|first=John|last2=Shukla|first2=Prashant P.|last3=Kietzmann|first3=Jan H.|last4=McCarthy|first4=Ian P.|date=2015-01-01|title=How to work a crowd: Developing crowd capital through
crowdsourcing|url=http://www.sciencedirect.com/science/article/pii/S0007681314001438|journal=Business Horizons|volume=58|issue=1|pages=77–85|doi=10.1016/j.bushor.2014.09.005}}
""")
self.assertEquals("arxiv=1702.04214", edit.proposed_change)
|
Add test utilities for proposed changes - feel free to add other cases
|
Add test utilities for proposed changes - feel free to add other cases
|
Python
|
mit
|
dissemin/oabot,dissemin/oabot,dissemin/oabot
|
Add test utilities for proposed changes - feel free to add other cases
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from oabot.main import *
class TemplateEditTests(unittest.TestCase):
def propose_change(self, text, page_name='Test page'):
wikicode = mwparserfromhell.parse(text)
for template in wikicode.filter_templates():
edit = TemplateEdit(template, page_name)
edit.propose_change()
return edit
def test_add_arxiv(self):
edit = self.propose_change("""
{{Cite journal|last=Prpić|first=John|last2=Shukla|first2=Prashant P.|last3=Kietzmann|first3=Jan H.|last4=McCarthy|first4=Ian P.|date=2015-01-01|title=How to work a crowd: Developing crowd capital through
crowdsourcing|url=http://www.sciencedirect.com/science/article/pii/S0007681314001438|journal=Business Horizons|volume=58|issue=1|pages=77–85|doi=10.1016/j.bushor.2014.09.005}}
""")
self.assertEquals("arxiv=1702.04214", edit.proposed_change)
|
<commit_before><commit_msg>Add test utilities for proposed changes - feel free to add other cases<commit_after>
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from oabot.main import *
class TemplateEditTests(unittest.TestCase):
def propose_change(self, text, page_name='Test page'):
wikicode = mwparserfromhell.parse(text)
for template in wikicode.filter_templates():
edit = TemplateEdit(template, page_name)
edit.propose_change()
return edit
def test_add_arxiv(self):
edit = self.propose_change("""
{{Cite journal|last=Prpić|first=John|last2=Shukla|first2=Prashant P.|last3=Kietzmann|first3=Jan H.|last4=McCarthy|first4=Ian P.|date=2015-01-01|title=How to work a crowd: Developing crowd capital through
crowdsourcing|url=http://www.sciencedirect.com/science/article/pii/S0007681314001438|journal=Business Horizons|volume=58|issue=1|pages=77–85|doi=10.1016/j.bushor.2014.09.005}}
""")
self.assertEquals("arxiv=1702.04214", edit.proposed_change)
|
Add test utilities for proposed changes - feel free to add other cases# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from oabot.main import *
class TemplateEditTests(unittest.TestCase):
def propose_change(self, text, page_name='Test page'):
wikicode = mwparserfromhell.parse(text)
for template in wikicode.filter_templates():
edit = TemplateEdit(template, page_name)
edit.propose_change()
return edit
def test_add_arxiv(self):
edit = self.propose_change("""
{{Cite journal|last=Prpić|first=John|last2=Shukla|first2=Prashant P.|last3=Kietzmann|first3=Jan H.|last4=McCarthy|first4=Ian P.|date=2015-01-01|title=How to work a crowd: Developing crowd capital through
crowdsourcing|url=http://www.sciencedirect.com/science/article/pii/S0007681314001438|journal=Business Horizons|volume=58|issue=1|pages=77–85|doi=10.1016/j.bushor.2014.09.005}}
""")
self.assertEquals("arxiv=1702.04214", edit.proposed_change)
|
<commit_before><commit_msg>Add test utilities for proposed changes - feel free to add other cases<commit_after># -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from oabot.main import *
class TemplateEditTests(unittest.TestCase):
def propose_change(self, text, page_name='Test page'):
wikicode = mwparserfromhell.parse(text)
for template in wikicode.filter_templates():
edit = TemplateEdit(template, page_name)
edit.propose_change()
return edit
def test_add_arxiv(self):
edit = self.propose_change("""
{{Cite journal|last=Prpić|first=John|last2=Shukla|first2=Prashant P.|last3=Kietzmann|first3=Jan H.|last4=McCarthy|first4=Ian P.|date=2015-01-01|title=How to work a crowd: Developing crowd capital through
crowdsourcing|url=http://www.sciencedirect.com/science/article/pii/S0007681314001438|journal=Business Horizons|volume=58|issue=1|pages=77–85|doi=10.1016/j.bushor.2014.09.005}}
""")
self.assertEquals("arxiv=1702.04214", edit.proposed_change)
|
|
52accd6f82893fe167a78eabe8cdf30dfa0bc841
|
glance/tests/unit/test_data_migration_version.py
|
glance/tests/unit/test_data_migration_version.py
|
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import testtools
from glance.db.migration import CURRENT_RELEASE
from glance.version import version_info
class TestDataMigrationVersion(testtools.TestCase):
def test_migration_version(self):
"""Make sure the data migration version info has been updated."""
release_number = int(version_info.version_string().split('.', 1)[0])
# by rule, release names must be composed of the 26 letters of the
# ISO Latin alphabet (ord('A')==65, ord('Z')==90)
release_letter = six.text_type(CURRENT_RELEASE[:1].upper()).encode(
'ascii')
# Convert release letter into an int in [1:26]. The first
# glance release was 'Bexar'.
converted_release_letter = (ord(release_letter) -
ord(u'B'.encode('ascii')) + 1)
# Project the release number into [1:26]
converted_release_number = release_number % 26
# Prepare for the worst with a super-informative message
msg = ('\n\n'
'EMERGENCY!\n'
'glance.db.migration.CURRENT_RELEASE is out of sync '
'with the glance version.\n'
' CURRENT_RELEASE: %s\n'
' glance version: %s\n'
'glance.db.migration.CURRENT_RELEASE needs to be '
'updated IMMEDIATELY.\n'
'The gate will be wedged until the update is made.\n'
'EMERGENCY!\n'
'\n') % (CURRENT_RELEASE,
version_info.version_string())
self.assertEqual(converted_release_letter,
converted_release_number,
msg)
|
Add test for data migration version
|
Add test for data migration version
Detect when the CURRENT_VERSION constant used by the database
migration code is out of sync with the version number of the
current release.
Change-Id: I9e8b5ee98c6c111aaf065cb310906d6442f5f79a
|
Python
|
apache-2.0
|
openstack/glance,openstack/glance,openstack/glance
|
Add test for data migration version
Detect when the CURRENT_VERSION constant used by the database
migration code is out of sync with the version number of the
current release.
Change-Id: I9e8b5ee98c6c111aaf065cb310906d6442f5f79a
|
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import testtools
from glance.db.migration import CURRENT_RELEASE
from glance.version import version_info
class TestDataMigrationVersion(testtools.TestCase):
def test_migration_version(self):
"""Make sure the data migration version info has been updated."""
release_number = int(version_info.version_string().split('.', 1)[0])
# by rule, release names must be composed of the 26 letters of the
# ISO Latin alphabet (ord('A')==65, ord('Z')==90)
release_letter = six.text_type(CURRENT_RELEASE[:1].upper()).encode(
'ascii')
# Convert release letter into an int in [1:26]. The first
# glance release was 'Bexar'.
converted_release_letter = (ord(release_letter) -
ord(u'B'.encode('ascii')) + 1)
# Project the release number into [1:26]
converted_release_number = release_number % 26
# Prepare for the worst with a super-informative message
msg = ('\n\n'
'EMERGENCY!\n'
'glance.db.migration.CURRENT_RELEASE is out of sync '
'with the glance version.\n'
' CURRENT_RELEASE: %s\n'
' glance version: %s\n'
'glance.db.migration.CURRENT_RELEASE needs to be '
'updated IMMEDIATELY.\n'
'The gate will be wedged until the update is made.\n'
'EMERGENCY!\n'
'\n') % (CURRENT_RELEASE,
version_info.version_string())
self.assertEqual(converted_release_letter,
converted_release_number,
msg)
|
<commit_before><commit_msg>Add test for data migration version
Detect when the CURRENT_VERSION constant used by the database
migration code is out of sync with the version number of the
current release.
Change-Id: I9e8b5ee98c6c111aaf065cb310906d6442f5f79a<commit_after>
|
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import testtools
from glance.db.migration import CURRENT_RELEASE
from glance.version import version_info
class TestDataMigrationVersion(testtools.TestCase):
def test_migration_version(self):
"""Make sure the data migration version info has been updated."""
release_number = int(version_info.version_string().split('.', 1)[0])
# by rule, release names must be composed of the 26 letters of the
# ISO Latin alphabet (ord('A')==65, ord('Z')==90)
release_letter = six.text_type(CURRENT_RELEASE[:1].upper()).encode(
'ascii')
# Convert release letter into an int in [1:26]. The first
# glance release was 'Bexar'.
converted_release_letter = (ord(release_letter) -
ord(u'B'.encode('ascii')) + 1)
# Project the release number into [1:26]
converted_release_number = release_number % 26
# Prepare for the worst with a super-informative message
msg = ('\n\n'
'EMERGENCY!\n'
'glance.db.migration.CURRENT_RELEASE is out of sync '
'with the glance version.\n'
' CURRENT_RELEASE: %s\n'
' glance version: %s\n'
'glance.db.migration.CURRENT_RELEASE needs to be '
'updated IMMEDIATELY.\n'
'The gate will be wedged until the update is made.\n'
'EMERGENCY!\n'
'\n') % (CURRENT_RELEASE,
version_info.version_string())
self.assertEqual(converted_release_letter,
converted_release_number,
msg)
|
Add test for data migration version
Detect when the CURRENT_VERSION constant used by the database
migration code is out of sync with the version number of the
current release.
Change-Id: I9e8b5ee98c6c111aaf065cb310906d6442f5f79a# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import testtools
from glance.db.migration import CURRENT_RELEASE
from glance.version import version_info
class TestDataMigrationVersion(testtools.TestCase):
def test_migration_version(self):
"""Make sure the data migration version info has been updated."""
release_number = int(version_info.version_string().split('.', 1)[0])
# by rule, release names must be composed of the 26 letters of the
# ISO Latin alphabet (ord('A')==65, ord('Z')==90)
release_letter = six.text_type(CURRENT_RELEASE[:1].upper()).encode(
'ascii')
# Convert release letter into an int in [1:26]. The first
# glance release was 'Bexar'.
converted_release_letter = (ord(release_letter) -
ord(u'B'.encode('ascii')) + 1)
# Project the release number into [1:26]
converted_release_number = release_number % 26
# Prepare for the worst with a super-informative message
msg = ('\n\n'
'EMERGENCY!\n'
'glance.db.migration.CURRENT_RELEASE is out of sync '
'with the glance version.\n'
' CURRENT_RELEASE: %s\n'
' glance version: %s\n'
'glance.db.migration.CURRENT_RELEASE needs to be '
'updated IMMEDIATELY.\n'
'The gate will be wedged until the update is made.\n'
'EMERGENCY!\n'
'\n') % (CURRENT_RELEASE,
version_info.version_string())
self.assertEqual(converted_release_letter,
converted_release_number,
msg)
|
<commit_before><commit_msg>Add test for data migration version
Detect when the CURRENT_VERSION constant used by the database
migration code is out of sync with the version number of the
current release.
Change-Id: I9e8b5ee98c6c111aaf065cb310906d6442f5f79a<commit_after># Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import testtools
from glance.db.migration import CURRENT_RELEASE
from glance.version import version_info
class TestDataMigrationVersion(testtools.TestCase):
def test_migration_version(self):
"""Make sure the data migration version info has been updated."""
release_number = int(version_info.version_string().split('.', 1)[0])
# by rule, release names must be composed of the 26 letters of the
# ISO Latin alphabet (ord('A')==65, ord('Z')==90)
release_letter = six.text_type(CURRENT_RELEASE[:1].upper()).encode(
'ascii')
# Convert release letter into an int in [1:26]. The first
# glance release was 'Bexar'.
converted_release_letter = (ord(release_letter) -
ord(u'B'.encode('ascii')) + 1)
# Project the release number into [1:26]
converted_release_number = release_number % 26
# Prepare for the worst with a super-informative message
msg = ('\n\n'
'EMERGENCY!\n'
'glance.db.migration.CURRENT_RELEASE is out of sync '
'with the glance version.\n'
' CURRENT_RELEASE: %s\n'
' glance version: %s\n'
'glance.db.migration.CURRENT_RELEASE needs to be '
'updated IMMEDIATELY.\n'
'The gate will be wedged until the update is made.\n'
'EMERGENCY!\n'
'\n') % (CURRENT_RELEASE,
version_info.version_string())
self.assertEqual(converted_release_letter,
converted_release_number,
msg)
|
|
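The modular arithmetic in the test is easier to follow with concrete numbers. A worked example follows; the release letter and version number below are illustrative and not tied to any particular gate run.

# 'B' (Bexar) maps to 1, so a release starting with 'T' maps to ord('T') - ord('B') + 1 = 19,
# which must equal the major version number taken modulo 26.
release_letter = b'T'
converted_release_letter = ord(release_letter) - ord(b'B') + 1   # 84 - 66 + 1 = 19
converted_release_number = 19 % 26                               # e.g. a 19.x.x version string
assert converted_release_letter == converted_release_number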
11d39551f85a1490ebe370b97ed729d85df06b0b
|
shuup/xtheme/migrations/0004_convert_shop_themes.py
|
shuup/xtheme/migrations/0004_convert_shop_themes.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-12-07 23:22
from __future__ import unicode_literals
from django.db.transaction import atomic
from django.db import migrations
from shuup.core.models import Shop
from shuup.xtheme.models import SavedViewConfig, ThemeSettings
@atomic
def convert_shop_themes(*args):
for theme_setting in ThemeSettings.objects.filter(shop__isnull=True):
for (index, shop) in enumerate(Shop.objects.all()):
# already exists.. ignore
if ThemeSettings.objects.filter(shop=shop, theme_identifier=theme_setting.theme_identifier).exists():
continue
# the first shop receives the original object, the others are just copies
if index > 0:
theme_setting.pk = None
theme_setting.shop = shop
theme_setting.save()
for saved_config in SavedViewConfig.objects.filter(shop__isnull=True):
for (index, shop) in enumerate(Shop.objects.all()):
# already exists.. ignore
if SavedViewConfig.objects.filter(shop=shop, theme_identifier=saved_config.theme_identifier).exists():
continue
# the first shop receives the original object, the others are just copies
if index > 0:
saved_config.pk = None
saved_config.shop = shop
saved_config.save()
class Migration(migrations.Migration):
dependencies = [
('shuup_xtheme', '0003_shop_theme'),
]
operations = [
migrations.RunPython(convert_shop_themes, migrations.RunPython.noop)
]
|
Add migration to add shop information
|
Xtheme: Add migration to add shop information
Add shop information in ThemeSettings and SavedViewConfig
The process will clone every settings for each existent shop
|
Python
|
agpl-3.0
|
shoopio/shoop,shoopio/shoop,shoopio/shoop
|
Xtheme: Add migration to add shop information
Add shop information in ThemeSettings and SavedViewConfig
The process will clone every settings for each existent shop
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-12-07 23:22
from __future__ import unicode_literals
from django.db.transaction import atomic
from django.db import migrations
from shuup.core.models import Shop
from shuup.xtheme.models import SavedViewConfig, ThemeSettings
@atomic
def convert_shop_themes(*args):
for theme_setting in ThemeSettings.objects.filter(shop__isnull=True):
for (index, shop) in enumerate(Shop.objects.all()):
# already exists.. ignore
if ThemeSettings.objects.filter(shop=shop, theme_identifier=theme_setting.theme_identifier).exists():
continue
# the first shop received the original object, the other, are just copies
if index > 0:
theme_setting.pk = None
theme_setting.shop = shop
theme_setting.save()
for saved_config in SavedViewConfig.objects.filter(shop__isnull=True):
for (index, shop) in enumerate(Shop.objects.all()):
# already exists.. ignore
if SavedViewConfig.objects.filter(shop=shop, theme_identifier=saved_config.theme_identifier).exists():
continue
# the first shop received the original object, the other, are just copies
if index > 0:
saved_config.pk = None
saved_config.shop = shop
saved_config.save()
class Migration(migrations.Migration):
dependencies = [
('shuup_xtheme', '0003_shop_theme'),
]
operations = [
migrations.RunPython(convert_shop_themes, migrations.RunPython.noop)
]
|
<commit_before><commit_msg>Xtheme: Add migration to add shop information
Add shop information in ThemeSettings and SavedViewConfig
The process will clone every settings for each existent shop<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-12-07 23:22
from __future__ import unicode_literals
from django.db.transaction import atomic
from django.db import migrations
from shuup.core.models import Shop
from shuup.xtheme.models import SavedViewConfig, ThemeSettings
@atomic
def convert_shop_themes(*args):
for theme_setting in ThemeSettings.objects.filter(shop__isnull=True):
for (index, shop) in enumerate(Shop.objects.all()):
# already exists.. ignore
if ThemeSettings.objects.filter(shop=shop, theme_identifier=theme_setting.theme_identifier).exists():
continue
# the first shop received the original object, the other, are just copies
if index > 0:
theme_setting.pk = None
theme_setting.shop = shop
theme_setting.save()
for saved_config in SavedViewConfig.objects.filter(shop__isnull=True):
for (index, shop) in enumerate(Shop.objects.all()):
# already exists.. ignore
if SavedViewConfig.objects.filter(shop=shop, theme_identifier=saved_config.theme_identifier).exists():
continue
# the first shop received the original object, the other, are just copies
if index > 0:
saved_config.pk = None
saved_config.shop = shop
saved_config.save()
class Migration(migrations.Migration):
dependencies = [
('shuup_xtheme', '0003_shop_theme'),
]
operations = [
migrations.RunPython(convert_shop_themes, migrations.RunPython.noop)
]
|
Xtheme: Add migration to add shop information
Add shop information in ThemeSettings and SavedViewConfig
The process will clone every settings for each existent shop# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-12-07 23:22
from __future__ import unicode_literals
from django.db.transaction import atomic
from django.db import migrations
from shuup.core.models import Shop
from shuup.xtheme.models import SavedViewConfig, ThemeSettings
@atomic
def convert_shop_themes(*args):
for theme_setting in ThemeSettings.objects.filter(shop__isnull=True):
for (index, shop) in enumerate(Shop.objects.all()):
# already exists.. ignore
if ThemeSettings.objects.filter(shop=shop, theme_identifier=theme_setting.theme_identifier).exists():
continue
# the first shop received the original object, the other, are just copies
if index > 0:
theme_setting.pk = None
theme_setting.shop = shop
theme_setting.save()
for saved_config in SavedViewConfig.objects.filter(shop__isnull=True):
for (index, shop) in enumerate(Shop.objects.all()):
# already exists.. ignore
if SavedViewConfig.objects.filter(shop=shop, theme_identifier=saved_config.theme_identifier).exists():
continue
# the first shop received the original object, the other, are just copies
if index > 0:
saved_config.pk = None
saved_config.shop = shop
saved_config.save()
class Migration(migrations.Migration):
dependencies = [
('shuup_xtheme', '0003_shop_theme'),
]
operations = [
migrations.RunPython(convert_shop_themes, migrations.RunPython.noop)
]
|
<commit_before><commit_msg>Xtheme: Add migration to add shop information
Add shop information in ThemeSettings and SavedViewConfig
The process will clone every settings for each existent shop<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-12-07 23:22
from __future__ import unicode_literals
from django.db.transaction import atomic
from django.db import migrations
from shuup.core.models import Shop
from shuup.xtheme.models import SavedViewConfig, ThemeSettings
@atomic
def convert_shop_themes(*args):
for theme_setting in ThemeSettings.objects.filter(shop__isnull=True):
for (index, shop) in enumerate(Shop.objects.all()):
# already exists.. ignore
if ThemeSettings.objects.filter(shop=shop, theme_identifier=theme_setting.theme_identifier).exists():
continue
# the first shop received the original object, the other, are just copies
if index > 0:
theme_setting.pk = None
theme_setting.shop = shop
theme_setting.save()
for saved_config in SavedViewConfig.objects.filter(shop__isnull=True):
for (index, shop) in enumerate(Shop.objects.all()):
# already exists.. ignore
if SavedViewConfig.objects.filter(shop=shop, theme_identifier=saved_config.theme_identifier).exists():
continue
# the first shop received the original object, the other, are just copies
if index > 0:
saved_config.pk = None
saved_config.shop = shop
saved_config.save()
class Migration(migrations.Migration):
dependencies = [
('shuup_xtheme', '0003_shop_theme'),
]
operations = [
migrations.RunPython(convert_shop_themes, migrations.RunPython.noop)
]
|
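The cloning step in the migration above leans on a standard Django idiom: clearing an instance's primary key before save() makes Django insert a new row instead of updating the existing one. A minimal sketch of that idiom outside the migration, where settings_obj stands for any saved model instance with a shop foreign key (the surrounding Django project setup is assumed):

def clone_for_shops(settings_obj, shops):
    # First shop keeps the original row; every later shop gets a fresh copy.
    for index, shop in enumerate(shops):
        if index > 0:
            settings_obj.pk = None  # cleared pk => save() performs an INSERT
        settings_obj.shop = shop
        settings_obj.save()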
|
b055b5094eaca3191d066749ea6aff16dd6b9867
|
nova/tests/unit/policies/test_console_auth_tokens.py
|
nova/tests/unit/policies/test_console_auth_tokens.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.api.openstack.compute import console_auth_tokens
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
class ConsoleAuthTokensPolicyTest(base.BasePolicyTest):
"""Test Console Auth Tokens APIs policies with all possible context.
This class defines the set of context with different roles
which are allowed and not allowed to pass the policy checks.
With those set of context, it will call the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(ConsoleAuthTokensPolicyTest, self).setUp()
self.controller = console_auth_tokens.ConsoleAuthTokensController()
self.req = fakes.HTTPRequest.blank('', version='2.31')
# Check that admin is able to get console connection information.
self.admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
# Check that non-admin is not able to get console connection
# information.
self.admin_unauthorized_contexts = [
self.system_member_context, self.system_reader_context,
self.system_foo_context, self.project_member_context,
self.other_project_member_context,
self.project_foo_context, self.project_reader_context
]
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_console_connect_info_token_policy(self, mock_validate):
rule_name = "os_compute_api:os-console-auth-tokens"
self.common_policy_check(self.admin_authorized_contexts,
self.admin_unauthorized_contexts,
rule_name, self.controller.show,
self.req, fakes.FAKE_UUID)
class ConsoleAuthTokensScopeTypePolicyTest(ConsoleAuthTokensPolicyTest):
"""Test Console Auth Tokens APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
so that we can switch on the scope checking on oslo policy side.
It defines the set of context with scoped token
which are allowed and not allowed to pass the policy checks.
With those set of context, it will run the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(ConsoleAuthTokensScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
|
Add test coverage of existing os-console-auth-tokens policies
|
Add test coverage of existing os-console-auth-tokens policies
Current tests do not have good test coverage of existing policies.
Either tests for policies do not exist or if they exist then they
do not cover the actual negative and positive testing.
For Example, if any policy with default rule as admin only then
test should verify:
- policy check pass with context having admin or server owner
- policy check fail with context having non-admin and not server owner
As discussed in policy-defaults-refresh [1], to change the policies
with new default roles and scope_type, we need to have the enough
testing coverage of existing policy behavior.
When we will add the scope_type in policies or new default roles,
then these test coverage will be extended to adopt the new changes
and also make sure we do not break the existing behavior.
This commit covers the testing coverage of existing os-console-auth-tokens
policies.
Partial implement blueprint policy-defaults-refresh
[1] https://specs.openstack.org/openstack/nova-specs/specs/ussuri/approved/policy-defaults-refresh.html#testing
Change-Id: Iface875e2af24fd2dced0daa079bf7595762e999
|
Python
|
apache-2.0
|
mahak/nova,openstack/nova,mahak/nova,openstack/nova,klmitch/nova,openstack/nova,klmitch/nova,klmitch/nova,mahak/nova,klmitch/nova
|
Add test coverage of existing os-console-auth-tokens policies
Current tests do not have good test coverage of existing policies.
Either tests for policies do not exist or if they exist then they
do not cover the actual negative and positive testing.
For Example, if any policy with default rule as admin only then
test should verify:
- policy check pass with context having admin or server owner
- policy check fail with context having non-admin and not server owner
As discussed in policy-defaults-refresh [1], to change the policies
with new default roles and scope_type, we need to have the enough
testing coverage of existing policy behavior.
When we will add the scope_type in policies or new default roles,
then these test coverage will be extended to adopt the new changes
and also make sure we do not break the existing behavior.
This commit covers the testing coverage of existing os-console-auth-tokens
policies.
Partial implement blueprint policy-defaults-refresh
[1] https://specs.openstack.org/openstack/nova-specs/specs/ussuri/approved/policy-defaults-refresh.html#testing
Change-Id: Iface875e2af24fd2dced0daa079bf7595762e999
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.api.openstack.compute import console_auth_tokens
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
class ConsoleAuthTokensPolicyTest(base.BasePolicyTest):
"""Test Console Auth Tokens APIs policies with all possible context.
This class defines the set of context with different roles
which are allowed and not allowed to pass the policy checks.
With those set of context, it will call the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(ConsoleAuthTokensPolicyTest, self).setUp()
self.controller = console_auth_tokens.ConsoleAuthTokensController()
self.req = fakes.HTTPRequest.blank('', version='2.31')
# Check that admin is able to get console connection information.
self.admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
# Check that non-admin is not able to get console connection
# information.
self.admin_unauthorized_contexts = [
self.system_member_context, self.system_reader_context,
self.system_foo_context, self.project_member_context,
self.other_project_member_context,
self.project_foo_context, self.project_reader_context
]
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_console_connect_info_token_policy(self, mock_validate):
rule_name = "os_compute_api:os-console-auth-tokens"
self.common_policy_check(self.admin_authorized_contexts,
self.admin_unauthorized_contexts,
rule_name, self.controller.show,
self.req, fakes.FAKE_UUID)
class ConsoleAuthTokensScopeTypePolicyTest(ConsoleAuthTokensPolicyTest):
"""Test Console Auth Tokens APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
so that we can switch on the scope checking on oslo policy side.
It defines the set of context with scoped token
which are allowed and not allowed to pass the policy checks.
With those set of context, it will run the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(ConsoleAuthTokensScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
|
<commit_before><commit_msg>Add test coverage of existing os-console-auth-tokens policies
Current tests do not have good test coverage of existing policies.
Either tests for policies do not exist or if they exist then they
do not cover the actual negative and positive testing.
For Example, if any policy with default rule as admin only then
test should verify:
- policy check pass with context having admin or server owner
- policy check fail with context having non-admin and not server owner
As discussed in policy-defaults-refresh [1], to change the policies
with new default roles and scope_type, we need to have the enough
testing coverage of existing policy behavior.
When we will add the scope_type in policies or new default roles,
then these test coverage will be extended to adopt the new changes
and also make sure we do not break the existing behavior.
This commit covers the testing coverage of existing os-console-auth-tokens
policies.
Partial implement blueprint policy-defaults-refresh
[1] https://specs.openstack.org/openstack/nova-specs/specs/ussuri/approved/policy-defaults-refresh.html#testing
Change-Id: Iface875e2af24fd2dced0daa079bf7595762e999<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.api.openstack.compute import console_auth_tokens
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
class ConsoleAuthTokensPolicyTest(base.BasePolicyTest):
"""Test Console Auth Tokens APIs policies with all possible context.
This class defines the set of context with different roles
which are allowed and not allowed to pass the policy checks.
With those set of context, it will call the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(ConsoleAuthTokensPolicyTest, self).setUp()
self.controller = console_auth_tokens.ConsoleAuthTokensController()
self.req = fakes.HTTPRequest.blank('', version='2.31')
# Check that admin is able to get console connection information.
self.admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
# Check that non-admin is not able to get console connection
# information.
self.admin_unauthorized_contexts = [
self.system_member_context, self.system_reader_context,
self.system_foo_context, self.project_member_context,
self.other_project_member_context,
self.project_foo_context, self.project_reader_context
]
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_console_connect_info_token_policy(self, mock_validate):
rule_name = "os_compute_api:os-console-auth-tokens"
self.common_policy_check(self.admin_authorized_contexts,
self.admin_unauthorized_contexts,
rule_name, self.controller.show,
self.req, fakes.FAKE_UUID)
class ConsoleAuthTokensScopeTypePolicyTest(ConsoleAuthTokensPolicyTest):
"""Test Console Auth Tokens APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
so that we can switch on the scope checking on oslo policy side.
It defines the set of context with scoped token
which are allowed and not allowed to pass the policy checks.
With those set of context, it will run the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(ConsoleAuthTokensScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
|
Add test coverage of existing os-console-auth-tokens policies
Current tests do not have good test coverage of existing policies.
Either tests for policies do not exist or if they exist then they
do not cover the actual negative and positive testing.
For Example, if any policy with default rule as admin only then
test should verify:
- policy check pass with context having admin or server owner
- policy check fail with context having non-admin and not server owner
As discussed in policy-defaults-refresh [1], to change the policies
with new default roles and scope_type, we need to have the enough
testing coverage of existing policy behavior.
When we will add the scope_type in policies or new default roles,
then these test coverage will be extended to adopt the new changes
and also make sure we do not break the existing behavior.
This commit covers the testing coverage of existing os-console-auth-tokens
policies.
Partial implement blueprint policy-defaults-refresh
[1] https://specs.openstack.org/openstack/nova-specs/specs/ussuri/approved/policy-defaults-refresh.html#testing
Change-Id: Iface875e2af24fd2dced0daa079bf7595762e999# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.api.openstack.compute import console_auth_tokens
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
class ConsoleAuthTokensPolicyTest(base.BasePolicyTest):
"""Test Console Auth Tokens APIs policies with all possible context.
This class defines the set of context with different roles
which are allowed and not allowed to pass the policy checks.
With those set of context, it will call the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(ConsoleAuthTokensPolicyTest, self).setUp()
self.controller = console_auth_tokens.ConsoleAuthTokensController()
self.req = fakes.HTTPRequest.blank('', version='2.31')
# Check that admin is able to get console connection information.
self.admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
# Check that non-admin is not able to get console connection
# information.
self.admin_unauthorized_contexts = [
self.system_member_context, self.system_reader_context,
self.system_foo_context, self.project_member_context,
self.other_project_member_context,
self.project_foo_context, self.project_reader_context
]
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_console_connect_info_token_policy(self, mock_validate):
rule_name = "os_compute_api:os-console-auth-tokens"
self.common_policy_check(self.admin_authorized_contexts,
self.admin_unauthorized_contexts,
rule_name, self.controller.show,
self.req, fakes.FAKE_UUID)
class ConsoleAuthTokensScopeTypePolicyTest(ConsoleAuthTokensPolicyTest):
"""Test Console Auth Tokens APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
so that we can switch on the scope checking on oslo policy side.
It defines the set of context with scoped token
which are allowed and not allowed to pass the policy checks.
With those set of context, it will run the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(ConsoleAuthTokensScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
|
<commit_before><commit_msg>Add test coverage of existing os-console-auth-tokens policies
Current tests do not have good test coverage of existing policies.
Either tests for policies do not exist or if they exist then they
do not cover the actual negative and positive testing.
For Example, if any policy with default rule as admin only then
test should verify:
- policy check pass with context having admin or server owner
- policy check fail with context having non-admin and not server owner
As discussed in policy-defaults-refresh [1], to change the policies
with new default roles and scope_type, we need to have the enough
testing coverage of existing policy behavior.
When we will add the scope_type in policies or new default roles,
then these test coverage will be extended to adopt the new changes
and also make sure we do not break the existing behavior.
This commit covers the testing coverage of existing os-console-auth-tokens
policies.
Partial implement blueprint policy-defaults-refresh
[1] https://specs.openstack.org/openstack/nova-specs/specs/ussuri/approved/policy-defaults-refresh.html#testing
Change-Id: Iface875e2af24fd2dced0daa079bf7595762e999<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.api.openstack.compute import console_auth_tokens
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
class ConsoleAuthTokensPolicyTest(base.BasePolicyTest):
"""Test Console Auth Tokens APIs policies with all possible context.
This class defines the set of context with different roles
which are allowed and not allowed to pass the policy checks.
With those set of context, it will call the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(ConsoleAuthTokensPolicyTest, self).setUp()
self.controller = console_auth_tokens.ConsoleAuthTokensController()
self.req = fakes.HTTPRequest.blank('', version='2.31')
# Check that admin is able to get console connection information.
self.admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
# Check that non-admin is not able to get console connection
# information.
self.admin_unauthorized_contexts = [
self.system_member_context, self.system_reader_context,
self.system_foo_context, self.project_member_context,
self.other_project_member_context,
self.project_foo_context, self.project_reader_context
]
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_console_connect_info_token_policy(self, mock_validate):
rule_name = "os_compute_api:os-console-auth-tokens"
self.common_policy_check(self.admin_authorized_contexts,
self.admin_unauthorized_contexts,
rule_name, self.controller.show,
self.req, fakes.FAKE_UUID)
class ConsoleAuthTokensScopeTypePolicyTest(ConsoleAuthTokensPolicyTest):
"""Test Console Auth Tokens APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
so that we can switch on the scope checking on oslo policy side.
It defines the set of context with scoped token
which are allowed and not allowed to pass the policy checks.
With those set of context, it will run the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(ConsoleAuthTokensScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
|
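The policy tests above delegate to common_policy_check on the base.BasePolicyTest class, which is not included in the record. The helper below is only an assumed sketch of what such a method plausibly does (iterate the contexts and assert pass or rejection for the named rule); nova's real implementation may differ in detail:

from nova import exception

def common_policy_check(self, authorized_contexts, unauthorized_contexts,
                        rule_name, func, req, *args, **kwargs):
    # Assumed sketch: contexts in the first list must pass the check guarded
    # by `rule_name`, contexts in the second list must be rejected.
    for ctx in authorized_contexts:
        req.environ['nova.context'] = ctx
        func(req, *args, **kwargs)
    for ctx in unauthorized_contexts:
        req.environ['nova.context'] = ctx
        self.assertRaises(exception.PolicyNotAuthorized,
                          func, req, *args, **kwargs)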
|
babbc80437df31b61973576065296b4eb749e243
|
src/analyses/migrations/0002_auto_20160616_1724.py
|
src/analyses/migrations/0002_auto_20160616_1724.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-16 17:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('analyses', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='genomereference',
name='newer_reference',
field=models.ForeignKey(blank=True, help_text='If new version of the genome reference of the same organism and source exists, link the reference here. For example, UCSC mm9 can set its newer reference to UCSC mm10.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='analyses.GenomeReference', verbose_name='newer version'),
),
]
|
Update genome reference model migration
|
Update genome reference model migration
|
Python
|
mit
|
ccwang002/biocloud-server-kai,ccwang002/biocloud-server-kai,ccwang002/biocloud-server-kai
|
Update genome reference model migration
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-16 17:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('analyses', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='genomereference',
name='newer_reference',
field=models.ForeignKey(blank=True, help_text='If new version of the genome reference of the same organism and source exists, link the reference here. For example, UCSC mm9 can set its newer reference to UCSC mm10.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='analyses.GenomeReference', verbose_name='newer version'),
),
]
|
<commit_before><commit_msg>Update genome reference model migration<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-16 17:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('analyses', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='genomereference',
name='newer_reference',
field=models.ForeignKey(blank=True, help_text='If new version of the genome reference of the same organism and source exists, link the reference here. For example, UCSC mm9 can set its newer reference to UCSC mm10.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='analyses.GenomeReference', verbose_name='newer version'),
),
]
|
Update genome reference model migration# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-16 17:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('analyses', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='genomereference',
name='newer_reference',
field=models.ForeignKey(blank=True, help_text='If new version of the genome reference of the same organism and source exists, link the reference here. For example, UCSC mm9 can set its newer reference to UCSC mm10.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='analyses.GenomeReference', verbose_name='newer version'),
),
]
|
<commit_before><commit_msg>Update genome reference model migration<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-16 17:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('analyses', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='genomereference',
name='newer_reference',
field=models.ForeignKey(blank=True, help_text='If new version of the genome reference of the same organism and source exists, link the reference here. For example, UCSC mm9 can set its newer reference to UCSC mm10.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='analyses.GenomeReference', verbose_name='newer version'),
),
]
|
|
39327cb18d3551618e84aaa2f70f5e3baaf734de
|
tests/terminal_tests/CorrectGrammarHandlingTest.py
|
tests/terminal_tests/CorrectGrammarHandlingTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase
from grammpy import Terminal
class TempClass:
pass
class CorrectGrammarHandlingTest(TestCase):
def test_sameNumber(self):
ter1 = Terminal(0, 1)
ter2 = Terminal(0, 1)
self.assertEqual(ter1, ter2)
def test_sameString(self):
ter1 = Terminal(0, 'a')
ter2 = Terminal(0, 'a')
self.assertEqual(ter1, ter2)
def test_sameClass(self):
ter1 = Terminal(0, TempClass)
ter2 = Terminal(0, TempClass)
self.assertEqual(ter1, ter2)
def test_sameInstance(self):
inst = TempClass()
ter1 = Terminal(0, inst)
ter2 = Terminal(0, inst)
self.assertEqual(ter1, ter2)
|
Add test for correct handling of grammar in terminal
|
Add test for correct handling of grammar in terminal
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add test for correct handling of grammar in terminal
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase
from grammpy import Terminal
class TempClass:
pass
class CorrectGrammarHandlingTest(TestCase):
def test_sameNumber(self):
ter1 = Terminal(0, 1)
ter2 = Terminal(0, 1)
self.assertEqual(ter1, ter2)
def test_sameString(self):
ter1 = Terminal(0, 'a')
ter2 = Terminal(0, 'a')
self.assertEqual(ter1, ter2)
def test_sameClass(self):
ter1 = Terminal(0, TempClass)
ter2 = Terminal(0, TempClass)
self.assertEqual(ter1, ter2)
def test_sameInstance(self):
inst = TempClass()
ter1 = Terminal(0, inst)
ter2 = Terminal(0, inst)
self.assertEqual(ter1, ter2)
|
<commit_before><commit_msg>Add test for correct handling of grammar in terminal<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase
from grammpy import Terminal
class TempClass:
pass
class CorrectGrammarHandlingTest(TestCase):
def test_sameNumber(self):
ter1 = Terminal(0, 1)
ter2 = Terminal(0, 1)
self.assertEqual(ter1, ter2)
def test_sameString(self):
ter1 = Terminal(0, 'a')
ter2 = Terminal(0, 'a')
self.assertEqual(ter1, ter2)
def test_sameClass(self):
ter1 = Terminal(0, TempClass)
ter2 = Terminal(0, TempClass)
self.assertEqual(ter1, ter2)
def test_sameInstance(self):
inst = TempClass()
ter1 = Terminal(0, inst)
ter2 = Terminal(0, inst)
self.assertEqual(ter1, ter2)
|
Add test for correct handling of grammar in terminal#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase
from grammpy import Terminal
class TempClass:
pass
class CorrectGrammarHandlingTest(TestCase):
def test_sameNumber(self):
ter1 = Terminal(0, 1)
ter2 = Terminal(0, 1)
self.assertEqual(ter1, ter2)
def test_sameString(self):
ter1 = Terminal(0, 'a')
ter2 = Terminal(0, 'a')
self.assertEqual(ter1, ter2)
def test_sameClass(self):
ter1 = Terminal(0, TempClass)
ter2 = Terminal(0, TempClass)
self.assertEqual(ter1, ter2)
def test_sameInstance(self):
inst = TempClass()
ter1 = Terminal(0, inst)
ter2 = Terminal(0, inst)
self.assertEqual(ter1, ter2)
|
<commit_before><commit_msg>Add test for correct handling of grammar in terminal<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase
from grammpy import Terminal
class TempClass:
pass
class CorrectGrammarHandlingTest(TestCase):
def test_sameNumber(self):
ter1 = Terminal(0, 1)
ter2 = Terminal(0, 1)
self.assertEqual(ter1, ter2)
def test_sameString(self):
ter1 = Terminal(0, 'a')
ter2 = Terminal(0, 'a')
self.assertEqual(ter1, ter2)
def test_sameClass(self):
ter1 = Terminal(0, TempClass)
ter2 = Terminal(0, TempClass)
self.assertEqual(ter1, ter2)
def test_sameInstance(self):
inst = TempClass()
ter1 = Terminal(0, inst)
ter2 = Terminal(0, inst)
self.assertEqual(ter1, ter2)
|
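The tests in the record above only pin down equality behaviour for terminals built from the same symbol and grammar. The class below is a hypothetical stand-in that would satisfy them; it is not grammpy's actual Terminal implementation:

class SimpleTerminal:
    """Illustrative stand-in: equality by (symbol, grammar), hash by symbol."""
    def __init__(self, symbol, grammar):
        self._symbol = symbol
        self._grammar = grammar

    def __eq__(self, other):
        return (isinstance(other, SimpleTerminal) and
                self._symbol == other._symbol and
                self._grammar == other._grammar)

    def __hash__(self):
        return hash(self._symbol)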
|
42851304fecec95c32eebd618aae192743aab5d4
|
src/ggrc_risk_assessment_v2/migrations/versions/20141121231826_1347acbb4dc2_add_unique_constraint_to_threat_actors.py
|
src/ggrc_risk_assessment_v2/migrations/versions/20141121231826_1347acbb4dc2_add_unique_constraint_to_threat_actors.py
|
"""Add unique constraint to threat actors
Revision ID: 1347acbb4dc2
Revises: 5ada65dc60b3
Create Date: 2014-11-21 23:18:26.689048
"""
# revision identifiers, used by Alembic.
revision = '1347acbb4dc2'
down_revision = '5ada65dc60b3'
from alembic import op
def upgrade():
op.create_unique_constraint('uq_t_actors', 'threat_actors', ['title'])
def downgrade():
op.drop_constraint('uq_t_actors', 'threat_actors', 'unique')
|
Add unique constraint on Threat Actor title
|
Add unique constraint on Threat Actor title
|
Python
|
apache-2.0
|
prasannav7/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,prasannav7/ggrc-core,selahssea/ggrc-core,hyperNURb/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,AleksNeStu/ggrc-core,hasanalom/ggrc-core,j0gurt/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,plamut/ggrc-core,prasannav7/ggrc-core,selahssea/ggrc-core,hyperNURb/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,jmakov/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,hyperNURb/ggrc-core,hyperNURb/ggrc-core,kr41/ggrc-core,NejcZupec/ggrc-core,hasanalom/ggrc-core,edofic/ggrc-core,edofic/ggrc-core,jmakov/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,hasanalom/ggrc-core,andrei-karalionak/ggrc-core,kr41/ggrc-core,jmakov/ggrc-core,VinnieJohns/ggrc-core,hasanalom/ggrc-core,jmakov/ggrc-core,jmakov/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,hyperNURb/ggrc-core,j0gurt/ggrc-core,prasannav7/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,hasanalom/ggrc-core
|
Add unique constraint on Threat Actor title
|
"""Add unique constraint to threat actors
Revision ID: 1347acbb4dc2
Revises: 5ada65dc60b3
Create Date: 2014-11-21 23:18:26.689048
"""
# revision identifiers, used by Alembic.
revision = '1347acbb4dc2'
down_revision = '5ada65dc60b3'
from alembic import op
def upgrade():
op.create_unique_constraint('uq_t_actors', 'threat_actors', ['title'])
def downgrade():
op.drop_constraint('uq_t_actors', 'threat_actors', 'unique')
|
<commit_before><commit_msg>Add unique constraint on Threat Actor title<commit_after>
|
"""Add unique constraint to threat actors
Revision ID: 1347acbb4dc2
Revises: 5ada65dc60b3
Create Date: 2014-11-21 23:18:26.689048
"""
# revision identifiers, used by Alembic.
revision = '1347acbb4dc2'
down_revision = '5ada65dc60b3'
from alembic import op
def upgrade():
op.create_unique_constraint('uq_t_actors', 'threat_actors', ['title'])
def downgrade():
op.drop_constraint('uq_t_actors', 'threat_actors', 'unique')
|
Add unique constraint on Threat Actor title
"""Add unique constraint to threat actors
Revision ID: 1347acbb4dc2
Revises: 5ada65dc60b3
Create Date: 2014-11-21 23:18:26.689048
"""
# revision identifiers, used by Alembic.
revision = '1347acbb4dc2'
down_revision = '5ada65dc60b3'
from alembic import op
def upgrade():
op.create_unique_constraint('uq_t_actors', 'threat_actors', ['title'])
def downgrade():
op.drop_constraint('uq_t_actors', 'threat_actors', 'unique')
|
<commit_before><commit_msg>Add unique constraint on Threat Actor title<commit_after>
"""Add unique constraint to threat actors
Revision ID: 1347acbb4dc2
Revises: 5ada65dc60b3
Create Date: 2014-11-21 23:18:26.689048
"""
# revision identifiers, used by Alembic.
revision = '1347acbb4dc2'
down_revision = '5ada65dc60b3'
from alembic import op
def upgrade():
op.create_unique_constraint('uq_t_actors', 'threat_actors', ['title'])
def downgrade():
op.drop_constraint('uq_t_actors', 'threat_actors', 'unique')
|
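One detail of the downgrade above: the third positional argument to Alembic's drop_constraint(constraint_name, table_name, type_=None) is the constraint type, so passing 'unique' positionally works; spelling it as a keyword makes the intent clearer. An equivalent sketch:

from alembic import op

def downgrade():
    # Same effect as the record above, with the constraint type named explicitly.
    op.drop_constraint('uq_t_actors', 'threat_actors', type_='unique')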
|
66e6ba2988aee0cf9301f735e170b7f74ca310ac
|
qcfractal/alembic/versions/4bb79efa9855_add_queue_manager_id_to_base_results.py
|
qcfractal/alembic/versions/4bb79efa9855_add_queue_manager_id_to_base_results.py
|
"""Add queue_manager id to Base_results
Revision ID: 4bb79efa9855
Revises: 26cfd7b0439e
Create Date: 2019-07-26 15:31:14.811337
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4bb79efa9855'
down_revision = '26cfd7b0439e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('base_result', sa.Column('manager_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'base_result', 'queue_manager', ['manager_id'], ['id'], ondelete='SET NULL')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'base_result', type_='foreignkey')
op.drop_column('base_result', 'manager_id')
# ### end Alembic commands ###
|
Add db migration to add manager_id to base_results
|
Add db migration to add manager_id to base_results
|
Python
|
bsd-3-clause
|
psi4/mongo_qcdb,psi4/DatenQM,psi4/mongo_qcdb,psi4/DatenQM
|
Add db migration to add manager_id to base_results
|
"""Add queue_manager id to Base_results
Revision ID: 4bb79efa9855
Revises: 26cfd7b0439e
Create Date: 2019-07-26 15:31:14.811337
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4bb79efa9855'
down_revision = '26cfd7b0439e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('base_result', sa.Column('manager_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'base_result', 'queue_manager', ['manager_id'], ['id'], ondelete='SET NULL')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'base_result', type_='foreignkey')
op.drop_column('base_result', 'manager_id')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add db migration to add manager_id to base_results<commit_after>
|
"""Add queue_manager id to Base_results
Revision ID: 4bb79efa9855
Revises: 26cfd7b0439e
Create Date: 2019-07-26 15:31:14.811337
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4bb79efa9855'
down_revision = '26cfd7b0439e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('base_result', sa.Column('manager_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'base_result', 'queue_manager', ['manager_id'], ['id'], ondelete='SET NULL')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'base_result', type_='foreignkey')
op.drop_column('base_result', 'manager_id')
# ### end Alembic commands ###
|
Add db migration to add manager_id to base_results"""Add queue_manager id to Base_results
Revision ID: 4bb79efa9855
Revises: 26cfd7b0439e
Create Date: 2019-07-26 15:31:14.811337
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4bb79efa9855'
down_revision = '26cfd7b0439e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('base_result', sa.Column('manager_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'base_result', 'queue_manager', ['manager_id'], ['id'], ondelete='SET NULL')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'base_result', type_='foreignkey')
op.drop_column('base_result', 'manager_id')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add db migration to add manager_id to base_results<commit_after>"""Add queue_manager id to Base_results
Revision ID: 4bb79efa9855
Revises: 26cfd7b0439e
Create Date: 2019-07-26 15:31:14.811337
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4bb79efa9855'
down_revision = '26cfd7b0439e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('base_result', sa.Column('manager_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'base_result', 'queue_manager', ['manager_id'], ['id'], ondelete='SET NULL')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'base_result', type_='foreignkey')
op.drop_column('base_result', 'manager_id')
# ### end Alembic commands ###
|
|
807bb8425665e1cdd9040dfdd830603b95e9ab0a
|
tests/test_inheritance.py
|
tests/test_inheritance.py
|
from dtest import *
from dtest.util import *
# Define setUpClass/tearDownClass/setUp/tearDown for inheritance
class TestInheritanceBase(DTestCase):
class_setup = None
instance_setup = None
@classmethod
def setUpClass(cls):
assert_is_none(cls.class_setup)
cls.class_setup = True
@classmethod
def tearDownClass(cls):
assert_false(cls.class_setup)
def setUp(self):
assert_is_none(self.instance_setup)
self.instance_setup = True
def tearDown(self):
assert_false(self.instance_setup)
# See if we inherited them
class TestInheritance(TestInheritanceBase):
def test_inheritance(self):
assert_true(self.__class__.class_setup)
assert_true(self.instance_setup)
self.__class__.class_setup = False
self.instance_setup = False
|
Test that inheritance of setUp/tearDown/setUpClass/tearDownClass works as expected
|
Test that inheritance of setUp/tearDown/setUpClass/tearDownClass works
as expected
|
Python
|
apache-2.0
|
klmitch/dtest,klmitch/dtest
|
Test that inheritance of setUp/tearDown/setUpClass/tearDownClass works
as expected
|
from dtest import *
from dtest.util import *
# Define setUpClass/tearDownClass/setUp/tearDown for inheritance
class TestInheritanceBase(DTestCase):
class_setup = None
instance_setup = None
@classmethod
def setUpClass(cls):
assert_is_none(cls.class_setup)
cls.class_setup = True
@classmethod
def tearDownClass(cls):
assert_false(cls.class_setup)
def setUp(self):
assert_is_none(self.instance_setup)
self.instance_setup = True
def tearDown(self):
assert_false(self.instance_setup)
# See if we inherited them
class TestInheritance(TestInheritanceBase):
def test_inheritance(self):
assert_true(self.__class__.class_setup)
assert_true(self.instance_setup)
self.__class__.class_setup = False
self.instance_setup = False
|
<commit_before><commit_msg>Test that inheritance of setUp/tearDown/setUpClass/tearDownClass works
as expected<commit_after>
|
from dtest import *
from dtest.util import *
# Define setUpClass/tearDownClass/setUp/tearDown for inheritance
class TestInheritanceBase(DTestCase):
class_setup = None
instance_setup = None
@classmethod
def setUpClass(cls):
assert_is_none(cls.class_setup)
cls.class_setup = True
@classmethod
def tearDownClass(cls):
assert_false(cls.class_setup)
def setUp(self):
assert_is_none(self.instance_setup)
self.instance_setup = True
def tearDown(self):
assert_false(self.instance_setup)
# See if we inherited them
class TestInheritance(TestInheritanceBase):
def test_inheritance(self):
assert_true(self.__class__.class_setup)
assert_true(self.instance_setup)
self.__class__.class_setup = False
self.instance_setup = False
|
Test that inheritance of setUp/tearDown/setUpClass/tearDownClass works
as expectedfrom dtest import *
from dtest.util import *
# Define setUpClass/tearDownClass/setUp/tearDown for inheritance
class TestInheritanceBase(DTestCase):
class_setup = None
instance_setup = None
@classmethod
def setUpClass(cls):
assert_is_none(cls.class_setup)
cls.class_setup = True
@classmethod
def tearDownClass(cls):
assert_false(cls.class_setup)
def setUp(self):
assert_is_none(self.instance_setup)
self.instance_setup = True
def tearDown(self):
assert_false(self.instance_setup)
# See if we inherited them
class TestInheritance(TestInheritanceBase):
def test_inheritance(self):
assert_true(self.__class__.class_setup)
assert_true(self.instance_setup)
self.__class__.class_setup = False
self.instance_setup = False
|
<commit_before><commit_msg>Test that inheritance of setUp/tearDown/setUpClass/tearDownClass works
as expected<commit_after>from dtest import *
from dtest.util import *
# Define setUpClass/tearDownClass/setUp/tearDown for inheritance
class TestInheritanceBase(DTestCase):
class_setup = None
instance_setup = None
@classmethod
def setUpClass(cls):
assert_is_none(cls.class_setup)
cls.class_setup = True
@classmethod
def tearDownClass(cls):
assert_false(cls.class_setup)
def setUp(self):
assert_is_none(self.instance_setup)
self.instance_setup = True
def tearDown(self):
assert_false(self.instance_setup)
# See if we inherited them
class TestInheritance(TestInheritanceBase):
def test_inheritance(self):
assert_true(self.__class__.class_setup)
assert_true(self.instance_setup)
self.__class__.class_setup = False
self.instance_setup = False
|
|
eda80dd9a903a7baaddad123978981352de6d337
|
project/app/migrations/0003_auto_20170311_0837.py
|
project/app/migrations/0003_auto_20170311_0837.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-11 16:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0002_auto_20170311_0811'),
]
operations = [
migrations.AlterUniqueTogether(
name='session',
unique_together=set([]),
),
]
|
Remove constraint on Session model
|
Remove constraint on Session model
|
Python
|
bsd-2-clause
|
barberscore/barberscore-api,barberscore/barberscore-api,dbinetti/barberscore-django,barberscore/barberscore-api,dbinetti/barberscore,dbinetti/barberscore,dbinetti/barberscore-django,barberscore/barberscore-api
|
Remove constraint on Session model
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-11 16:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0002_auto_20170311_0811'),
]
operations = [
migrations.AlterUniqueTogether(
name='session',
unique_together=set([]),
),
]
|
<commit_before><commit_msg>Remove constraint on Session model<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-11 16:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0002_auto_20170311_0811'),
]
operations = [
migrations.AlterUniqueTogether(
name='session',
unique_together=set([]),
),
]
|
Remove constraint on Session model# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-11 16:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0002_auto_20170311_0811'),
]
operations = [
migrations.AlterUniqueTogether(
name='session',
unique_together=set([]),
),
]
|
<commit_before><commit_msg>Remove constraint on Session model<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-11 16:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0002_auto_20170311_0811'),
]
operations = [
migrations.AlterUniqueTogether(
name='session',
unique_together=set([]),
),
]
|
|
03f9355de0c25f41fa5ed78b81b0fd0ee988a117
|
py/kth-largest-element-in-an-array.py
|
py/kth-largest-element-in-an-array.py
|
import random
class Solution(object):
def findKthLargest(self, nums, k, start=0, end=None):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
l = len(nums)
if end is None:
end = l
if end == start + 1:
return nums[start]
pivot = nums[random.randint(start, end - 1)]
i, j, n = start, start, end - 1
while j <= n:
if nums[j] < pivot:
if i != j:
nums[i], nums[j] = nums[j], nums[i]
i += 1
j += 1
elif nums[j] > pivot:
if j != n:
nums[j], nums[n] = nums[n], nums[j]
n -= 1
else:
j += 1
if i <= l - k < j:
return nums[-k]
if l - k < i:
return self.findKthLargest(nums, k, start, i)
else:
return self.findKthLargest(nums, k, j, end)
|
Add py solution for 215. Kth Largest Element in an Array
|
Add py solution for 215. Kth Largest Element in an Array
215. Kth Largest Element in an Array: https://leetcode.com/problems/kth-largest-element-in-an-array/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 215. Kth Largest Element in an Array
215. Kth Largest Element in an Array: https://leetcode.com/problems/kth-largest-element-in-an-array/
|
import random
class Solution(object):
def findKthLargest(self, nums, k, start=0, end=None):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
l = len(nums)
if end is None:
end = l
if end == start + 1:
return nums[start]
pivot = nums[random.randint(start, end - 1)]
i, j, n = start, start, end - 1
while j <= n:
if nums[j] < pivot:
if i != j:
nums[i], nums[j] = nums[j], nums[i]
i += 1
j += 1
elif nums[j] > pivot:
if j != n:
nums[j], nums[n] = nums[n], nums[j]
n -= 1
else:
j += 1
if i <= l - k < j:
return nums[-k]
if l - k < i:
return self.findKthLargest(nums, k, start, i)
else:
return self.findKthLargest(nums, k, j, end)
|
<commit_before><commit_msg>Add py solution for 215. Kth Largest Element in an Array
215. Kth Largest Element in an Array: https://leetcode.com/problems/kth-largest-element-in-an-array/<commit_after>
|
import random
class Solution(object):
def findKthLargest(self, nums, k, start=0, end=None):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
l = len(nums)
if end is None:
end = l
if end == start + 1:
return nums[start]
pivot = nums[random.randint(start, end - 1)]
i, j, n = start, start, end - 1
while j <= n:
if nums[j] < pivot:
if i != j:
nums[i], nums[j] = nums[j], nums[i]
i += 1
j += 1
elif nums[j] > pivot:
if j != n:
nums[j], nums[n] = nums[n], nums[j]
n -= 1
else:
j += 1
if i <= l - k < j:
return nums[-k]
if l - k < i:
return self.findKthLargest(nums, k, start, i)
else:
return self.findKthLargest(nums, k, j, end)
|
Add py solution for 215. Kth Largest Element in an Array
215. Kth Largest Element in an Array: https://leetcode.com/problems/kth-largest-element-in-an-array/import random
class Solution(object):
def findKthLargest(self, nums, k, start=0, end=None):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
l = len(nums)
if end is None:
end = l
if end == start + 1:
return nums[start]
pivot = nums[random.randint(start, end - 1)]
i, j, n = start, start, end - 1
while j <= n:
if nums[j] < pivot:
if i != j:
nums[i], nums[j] = nums[j], nums[i]
i += 1
j += 1
elif nums[j] > pivot:
if j != n:
nums[j], nums[n] = nums[n], nums[j]
n -= 1
else:
j += 1
if i <= l - k < j:
return nums[-k]
if l - k < i:
return self.findKthLargest(nums, k, start, i)
else:
return self.findKthLargest(nums, k, j, end)
|
<commit_before><commit_msg>Add py solution for 215. Kth Largest Element in an Array
215. Kth Largest Element in an Array: https://leetcode.com/problems/kth-largest-element-in-an-array/<commit_after>import random
class Solution(object):
def findKthLargest(self, nums, k, start=0, end=None):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
l = len(nums)
if end is None:
end = l
if end == start + 1:
return nums[start]
pivot = nums[random.randint(start, end - 1)]
i, j, n = start, start, end - 1
while j <= n:
if nums[j] < pivot:
if i != j:
nums[i], nums[j] = nums[j], nums[i]
i += 1
j += 1
elif nums[j] > pivot:
if j != n:
nums[j], nums[n] = nums[n], nums[j]
n -= 1
else:
j += 1
if i <= l - k < j:
return nums[-k]
if l - k < i:
return self.findKthLargest(nums, k, start, i)
else:
return self.findKthLargest(nums, k, j, end)
|
|
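A brief usage sketch for the quickselect solution above; note that findKthLargest partitions the input list in place, so pass a copy if the caller still needs the original order:

# Example invocation of the Solution class defined in the record above.
nums = [3, 2, 1, 5, 6, 4]
print(Solution().findKthLargest(list(nums), 2))  # prints 5 (the 2nd largest)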
28208126b532edb64c97c63a8cbff841c8da3c4f
|
scripts/officediff/xlsx-dump.py
|
scripts/officediff/xlsx-dump.py
|
from io import StringIO
import sys
import pandas as pd
for sheet_name in pd.ExcelFile(sys.argv[1]).sheet_names:
output = StringIO()
print('Sheet: %s' % sheet_name)
pd.read_excel(sys.argv[1], sheet_name=sheet_name)\
.to_csv(output,
header=True,
index=False)
print(output.getvalue())
|
Add Python script for powerpoint file diffs
|
Add Python script for powerpoint file diffs
|
Python
|
mit
|
Stratus3D/dotfiles,Stratus3D/dotfiles,Stratus3D/dotfiles
|
Add Python script for powerpoint file diffs
|
from io import StringIO
import sys
import pandas as pd
for sheet_name in pd.ExcelFile(sys.argv[1]).sheet_names:
output = StringIO()
print('Sheet: %s' % sheet_name)
pd.read_excel(sys.argv[1], sheet_name=sheet_name)\
.to_csv(output,
header=True,
index=False)
print(output.getvalue())
|
<commit_before><commit_msg>Add Python script for powerpoint file diffs<commit_after>
|
from io import StringIO
import sys
import pandas as pd
for sheet_name in pd.ExcelFile(sys.argv[1]).sheet_names:
output = StringIO()
print('Sheet: %s' % sheet_name)
pd.read_excel(sys.argv[1], sheet_name=sheet_name)\
.to_csv(output,
header=True,
index=False)
print(output.getvalue())
|
Add Python script for powerpoint file diffsfrom io import StringIO
import sys
import pandas as pd
for sheet_name in pd.ExcelFile(sys.argv[1]).sheet_names:
output = StringIO()
print('Sheet: %s' % sheet_name)
pd.read_excel(sys.argv[1], sheet_name=sheet_name)\
.to_csv(output,
header=True,
index=False)
print(output.getvalue())
|
<commit_before><commit_msg>Add Python script for powerpoint file diffs<commit_after>from io import StringIO
import sys
import pandas as pd
for sheet_name in pd.ExcelFile(sys.argv[1]).sheet_names:
output = StringIO()
print('Sheet: %s' % sheet_name)
pd.read_excel(sys.argv[1], sheet_name=sheet_name)\
.to_csv(output,
header=True,
index=False)
print(output.getvalue())
|
|
8ea2670402812738e41400e5b513bd902757635e
|
ideascube/conf/idb_sen_fap.py
|
ideascube/conf/idb_sen_fap.py
|
# -*- coding: utf-8 -*-
"""Ideaxbox for FAP in Sénégal"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Futur au Présent"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['SN']
TIME_ZONE = None
LANGUAGE_CODE = 'fr'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'wikipedia',
'languages': ['fr', 'wo']
},
{
'id': 'gutenberg',
'lang': 'fr',
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikisource',
'languages': ['fr']
},
{
'id': 'wiktionary',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
{
        'id': 'dirtybiology',
        'languages': ['fr']
},
{
'id': 'ubuntudoc',
'languages': ['fr']
},
{
'id': 'wikistage',
'languages': ['fr']
},
{
'id': 'maps',
'maps': [
(_('World'), 'world.map'),
('Sénégal', 'senegal.map'),
]
},
]
|
Add conf file for Ideasbox in Sénégal
|
Add conf file for Ideasbox in Sénégal
|
Python
|
agpl-3.0
|
ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube
|
Add conf file for Ideasbox in Sénégal
|
# -*- coding: utf-8 -*-
"""Ideaxbox for FAP in Sénégal"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Futur au Présent"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['SN']
TIME_ZONE = None
LANGUAGE_CODE = 'fr'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'wikipedia',
'languages': ['fr', 'wo']
},
{
'id': 'gutenberg',
'lang': 'fr',
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikisource',
'languages': ['fr']
},
{
'id': 'wiktionary',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
{
        'id': 'dirtybiology',
        'languages': ['fr']
},
{
'id': 'ubuntudoc',
'languages': ['fr']
},
{
'id': 'wikistage',
'languages': ['fr']
},
{
'id': 'maps',
'maps': [
(_('World'), 'world.map'),
('Sénégal', 'senegal.map'),
]
},
]
|
<commit_before><commit_msg>Add conf file for Ideasbox in Sénégal<commit_after>
|
# -*- coding: utf-8 -*-
"""Ideaxbox for FAP in Sénégal"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Futur au Présent"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['SN']
TIME_ZONE = None
LANGUAGE_CODE = 'fr'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'wikipedia',
'languages': ['fr', 'wo']
},
{
'id': 'gutenberg',
'lang': 'fr',
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikisource',
'languages': ['fr']
},
{
'id': 'wiktionary',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
{
        'id': 'dirtybiology',
        'languages': ['fr']
},
{
'id': 'ubuntudoc',
'languages': ['fr']
},
{
'id': 'wikistage',
'languages': ['fr']
},
{
'id': 'maps',
'maps': [
(_('World'), 'world.map'),
('Sénégal', 'senegal.map'),
]
},
]
|
Add conf file for Ideasbox in Sénégal# -*- coding: utf-8 -*-
"""Ideaxbox for FAP in Sénégal"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Futur au Présent"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['SN']
TIME_ZONE = None
LANGUAGE_CODE = 'fr'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'wikipedia',
'languages': ['fr', 'wo']
},
{
'id': 'gutenberg',
'lang': 'fr',
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikisource',
'languages': ['fr']
},
{
'id': 'wiktionary',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
{
        'id': 'dirtybiology',
        'languages': ['fr']
},
{
'id': 'ubuntudoc',
'languages': ['fr']
},
{
'id': 'wikistage',
'languages': ['fr']
},
{
'id': 'maps',
'maps': [
(_('World'), 'world.map'),
('Sénégal', 'senegal.map'),
]
},
]
|
<commit_before><commit_msg>Add conf file for Ideasbox in Sénégal<commit_after># -*- coding: utf-8 -*-
"""Ideaxbox for FAP in Sénégal"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Futur au Présent"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['SN']
TIME_ZONE = None
LANGUAGE_CODE = 'fr'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'wikipedia',
'languages': ['fr', 'wo']
},
{
'id': 'gutenberg',
'lang': 'fr',
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikisource',
'languages': ['fr']
},
{
'id': 'wiktionary',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
{
        'id': 'dirtybiology',
        'languages': ['fr']
},
{
'id': 'ubuntudoc',
'languages': ['fr']
},
{
'id': 'wikistage',
'languages': ['fr']
},
{
'id': 'maps',
'maps': [
(_('World'), 'world.map'),
('Sénégal', 'senegal.map'),
]
},
]
|
|
0f16c249f048e27a67f14ff834c690fc2434b55f
|
admin/common_auth/migrations/0006_auto_20170130_1611.py
|
admin/common_auth/migrations/0006_auto_20170130_1611.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-30 22:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('common_auth', '0005_auto_20170111_1513'),
]
operations = [
migrations.AlterModelOptions(
name='adminprofile',
options={'permissions': (('mark_spam', 'Can mark comments, projects and registrations as spam'), ('view_spam', 'Can view nodes, comments, and projects marked as spam'), ('view_metrics', 'Can view metrics on the OSF Admin app'), ('view_prereg', 'Can view entries for the preregistration chellenge on the admin'), ('administer_prereg', 'Can update, comment on, and approve entries to the prereg challenge'))},
),
]
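Once applied, these entries behave like any other Django permission. A hypothetical usage sketch (the view below is invented for illustration; only the 'common_auth.<codename>' permission strings come from the migration) would be:
from django.contrib.auth.decorators import permission_required
from django.http import HttpResponse
@permission_required('common_auth.view_metrics', raise_exception=True)
def metrics_dashboard(request):
    # Only users granted the new view_metrics permission get through.
    return HttpResponse('metrics go here')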
|
Add migration that adds permission for spam, metrics and prereg
|
Add migration that adds permission for spam, metrics and prereg
|
Python
|
apache-2.0
|
CenterForOpenScience/osf.io,monikagrabowska/osf.io,Nesiehr/osf.io,adlius/osf.io,caseyrollins/osf.io,brianjgeiger/osf.io,Nesiehr/osf.io,acshi/osf.io,laurenrevere/osf.io,pattisdr/osf.io,binoculars/osf.io,CenterForOpenScience/osf.io,mfraezz/osf.io,felliott/osf.io,mattclark/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,caseyrollins/osf.io,crcresearch/osf.io,erinspace/osf.io,HalcyonChimera/osf.io,hmoco/osf.io,aaxelb/osf.io,adlius/osf.io,Nesiehr/osf.io,chennan47/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,crcresearch/osf.io,leb2dg/osf.io,chennan47/osf.io,baylee-d/osf.io,monikagrabowska/osf.io,saradbowman/osf.io,brianjgeiger/osf.io,laurenrevere/osf.io,sloria/osf.io,TomBaxter/osf.io,caseyrollins/osf.io,sloria/osf.io,hmoco/osf.io,caneruguz/osf.io,HalcyonChimera/osf.io,hmoco/osf.io,TomBaxter/osf.io,mfraezz/osf.io,aaxelb/osf.io,felliott/osf.io,erinspace/osf.io,felliott/osf.io,mattclark/osf.io,leb2dg/osf.io,mfraezz/osf.io,Johnetordoff/osf.io,erinspace/osf.io,cwisecarver/osf.io,mattclark/osf.io,hmoco/osf.io,caneruguz/osf.io,cslzchen/osf.io,acshi/osf.io,leb2dg/osf.io,saradbowman/osf.io,monikagrabowska/osf.io,laurenrevere/osf.io,binoculars/osf.io,aaxelb/osf.io,pattisdr/osf.io,caneruguz/osf.io,chrisseto/osf.io,chennan47/osf.io,cslzchen/osf.io,icereval/osf.io,crcresearch/osf.io,acshi/osf.io,adlius/osf.io,baylee-d/osf.io,adlius/osf.io,felliott/osf.io,monikagrabowska/osf.io,TomBaxter/osf.io,acshi/osf.io,cwisecarver/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,chrisseto/osf.io,pattisdr/osf.io,chrisseto/osf.io,Johnetordoff/osf.io,icereval/osf.io,Johnetordoff/osf.io,acshi/osf.io,CenterForOpenScience/osf.io,icereval/osf.io,cslzchen/osf.io,mfraezz/osf.io,monikagrabowska/osf.io,chrisseto/osf.io,binoculars/osf.io,baylee-d/osf.io,CenterForOpenScience/osf.io,Nesiehr/osf.io,cwisecarver/osf.io,cwisecarver/osf.io,caneruguz/osf.io,Johnetordoff/osf.io,sloria/osf.io,leb2dg/osf.io
|
Add migration that adds permission for spam, metrics and prereg
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-30 22:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('common_auth', '0005_auto_20170111_1513'),
]
operations = [
migrations.AlterModelOptions(
name='adminprofile',
options={'permissions': (('mark_spam', 'Can mark comments, projects and registrations as spam'), ('view_spam', 'Can view nodes, comments, and projects marked as spam'), ('view_metrics', 'Can view metrics on the OSF Admin app'), ('view_prereg', 'Can view entries for the preregistration chellenge on the admin'), ('administer_prereg', 'Can update, comment on, and approve entries to the prereg challenge'))},
),
]
|
<commit_before><commit_msg>Add migration that adds permission for spam, metrics and prereg<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-30 22:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('common_auth', '0005_auto_20170111_1513'),
]
operations = [
migrations.AlterModelOptions(
name='adminprofile',
options={'permissions': (('mark_spam', 'Can mark comments, projects and registrations as spam'), ('view_spam', 'Can view nodes, comments, and projects marked as spam'), ('view_metrics', 'Can view metrics on the OSF Admin app'), ('view_prereg', 'Can view entries for the preregistration chellenge on the admin'), ('administer_prereg', 'Can update, comment on, and approve entries to the prereg challenge'))},
),
]
|
Add migration that adds permission for spam, metrics and prereg# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-30 22:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('common_auth', '0005_auto_20170111_1513'),
]
operations = [
migrations.AlterModelOptions(
name='adminprofile',
options={'permissions': (('mark_spam', 'Can mark comments, projects and registrations as spam'), ('view_spam', 'Can view nodes, comments, and projects marked as spam'), ('view_metrics', 'Can view metrics on the OSF Admin app'), ('view_prereg', 'Can view entries for the preregistration chellenge on the admin'), ('administer_prereg', 'Can update, comment on, and approve entries to the prereg challenge'))},
),
]
|
<commit_before><commit_msg>Add migration that adds permission for spam, metrics and prereg<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-30 22:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('common_auth', '0005_auto_20170111_1513'),
]
operations = [
migrations.AlterModelOptions(
name='adminprofile',
options={'permissions': (('mark_spam', 'Can mark comments, projects and registrations as spam'), ('view_spam', 'Can view nodes, comments, and projects marked as spam'), ('view_metrics', 'Can view metrics on the OSF Admin app'), ('view_prereg', 'Can view entries for the preregistration chellenge on the admin'), ('administer_prereg', 'Can update, comment on, and approve entries to the prereg challenge'))},
),
]
|
|
7e04bc41e977ef7304972cfc630cd9bf9d2c0aa2
|
examples/anonymized-real-case.py
|
examples/anonymized-real-case.py
|
#!/usr/bin/env python2
# coding: utf-8
# this example is used in production, it is depending on private libraries
# to communicate with internal APIs, but it can help you build your own
# production company-specific hook.
import sys
import json
import pprint
pp = pprint.PrettyPrinter(indent=4)
sys.path.insert(0, '/opt/python-provisioning')
from tools.verbose_logging import logging
from localcloud import compute
from api import web_hosting
# version of remote container
ALPINE_VERSION = '3.3'
def nope(msg):
return {'allowed': False, 'message': msg}
def archify(arch):
return {
'arm': 'armhf',
'x86_64': 'amd64',
}[arch]
def auth(hosting_id, *keys):
if len(keys) < 1:
return nope('no ssh key')
granted = False
web_hosting_int = web_hosting.WebHosting(hosting_id)
for key in keys:
try:
if web_hosting_int.is_valid_ssh_key(key) == True:
granted = True
break
except Exception as e:
logging.error(e)
return nope('http error')
if not granted:
return nope('access denied')
compute_int = compute.Compute()
try:
server = compute_int.get_server_by_webid(hosting_id)
logging.debug(pp.pformat(server))
except Exception as e:
logging.error(e)
return nope('error while trying to resolve server')
return {
'allowed': True,
'remote-user': hosting_id,
'image-name': 'local_web/alpine:{}-{}'.format(archify(server['arch']), ALPINE_VERSION),
'docker-run-args': [
'--name', 'ssh2docker_{}'.format(hosting_id),
'--hostname', server['name'],
'--rm',
'-it',
'-v', '/storage/users/{}/ftp:/ftp:rw'.format(hosting_id),
'-v', '/storage/users/{}/backups:/ftp/backups:ro'.format(hosting_id),
'-v', '/storage/users/{}/logs:/ftp/logs:ro'.format(hosting_id),
'-v', '/storage/users/{}/websites:/ftp/websites:rw'.format(hosting_id),
'-m', '256m',
'--cpu-shares', '512', # default = 1024, so ssh2docker gets half quota
'-u', 'webuser',
],
'env': {
'DOCKER_HOST': 'tcp://{}.local:2376'.format(server['id']),
'DOCKER_TLS_VERIFY': '1',
'DOCKER_CERT_PATH': '/opt/docker-tls/{}/.docker/'.format(server['hostname']),
},
'command': ['/bin/sh', '-i', '-l'],
}
print(json.dumps(auth(*sys.argv[1:])))
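The hook's whole contract is to print a single JSON document on stdout for ssh2docker to parse. A small illustrative sketch of the two shapes it can take (the hosting id and image tag below are placeholders, not real data):
import json
# Denied, as produced by the nope() helper:
print(json.dumps({'allowed': False, 'message': 'access denied'}))
# Granted, carrying the container launch description:
print(json.dumps({
    'allowed': True,
    'remote-user': 'web123',
    'image-name': 'local_web/alpine:amd64-3.3',
    'command': ['/bin/sh', '-i', '-l'],
}))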
|
Add anonymized real case example
|
Add anonymized real case example
|
Python
|
mit
|
moul/ssh2docker,moul/ssh2docker
|
Add anonymized real case example
|
#!/usr/bin/env python2
# coding: utf-8
# this example is used in production, it is depending on private libraries
# to communicate with internal APIs, but it can help you build your own
# production company-specific hook.
import sys
import json
import pprint
pp = pprint.PrettyPrinter(indent=4)
sys.path.insert(0, '/opt/python-provisioning')
from tools.verbose_logging import logging
from localcloud import compute
from api import web_hosting
# version of remote container
ALPINE_VERSION = '3.3'
def nope(msg):
return {'allowed': False, 'message': msg}
def archify(arch):
return {
'arm': 'armhf',
'x86_64': 'amd64',
}[arch]
def auth(hosting_id, *keys):
if len(keys) < 1:
return nope('no ssh key')
granted = False
web_hosting_int = web_hosting.WebHosting(hosting_id)
for key in keys:
try:
if web_hosting_int.is_valid_ssh_key(key) == True:
granted = True
break
except Exception as e:
logging.error(e)
return nope('http error')
if not granted:
return nope('access denied')
compute_int = compute.Compute()
try:
server = compute_int.get_server_by_webid(hosting_id)
logging.debug(pp.pformat(server))
except Exception as e:
logging.error(e)
return nope('error while trying to resolve server')
return {
'allowed': True,
'remote-user': hosting_id,
'image-name': 'local_web/alpine:{}-{}'.format(archify(server['arch']), ALPINE_VERSION),
'docker-run-args': [
'--name', 'ssh2docker_{}'.format(hosting_id),
'--hostname', server['name'],
'--rm',
'-it',
'-v', '/storage/users/{}/ftp:/ftp:rw'.format(hosting_id),
'-v', '/storage/users/{}/backups:/ftp/backups:ro'.format(hosting_id),
'-v', '/storage/users/{}/logs:/ftp/logs:ro'.format(hosting_id),
'-v', '/storage/users/{}/websites:/ftp/websites:rw'.format(hosting_id),
'-m', '256m',
'--cpu-shares', '512', # default = 1024, so ssh2docker gets half quota
'-u', 'webuser',
],
'env': {
'DOCKER_HOST': 'tcp://{}.local:2376'.format(server['id']),
'DOCKER_TLS_VERIFY': '1',
'DOCKER_CERT_PATH': '/opt/docker-tls/{}/.docker/'.format(server['hostname']),
},
'command': ['/bin/sh', '-i', '-l'],
}
print(json.dumps(auth(*sys.argv[1:])))
|
<commit_before><commit_msg>Add anonymized real case example<commit_after>
|
#!/usr/bin/env python2
# coding: utf-8
# this example is used in production, it is depending on private libraries
# to communicate with internal APIs, but it can help you build your own
# production company-specific hook.
import sys
import json
import pprint
pp = pprint.PrettyPrinter(indent=4)
sys.path.insert(0, '/opt/python-provisioning')
from tools.verbose_logging import logging
from localcloud import compute
from api import web_hosting
# version of remote container
ALPINE_VERSION = '3.3'
def nope(msg):
return {'allowed': False, 'message': msg}
def archify(arch):
return {
'arm': 'armhf',
'x86_64': 'amd64',
}[arch]
def auth(hosting_id, *keys):
if len(keys) < 1:
return nope('no ssh key')
granted = False
web_hosting_int = web_hosting.WebHosting(hosting_id)
for key in keys:
try:
if web_hosting_int.is_valid_ssh_key(key) == True:
granted = True
break
except Exception as e:
logging.error(e)
return nope('http error')
if not granted:
return nope('access denied')
compute_int = compute.Compute()
try:
server = compute_int.get_server_by_webid(hosting_id)
logging.debug(pp.pformat(server))
except Exception as e:
logging.error(e)
return nope('error while trying to resolve server')
return {
'allowed': True,
'remote-user': hosting_id,
'image-name': 'local_web/alpine:{}-{}'.format(archify(server['arch']), ALPINE_VERSION),
'docker-run-args': [
'--name', 'ssh2docker_{}'.format(hosting_id),
'--hostname', server['name'],
'--rm',
'-it',
'-v', '/storage/users/{}/ftp:/ftp:rw'.format(hosting_id),
'-v', '/storage/users/{}/backups:/ftp/backups:ro'.format(hosting_id),
'-v', '/storage/users/{}/logs:/ftp/logs:ro'.format(hosting_id),
'-v', '/storage/users/{}/websites:/ftp/websites:rw'.format(hosting_id),
'-m', '256m',
'--cpu-shares', '512', # default = 1024, so ssh2docker gets half quota
'-u', 'webuser',
],
'env': {
'DOCKER_HOST': 'tcp://{}.local:2376'.format(server['id']),
'DOCKER_TLS_VERIFY': '1',
'DOCKER_CERT_PATH': '/opt/docker-tls/{}/.docker/'.format(server['hostname']),
},
'command': ['/bin/sh', '-i', '-l'],
}
print(json.dumps(auth(*sys.argv[1:])))
|
Add anonymized real case example#!/usr/bin/env python2
# coding: utf-8
# this example is used in production, it is depending on private libraries
# to communicate with internal APIs, but it can help you build your own
# production company-specific hook.
import sys
import json
import pprint
pp = pprint.PrettyPrinter(indent=4)
sys.path.insert(0, '/opt/python-provisioning')
from tools.verbose_logging import logging
from localcloud import compute
from api import web_hosting
# version of remote container
ALPINE_VERSION = '3.3'
def nope(msg):
return {'allowed': False, 'message': msg}
def archify(arch):
return {
'arm': 'armhf',
'x86_64': 'amd64',
}[arch]
def auth(hosting_id, *keys):
if len(keys) < 1:
return nope('no ssh key')
granted = False
web_hosting_int = web_hosting.WebHosting(hosting_id)
for key in keys:
try:
if web_hosting_int.is_valid_ssh_key(key) == True:
granted = True
break
except Exception as e:
logging.error(e)
return nope('http error')
if not granted:
return nope('access denied')
compute_int = compute.Compute()
try:
server = compute_int.get_server_by_webid(hosting_id)
logging.debug(pp.pformat(server))
except Exception as e:
logging.error(e)
return nope('error while trying to resolve server')
return {
'allowed': True,
'remote-user': hosting_id,
'image-name': 'local_web/alpine:{}-{}'.format(archify(server['arch']), ALPINE_VERSION),
'docker-run-args': [
'--name', 'ssh2docker_{}'.format(hosting_id),
'--hostname', server['name'],
'--rm',
'-it',
'-v', '/storage/users/{}/ftp:/ftp:rw'.format(hosting_id),
'-v', '/storage/users/{}/backups:/ftp/backups:ro'.format(hosting_id),
'-v', '/storage/users/{}/logs:/ftp/logs:ro'.format(hosting_id),
'-v', '/storage/users/{}/websites:/ftp/websites:rw'.format(hosting_id),
'-m', '256m',
'--cpu-shares', '512', # default = 1024, so ssh2docker gets half quota
'-u', 'webuser',
],
'env': {
'DOCKER_HOST': 'tcp://{}.local:2376'.format(server['id']),
'DOCKER_TLS_VERIFY': '1',
'DOCKER_CERT_PATH': '/opt/docker-tls/{}/.docker/'.format(server['hostname']),
},
'command': ['/bin/sh', '-i', '-l'],
}
print(json.dumps(auth(*sys.argv[1:])))
|
<commit_before><commit_msg>Add anonymized real case example<commit_after>#!/usr/bin/env python2
# coding: utf-8
# this example is used in production, it is depending on private libraries
# to communicate with internal APIs, but it can help you build your own
# production company-specific hook.
import sys
import json
import pprint
pp = pprint.PrettyPrinter(indent=4)
sys.path.insert(0, '/opt/python-provisioning')
from tools.verbose_logging import logging
from localcloud import compute
from api import web_hosting
# version of remote container
ALPINE_VERSION = '3.3'
def nope(msg):
return {'allowed': False, 'message': msg}
def archify(arch):
return {
'arm': 'armhf',
'x86_64': 'amd64',
}[arch]
def auth(hosting_id, *keys):
if len(keys) < 1:
return nope('no ssh key')
granted = False
web_hosting_int = web_hosting.WebHosting(hosting_id)
for key in keys:
try:
if web_hosting_int.is_valid_ssh_key(key) == True:
granted = True
break
except Exception as e:
logging.error(e)
return nope('http error')
if not granted:
return nope('access denied')
compute_int = compute.Compute()
try:
server = compute_int.get_server_by_webid(hosting_id)
logging.debug(pp.pformat(server))
except Exception as e:
logging.error(e)
return nope('error while trying to resolve server')
return {
'allowed': True,
'remote-user': hosting_id,
'image-name': 'local_web/alpine:{}-{}'.format(archify(server['arch']), ALPINE_VERSION),
'docker-run-args': [
'--name', 'ssh2docker_{}'.format(hosting_id),
'--hostname', server['name'],
'--rm',
'-it',
'-v', '/storage/users/{}/ftp:/ftp:rw'.format(hosting_id),
'-v', '/storage/users/{}/backups:/ftp/backups:ro'.format(hosting_id),
'-v', '/storage/users/{}/logs:/ftp/logs:ro'.format(hosting_id),
'-v', '/storage/users/{}/websites:/ftp/websites:rw'.format(hosting_id),
'-m', '256m',
'--cpu-shares', '512', # default = 1024, so ssh2docker gets half quota
'-u', 'webuser',
],
'env': {
'DOCKER_HOST': 'tcp://{}.local:2376'.format(server['id']),
'DOCKER_TLS_VERIFY': '1',
'DOCKER_CERT_PATH': '/opt/docker-tls/{}/.docker/'.format(server['hostname']),
},
'command': ['/bin/sh', '-i', '-l'],
}
print(json.dumps(auth(*sys.argv[1:])))
|
|
cee56f48a8ad726498c75b38cf39e4c83ceeb359
|
analyze.py
|
analyze.py
|
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
def load_data(data_path):
'''Return dictionary `data` from string `data_path`
'''
os.path.join(data_path, '1.dat')
data = pickle.load(open(data_path, 'rb'))
return data
def get_baseline(data):
'''Get most recent baseline/calibration from subject.
'''
baselines = []
for k, v in data.items():
if 'baseline' in v:
print(k, v)
baselines.append((k, v))
# Get most recent baseline
return sorted(baselines)[-1][1].split(' ')[-1]
def get_distances(data):
'''Get tuple of posture measurements with time stamps.
Returns:
Tuple - (time_object, distances)
'''
distances = []
for k, v in data.items():
if type(v).__module__ == 'numpy':
# Convert strings to datetime object
time_object = datetime.strptime(k, '%Y-%m-%d_%H-%M-%S')
distances.append((time_object, v[0][2]))
    # Sort readings by time to restore order
    time_objects, dists = zip(*sorted(distances))
    return time_objects, dists
def plot(time_objects, dists):
pass
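plot() is still a stub; a minimal sketch (not part of the commit — the function name and styling below are assumptions) of what it might eventually do with the (time_objects, dists) pair returned by get_distances():
import matplotlib.pyplot as plt
def plot_distances(time_objects, dists):
    """Plot posture distance readings over time."""
    fig, ax = plt.subplots()
    ax.plot(time_objects, dists, marker='.')
    ax.set_xlabel('time')
    ax.set_ylabel('distance reading')
    fig.autofmt_xdate()      # tilt timestamp labels so they stay readable
    plt.show()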
|
Add helper functions for analyzing data
|
Add helper functions for analyzing data
|
Python
|
mit
|
JustinShenk/sensei
|
Add helper functions for analyzing data
|
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
def load_data(data_path):
'''Return dictionary `data` from string `data_path`
'''
os.path.join(data_path, '1.dat')
data = pickle.load(open(data_path, 'rb'))
return data
def get_baseline(data):
'''Get most recent baseline/calibration from subject.
'''
baselines = []
for k, v in data.items():
if 'baseline' in v:
print(k, v)
baselines.append((k, v))
# Get most recent baseline
return sorted(baselines)[-1][1].split(' ')[-1]
def get_distances(data):
'''Get tuple of posture measurements with time stamps.
Returns:
Tuple - (time_object, distances)
'''
distances = []
for k, v in data.items():
if type(v).__module__ == 'numpy':
# Convert strings to datetime object
time_object = datetime.strptime(k, '%Y-%m-%d_%H-%M-%S')
distances.append((time_object, v[0][2]))
    # Sort readings by time to restore order
    time_objects, dists = zip(*sorted(distances))
    return time_objects, dists
def plot(time_objects, dists):
pass
|
<commit_before><commit_msg>Add helper functions for analyzing data<commit_after>
|
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
def load_data(data_path):
'''Return dictionary `data` from string `data_path`
'''
os.path.join(data_path, '1.dat')
data = pickle.load(open(data_path, 'rb'))
return data
def get_baseline(data):
'''Get most recent baseline/calibration from subject.
'''
baselines = []
for k, v in data.items():
if 'baseline' in v:
print(k, v)
baselines.append((k, v))
# Get most recent baseline
return sorted(baselines)[-1][1].split(' ')[-1]
def get_distances(data):
'''Get tuple of posture measurements with time stamps.
Returns:
Tuple - (time_object, distances)
'''
distances = []
for k, v in data.items():
if type(v).__module__ == 'numpy':
# Convert strings to datetime object
time_object = datetime.strptime(k, '%Y-%m-%d_%H-%M-%S')
distances.append((time_object, v[0][2]))
    # Sort readings by time to restore order
    time_objects, dists = zip(*sorted(distances))
    return time_objects, dists
def plot(time_objects, dists):
pass
|
Add helper functions for analyzing dataimport os
import pickle
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
def load_data(data_path):
'''Return dictionary `data` from string `data_path`
'''
os.path.join(data_path, '1.dat')
data = pickle.load(open(data_path, 'rb'))
return data
def get_baseline(data):
'''Get most recent baseline/calibration from subject.
'''
baselines = []
for k, v in data.items():
if 'baseline' in v:
print(k, v)
baselines.append((k, v))
# Get most recent baseline
return sorted(baselines)[-1][1].split(' ')[-1]
def get_distances(data):
'''Get tuple of posture measurements with time stamps.
Returns:
Tuple - (time_object, distances)
'''
distances = []
for k, v in data.items():
if type(v).__module__ == 'numpy':
# Convert strings to datetime object
time_object = datetime.strptime(k, '%Y-%m-%d_%H-%M-%S')
distances.append((time_object, v[0][2]))
    # Sort readings by time to restore order
    time_objects, dists = zip(*sorted(distances))
    return time_objects, dists
def plot(time_objects, dists):
pass
|
<commit_before><commit_msg>Add helper functions for analyzing data<commit_after>import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
def load_data(data_path):
'''Return dictionary `data` from string `data_path`
'''
os.path.join(data_path, '1.dat')
data = pickle.load(open(data_path, 'rb'))
return data
def get_baseline(data):
'''Get most recent baseline/calibration from subject.
'''
baselines = []
for k, v in data.items():
if 'baseline' in v:
print(k, v)
baselines.append((k, v))
# Get most recent baseline
return sorted(baselines)[-1][1].split(' ')[-1]
def get_distances(data):
'''Get tuple of posture measurements with time stamps.
Returns:
Tuple - (time_object, distances)
'''
distances = []
for k, v in data.items():
if type(v).__module__ == 'numpy':
# Convert strings to datetime object
time_object = datetime.strptime(k, '%Y-%m-%d_%H-%M-%S')
distances.append((time_object, v[0][2]))
    # Sort readings by time to restore order
    time_objects, dists = zip(*sorted(distances))
    return time_objects, dists
def plot(time_objects, dists):
pass
|
|
5811966fa895669aa13395a8247e966f4b957ab2
|
CodeFights/isDigit.py
|
CodeFights/isDigit.py
|
#!/usr/local/bin/python
# Code Fights Is Digit Problem
import re
def isDigit(symbol):
return bool(re.search(r'\d', symbol))
def main():
tests = [
["0", True],
["-", False],
["o", False],
["1", True],
["2", True],
["!", False],
["@", False],
["+", False],
["6", True],
["(", False],
[")", False]
]
for t in tests:
res = isDigit(t[0])
ans = t[1]
if ans == res:
print("PASSED: isDigit({}) returned {}"
.format(t[0], res))
else:
print("FAILED: isDigit({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
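An aside rather than part of the commit: for the single-character ASCII inputs in the test table, Python's built-in str.isdigit() agrees with the regex check, so a regex-free variant passes the same cases:
import re
def is_digit_builtin(symbol):
    return symbol.isdigit()
for sym in "0-o12!@+6()":
    assert is_digit_builtin(sym) == bool(re.search(r'\d', sym))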
|
Solve Code Fights is digit problem
|
Solve Code Fights is digit problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights is digit problem
|
#!/usr/local/bin/python
# Code Fights Is Digit Problem
import re
def isDigit(symbol):
return bool(re.search(r'\d', symbol))
def main():
tests = [
["0", True],
["-", False],
["o", False],
["1", True],
["2", True],
["!", False],
["@", False],
["+", False],
["6", True],
["(", False],
[")", False]
]
for t in tests:
res = isDigit(t[0])
ans = t[1]
if ans == res:
print("PASSED: isDigit({}) returned {}"
.format(t[0], res))
else:
print("FAILED: isDigit({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights is digit problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Is Digit Problem
import re
def isDigit(symbol):
return bool(re.search(r'\d', symbol))
def main():
tests = [
["0", True],
["-", False],
["o", False],
["1", True],
["2", True],
["!", False],
["@", False],
["+", False],
["6", True],
["(", False],
[")", False]
]
for t in tests:
res = isDigit(t[0])
ans = t[1]
if ans == res:
print("PASSED: isDigit({}) returned {}"
.format(t[0], res))
else:
print("FAILED: isDigit({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights is digit problem#!/usr/local/bin/python
# Code Fights Is Digit Problem
import re
def isDigit(symbol):
return bool(re.search(r'\d', symbol))
def main():
tests = [
["0", True],
["-", False],
["o", False],
["1", True],
["2", True],
["!", False],
["@", False],
["+", False],
["6", True],
["(", False],
[")", False]
]
for t in tests:
res = isDigit(t[0])
ans = t[1]
if ans == res:
print("PASSED: isDigit({}) returned {}"
.format(t[0], res))
else:
print("FAILED: isDigit({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights is digit problem<commit_after>#!/usr/local/bin/python
# Code Fights Is Digit Problem
import re
def isDigit(symbol):
return bool(re.search(r'\d', symbol))
def main():
tests = [
["0", True],
["-", False],
["o", False],
["1", True],
["2", True],
["!", False],
["@", False],
["+", False],
["6", True],
["(", False],
[")", False]
]
for t in tests:
res = isDigit(t[0])
ans = t[1]
if ans == res:
print("PASSED: isDigit({}) returned {}"
.format(t[0], res))
else:
print("FAILED: isDigit({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
|
71b9ae31e34504b5805d37e72f7161aaa56ad5f8
|
pyconcz_2016/cfp/migrations/0002_auto_20160716_2222.py
|
pyconcz_2016/cfp/migrations/0002_auto_20160716_2222.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-07-16 20:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('cfp', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='proposal',
name='date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='proposal',
name='difficulty',
field=models.CharField(choices=[('all', 'All'), ('beginner', 'Beginner'), ('advanced', 'Advanced')], default='all', max_length=10),
),
migrations.AlterField(
model_name='proposal',
name='github',
field=models.CharField(blank=True, max_length=20),
),
migrations.AlterField(
model_name='proposal',
name='twitter',
field=models.CharField(blank=True, max_length=20),
),
]
|
Add migrations for previous commit
|
Add migrations for previous commit
|
Python
|
mit
|
benabraham/cz.pycon.org-2017,pyvec/cz.pycon.org-2017,pyvec/cz.pycon.org-2017,pyvec/cz.pycon.org-2016,benabraham/cz.pycon.org-2017,pyvec/cz.pycon.org-2016,benabraham/cz.pycon.org-2017,pyvec/cz.pycon.org-2017,pyvec/cz.pycon.org-2016
|
Add migrations for previous commit
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-07-16 20:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('cfp', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='proposal',
name='date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='proposal',
name='difficulty',
field=models.CharField(choices=[('all', 'All'), ('beginner', 'Beginner'), ('advanced', 'Advanced')], default='all', max_length=10),
),
migrations.AlterField(
model_name='proposal',
name='github',
field=models.CharField(blank=True, max_length=20),
),
migrations.AlterField(
model_name='proposal',
name='twitter',
field=models.CharField(blank=True, max_length=20),
),
]
|
<commit_before><commit_msg>Add migrations for previous commit<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-07-16 20:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('cfp', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='proposal',
name='date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='proposal',
name='difficulty',
field=models.CharField(choices=[('all', 'All'), ('beginner', 'Beginner'), ('advanced', 'Advanced')], default='all', max_length=10),
),
migrations.AlterField(
model_name='proposal',
name='github',
field=models.CharField(blank=True, max_length=20),
),
migrations.AlterField(
model_name='proposal',
name='twitter',
field=models.CharField(blank=True, max_length=20),
),
]
|
Add migrations for previous commit# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-07-16 20:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('cfp', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='proposal',
name='date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='proposal',
name='difficulty',
field=models.CharField(choices=[('all', 'All'), ('beginner', 'Beginner'), ('advanced', 'Advanced')], default='all', max_length=10),
),
migrations.AlterField(
model_name='proposal',
name='github',
field=models.CharField(blank=True, max_length=20),
),
migrations.AlterField(
model_name='proposal',
name='twitter',
field=models.CharField(blank=True, max_length=20),
),
]
|
<commit_before><commit_msg>Add migrations for previous commit<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-07-16 20:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('cfp', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='proposal',
name='date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='proposal',
name='difficulty',
field=models.CharField(choices=[('all', 'All'), ('beginner', 'Beginner'), ('advanced', 'Advanced')], default='all', max_length=10),
),
migrations.AlterField(
model_name='proposal',
name='github',
field=models.CharField(blank=True, max_length=20),
),
migrations.AlterField(
model_name='proposal',
name='twitter',
field=models.CharField(blank=True, max_length=20),
),
]
|
|
d94ab69dff6fc950b27105bf334454fcee83a1b8
|
create_properties.py
|
create_properties.py
|
import enki
e = enki.Enki('a', 'http://localhost:5001', 'translationsvoting')
e.get_all()
lines = []
for t in e.tasks:
msgstr = e.task_runs_df[t.id]['msgstr'].describe()['top']
var_id = t.info['var_id']
line = "%s= %s\n" % (var_id, msgstr)
lines.append(line)
file = open('/tmp/file.properties', 'w')
for l in lines:
file.write(l)
file.close()
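Each vote winner becomes one Java-style properties line keyed by the task's var_id, e.g. 'greeting.title= Bienvenido' (a made-up example). A hypothetical sketch of reading the generated file back, matching the '%s= %s' format written above:
props = {}
with open('/tmp/file.properties') as fh:
    for raw in fh:
        var_id, _, msgstr = raw.rstrip('\n').partition('= ')
        props[var_id] = msgstr
print(props)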
|
Create properties files from votes.
|
Create properties files from votes.
|
Python
|
agpl-3.0
|
PyBossa/app-translations
|
Create properties files from votes.
|
import enki
e = enki.Enki('a', 'http://localhost:5001', 'translationsvoting')
e.get_all()
lines = []
for t in e.tasks:
msgstr = e.task_runs_df[t.id]['msgstr'].describe()['top']
var_id = t.info['var_id']
line = "%s= %s\n" % (var_id, msgstr)
lines.append(line)
file = open('/tmp/file.properties', 'w')
for l in lines:
file.write(l)
file.close()
|
<commit_before><commit_msg>Create properties files from votes.<commit_after>
|
import enki
e = enki.Enki('a', 'http://localhost:5001', 'translationsvoting')
e.get_all()
lines = []
for t in e.tasks:
msgstr = e.task_runs_df[t.id]['msgstr'].describe()['top']
var_id = t.info['var_id']
line = "%s= %s\n" % (var_id, msgstr)
lines.append(line)
file = open('/tmp/file.properties', 'w')
for l in lines:
file.write(l)
file.close()
|
Create properties files from votes.import enki
e = enki.Enki('a', 'http://localhost:5001', 'translationsvoting')
e.get_all()
lines = []
for t in e.tasks:
msgstr = e.task_runs_df[t.id]['msgstr'].describe()['top']
var_id = t.info['var_id']
line = "%s= %s\n" % (var_id, msgstr)
lines.append(line)
file = open('/tmp/file.properties', 'w')
for l in lines:
file.write(l)
file.close()
|
<commit_before><commit_msg>Create properties files from votes.<commit_after>import enki
e = enki.Enki('a', 'http://localhost:5001', 'translationsvoting')
e.get_all()
lines = []
for t in e.tasks:
msgstr = e.task_runs_df[t.id]['msgstr'].describe()['top']
var_id = t.info['var_id']
line = "%s= %s\n" % (var_id, msgstr)
lines.append(line)
file = open('/tmp/file.properties', 'w')
for l in lines:
file.write(l)
file.close()
|
|
e7caa52ffdc547c66454a200e5e9901d6015bb3e
|
hunittest/test/test_stopwatch.py
|
hunittest/test/test_stopwatch.py
|
# -*- encoding: utf-8 -*-
"""Test 'stopwath' module.
"""
import unittest
import time
from datetime import timedelta
from hunittest.stopwatch import StopWatch
class TestStopWatch(unittest.TestCase):
def assertTimedeltaAlmostEqual(self, td1, td2, prec=1e-3):
        return abs(td1 - td2).total_seconds() <= prec
def test_is_started(self):
sw = StopWatch()
self.assertFalse(sw.is_started)
sw.start()
self.assertTrue(sw.is_started)
sw.reset()
self.assertFalse(sw.is_started)
def test_split(self):
sw = StopWatch()
sw.start()
self.assertEqual(0, sw.splits_count)
### split 1
delay1 = 0.5
time.sleep(delay1)
sw.split()
self.assertEqual(1, sw.splits_count)
self.assertAlmostEqual(delay1,
sw.last_split_time.total_seconds(),
places=1)
self.assertAlmostEqual(delay1,
sw.mean_split_time.total_seconds(),
places=1)
self.assertEqual(sw.last_split_time, sw.total_split_time)
        ### split 2
delay2 = 1.0
time.sleep(delay2)
sw.split()
self.assertEqual(2, sw.splits_count)
self.assertAlmostEqual(delay2,
sw.last_split_time.total_seconds(),
places=1)
self.assertAlmostEqual((delay1 + delay2) / 2,
sw.mean_split_time.total_seconds(),
places=1)
self.assertAlmostEqual(delay1 + delay2,
sw.total_split_time.total_seconds(),
places=1)
def test_total_time(self):
sw = StopWatch()
sw.start()
delay = 0.5
time.sleep(delay)
self.assertAlmostEqual(delay, sw.total_time.total_seconds(), places=1)
def test_split_raises_if_not_started(self):
sw = StopWatch()
with self.assertRaises(RuntimeError):
sw.split()
def test_start_raises_if_already_started(self):
sw = StopWatch()
sw.start()
with self.assertRaises(RuntimeError):
sw.start()
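Based only on the API these tests exercise, a hypothetical usage sketch of StopWatch outside the test suite (the sleep call is a stand-in for a unit of real work) might read:
import time
from hunittest.stopwatch import StopWatch
sw = StopWatch()
sw.start()
for _ in range(3):
    time.sleep(0.1)          # stand-in for real work
    sw.split()
print(sw.splits_count, sw.mean_split_time, sw.total_split_time)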
|
Add unit test for StopWatch.
|
Add unit test for StopWatch.
|
Python
|
bsd-2-clause
|
nicolasdespres/hunittest
|
Add unit test for StopWatch.
|
# -*- encoding: utf-8 -*-
"""Test 'stopwath' module.
"""
import unittest
import time
from datetime import timedelta
from hunittest.stopwatch import StopWatch
class TestStopWatch(unittest.TestCase):
def assertTimedeltaAlmostEqual(self, td1, td2, prec=1e-3):
        return abs(td1 - td2).total_seconds() <= prec
def test_is_started(self):
sw = StopWatch()
self.assertFalse(sw.is_started)
sw.start()
self.assertTrue(sw.is_started)
sw.reset()
self.assertFalse(sw.is_started)
def test_split(self):
sw = StopWatch()
sw.start()
self.assertEqual(0, sw.splits_count)
### split 1
delay1 = 0.5
time.sleep(delay1)
sw.split()
self.assertEqual(1, sw.splits_count)
self.assertAlmostEqual(delay1,
sw.last_split_time.total_seconds(),
places=1)
self.assertAlmostEqual(delay1,
sw.mean_split_time.total_seconds(),
places=1)
self.assertEqual(sw.last_split_time, sw.total_split_time)
        ### split 2
delay2 = 1.0
time.sleep(delay2)
sw.split()
self.assertEqual(2, sw.splits_count)
self.assertAlmostEqual(delay2,
sw.last_split_time.total_seconds(),
places=1)
self.assertAlmostEqual((delay1 + delay2) / 2,
sw.mean_split_time.total_seconds(),
places=1)
self.assertAlmostEqual(delay1 + delay2,
sw.total_split_time.total_seconds(),
places=1)
def test_total_time(self):
sw = StopWatch()
sw.start()
delay = 0.5
time.sleep(delay)
self.assertAlmostEqual(delay, sw.total_time.total_seconds(), places=1)
def test_split_raises_if_not_started(self):
sw = StopWatch()
with self.assertRaises(RuntimeError):
sw.split()
def test_start_raises_if_already_started(self):
sw = StopWatch()
sw.start()
with self.assertRaises(RuntimeError):
sw.start()
|
<commit_before><commit_msg>Add unit test for StopWatch.<commit_after>
|
# -*- encoding: utf-8 -*-
"""Test 'stopwath' module.
"""
import unittest
import time
from datetime import timedelta
from hunittest.stopwatch import StopWatch
class TestStopWatch(unittest.TestCase):
def assertTimedeltaAlmostEqual(self, td1, td2, prec=1e-3):
        return abs(td1 - td2).total_seconds() <= prec
def test_is_started(self):
sw = StopWatch()
self.assertFalse(sw.is_started)
sw.start()
self.assertTrue(sw.is_started)
sw.reset()
self.assertFalse(sw.is_started)
def test_split(self):
sw = StopWatch()
sw.start()
self.assertEqual(0, sw.splits_count)
### split 1
delay1 = 0.5
time.sleep(delay1)
sw.split()
self.assertEqual(1, sw.splits_count)
self.assertAlmostEqual(delay1,
sw.last_split_time.total_seconds(),
places=1)
self.assertAlmostEqual(delay1,
sw.mean_split_time.total_seconds(),
places=1)
self.assertEqual(sw.last_split_time, sw.total_split_time)
        ### split 2
delay2 = 1.0
time.sleep(delay2)
sw.split()
self.assertEqual(2, sw.splits_count)
self.assertAlmostEqual(delay2,
sw.last_split_time.total_seconds(),
places=1)
self.assertAlmostEqual((delay1 + delay2) / 2,
sw.mean_split_time.total_seconds(),
places=1)
self.assertAlmostEqual(delay1 + delay2,
sw.total_split_time.total_seconds(),
places=1)
def test_total_time(self):
sw = StopWatch()
sw.start()
delay = 0.5
time.sleep(delay)
self.assertAlmostEqual(delay, sw.total_time.total_seconds(), places=1)
def test_split_raises_if_not_started(self):
sw = StopWatch()
with self.assertRaises(RuntimeError):
sw.split()
def test_start_raises_if_already_started(self):
sw = StopWatch()
sw.start()
with self.assertRaises(RuntimeError):
sw.start()
|
Add unit test for StopWatch.# -*- encoding: utf-8 -*-
"""Test 'stopwath' module.
"""
import unittest
import time
from datetime import timedelta
from hunittest.stopwatch import StopWatch
class TestStopWatch(unittest.TestCase):
def assertTimedeltaAlmostEqual(self, td1, td2, prec=1e-3):
        return abs(td1 - td2).total_seconds() <= prec
def test_is_started(self):
sw = StopWatch()
self.assertFalse(sw.is_started)
sw.start()
self.assertTrue(sw.is_started)
sw.reset()
self.assertFalse(sw.is_started)
def test_split(self):
sw = StopWatch()
sw.start()
self.assertEqual(0, sw.splits_count)
### split 1
delay1 = 0.5
time.sleep(delay1)
sw.split()
self.assertEqual(1, sw.splits_count)
self.assertAlmostEqual(delay1,
sw.last_split_time.total_seconds(),
places=1)
self.assertAlmostEqual(delay1,
sw.mean_split_time.total_seconds(),
places=1)
self.assertEqual(sw.last_split_time, sw.total_split_time)
### split 1
delay2 = 1.0
time.sleep(delay2)
sw.split()
self.assertEqual(2, sw.splits_count)
self.assertAlmostEqual(delay2,
sw.last_split_time.total_seconds(),
places=1)
self.assertAlmostEqual((delay1 + delay2) / 2,
sw.mean_split_time.total_seconds(),
places=1)
self.assertAlmostEqual(delay1 + delay2,
sw.total_split_time.total_seconds(),
places=1)
def test_total_time(self):
sw = StopWatch()
sw.start()
delay = 0.5
time.sleep(delay)
self.assertAlmostEqual(delay, sw.total_time.total_seconds(), places=1)
def test_split_raises_if_not_started(self):
sw = StopWatch()
with self.assertRaises(RuntimeError):
sw.split()
def test_start_raises_if_already_started(self):
sw = StopWatch()
sw.start()
with self.assertRaises(RuntimeError):
sw.start()
|
<commit_before><commit_msg>Add unit test for StopWatch.<commit_after># -*- encoding: utf-8 -*-
"""Test 'stopwath' module.
"""
import unittest
import time
from datetime import timedelta
from hunittest.stopwatch import StopWatch
class TestStopWatch(unittest.TestCase):
def assertTimedeltaAlmostEqual(self, td1, td2, prec=1e-3):
        return abs(td1 - td2).total_seconds() <= prec
def test_is_started(self):
sw = StopWatch()
self.assertFalse(sw.is_started)
sw.start()
self.assertTrue(sw.is_started)
sw.reset()
self.assertFalse(sw.is_started)
def test_split(self):
sw = StopWatch()
sw.start()
self.assertEqual(0, sw.splits_count)
### split 1
delay1 = 0.5
time.sleep(delay1)
sw.split()
self.assertEqual(1, sw.splits_count)
self.assertAlmostEqual(delay1,
sw.last_split_time.total_seconds(),
places=1)
self.assertAlmostEqual(delay1,
sw.mean_split_time.total_seconds(),
places=1)
self.assertEqual(sw.last_split_time, sw.total_split_time)
### split 1
delay2 = 1.0
time.sleep(delay2)
sw.split()
self.assertEqual(2, sw.splits_count)
self.assertAlmostEqual(delay2,
sw.last_split_time.total_seconds(),
places=1)
self.assertAlmostEqual((delay1 + delay2) / 2,
sw.mean_split_time.total_seconds(),
places=1)
self.assertAlmostEqual(delay1 + delay2,
sw.total_split_time.total_seconds(),
places=1)
def test_total_time(self):
sw = StopWatch()
sw.start()
delay = 0.5
time.sleep(delay)
self.assertAlmostEqual(delay, sw.total_time.total_seconds(), places=1)
def test_split_raises_if_not_started(self):
sw = StopWatch()
with self.assertRaises(RuntimeError):
sw.split()
def test_start_raises_if_already_started(self):
sw = StopWatch()
sw.start()
with self.assertRaises(RuntimeError):
sw.start()
|
|
b246723d837cddde898316b67ab5af1feba67817
|
zerver/migrations/0279_message_recipient_subject_indexes.py
|
zerver/migrations/0279_message_recipient_subject_indexes.py
|
# Generated by Django 2.2.12 on 2020-04-30 00:35
from django.db import migrations
class Migration(migrations.Migration):
atomic = False
dependencies = [
('zerver', '0278_remove_userprofile_alert_words'),
]
operations = [
migrations.RunSQL("""
CREATE INDEX CONCURRENTLY IF NOT EXISTS zerver_message_recipient_upper_subject ON zerver_message (recipient_id, upper(subject::text), id DESC NULLS LAST);
"""),
migrations.RunSQL("""
CREATE INDEX CONCURRENTLY IF NOT EXISTS zerver_message_recipient_subject ON zerver_message (recipient_id, subject, id DESC NULLS LAST);
"""),
]
|
Add Message indexes on (recipient_id, subject).
|
migrations: Add Message indexes on (recipient_id, subject).
Our previous set of indexes for the Message table did not contain
anything to optimize queries for all the messages in a topic in an
organization where the same topic name might appear in 10,000s of
messages in many streams.
We add two indexes here to support common queries
* A `(recipient_id, upper(subject), id)` index to support
"Fetch all messages from a topic" queries.
* A `(recipient_id, subject, id)` index to support
"Fetch all messages by topic"
We use the `DESC NULLS last` on both indexes because we almost always
want to query from the "Latest N messages" on a topic, not the
"Earliest N messages".
These indexes dramatically improve the performance of fetching topic
history (which remains not good enough in my opinion; we'll likely
need caching to make it nice), and more importantly make it possible
to check quickly which users have sent messages to a topic for the
"Topics I follow" feature.
Fixes part of #13726.
|
Python
|
apache-2.0
|
andersk/zulip,timabbott/zulip,rht/zulip,zulip/zulip,synicalsyntax/zulip,showell/zulip,kou/zulip,rht/zulip,eeshangarg/zulip,brainwane/zulip,andersk/zulip,zulip/zulip,kou/zulip,timabbott/zulip,zulip/zulip,eeshangarg/zulip,andersk/zulip,eeshangarg/zulip,timabbott/zulip,showell/zulip,showell/zulip,hackerkid/zulip,shubhamdhama/zulip,punchagan/zulip,showell/zulip,punchagan/zulip,andersk/zulip,kou/zulip,kou/zulip,synicalsyntax/zulip,brainwane/zulip,andersk/zulip,synicalsyntax/zulip,brainwane/zulip,rht/zulip,rht/zulip,eeshangarg/zulip,punchagan/zulip,kou/zulip,kou/zulip,andersk/zulip,eeshangarg/zulip,synicalsyntax/zulip,synicalsyntax/zulip,showell/zulip,zulip/zulip,hackerkid/zulip,shubhamdhama/zulip,zulip/zulip,showell/zulip,shubhamdhama/zulip,shubhamdhama/zulip,shubhamdhama/zulip,rht/zulip,timabbott/zulip,punchagan/zulip,timabbott/zulip,punchagan/zulip,eeshangarg/zulip,hackerkid/zulip,hackerkid/zulip,brainwane/zulip,punchagan/zulip,brainwane/zulip,timabbott/zulip,rht/zulip,showell/zulip,timabbott/zulip,brainwane/zulip,kou/zulip,hackerkid/zulip,synicalsyntax/zulip,hackerkid/zulip,rht/zulip,shubhamdhama/zulip,synicalsyntax/zulip,andersk/zulip,hackerkid/zulip,zulip/zulip,eeshangarg/zulip,punchagan/zulip,shubhamdhama/zulip,brainwane/zulip,zulip/zulip
|
migrations: Add Message indexes on (recipient_id, subject).
Our previous set of indexes for the Message table did not contain
anything to optimize queries for all the messages in a topic in an
organization where the same topic name might appear in 10,000s of
messages in many streams.
We add two indexes here to support common queries
* A `(recipient_id, upper(subject), id)` index to support
"Fetch all messages from a topic" queries.
* A `(recipient_id, subject, id)` index to support
"Fetch all messages by topic"
We use the `DESC NULLS last` on both indexes because we almost always
want to query from the "Latest N messages" on a topic, not the
"Earliest N messages".
These indexes dramatically improve the performance of fetching topic
history (which remains not good enough in my opinion; we'll likely
need caching to make it nice), and more importantly make it possible
to check quickly which users have sent messages to a topic for the
"Topics I follow" feature.
Fixes part of #13726.
|
# Generated by Django 2.2.12 on 2020-04-30 00:35
from django.db import migrations
class Migration(migrations.Migration):
atomic = False
dependencies = [
('zerver', '0278_remove_userprofile_alert_words'),
]
operations = [
migrations.RunSQL("""
CREATE INDEX CONCURRENTLY IF NOT EXISTS zerver_message_recipient_upper_subject ON zerver_message (recipient_id, upper(subject::text), id DESC NULLS LAST);
"""),
migrations.RunSQL("""
CREATE INDEX CONCURRENTLY IF NOT EXISTS zerver_message_recipient_subject ON zerver_message (recipient_id, subject, id DESC NULLS LAST);
"""),
]
|
<commit_before><commit_msg>migrations: Add Message indexes on (recipient_id, subject).
Our previous set of indexes for the Message table did not contain
anything to optimize queries for all the messages in a topic in an
organization where the same topic name might appear in 10,000s of
messages in many streams.
We add two indexes here to support common queries
* A `(recipient_id, upper(subject), id)` index to support
"Fetch all messages from a topic" queries.
* A `(recipient_id, subject, id)` index to support
"Fetch all messages by topic"
We use the `DESC NULLS last` on both indexes because we almost always
want to query from the "Latest N messages" on a topic, not the
"Earliest N messages".
These indexes dramatically improve the performance of fetching topic
history (which remains not good enough in my opinion; we'll likely
need caching to make it nice), and more importantly make it possible
to check quickly which users have sent messages to a topic for the
"Topics I follow" feature.
Fixes part of #13726.<commit_after>
|
# Generated by Django 2.2.12 on 2020-04-30 00:35
from django.db import migrations
class Migration(migrations.Migration):
atomic = False
dependencies = [
('zerver', '0278_remove_userprofile_alert_words'),
]
operations = [
migrations.RunSQL("""
CREATE INDEX CONCURRENTLY IF NOT EXISTS zerver_message_recipient_upper_subject ON zerver_message (recipient_id, upper(subject::text), id DESC NULLS LAST);
"""),
migrations.RunSQL("""
CREATE INDEX CONCURRENTLY IF NOT EXISTS zerver_message_recipient_subject ON zerver_message (recipient_id, subject, id DESC NULLS LAST);
"""),
]
|
migrations: Add Message indexes on (recipient_id, subject).
Our previous set of indexes for the Message table did not contain
anything to optimize queries for all the messages in a topic in an
organization where the same topic name might appear in 10,000s of
messages in many streams.
We add two indexes here to support common queries
* A `(recipient_id, upper(subject), id)` index to support
"Fetch all messages from a topic" queries.
* A `(recipient_id, subject, id)` index to support
"Fetch all messages by topic"
We use the `DESC NULLS last` on both indexes because we almost always
want to query from the "Latest N messages" on a topic, not the
"Earliest N messages".
These indexes dramatically improve the performance of fetching topic
history (which remains not good enough in my opinion; we'll likely
need caching to make it nice), and more importantly make it possible
to check quickly which users have sent messages to a topic for the
"Topics I follow" feature.
Fixes part of #13726.# Generated by Django 2.2.12 on 2020-04-30 00:35
from django.db import migrations
class Migration(migrations.Migration):
atomic = False
dependencies = [
('zerver', '0278_remove_userprofile_alert_words'),
]
operations = [
migrations.RunSQL("""
CREATE INDEX CONCURRENTLY IF NOT EXISTS zerver_message_recipient_upper_subject ON zerver_message (recipient_id, upper(subject::text), id DESC NULLS LAST);
"""),
migrations.RunSQL("""
CREATE INDEX CONCURRENTLY IF NOT EXISTS zerver_message_recipient_subject ON zerver_message (recipient_id, subject, id DESC NULLS LAST);
"""),
]
|
<commit_before><commit_msg>migrations: Add Message indexes on (recipient_id, subject).
Our previous set of indexes for the Message table did not contain
anything to optimize queries for all the messages in a topic in an
organization where the same topic name might appear in 10,000s of
messages in many streams.
We add two indexes here to support common queries
* A `(recipient_id, upper(subject), id)` index to support
"Fetch all messages from a topic" queries.
* A `(recipient_id, subject, id)` index to support
"Fetch all messages by topic"
We use the `DESC NULLS last` on both indexes because we almost always
want to query from the "Latest N messages" on a topic, not the
"Earliest N messages".
These indexes dramatically improve the performance of fetching topic
history (which remains not good enough in my opinion; we'll likely
need caching to make it nice), and more importantly make it possible
to check quickly which users have sent messages to a topic for the
"Topics I follow" feature.
Fixes part of #13726.<commit_after># Generated by Django 2.2.12 on 2020-04-30 00:35
from django.db import migrations
class Migration(migrations.Migration):
atomic = False
dependencies = [
('zerver', '0278_remove_userprofile_alert_words'),
]
operations = [
migrations.RunSQL("""
CREATE INDEX CONCURRENTLY IF NOT EXISTS zerver_message_recipient_upper_subject ON zerver_message (recipient_id, upper(subject::text), id DESC NULLS LAST);
"""),
migrations.RunSQL("""
CREATE INDEX CONCURRENTLY IF NOT EXISTS zerver_message_recipient_subject ON zerver_message (recipient_id, subject, id DESC NULLS LAST);
"""),
]
|
|
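A side note on the migration in the record above: PostgreSQL refuses to run CREATE INDEX CONCURRENTLY inside a transaction block, which is why the Migration class sets atomic = False so Django does not wrap it in one. As a rough, hypothetical illustration of the query shape the (recipient_id, upper(subject), id DESC) index is built to serve (table and column names come from the migration; the actual Zulip query code is not shown here):
# Hypothetical illustration only, not actual Zulip code.
TOPIC_HISTORY_SQL = """
    SELECT id, subject
    FROM zerver_message
    WHERE recipient_id = %(recipient_id)s
      AND upper(subject::text) = upper(%(topic)s)
    ORDER BY id DESC
    LIMIT 100
"""

def fetch_topic_history(cursor, recipient_id, topic):
    # cursor is any DB-API cursor bound to the same PostgreSQL database; the
    # equality match on upper(subject::text) plus ORDER BY id DESC is exactly
    # what the expression index can answer without a separate sort step.
    cursor.execute(TOPIC_HISTORY_SQL, {"recipient_id": recipient_id, "topic": topic})
    return cursor.fetchall()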
da596f8c04cb96355a12087eabea7151eb8771a3
|
osf_tests/test_maintenance.py
|
osf_tests/test_maintenance.py
|
import unittest
from datetime import timedelta
import pytest
from django.utils import timezone
from website import maintenance
from osf.models import MaintenanceState
pytestmark = pytest.mark.django_db
class TestMaintenance(unittest.TestCase):
def tearDown(self):
MaintenanceState.objects.all().delete()
def test_set_maintenance_no_params(self):
assert not MaintenanceState.objects.exists()
maintenance.set_maintenance()
assert MaintenanceState.objects.all().count() == 1
def test_set_maintenance_twice(self):
assert not MaintenanceState.objects.exists()
maintenance.set_maintenance()
assert MaintenanceState.objects.all().count() == 1
maintenance.set_maintenance()
assert MaintenanceState.objects.all().count() == 1
def test_set_maintenance_with_start_date(self):
start = timezone.now()
maintenance.set_maintenance(start=start.isoformat())
current_state = MaintenanceState.objects.all().first()
assert current_state.start == start
assert current_state.end == start + timedelta(1)
def test_set_maintenance_with_end_date(self):
end = timezone.now()
maintenance.set_maintenance(end=end.isoformat())
current_state = MaintenanceState.objects.all().first()
assert current_state.start == end - timedelta(1)
assert current_state.end == end
def test_get_maintenance(self):
start = timezone.now()
maintenance.set_maintenance(start=start.isoformat())
state = maintenance.get_maintenance()
assert state['start'] == start.isoformat()
assert state['end'] == (start + timedelta(1)).isoformat()
def test_get_maintenance_in_future(self):
start = (timezone.now() + timedelta(1)).isoformat()
maintenance.set_maintenance(start=start)
assert MaintenanceState.objects.exists()
state = maintenance.get_maintenance()
assert state['start'] == start
def test_unset_maintenance(self):
maintenance.set_maintenance()
assert MaintenanceState.objects.exists()
maintenance.unset_maintenance()
assert not MaintenanceState.objects.exists()
|
Add tests for maintenance functions
|
Add tests for maintenance functions
|
Python
|
apache-2.0
|
erinspace/osf.io,cslzchen/osf.io,saradbowman/osf.io,Johnetordoff/osf.io,adlius/osf.io,adlius/osf.io,erinspace/osf.io,caseyrollins/osf.io,laurenrevere/osf.io,sloria/osf.io,pattisdr/osf.io,icereval/osf.io,baylee-d/osf.io,binoculars/osf.io,crcresearch/osf.io,icereval/osf.io,caneruguz/osf.io,chrisseto/osf.io,TomBaxter/osf.io,felliott/osf.io,icereval/osf.io,CenterForOpenScience/osf.io,crcresearch/osf.io,brianjgeiger/osf.io,pattisdr/osf.io,chrisseto/osf.io,caneruguz/osf.io,felliott/osf.io,brianjgeiger/osf.io,mfraezz/osf.io,chrisseto/osf.io,caseyrollins/osf.io,mfraezz/osf.io,baylee-d/osf.io,HalcyonChimera/osf.io,mattclark/osf.io,erinspace/osf.io,Johnetordoff/osf.io,chennan47/osf.io,crcresearch/osf.io,brianjgeiger/osf.io,chrisseto/osf.io,mattclark/osf.io,mattclark/osf.io,binoculars/osf.io,baylee-d/osf.io,felliott/osf.io,aaxelb/osf.io,aaxelb/osf.io,adlius/osf.io,CenterForOpenScience/osf.io,saradbowman/osf.io,leb2dg/osf.io,felliott/osf.io,pattisdr/osf.io,brianjgeiger/osf.io,leb2dg/osf.io,adlius/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,HalcyonChimera/osf.io,Johnetordoff/osf.io,TomBaxter/osf.io,aaxelb/osf.io,caseyrollins/osf.io,leb2dg/osf.io,HalcyonChimera/osf.io,mfraezz/osf.io,leb2dg/osf.io,aaxelb/osf.io,binoculars/osf.io,sloria/osf.io,caneruguz/osf.io,HalcyonChimera/osf.io,Johnetordoff/osf.io,sloria/osf.io,laurenrevere/osf.io,mfraezz/osf.io,cslzchen/osf.io,TomBaxter/osf.io,laurenrevere/osf.io,cslzchen/osf.io,CenterForOpenScience/osf.io,chennan47/osf.io,caneruguz/osf.io,chennan47/osf.io
|
Add tests for maintenance functions
|
import unittest
from datetime import timedelta
import pytest
from django.utils import timezone
from website import maintenance
from osf.models import MaintenanceState
pytestmark = pytest.mark.django_db
class TestMaintenance(unittest.TestCase):
def tearDown(self):
MaintenanceState.objects.all().delete()
def test_set_maintenance_no_params(self):
assert not MaintenanceState.objects.exists()
maintenance.set_maintenance()
assert MaintenanceState.objects.all().count() == 1
def test_set_maintenance_twice(self):
assert not MaintenanceState.objects.exists()
maintenance.set_maintenance()
assert MaintenanceState.objects.all().count() == 1
maintenance.set_maintenance()
assert MaintenanceState.objects.all().count() == 1
def test_set_maintenance_with_start_date(self):
start = timezone.now()
maintenance.set_maintenance(start=start.isoformat())
current_state = MaintenanceState.objects.all().first()
assert current_state.start == start
assert current_state.end == start + timedelta(1)
def test_set_maintenance_with_end_date(self):
end = timezone.now()
maintenance.set_maintenance(end=end.isoformat())
current_state = MaintenanceState.objects.all().first()
assert current_state.start == end - timedelta(1)
assert current_state.end == end
def test_get_maintenance(self):
start = timezone.now()
maintenance.set_maintenance(start=start.isoformat())
state = maintenance.get_maintenance()
assert state['start'] == start.isoformat()
assert state['end'] == (start + timedelta(1)).isoformat()
def test_get_maintenance_in_future(self):
start = (timezone.now() + timedelta(1)).isoformat()
maintenance.set_maintenance(start=start)
assert MaintenanceState.objects.exists()
state = maintenance.get_maintenance()
assert state['start'] == start
def test_unset_maintenance(self):
maintenance.set_maintenance()
assert MaintenanceState.objects.exists()
maintenance.unset_maintenance()
assert not MaintenanceState.objects.exists()
|
<commit_before><commit_msg>Add tests for maintenance functions<commit_after>
|
import unittest
from datetime import timedelta
import pytest
from django.utils import timezone
from website import maintenance
from osf.models import MaintenanceState
pytestmark = pytest.mark.django_db
class TestMaintenance(unittest.TestCase):
def tearDown(self):
MaintenanceState.objects.all().delete()
def test_set_maintenance_no_params(self):
assert not MaintenanceState.objects.exists()
maintenance.set_maintenance()
assert MaintenanceState.objects.all().count() == 1
def test_set_maintenance_twice(self):
assert not MaintenanceState.objects.exists()
maintenance.set_maintenance()
assert MaintenanceState.objects.all().count() == 1
maintenance.set_maintenance()
assert MaintenanceState.objects.all().count() == 1
def test_set_maintenance_with_start_date(self):
start = timezone.now()
maintenance.set_maintenance(start=start.isoformat())
current_state = MaintenanceState.objects.all().first()
assert current_state.start == start
assert current_state.end == start + timedelta(1)
def test_set_maintenance_with_end_date(self):
end = timezone.now()
maintenance.set_maintenance(end=end.isoformat())
current_state = MaintenanceState.objects.all().first()
assert current_state.start == end - timedelta(1)
assert current_state.end == end
def test_get_maintenance(self):
start = timezone.now()
maintenance.set_maintenance(start=start.isoformat())
state = maintenance.get_maintenance()
assert state['start'] == start.isoformat()
assert state['end'] == (start + timedelta(1)).isoformat()
def test_get_maintenance_in_future(self):
start = (timezone.now() + timedelta(1)).isoformat()
maintenance.set_maintenance(start=start)
assert MaintenanceState.objects.exists()
state = maintenance.get_maintenance()
assert state['start'] == start
def test_unset_maintenance(self):
maintenance.set_maintenance()
assert MaintenanceState.objects.exists()
maintenance.unset_maintenance()
assert not MaintenanceState.objects.exists()
|
Add tests for maintenance functions
import unittest
from datetime import timedelta
import pytest
from django.utils import timezone
from website import maintenance
from osf.models import MaintenanceState
pytestmark = pytest.mark.django_db
class TestMaintenance(unittest.TestCase):
def tearDown(self):
MaintenanceState.objects.all().delete()
def test_set_maintenance_no_params(self):
assert not MaintenanceState.objects.exists()
maintenance.set_maintenance()
assert MaintenanceState.objects.all().count() == 1
def test_set_maintenance_twice(self):
assert not MaintenanceState.objects.exists()
maintenance.set_maintenance()
assert MaintenanceState.objects.all().count() == 1
maintenance.set_maintenance()
assert MaintenanceState.objects.all().count() == 1
def test_set_maintenance_with_start_date(self):
start = timezone.now()
maintenance.set_maintenance(start=start.isoformat())
current_state = MaintenanceState.objects.all().first()
assert current_state.start == start
assert current_state.end == start + timedelta(1)
def test_set_maintenance_with_end_date(self):
end = timezone.now()
maintenance.set_maintenance(end=end.isoformat())
current_state = MaintenanceState.objects.all().first()
assert current_state.start == end - timedelta(1)
assert current_state.end == end
def test_get_maintenance(self):
start = timezone.now()
maintenance.set_maintenance(start=start.isoformat())
state = maintenance.get_maintenance()
assert state['start'] == start.isoformat()
assert state['end'] == (start + timedelta(1)).isoformat()
def test_get_maintenance_in_future(self):
start = (timezone.now() + timedelta(1)).isoformat()
maintenance.set_maintenance(start=start)
assert MaintenanceState.objects.exists()
state = maintenance.get_maintenance()
assert state['start'] == start
def test_unset_maintenance(self):
maintenance.set_maintenance()
assert MaintenanceState.objects.exists()
maintenance.unset_maintenance()
assert not MaintenanceState.objects.exists()
|
<commit_before><commit_msg>Add tests for maintenance functions<commit_after>import unittest
from datetime import timedelta
import pytest
from django.utils import timezone
from website import maintenance
from osf.models import MaintenanceState
pytestmark = pytest.mark.django_db
class TestMaintenance(unittest.TestCase):
def tearDown(self):
MaintenanceState.objects.all().delete()
def test_set_maintenance_no_params(self):
assert not MaintenanceState.objects.exists()
maintenance.set_maintenance()
assert MaintenanceState.objects.all().count() == 1
def test_set_maintenance_twice(self):
assert not MaintenanceState.objects.exists()
maintenance.set_maintenance()
assert MaintenanceState.objects.all().count() == 1
maintenance.set_maintenance()
assert MaintenanceState.objects.all().count() == 1
def test_set_maintenance_with_start_date(self):
start = timezone.now()
maintenance.set_maintenance(start=start.isoformat())
current_state = MaintenanceState.objects.all().first()
assert current_state.start == start
assert current_state.end == start + timedelta(1)
def test_set_maintenance_with_end_date(self):
end = timezone.now()
maintenance.set_maintenance(end=end.isoformat())
current_state = MaintenanceState.objects.all().first()
assert current_state.start == end - timedelta(1)
assert current_state.end == end
def test_get_maintenance(self):
start = timezone.now()
maintenance.set_maintenance(start=start.isoformat())
state = maintenance.get_maintenance()
assert state['start'] == start.isoformat()
assert state['end'] == (start + timedelta(1)).isoformat()
def test_get_maintenance_in_future(self):
start = (timezone.now() + timedelta(1)).isoformat()
maintenance.set_maintenance(start=start)
assert MaintenanceState.objects.exists()
state = maintenance.get_maintenance()
assert state['start'] == start
def test_unset_maintenance(self):
maintenance.set_maintenance()
assert MaintenanceState.objects.exists()
maintenance.unset_maintenance()
assert not MaintenanceState.objects.exists()
|
|
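The tests in the record above pin down the behaviour of website.maintenance without showing it. The sketch below is an assumption for illustration only, not the real OSF module: a minimal implementation consistent with those assertions, presuming python-dateutil is available to parse the isoformat strings the tests pass in.
# Hypothetical sketch, not the actual website.maintenance implementation.
from datetime import timedelta
from dateutil.parser import parse as parse_date  # assumption: dateutil is installed
from django.utils import timezone

def set_maintenance(start=None, end=None):
    from osf.models import MaintenanceState
    start = parse_date(start) if start else None
    end = parse_date(end) if end else None
    if start and not end:
        end = start + timedelta(1)
    elif end and not start:
        start = end - timedelta(1)
    elif not start:
        start = timezone.now()
        end = start + timedelta(1)
    MaintenanceState.objects.all().delete()  # keep exactly one row, even if called twice
    MaintenanceState.objects.create(start=start, end=end)

def get_maintenance():
    from osf.models import MaintenanceState
    state = MaintenanceState.objects.first()
    return {'start': state.start.isoformat(), 'end': state.end.isoformat()} if state else None

def unset_maintenance():
    from osf.models import MaintenanceState
    MaintenanceState.objects.all().delete()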
7079c8d78256bb24608f8a74c26273ab653c886a
|
pal/grammars/test_grammars.py
|
pal/grammars/test_grammars.py
|
import re
from collections import defaultdict
from pal.grammars.grammars import make_chomsky_normal_form
from pal.grammars.grammars import parse_grammar_from_file
from pal.grammars.parser import generate_grammar_features
from pal.grammars.parser import parse
_GRAMMARS_DIR = 'pal/grammars/services'
_EXAMPLES_FILE = 'test/examples.txt'
def main():
test_grammar('movie')
def test_grammar(service_name):
print('Testing grammar for service \'{0}\'...'.format(service_name))
grammar_file = '{0}/{1}_grammar.txt'.format(_GRAMMARS_DIR, service_name)
grammar = parse_grammar_from_file(grammar_file)
make_chomsky_normal_form(grammar)
grammar_features = generate_grammar_features(grammar)
examples = load_examples_from_file(_EXAMPLES_FILE)
total = len(examples[service_name])
success = sum(parse(example, grammar_features)
for example in examples[service_name])
print('Success:\t\t{0}/{1}'.format(success, total))
total = 0
misses = 0
for key in examples.iterkeys():
if key == service_name:
continue
total += len(examples[key])
misses += sum(parse(counterexample, grammar_features)
for counterexample in examples[key])
print('False Positives:\t{0}/{1}'.format(misses, total))
def load_examples_from_file(examples_file):
with open(examples_file) as f:
examples = defaultdict(list)
cur_name = None
for raw_line in f:
line = re.sub('(.*)[\.\?!]$', '\\1', raw_line.strip().lower())
if cur_name is None:
if line:
cur_name = line
else:
if line:
examples[cur_name].append(line)
else:
cur_name = None
return examples
if __name__ == '__main__':
main()
|
Add test for service grammars: hit rate against example queries
|
Add test for service grammars: hit rate against example queries
|
Python
|
bsd-3-clause
|
Machyne/pal,Machyne/pal,Machyne/pal,Machyne/pal
|
Add test for service grammars: hit rate against example queries
|
import re
from collections import defaultdict
from pal.grammars.grammars import make_chomsky_normal_form
from pal.grammars.grammars import parse_grammar_from_file
from pal.grammars.parser import generate_grammar_features
from pal.grammars.parser import parse
_GRAMMARS_DIR = 'pal/grammars/services'
_EXAMPLES_FILE = 'test/examples.txt'
def main():
test_grammar('movie')
def test_grammar(service_name):
print('Testing grammar for service \'{0}\'...'.format(service_name))
grammar_file = '{0}/{1}_grammar.txt'.format(_GRAMMARS_DIR, service_name)
grammar = parse_grammar_from_file(grammar_file)
make_chomsky_normal_form(grammar)
grammar_features = generate_grammar_features(grammar)
examples = load_examples_from_file(_EXAMPLES_FILE)
total = len(examples[service_name])
success = sum(parse(example, grammar_features)
for example in examples[service_name])
print('Success:\t\t{0}/{1}'.format(success, total))
total = 0
misses = 0
for key in examples.iterkeys():
if key == service_name:
continue
total += len(examples[key])
misses += sum(parse(counterexample, grammar_features)
for counterexample in examples[key])
print('False Positives:\t{0}/{1}'.format(misses, total))
def load_examples_from_file(examples_file):
with open(examples_file) as f:
examples = defaultdict(list)
cur_name = None
for raw_line in f:
line = re.sub('(.*)[\.\?!]$', '\\1', raw_line.strip().lower())
if cur_name is None:
if line:
cur_name = line
else:
if line:
examples[cur_name].append(line)
else:
cur_name = None
return examples
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add test for service grammars: hit rate against example queries<commit_after>
|
import re
from collections import defaultdict
from pal.grammars.grammars import make_chomsky_normal_form
from pal.grammars.grammars import parse_grammar_from_file
from pal.grammars.parser import generate_grammar_features
from pal.grammars.parser import parse
_GRAMMARS_DIR = 'pal/grammars/services'
_EXAMPLES_FILE = 'test/examples.txt'
def main():
test_grammar('movie')
def test_grammar(service_name):
print('Testing grammar for service \'{0}\'...'.format(service_name))
grammar_file = '{0}/{1}_grammar.txt'.format(_GRAMMARS_DIR, service_name)
grammar = parse_grammar_from_file(grammar_file)
make_chomsky_normal_form(grammar)
grammar_features = generate_grammar_features(grammar)
examples = load_examples_from_file(_EXAMPLES_FILE)
total = len(examples[service_name])
success = sum(parse(example, grammar_features)
for example in examples[service_name])
print('Success:\t\t{0}/{1}'.format(success, total))
total = 0
misses = 0
for key in examples.iterkeys():
if key == service_name:
continue
total += len(examples[key])
misses += sum(parse(counterexample, grammar_features)
for counterexample in examples[key])
print('False Positives:\t{0}/{1}'.format(misses, total))
def load_examples_from_file(examples_file):
with open(examples_file) as f:
examples = defaultdict(list)
cur_name = None
for raw_line in f:
line = re.sub('(.*)[\.\?!]$', '\\1', raw_line.strip().lower())
if cur_name is None:
if line:
cur_name = line
else:
if line:
examples[cur_name].append(line)
else:
cur_name = None
return examples
if __name__ == '__main__':
main()
|
Add test for service grammars: hit rate against example queries
import re
from collections import defaultdict
from pal.grammars.grammars import make_chomsky_normal_form
from pal.grammars.grammars import parse_grammar_from_file
from pal.grammars.parser import generate_grammar_features
from pal.grammars.parser import parse
_GRAMMARS_DIR = 'pal/grammars/services'
_EXAMPLES_FILE = 'test/examples.txt'
def main():
test_grammar('movie')
def test_grammar(service_name):
print('Testing grammar for service \'{0}\'...'.format(service_name))
grammar_file = '{0}/{1}_grammar.txt'.format(_GRAMMARS_DIR, service_name)
grammar = parse_grammar_from_file(grammar_file)
make_chomsky_normal_form(grammar)
grammar_features = generate_grammar_features(grammar)
examples = load_examples_from_file(_EXAMPLES_FILE)
total = len(examples[service_name])
success = sum(parse(example, grammar_features)
for example in examples[service_name])
print('Success:\t\t{0}/{1}'.format(success, total))
total = 0
misses = 0
for key in examples.iterkeys():
if key == service_name:
continue
total += len(examples[key])
misses += sum(parse(counterexample, grammar_features)
for counterexample in examples[key])
print('False Positives:\t{0}/{1}'.format(misses, total))
def load_examples_from_file(examples_file):
with open(examples_file) as f:
examples = defaultdict(list)
cur_name = None
for raw_line in f:
line = re.sub('(.*)[\.\?!]$', '\\1', raw_line.strip().lower())
if cur_name is None:
if line:
cur_name = line
else:
if line:
examples[cur_name].append(line)
else:
cur_name = None
return examples
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add test for service grammars: hit rate against example queries<commit_after>import re
from collections import defaultdict
from pal.grammars.grammars import make_chomsky_normal_form
from pal.grammars.grammars import parse_grammar_from_file
from pal.grammars.parser import generate_grammar_features
from pal.grammars.parser import parse
_GRAMMARS_DIR = 'pal/grammars/services'
_EXAMPLES_FILE = 'test/examples.txt'
def main():
test_grammar('movie')
def test_grammar(service_name):
print('Testing grammar for service \'{0}\'...'.format(service_name))
grammar_file = '{0}/{1}_grammar.txt'.format(_GRAMMARS_DIR, service_name)
grammar = parse_grammar_from_file(grammar_file)
make_chomsky_normal_form(grammar)
grammar_features = generate_grammar_features(grammar)
examples = load_examples_from_file(_EXAMPLES_FILE)
total = len(examples[service_name])
success = sum(parse(example, grammar_features)
for example in examples[service_name])
print('Success:\t\t{0}/{1}'.format(success, total))
total = 0
misses = 0
for key in examples.iterkeys():
if key == service_name:
continue
total += len(examples[key])
misses += sum(parse(counterexample, grammar_features)
for counterexample in examples[key])
print('False Positives:\t{0}/{1}'.format(misses, total))
def load_examples_from_file(examples_file):
with open(examples_file) as f:
examples = defaultdict(list)
cur_name = None
for raw_line in f:
line = re.sub('(.*)[\.\?!]$', '\\1', raw_line.strip().lower())
if cur_name is None:
if line:
cur_name = line
else:
if line:
examples[cur_name].append(line)
else:
cur_name = None
return examples
if __name__ == '__main__':
main()
|
|
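load_examples_from_file in the record above implies a specific layout for test/examples.txt: blank-line-separated blocks whose first line names a service and whose remaining lines are example queries (lower-cased, with one trailing '.', '?' or '!' stripped). A small self-contained demonstration with invented sentences, assuming the module is importable as pal.grammars.test_grammars:
import tempfile
from pal.grammars.test_grammars import load_examples_from_file  # assumes the repo is on PYTHONPATH

sample = (
    "movie\n"
    "Who directed Alien?\n"
    "What year did Jaws come out\n"
    "\n"
    "weather\n"
    "Will it rain tomorrow?\n"
)
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write(sample)
    path = f.name
examples = load_examples_from_file(path)
# examples == {'movie': ['who directed alien', 'what year did jaws come out'],
#              'weather': ['will it rain tomorrow']}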
470b217e8ca8687414ff4cad39ca7cfe4710d956
|
tests/functional/test_pip_runner_script.py
|
tests/functional/test_pip_runner_script.py
|
import os
from pathlib import Path
from pip import __version__
from tests.lib import PipTestEnvironment
def test_runner_work_in_environments_with_no_pip(
script: PipTestEnvironment, pip_src: Path
) -> None:
runner = pip_src / "src" / "pip" / "__pip-runner__.py"
# Ensure there's no pip installed in the environment
script.pip("uninstall", "pip", "--yes", use_module=True)
script.pip("--version", expect_error=True)
# The runner script should still invoke a usable pip
result = script.run("python", os.fspath(runner), "--version")
assert __version__ in result.stdout
|
Add a test that the runner script works in environments without pip
|
Add a test that the runner script works in environments without pip
This ensures that the runner script can be used in environments where
pip is not installed.
|
Python
|
mit
|
sbidoul/pip,pypa/pip,pradyunsg/pip,pfmoore/pip,pradyunsg/pip,pypa/pip,sbidoul/pip,pfmoore/pip
|
Add a test that the runner script works in environments without pip
This ensures that the runner script can be used in environments where
pip is not installed.
|
import os
from pathlib import Path
from pip import __version__
from tests.lib import PipTestEnvironment
def test_runner_work_in_environments_with_no_pip(
script: PipTestEnvironment, pip_src: Path
) -> None:
runner = pip_src / "src" / "pip" / "__pip-runner__.py"
# Ensure there's no pip installed in the environment
script.pip("uninstall", "pip", "--yes", use_module=True)
script.pip("--version", expect_error=True)
# The runner script should still invoke a usable pip
result = script.run("python", os.fspath(runner), "--version")
assert __version__ in result.stdout
|
<commit_before><commit_msg>Add a test that the runner script works in environments without pip
This ensures that the runner script can be used in environments where
pip is not installed.<commit_after>
|
import os
from pathlib import Path
from pip import __version__
from tests.lib import PipTestEnvironment
def test_runner_work_in_environments_with_no_pip(
script: PipTestEnvironment, pip_src: Path
) -> None:
runner = pip_src / "src" / "pip" / "__pip-runner__.py"
# Ensure there's no pip installed in the environment
script.pip("uninstall", "pip", "--yes", use_module=True)
script.pip("--version", expect_error=True)
# The runner script should still invoke a usable pip
result = script.run("python", os.fspath(runner), "--version")
assert __version__ in result.stdout
|
Add a test that the runner script works in environments without pip
This ensures that the runner script can be used in environments where
pip is not installed.
import os
from pathlib import Path
from pip import __version__
from tests.lib import PipTestEnvironment
def test_runner_work_in_environments_with_no_pip(
script: PipTestEnvironment, pip_src: Path
) -> None:
runner = pip_src / "src" / "pip" / "__pip-runner__.py"
# Ensure there's no pip installed in the environment
script.pip("uninstall", "pip", "--yes", use_module=True)
script.pip("--version", expect_error=True)
# The runner script should still invoke a usable pip
result = script.run("python", os.fspath(runner), "--version")
assert __version__ in result.stdout
|
<commit_before><commit_msg>Add a test that the runner script works in environments without pip
This ensures that the runner script can be used in environments where
pip is not installed.<commit_after>import os
from pathlib import Path
from pip import __version__
from tests.lib import PipTestEnvironment
def test_runner_work_in_environments_with_no_pip(
script: PipTestEnvironment, pip_src: Path
) -> None:
runner = pip_src / "src" / "pip" / "__pip-runner__.py"
# Ensure there's no pip installed in the environment
script.pip("uninstall", "pip", "--yes", use_module=True)
script.pip("--version", expect_error=True)
# The runner script should still invoke a usable pip
result = script.run("python", os.fspath(runner), "--version")
assert __version__ in result.stdout
|
|
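The check in the record above can also be reproduced by hand outside pytest: in a virtual environment with pip uninstalled, run the runner script from a pip source checkout. A sketch, where PIP_SRC is a placeholder path rather than a value taken from the repository:
import os
import subprocess
import sys

PIP_SRC = os.path.expanduser("~/src/pip")  # assumption: adjust to your local checkout
runner = os.path.join(PIP_SRC, "src", "pip", "__pip-runner__.py")
# With pip removed from the environment, `python -m pip` fails...
subprocess.run([sys.executable, "-m", "pip", "--version"])  # non-zero exit expected
# ...but the runner script still dispatches to a working pip.
subprocess.run([sys.executable, runner, "--version"], check=True)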
34c9dbb30a731643b6a8747995fbf760dbd5377c
|
tests/header_test.py
|
tests/header_test.py
|
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import pycurl
import unittest
from . import appmanager
from . import util
setup_module, teardown_module = appmanager.setup(('app', 8380))
class HeaderTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
def tearDown(self):
self.curl.close()
def test_ascii_string_header(self):
self.check('x-test-header: ascii', 'ascii')
def test_ascii_unicode_header(self):
self.check(u'x-test-header: ascii', 'ascii')
def test_unicode_string_header(self):
self.check('x-test-header: Москва', 'Москва')
def test_unicode_unicode_header(self):
self.check(u'x-test-header: Москва', u'Москва')
def test_encoded_unicode_header(self):
self.check(u'x-test-header: Москва'.encode('utf-8'), u'Москва')
def check(self, send, expected):
self.curl.setopt(pycurl.URL, 'http://localhost:8380/header?h=x-test-header')
sio = util.BytesIO()
self.curl.setopt(pycurl.WRITEFUNCTION, sio.write)
self.curl.setopt(pycurl.HTTPHEADER, [send])
self.curl.perform()
self.assertEqual(expected, sio.getvalue().decode('utf-8'))
|
Add a test for sending unicode data in http headers
|
Add a test for sending unicode data in http headers
|
Python
|
lgpl-2.1
|
pycurl/pycurl,pycurl/pycurl,pycurl/pycurl
|
Add a test for sending unicode data in http headers
|
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import pycurl
import unittest
from . import appmanager
from . import util
setup_module, teardown_module = appmanager.setup(('app', 8380))
class HeaderTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
def tearDown(self):
self.curl.close()
def test_ascii_string_header(self):
self.check('x-test-header: ascii', 'ascii')
def test_ascii_unicode_header(self):
self.check(u'x-test-header: ascii', 'ascii')
def test_unicode_string_header(self):
self.check('x-test-header: Москва', 'Москва')
def test_unicode_unicode_header(self):
self.check(u'x-test-header: Москва', u'Москва')
def test_encoded_unicode_header(self):
self.check(u'x-test-header: Москва'.encode('utf-8'), u'Москва')
def check(self, send, expected):
self.curl.setopt(pycurl.URL, 'http://localhost:8380/header?h=x-test-header')
sio = util.BytesIO()
self.curl.setopt(pycurl.WRITEFUNCTION, sio.write)
self.curl.setopt(pycurl.HTTPHEADER, [send])
self.curl.perform()
self.assertEqual(expected, sio.getvalue().decode('utf-8'))
|
<commit_before><commit_msg>Add a test for sending unicode data in http headers<commit_after>
|
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import pycurl
import unittest
from . import appmanager
from . import util
setup_module, teardown_module = appmanager.setup(('app', 8380))
class HeaderTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
def tearDown(self):
self.curl.close()
def test_ascii_string_header(self):
self.check('x-test-header: ascii', 'ascii')
def test_ascii_unicode_header(self):
self.check(u'x-test-header: ascii', 'ascii')
def test_unicode_string_header(self):
self.check('x-test-header: Москва', 'Москва')
def test_unicode_unicode_header(self):
self.check(u'x-test-header: Москва', u'Москва')
def test_encoded_unicode_header(self):
self.check(u'x-test-header: Москва'.encode('utf-8'), u'Москва')
def check(self, send, expected):
self.curl.setopt(pycurl.URL, 'http://localhost:8380/header?h=x-test-header')
sio = util.BytesIO()
self.curl.setopt(pycurl.WRITEFUNCTION, sio.write)
self.curl.setopt(pycurl.HTTPHEADER, [send])
self.curl.perform()
self.assertEqual(expected, sio.getvalue().decode('utf-8'))
|
Add a test for sending unicode data in http headers
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import pycurl
import unittest
from . import appmanager
from . import util
setup_module, teardown_module = appmanager.setup(('app', 8380))
class HeaderTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
def tearDown(self):
self.curl.close()
def test_ascii_string_header(self):
self.check('x-test-header: ascii', 'ascii')
def test_ascii_unicode_header(self):
self.check(u'x-test-header: ascii', 'ascii')
def test_unicode_string_header(self):
self.check('x-test-header: Москва', 'Москва')
def test_unicode_unicode_header(self):
self.check(u'x-test-header: Москва', u'Москва')
def test_encoded_unicode_header(self):
self.check(u'x-test-header: Москва'.encode('utf-8'), u'Москва')
def check(self, send, expected):
self.curl.setopt(pycurl.URL, 'http://localhost:8380/header?h=x-test-header')
sio = util.BytesIO()
self.curl.setopt(pycurl.WRITEFUNCTION, sio.write)
self.curl.setopt(pycurl.HTTPHEADER, [send])
self.curl.perform()
self.assertEqual(expected, sio.getvalue().decode('utf-8'))
|
<commit_before><commit_msg>Add a test for sending unicode data in http headers<commit_after>#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import pycurl
import unittest
from . import appmanager
from . import util
setup_module, teardown_module = appmanager.setup(('app', 8380))
class HeaderTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
def tearDown(self):
self.curl.close()
def test_ascii_string_header(self):
self.check('x-test-header: ascii', 'ascii')
def test_ascii_unicode_header(self):
self.check(u'x-test-header: ascii', 'ascii')
def test_unicode_string_header(self):
self.check('x-test-header: Москва', 'Москва')
def test_unicode_unicode_header(self):
self.check(u'x-test-header: Москва', u'Москва')
def test_encoded_unicode_header(self):
self.check(u'x-test-header: Москва'.encode('utf-8'), u'Москва')
def check(self, send, expected):
self.curl.setopt(pycurl.URL, 'http://localhost:8380/header?h=x-test-header')
sio = util.BytesIO()
self.curl.setopt(pycurl.WRITEFUNCTION, sio.write)
self.curl.setopt(pycurl.HTTPHEADER, [send])
self.curl.perform()
self.assertEqual(expected, sio.getvalue().decode('utf-8'))
|
|
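The check() helper in the record above relies on a /header endpoint served by the test suite's appmanager app on port 8380: it must return the value of whichever request header is named in the h query parameter. That app is not part of this commit; the Flask sketch below only illustrates the behaviour the test assumes and is not the real implementation.
from flask import Flask, request

app = Flask(__name__)

@app.route('/header')
def header():
    # Echo back the value of the request header named by ?h=...
    name = request.args.get('h', '')
    return request.headers.get(name, '')

if __name__ == '__main__':
    app.run(port=8380)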
a205fbc29d2c6c8004b91c3fed5b2a03427e62a4
|
locations/spiders/victra.py
|
locations/spiders/victra.py
|
# -*- coding: utf-8 -*-
import json
import re
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
class VictraSpider(scrapy.Spider):
name = "victra"
allowed_domains = []
start_urls = [
'https://victra.com/Handlers/LocationData.ashx',
]
def parse_hours(self, store):
opening_hours = OpeningHours()
for hour in ['mon_hours', 'tue_hours', 'wed_hours', 'thu_hours', 'fri_hours', 'sat_hours', 'sun_hours']:
hours = store[hour]
if hours == "CLOSED":
continue
open_time, close_time = hours.split('-')
opening_hours.add_range(day=hour[:2].capitalize(), open_time=open_time,
close_time=close_time, time_format='%I:%M%p')
return opening_hours.as_opening_hours()
def parse(self, response):
stores = json.loads(response.body_as_unicode())
for store in stores:
properties = {
'name': store["name"],
'ref': store["id"],
'addr_full': store["address"],
'city': store["city"],
'state': store["state"],
'postcode': store["postal"],
'phone': store.get("phone"),
'lat': float(store["lat"]),
'lon': float(store["lng"]),
}
hours = self.parse_hours(store)
if hours:
properties["opening_hours"] = hours
yield GeojsonPointItem(**properties)
|
Add spider for Victra mobile phone stores
|
Add spider for Victra mobile phone stores
|
Python
|
mit
|
iandees/all-the-places,iandees/all-the-places,iandees/all-the-places
|
Add spider for Victra mobile phone stores
|
# -*- coding: utf-8 -*-
import json
import re
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
class VictraSpider(scrapy.Spider):
name = "victra"
allowed_domains = []
start_urls = [
'https://victra.com/Handlers/LocationData.ashx',
]
def parse_hours(self, store):
opening_hours = OpeningHours()
for hour in ['mon_hours', 'tue_hours', 'wed_hours', 'thu_hours', 'fri_hours', 'sat_hours', 'sun_hours']:
hours = store[hour]
if hours == "CLOSED":
continue
open_time, close_time = hours.split('-')
opening_hours.add_range(day=hour[:2].capitalize(), open_time=open_time,
close_time=close_time, time_format='%I:%M%p')
return opening_hours.as_opening_hours()
def parse(self, response):
stores = json.loads(response.body_as_unicode())
for store in stores:
properties = {
'name': store["name"],
'ref': store["id"],
'addr_full': store["address"],
'city': store["city"],
'state': store["state"],
'postcode': store["postal"],
'phone': store.get("phone"),
'lat': float(store["lat"]),
'lon': float(store["lng"]),
}
hours = self.parse_hours(store)
if hours:
properties["opening_hours"] = hours
yield GeojsonPointItem(**properties)
|
<commit_before><commit_msg>Add spider for Victra mobile phone stores<commit_after>
|
# -*- coding: utf-8 -*-
import json
import re
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
class VictraSpider(scrapy.Spider):
name = "victra"
allowed_domains = []
start_urls = [
'https://victra.com/Handlers/LocationData.ashx',
]
def parse_hours(self, store):
opening_hours = OpeningHours()
for hour in ['mon_hours', 'tue_hours', 'wed_hours', 'thu_hours', 'fri_hours', 'sat_hours', 'sun_hours']:
hours = store[hour]
if hours == "CLOSED":
continue
open_time, close_time = hours.split('-')
opening_hours.add_range(day=hour[:2].capitalize(), open_time=open_time,
close_time=close_time, time_format='%I:%M%p')
return opening_hours.as_opening_hours()
def parse(self, response):
stores = json.loads(response.body_as_unicode())
for store in stores:
properties = {
'name': store["name"],
'ref': store["id"],
'addr_full': store["address"],
'city': store["city"],
'state': store["state"],
'postcode': store["postal"],
'phone': store.get("phone"),
'lat': float(store["lat"]),
'lon': float(store["lng"]),
}
hours = self.parse_hours(store)
if hours:
properties["opening_hours"] = hours
yield GeojsonPointItem(**properties)
|
Add spider for Victra mobile phone stores
# -*- coding: utf-8 -*-
import json
import re
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
class VictraSpider(scrapy.Spider):
name = "victra"
allowed_domains = []
start_urls = [
'https://victra.com/Handlers/LocationData.ashx',
]
def parse_hours(self, store):
opening_hours = OpeningHours()
for hour in ['mon_hours', 'tue_hours', 'wed_hours', 'thu_hours', 'fri_hours', 'sat_hours', 'sun_hours']:
hours = store[hour]
if hours == "CLOSED":
continue
open_time, close_time = hours.split('-')
opening_hours.add_range(day=hour[:2].capitalize(), open_time=open_time,
close_time=close_time, time_format='%I:%M%p')
return opening_hours.as_opening_hours()
def parse(self, response):
stores = json.loads(response.body_as_unicode())
for store in stores:
properties = {
'name': store["name"],
'ref': store["id"],
'addr_full': store["address"],
'city': store["city"],
'state': store["state"],
'postcode': store["postal"],
'phone': store.get("phone"),
'lat': float(store["lat"]),
'lon': float(store["lng"]),
}
hours = self.parse_hours(store)
if hours:
properties["opening_hours"] = hours
yield GeojsonPointItem(**properties)
|
<commit_before><commit_msg>Add spider for Victra mobile phone stores<commit_after># -*- coding: utf-8 -*-
import json
import re
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
class VictraSpider(scrapy.Spider):
name = "victra"
allowed_domains = []
start_urls = [
'https://victra.com/Handlers/LocationData.ashx',
]
def parse_hours(self, store):
opening_hours = OpeningHours()
for hour in ['mon_hours', 'tue_hours', 'wed_hours', 'thu_hours', 'fri_hours', 'sat_hours', 'sun_hours']:
hours = store[hour]
if hours == "CLOSED":
continue
open_time, close_time = hours.split('-')
opening_hours.add_range(day=hour[:2].capitalize(), open_time=open_time,
close_time=close_time, time_format='%I:%M%p')
return opening_hours.as_opening_hours()
def parse(self, response):
stores = json.loads(response.body_as_unicode())
for store in stores:
properties = {
'name': store["name"],
'ref': store["id"],
'addr_full': store["address"],
'city': store["city"],
'state': store["state"],
'postcode': store["postal"],
'phone': store.get("phone"),
'lat': float(store["lat"]),
'lon': float(store["lng"]),
}
hours = self.parse_hours(store)
if hours:
properties["opening_hours"] = hours
yield GeojsonPointItem(**properties)
|
|
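parse_hours in the record above expects the LocationData.ashx feed to expose per-day fields such as mon_hours holding either the string CLOSED or an open-close range like 10:00AM-08:00PM. The snippet below mirrors that parsing loop with an invented record, using only the standard library so it runs without Scrapy or the locations package:
from datetime import datetime

store = {'mon_hours': '10:00AM-08:00PM', 'tue_hours': 'CLOSED'}  # invented sample data
for key in ('mon_hours', 'tue_hours'):
    hours = store[key]
    if hours == 'CLOSED':
        print(key[:2].capitalize(), 'closed')
        continue
    open_time, close_time = hours.split('-')
    # '%I:%M%p' is the same time format the spider hands to OpeningHours.add_range.
    print(key[:2].capitalize(),
          datetime.strptime(open_time, '%I:%M%p').time(),
          datetime.strptime(close_time, '%I:%M%p').time())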
6edadb2fe75345d24f5fec26bd4e519611f651f8
|
integration_tests/test_s3_deletion.py
|
integration_tests/test_s3_deletion.py
|
#!/usr/bin/env python
"""
Test S3 directory deletion functionality.
Uses the lsst-the-docs-test bucket in lsst-sqre's account. Also assumes that
credentials for that account are in the ltd-dev profile of ~/.aws/credentials.
"""
import sys
import os.path
app_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
sys.path.append(app_path)
from tempfile import TemporaryDirectory
import logging
import boto3
from app.s3 import delete_directory
def main():
logging.basicConfig(level=logging.INFO)
logging.getLogger('app.s3').level = logging.DEBUG
session = boto3.session.Session(profile_name='ltd-dev')
s3 = session.resource('s3')
bucket = s3.Bucket('lsst-the-docs-test')
paths = [
'test-dir/file1.txt',
'test-dir/file2.txt',
'test-dir/dir1/file11.txt',
'test-dir/dir1/file12.txt',
'test-dir/dir1/dir11/file111.txt',
'test-dir/dir1/dir11/file112.txt',
'test-dir/dir2/file21.txt',
'test-dir/dir2/file22.txt']
with TemporaryDirectory() as temp_dir:
create_test_files(temp_dir, paths)
for p in paths:
obj = bucket.Object(p)
obj.upload_file(os.path.join(temp_dir, p))
for p in paths:
obj = list(bucket.objects.filter(Prefix=p))
assert len(obj) == 1
delete_directory('lsst-the-docs-test',
'test-dir',
aws_profile='ltd-dev')
for p in paths:
obj = list(bucket.objects.filter(Prefix=p))
assert len(obj) == 0
def create_test_files(temp_dir, file_list):
for path in file_list:
write_file(temp_dir, path)
def write_file(root_dir, rel_path):
filepath = os.path.join(root_dir, rel_path)
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, 'w') as f:
f.write('Content of {0}'.format(os.path.basename(filepath)))
if __name__ == '__main__':
main()
|
Add integration test for S3 object deletion
|
Add integration test for S3 object deletion
By default, uses the test bucket 'lsst-the-docs-test' and assumes
credentials for the lsst-sqre account are in 'ltd-dev' profile of
~/.aws/credentials.
For DM-4951.
|
Python
|
mit
|
lsst-sqre/ltd-keeper,lsst-sqre/ltd-keeper
|
Add integration test for S3 object deletion
By default, uses the test bucket 'lsst-the-docs-test' and assumes
credentials for the lsst-sqre account are in 'ltd-dev' profile of
~/.aws/credentials.
For DM-4951.
|
#!/usr/bin/env python
"""
Test S3 directory deletion functionality.
Uses the lsst-the-docs-test bucket in lsst-sqre's account. Also assumes that
credentials for that account are in the ltd-dev profile of ~/.aws/credentials.
"""
import sys
import os.path
app_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
sys.path.append(app_path)
from tempfile import TemporaryDirectory
import logging
import boto3
from app.s3 import delete_directory
def main():
logging.basicConfig(level=logging.INFO)
logging.getLogger('app.s3').level = logging.DEBUG
session = boto3.session.Session(profile_name='ltd-dev')
s3 = session.resource('s3')
bucket = s3.Bucket('lsst-the-docs-test')
paths = [
'test-dir/file1.txt',
'test-dir/file2.txt',
'test-dir/dir1/file11.txt',
'test-dir/dir1/file12.txt',
'test-dir/dir1/dir11/file111.txt',
'test-dir/dir1/dir11/file112.txt',
'test-dir/dir2/file21.txt',
'test-dir/dir2/file22.txt']
with TemporaryDirectory() as temp_dir:
create_test_files(temp_dir, paths)
for p in paths:
obj = bucket.Object(p)
obj.upload_file(os.path.join(temp_dir, p))
for p in paths:
obj = list(bucket.objects.filter(Prefix=p))
assert len(obj) == 1
delete_directory('lsst-the-docs-test',
'test-dir',
aws_profile='ltd-dev')
for p in paths:
obj = list(bucket.objects.filter(Prefix=p))
assert len(obj) == 0
def create_test_files(temp_dir, file_list):
for path in file_list:
write_file(temp_dir, path)
def write_file(root_dir, rel_path):
filepath = os.path.join(root_dir, rel_path)
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, 'w') as f:
f.write('Content of {0}'.format(os.path.basename(filepath)))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add integration test for S3 object deletion
By default, uses the test bucket 'lsst-the-docs-test' and assumes
credentials for the lsst-sqre account are in 'ltd-dev' profile of
~/.aws/credentials.
For DM-4951.<commit_after>
|
#!/usr/bin/env python
"""
Test S3 directory deletion functionality.
Uses the lsst-the-docs-test bucket in lsst-sqre's account. Also assumes that
credentials for that account are in the ltd-dev profile of ~/.aws/credentials.
"""
import sys
import os.path
app_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
sys.path.append(app_path)
from tempfile import TemporaryDirectory
import logging
import boto3
from app.s3 import delete_directory
def main():
logging.basicConfig(level=logging.INFO)
logging.getLogger('app.s3').level = logging.DEBUG
session = boto3.session.Session(profile_name='ltd-dev')
s3 = session.resource('s3')
bucket = s3.Bucket('lsst-the-docs-test')
paths = [
'test-dir/file1.txt',
'test-dir/file2.txt',
'test-dir/dir1/file11.txt',
'test-dir/dir1/file12.txt',
'test-dir/dir1/dir11/file111.txt',
'test-dir/dir1/dir11/file112.txt',
'test-dir/dir2/file21.txt',
'test-dir/dir2/file22.txt']
with TemporaryDirectory() as temp_dir:
create_test_files(temp_dir, paths)
for p in paths:
obj = bucket.Object(p)
obj.upload_file(os.path.join(temp_dir, p))
for p in paths:
obj = list(bucket.objects.filter(Prefix=p))
assert len(obj) == 1
delete_directory('lsst-the-docs-test',
'test-dir',
aws_profile='ltd-dev')
for p in paths:
obj = list(bucket.objects.filter(Prefix=p))
assert len(obj) == 0
def create_test_files(temp_dir, file_list):
for path in file_list:
write_file(temp_dir, path)
def write_file(root_dir, rel_path):
filepath = os.path.join(root_dir, rel_path)
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, 'w') as f:
f.write('Content of {0}'.format(os.path.basename(filepath)))
if __name__ == '__main__':
main()
|
Add integration test for S3 object deletion
By default, uses the test bucket 'lsst-the-docs-test' and assumes
credentials for the lsst-sqre account are in 'ltd-dev' profile of
~/.aws/credentials.
For DM-4951.
#!/usr/bin/env python
"""
Test S3 directory deletion functionality.
Uses the lsst-the-docs-test bucket in lsst-sqre's account. Also assumes that
credentials for that account are in the ltd-dev profile of ~/.aws/credentials.
"""
import sys
import os.path
app_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
sys.path.append(app_path)
from tempfile import TemporaryDirectory
import logging
import boto3
from app.s3 import delete_directory
def main():
logging.basicConfig(level=logging.INFO)
logging.getLogger('app.s3').level = logging.DEBUG
session = boto3.session.Session(profile_name='ltd-dev')
s3 = session.resource('s3')
bucket = s3.Bucket('lsst-the-docs-test')
paths = [
'test-dir/file1.txt',
'test-dir/file2.txt',
'test-dir/dir1/file11.txt',
'test-dir/dir1/file12.txt',
'test-dir/dir1/dir11/file111.txt',
'test-dir/dir1/dir11/file112.txt',
'test-dir/dir2/file21.txt',
'test-dir/dir2/file22.txt']
with TemporaryDirectory() as temp_dir:
create_test_files(temp_dir, paths)
for p in paths:
obj = bucket.Object(p)
obj.upload_file(os.path.join(temp_dir, p))
for p in paths:
obj = list(bucket.objects.filter(Prefix=p))
assert len(obj) == 1
delete_directory('lsst-the-docs-test',
'test-dir',
aws_profile='ltd-dev')
for p in paths:
obj = list(bucket.objects.filter(Prefix=p))
assert len(obj) == 0
def create_test_files(temp_dir, file_list):
for path in file_list:
write_file(temp_dir, path)
def write_file(root_dir, rel_path):
filepath = os.path.join(root_dir, rel_path)
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, 'w') as f:
f.write('Content of {0}'.format(os.path.basename(filepath)))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add integration test for S3 object deletion
By default, uses the test bucket 'lsst-the-docs-test' and assumes
credentials for the lsst-sqre account are in 'ltd-dev' profile of
~/.aws/credentials.
For DM-4951.<commit_after>#!/usr/bin/env python
"""
Test S3 directory deletion functionality.
Uses the lsst-the-docs-test bucket in lsst-sqre's account. Also assumes that
credentials for that account are in the ltd-dev profile of ~/.aws/credentials.
"""
import sys
import os.path
app_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
sys.path.append(app_path)
from tempfile import TemporaryDirectory
import logging
import boto3
from app.s3 import delete_directory
def main():
logging.basicConfig(level=logging.INFO)
logging.getLogger('app.s3').level = logging.DEBUG
session = boto3.session.Session(profile_name='ltd-dev')
s3 = session.resource('s3')
bucket = s3.Bucket('lsst-the-docs-test')
paths = [
'test-dir/file1.txt',
'test-dir/file2.txt',
'test-dir/dir1/file11.txt',
'test-dir/dir1/file12.txt',
'test-dir/dir1/dir11/file111.txt',
'test-dir/dir1/dir11/file112.txt',
'test-dir/dir2/file21.txt',
'test-dir/dir2/file22.txt']
with TemporaryDirectory() as temp_dir:
create_test_files(temp_dir, paths)
for p in paths:
obj = bucket.Object(p)
obj.upload_file(os.path.join(temp_dir, p))
for p in paths:
obj = list(bucket.objects.filter(Prefix=p))
assert len(obj) == 1
delete_directory('lsst-the-docs-test',
'test-dir',
aws_profile='ltd-dev')
for p in paths:
obj = list(bucket.objects.filter(Prefix=p))
assert len(obj) == 0
def create_test_files(temp_dir, file_list):
for path in file_list:
write_file(temp_dir, path)
def write_file(root_dir, rel_path):
filepath = os.path.join(root_dir, rel_path)
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, 'w') as f:
f.write('Content of {0}'.format(os.path.basename(filepath)))
if __name__ == '__main__':
main()
|
|
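The integration test in the record above drives app.s3.delete_directory without showing it. With boto3, the core of such a helper is a prefix-filtered batch delete; the sketch below matches the call signature used by the test but is an assumption, and the real ltd-keeper code may add pagination or error handling.
# Hypothetical sketch, not the actual app.s3.delete_directory.
import boto3

def delete_directory(bucket_name, root_path, aws_profile=None):
    """Delete every object under root_path in bucket_name."""
    session = boto3.session.Session(profile_name=aws_profile)
    bucket = session.resource('s3').Bucket(bucket_name)
    # Ensure the prefix ends with '/' so 'test-dir' does not also match 'test-dir-2/...'.
    prefix = root_path if root_path.endswith('/') else root_path + '/'
    # The object collection's delete() issues batched DeleteObjects requests.
    bucket.objects.filter(Prefix=prefix).delete()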
15a9ec59e698a16cc37c3e0b430a376e3e73bd04
|
tests/test_parser.py
|
tests/test_parser.py
|
import unittest
from unittest.mock import patch, call
import whitepy.lexer as lexer
from whitepy.parser import Parser
class TestParser(unittest.TestCase):
def _get_tokens(self, filename):
with open(filename, 'r') as f:
lines = f.read()
item = lexer.Lexer(line=lines)
item.get_all_tokens()
return item.tokens
def test_get_value_signed(self):
num = Parser._get_value(Parser, ' ', signed=True)
self.assertEqual(num, 0)
def test_get_value_notsigned(self):
num = Parser._get_value(Parser, ' ', signed=False)
self.assertEqual(num, 0)
def test_hello_world(self):
tokens = self._get_tokens('./sample_ws/helloworld.ws')
p = Parser(tokens)
with self.assertRaises(SystemExit) as ec:
with patch('sys.stdout') as fake_stdout:
p.parse()
(fake_stdout.asset_has_calls[
call.buffer.write(b'H'),
call.buffer.write(b'e'),
call.buffer.write(b'l'),
call.buffer.write(b'l'),
call.buffer.write(b'o'),
call.buffer.write(b','),
call.buffer.write(b' '),
call.buffer.write(b'W'),
call.buffer.write(b'o'),
call.buffer.write(b'r'),
call.buffer.write(b'l'),
call.buffer.write(b'd'),
call.buffer.write(b'!'),
call.buffer.write(b'\n')]) and self.assertEqual(
ec.exception.code, 0)
def test_fibonacci(self):
tokens = self._get_tokens('./sample_ws/helloworld.ws')
p = Parser(tokens)
with self.assertRaises(SystemExit) as ec:
with patch('builtins.input', side_effect='2'):
with patch('sys.stdout') as fake_stdout:
p.parse()
(fake_stdout.asset_has_calls[
call.buffer.write(b'1'),
call.buffer.write(b'1'),
call.buffer.write(b'2'),
call.buffer.write(b'3'),
call.buffer.write(b'\n')]) and self.assertEqual(
ec.exception.code, 0)
|
Add new test for Parser
|
Add new test for Parser
Tests the main Parser.parse() function with hello world and Fibonacci whitespace
samples
|
Python
|
apache-2.0
|
yasn77/whitepy
|
Add new test for Parser
Tests the main Parser.parse() function with hello world and Fibonacci whitespace
samples
|
import unittest
from unittest.mock import patch, call
import whitepy.lexer as lexer
from whitepy.parser import Parser
class TestParser(unittest.TestCase):
def _get_tokens(self, filename):
with open(filename, 'r') as f:
lines = f.read()
item = lexer.Lexer(line=lines)
item.get_all_tokens()
return item.tokens
def test_get_value_signed(self):
num = Parser._get_value(Parser, ' ', signed=True)
self.assertEqual(num, 0)
def test_get_value_notsigned(self):
num = Parser._get_value(Parser, ' ', signed=False)
self.assertEqual(num, 0)
def test_hello_world(self):
tokens = self._get_tokens('./sample_ws/helloworld.ws')
p = Parser(tokens)
with self.assertRaises(SystemExit) as ec:
with patch('sys.stdout') as fake_stdout:
p.parse()
(fake_stdout.asset_has_calls[
call.buffer.write(b'H'),
call.buffer.write(b'e'),
call.buffer.write(b'l'),
call.buffer.write(b'l'),
call.buffer.write(b'o'),
call.buffer.write(b','),
call.buffer.write(b' '),
call.buffer.write(b'W'),
call.buffer.write(b'o'),
call.buffer.write(b'r'),
call.buffer.write(b'l'),
call.buffer.write(b'd'),
call.buffer.write(b'!'),
call.buffer.write(b'\n')]) and self.assertEqual(
ec.exception.code, 0)
def test_fibonacci(self):
tokens = self._get_tokens('./sample_ws/helloworld.ws')
p = Parser(tokens)
with self.assertRaises(SystemExit) as ec:
with patch('builtins.input', side_effect='2'):
with patch('sys.stdout') as fake_stdout:
p.parse()
(fake_stdout.asset_has_calls[
call.buffer.write(b'1'),
call.buffer.write(b'1'),
call.buffer.write(b'2'),
call.buffer.write(b'3'),
call.buffer.write(b'\n')]) and self.assertEqual(
ec.exception.code, 0)
|
<commit_before><commit_msg>Add new test for Parser
Tests the main Parser.parse() function with hello world and Fibonacci whitespace
samples<commit_after>
|
import unittest
from unittest.mock import patch, call
import whitepy.lexer as lexer
from whitepy.parser import Parser
class TestParser(unittest.TestCase):
def _get_tokens(self, filename):
with open(filename, 'r') as f:
lines = f.read()
item = lexer.Lexer(line=lines)
item.get_all_tokens()
return item.tokens
def test_get_value_signed(self):
num = Parser._get_value(Parser, ' ', signed=True)
self.assertEqual(num, 0)
def test_get_value_notsigned(self):
num = Parser._get_value(Parser, ' ', signed=False)
self.assertEqual(num, 0)
def test_hello_world(self):
tokens = self._get_tokens('./sample_ws/helloworld.ws')
p = Parser(tokens)
with self.assertRaises(SystemExit) as ec:
with patch('sys.stdout') as fake_stdout:
p.parse()
(fake_stdout.asset_has_calls[
call.buffer.write(b'H'),
call.buffer.write(b'e'),
call.buffer.write(b'l'),
call.buffer.write(b'l'),
call.buffer.write(b'o'),
call.buffer.write(b','),
call.buffer.write(b' '),
call.buffer.write(b'W'),
call.buffer.write(b'o'),
call.buffer.write(b'r'),
call.buffer.write(b'l'),
call.buffer.write(b'd'),
call.buffer.write(b'!'),
call.buffer.write(b'\n')]) and self.assertEqual(
ec.exception.code, 0)
def test_fibonacci(self):
tokens = self._get_tokens('./sample_ws/helloworld.ws')
p = Parser(tokens)
with self.assertRaises(SystemExit) as ec:
with patch('builtins.input', side_effect='2'):
with patch('sys.stdout') as fake_stdout:
p.parse()
(fake_stdout.asset_has_calls[
call.buffer.write(b'1'),
call.buffer.write(b'1'),
call.buffer.write(b'2'),
call.buffer.write(b'3'),
call.buffer.write(b'\n')]) and self.assertEqual(
ec.exception.code, 0)
|
Add new test for Parser
Tests the main Parser.parse() function with hello world and Fibonacci whitespace
samples
import unittest
from unittest.mock import patch, call
import whitepy.lexer as lexer
from whitepy.parser import Parser
class TestParser(unittest.TestCase):
def _get_tokens(self, filename):
with open(filename, 'r') as f:
lines = f.read()
item = lexer.Lexer(line=lines)
item.get_all_tokens()
return item.tokens
def test_get_value_signed(self):
num = Parser._get_value(Parser, ' ', signed=True)
self.assertEqual(num, 0)
def test_get_value_notsigned(self):
num = Parser._get_value(Parser, ' ', signed=False)
self.assertEqual(num, 0)
def test_hello_world(self):
tokens = self._get_tokens('./sample_ws/helloworld.ws')
p = Parser(tokens)
with self.assertRaises(SystemExit) as ec:
with patch('sys.stdout') as fake_stdout:
p.parse()
(fake_stdout.asset_has_calls[
call.buffer.write(b'H'),
call.buffer.write(b'e'),
call.buffer.write(b'l'),
call.buffer.write(b'l'),
call.buffer.write(b'o'),
call.buffer.write(b','),
call.buffer.write(b' '),
call.buffer.write(b'W'),
call.buffer.write(b'o'),
call.buffer.write(b'r'),
call.buffer.write(b'l'),
call.buffer.write(b'd'),
call.buffer.write(b'!'),
call.buffer.write(b'\n')]) and self.assertEqual(
ec.exception.code, 0)
def test_fibonacci(self):
tokens = self._get_tokens('./sample_ws/helloworld.ws')
p = Parser(tokens)
with self.assertRaises(SystemExit) as ec:
with patch('builtins.input', side_effect='2'):
with patch('sys.stdout') as fake_stdout:
p.parse()
(fake_stdout.asset_has_calls[
call.buffer.write(b'1'),
call.buffer.write(b'1'),
call.buffer.write(b'2'),
call.buffer.write(b'3'),
call.buffer.write(b'\n')]) and self.assertEqual(
ec.exception.code, 0)
|
<commit_before><commit_msg>Add new test for Parser
Tests the main Parser.parse() function with hello world and Fibonacci whitespace
samples<commit_after>import unittest
from unittest.mock import patch, call
import whitepy.lexer as lexer
from whitepy.parser import Parser
class TestParser(unittest.TestCase):
def _get_tokens(self, filename):
with open(filename, 'r') as f:
lines = f.read()
item = lexer.Lexer(line=lines)
item.get_all_tokens()
return item.tokens
def test_get_value_signed(self):
num = Parser._get_value(Parser, ' ', signed=True)
self.assertEqual(num, 0)
def test_get_value_notsigned(self):
num = Parser._get_value(Parser, ' ', signed=False)
self.assertEqual(num, 0)
def test_hello_world(self):
tokens = self._get_tokens('./sample_ws/helloworld.ws')
p = Parser(tokens)
with self.assertRaises(SystemExit) as ec:
with patch('sys.stdout') as fake_stdout:
p.parse()
(fake_stdout.asset_has_calls[
call.buffer.write(b'H'),
call.buffer.write(b'e'),
call.buffer.write(b'l'),
call.buffer.write(b'l'),
call.buffer.write(b'o'),
call.buffer.write(b','),
call.buffer.write(b' '),
call.buffer.write(b'W'),
call.buffer.write(b'o'),
call.buffer.write(b'r'),
call.buffer.write(b'l'),
call.buffer.write(b'd'),
call.buffer.write(b'!'),
call.buffer.write(b'\n')]) and self.assertEqual(
ec.exception.code, 0)
def test_fibonacci(self):
tokens = self._get_tokens('./sample_ws/helloworld.ws')
p = Parser(tokens)
with self.assertRaises(SystemExit) as ec:
with patch('builtins.input', side_effect='2'):
with patch('sys.stdout') as fake_stdout:
p.parse()
(fake_stdout.asset_has_calls[
call.buffer.write(b'1'),
call.buffer.write(b'1'),
call.buffer.write(b'2'),
call.buffer.write(b'3'),
call.buffer.write(b'\n')]) and self.assertEqual(
ec.exception.code, 0)
|
|
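A caveat about the assertions in the record above: unittest.mock provides assert_has_calls, not asset_has_calls, and because patch() substitutes a MagicMock, both the misspelled attribute lookup and the [...] subscript succeed silently, so only the SystemExit checks actually verify anything. The conventional form of that assertion, shown here as a standalone sketch rather than a patch to the repo:
import sys
from unittest.mock import patch, call

with patch('sys.stdout') as fake_stdout:
    sys.stdout.buffer.write(b'H')
    sys.stdout.buffer.write(b'i')
# assert_has_calls takes a list of call objects and raises AssertionError on mismatch.
fake_stdout.assert_has_calls([call.buffer.write(b'H'), call.buffer.write(b'i')])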
dc10584666199797b77a759696c56e179ef8ca21
|
billjobs/serializers.py
|
billjobs/serializers.py
|
from django.contrib.auth.models import User, Group
from rest_framework import serializers
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email', 'groups')
|
Add serializer for User model
|
Add serializer for User model
|
Python
|
mit
|
ioO/billjobs
|
Add serializer for User model
|
from django.contrib.auth.models import User, Group
from rest_framework import serializers
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email', 'groups')
|
<commit_before><commit_msg>Add serializer for User model<commit_after>
|
from django.contrib.auth.models import User, Group
from rest_framework import serializers
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email', 'groups')
|
Add serializer for User model
from django.contrib.auth.models import User, Group
from rest_framework import serializers
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email', 'groups')
|
<commit_before><commit_msg>Add serializer for User model<commit_after>from django.contrib.auth.models import User, Group
from rest_framework import serializers
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email', 'groups')
|
|
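A HyperlinkedModelSerializer like the one in the record above only renders its url field when it is served by a view registered with a router, which supplies the user-detail URL name and the request context; the commit adds only the serializer. A conventional pairing, hypothetical for this repository since its views and urls are not shown here:
# Sketch of typical wiring for the serializer; billjobs' actual views/urls are not in this commit.
from django.contrib.auth.models import User
from rest_framework import routers, viewsets
from billjobs.serializers import UserSerializer

class UserViewSet(viewsets.ReadOnlyModelViewSet):
    queryset = User.objects.all()
    serializer_class = UserSerializer

router = routers.DefaultRouter()
router.register(r'users', UserViewSet)
urlpatterns = router.urls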
1c3082e18b63ef0ba7fb73dd69544980da7ad16c
|
zinnia/migrations/0003_publication_date.py
|
zinnia/migrations/0003_publication_date.py
|
from django.db import models
from django.db import migrations
from django.utils import timezone
def fill_publication_date(apps, schema_editor):
Entry = apps.get_model('zinnia', 'Entry')
for entry in Entry.objects.all():
entry.publication_date = entry.creation_date
entry.save()
def unfill_publication_date(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('zinnia', '0002_lead_paragraph_and_image_caption'),
]
operations = [
migrations.AlterModelOptions(
name='entry',
options={
'ordering': ['-publication_date'],
'get_latest_by': 'publication_date',
'verbose_name': 'entry',
'verbose_name_plural': 'entries',
'permissions': (('can_view_all', 'Can view all entries'),
('can_change_status', 'Can change status'),
('can_change_author', 'Can change author(s)'))
}
),
migrations.AddField(
model_name='entry',
name='publication_date',
field=models.DateTimeField(
default=timezone.now,
help_text="Used to build the entry's URL.",
verbose_name='publication date',
db_index=True),
),
migrations.AlterField(
model_name='entry',
name='creation_date',
field=models.DateTimeField(
default=timezone.now,
verbose_name='creation date'),
),
migrations.AlterField(
model_name='entry',
name='slug',
field=models.SlugField(
help_text="Used to build the entry's URL.",
max_length=255,
verbose_name='slug',
unique_for_date='publication_date'),
),
migrations.AlterIndexTogether(
name='entry',
index_together=set([
('slug', 'publication_date'),
('status', 'publication_date',
'start_publication', 'end_publication')]),
),
migrations.RunPython(fill_publication_date, unfill_publication_date)
]
|
Add migration for publication_date field
|
Add migration for publication_date field
|
Python
|
bsd-3-clause
|
ghachey/django-blog-zinnia,dapeng0802/django-blog-zinnia,petecummings/django-blog-zinnia,aorzh/django-blog-zinnia,extertioner/django-blog-zinnia,Zopieux/django-blog-zinnia,bywbilly/django-blog-zinnia,extertioner/django-blog-zinnia,marctc/django-blog-zinnia,bywbilly/django-blog-zinnia,marctc/django-blog-zinnia,extertioner/django-blog-zinnia,Zopieux/django-blog-zinnia,dapeng0802/django-blog-zinnia,dapeng0802/django-blog-zinnia,ghachey/django-blog-zinnia,petecummings/django-blog-zinnia,Fantomas42/django-blog-zinnia,aorzh/django-blog-zinnia,aorzh/django-blog-zinnia,Fantomas42/django-blog-zinnia,Fantomas42/django-blog-zinnia,marctc/django-blog-zinnia,Zopieux/django-blog-zinnia,ghachey/django-blog-zinnia,petecummings/django-blog-zinnia,bywbilly/django-blog-zinnia
|
Add migration for publication_date field
|
from django.db import models
from django.db import migrations
from django.utils import timezone
def fill_publication_date(apps, schema_editor):
Entry = apps.get_model('zinnia', 'Entry')
for entry in Entry.objects.all():
entry.publication_date = entry.creation_date
entry.save()
def unfill_publication_date(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('zinnia', '0002_lead_paragraph_and_image_caption'),
]
operations = [
migrations.AlterModelOptions(
name='entry',
options={
'ordering': ['-publication_date'],
'get_latest_by': 'publication_date',
'verbose_name': 'entry',
'verbose_name_plural': 'entries',
'permissions': (('can_view_all', 'Can view all entries'),
('can_change_status', 'Can change status'),
('can_change_author', 'Can change author(s)'))
}
),
migrations.AddField(
model_name='entry',
name='publication_date',
field=models.DateTimeField(
default=timezone.now,
help_text="Used to build the entry's URL.",
verbose_name='publication date',
db_index=True),
),
migrations.AlterField(
model_name='entry',
name='creation_date',
field=models.DateTimeField(
default=timezone.now,
verbose_name='creation date'),
),
migrations.AlterField(
model_name='entry',
name='slug',
field=models.SlugField(
help_text="Used to build the entry's URL.",
max_length=255,
verbose_name='slug',
unique_for_date='publication_date'),
),
migrations.AlterIndexTogether(
name='entry',
index_together=set([
('slug', 'publication_date'),
('status', 'publication_date',
'start_publication', 'end_publication')]),
),
migrations.RunPython(fill_publication_date, unfill_publication_date)
]
|
<commit_before><commit_msg>Add migration for publication_date field<commit_after>
|
from django.db import models
from django.db import migrations
from django.utils import timezone
def fill_publication_date(apps, schema_editor):
Entry = apps.get_model('zinnia', 'Entry')
for entry in Entry.objects.all():
entry.publication_date = entry.creation_date
entry.save()
def unfill_publication_date(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('zinnia', '0002_lead_paragraph_and_image_caption'),
]
operations = [
migrations.AlterModelOptions(
name='entry',
options={
'ordering': ['-publication_date'],
'get_latest_by': 'publication_date',
'verbose_name': 'entry',
'verbose_name_plural': 'entries',
'permissions': (('can_view_all', 'Can view all entries'),
('can_change_status', 'Can change status'),
('can_change_author', 'Can change author(s)'))
}
),
migrations.AddField(
model_name='entry',
name='publication_date',
field=models.DateTimeField(
default=timezone.now,
help_text="Used to build the entry's URL.",
verbose_name='publication date',
db_index=True),
),
migrations.AlterField(
model_name='entry',
name='creation_date',
field=models.DateTimeField(
default=timezone.now,
verbose_name='creation date'),
),
migrations.AlterField(
model_name='entry',
name='slug',
field=models.SlugField(
help_text="Used to build the entry's URL.",
max_length=255,
verbose_name='slug',
unique_for_date='publication_date'),
),
migrations.AlterIndexTogether(
name='entry',
index_together=set([
('slug', 'publication_date'),
('status', 'publication_date',
'start_publication', 'end_publication')]),
),
migrations.RunPython(fill_publication_date, unfill_publication_date)
]
|
Add migration for publication_date fieldfrom django.db import models
from django.db import migrations
from django.utils import timezone
def fill_publication_date(apps, schema_editor):
Entry = apps.get_model('zinnia', 'Entry')
for entry in Entry.objects.all():
entry.publication_date = entry.creation_date
entry.save()
def unfill_publication_date(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('zinnia', '0002_lead_paragraph_and_image_caption'),
]
operations = [
migrations.AlterModelOptions(
name='entry',
options={
'ordering': ['-publication_date'],
'get_latest_by': 'publication_date',
'verbose_name': 'entry',
'verbose_name_plural': 'entries',
'permissions': (('can_view_all', 'Can view all entries'),
('can_change_status', 'Can change status'),
('can_change_author', 'Can change author(s)'))
}
),
migrations.AddField(
model_name='entry',
name='publication_date',
field=models.DateTimeField(
default=timezone.now,
help_text="Used to build the entry's URL.",
verbose_name='publication date',
db_index=True),
),
migrations.AlterField(
model_name='entry',
name='creation_date',
field=models.DateTimeField(
default=timezone.now,
verbose_name='creation date'),
),
migrations.AlterField(
model_name='entry',
name='slug',
field=models.SlugField(
help_text="Used to build the entry's URL.",
max_length=255,
verbose_name='slug',
unique_for_date='publication_date'),
),
migrations.AlterIndexTogether(
name='entry',
index_together=set([
('slug', 'publication_date'),
('status', 'publication_date',
'start_publication', 'end_publication')]),
),
migrations.RunPython(fill_publication_date, unfill_publication_date)
]
|
<commit_before><commit_msg>Add migration for publication_date field<commit_after>from django.db import models
from django.db import migrations
from django.utils import timezone
def fill_publication_date(apps, schema_editor):
Entry = apps.get_model('zinnia', 'Entry')
for entry in Entry.objects.all():
entry.publication_date = entry.creation_date
entry.save()
def unfill_publication_date(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('zinnia', '0002_lead_paragraph_and_image_caption'),
]
operations = [
migrations.AlterModelOptions(
name='entry',
options={
'ordering': ['-publication_date'],
'get_latest_by': 'publication_date',
'verbose_name': 'entry',
'verbose_name_plural': 'entries',
'permissions': (('can_view_all', 'Can view all entries'),
('can_change_status', 'Can change status'),
('can_change_author', 'Can change author(s)'))
}
),
migrations.AddField(
model_name='entry',
name='publication_date',
field=models.DateTimeField(
default=timezone.now,
help_text="Used to build the entry's URL.",
verbose_name='publication date',
db_index=True),
),
migrations.AlterField(
model_name='entry',
name='creation_date',
field=models.DateTimeField(
default=timezone.now,
verbose_name='creation date'),
),
migrations.AlterField(
model_name='entry',
name='slug',
field=models.SlugField(
help_text="Used to build the entry's URL.",
max_length=255,
verbose_name='slug',
unique_for_date='publication_date'),
),
migrations.AlterIndexTogether(
name='entry',
index_together=set([
('slug', 'publication_date'),
('status', 'publication_date',
'start_publication', 'end_publication')]),
),
migrations.RunPython(fill_publication_date, unfill_publication_date)
]
|
|
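The migration above is an instance of the usual RunPython data-migration pattern: add the column, backfill it from an existing field inside the forward function, and keep the operation reversible. Reduced to its skeleton with generic app and model names (not Zinnia's):

from django.db import migrations


def copy_created_to_published(apps, schema_editor):
    # Always fetch the historical model through apps.get_model(); never import it.
    Article = apps.get_model('blog', 'Article')
    for article in Article.objects.all():
        article.published_at = article.created_at
        article.save(update_fields=['published_at'])


class Migration(migrations.Migration):
    dependencies = [('blog', '0001_initial')]
    operations = [
        migrations.RunPython(copy_created_to_published,
                             migrations.RunPython.noop),
    ]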
e9f6031f9368cd036826f40c113e5ca9d420409a
|
lily/contacts/migrations/0013_auto_20170717_2005.py
|
lily/contacts/migrations/0013_auto_20170717_2005.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contacts', '0012_remove_contact_preposition'),
]
operations = [
migrations.AlterField(
model_name='function',
name='manager',
field=models.ForeignKey(related_name='manager', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='contacts.Contact', null=True),
),
]
|
Add extra migration for the manager field
|
Add extra migration for the manager field
|
Python
|
agpl-3.0
|
HelloLily/hellolily,HelloLily/hellolily,HelloLily/hellolily,HelloLily/hellolily
|
Add extra migration for the manager field
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contacts', '0012_remove_contact_preposition'),
]
operations = [
migrations.AlterField(
model_name='function',
name='manager',
field=models.ForeignKey(related_name='manager', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='contacts.Contact', null=True),
),
]
|
<commit_before><commit_msg>Add extra migration for the manager field<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contacts', '0012_remove_contact_preposition'),
]
operations = [
migrations.AlterField(
model_name='function',
name='manager',
field=models.ForeignKey(related_name='manager', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='contacts.Contact', null=True),
),
]
|
Add extra migration for the manager field# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contacts', '0012_remove_contact_preposition'),
]
operations = [
migrations.AlterField(
model_name='function',
name='manager',
field=models.ForeignKey(related_name='manager', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='contacts.Contact', null=True),
),
]
|
<commit_before><commit_msg>Add extra migration for the manager field<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contacts', '0012_remove_contact_preposition'),
]
operations = [
migrations.AlterField(
model_name='function',
name='manager',
field=models.ForeignKey(related_name='manager', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='contacts.Contact', null=True),
),
]
|
|
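The field change above also illustrates a general Django rule: on_delete=SET_NULL is only legal on a nullable column, so null=True has to travel with it. A generic model-level sketch with made-up models (not the HelloLily schema):

from django.db import models


class Contact(models.Model):
    name = models.CharField(max_length=255)


class Function(models.Model):
    # Deleting the Contact keeps this row and simply clears the link;
    # without null=True, SET_NULL would be rejected at migration time.
    manager = models.ForeignKey(
        Contact,
        related_name='managed_functions',
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
    )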
b2635fd52aa474e6dd375ad804f46a16945962a4
|
lib/svtplay/service/hbo.py
|
lib/svtplay/service/hbo.py
|
class Hbo():
def handle(self, url):
return "hbo.com" in url
def get(self, url):
parse = urlparse(url)
try:
other = parse[5]
except KeyError:
log.error("Something wrong with that url")
sys.exit(2)
match = re.search("^/(.*).html", other)
if not match:
log.error("Cant find video file")
sys.exit(2)
url = "http://www.hbo.com/data/content/%s.xml" % match.group(1)
data = get_http_data(url)
xml = ET.XML(data)
videoid = xml.find("content")[1].find("videoId").text
url = "http://render.cdn.hbo.com/data/content/global/videos/data/%s.xml" % videoid
data = get_http_data(url)
xml = ET.XML(data)
ss = xml.find("videos")
if sys.version_info < (2, 7):
sa = list(ss.getiterator("size"))
else:
sa = list(ss.iter("size"))
streams = {}
for i in sa:
stream = {}
stream["path"] = i.find("tv14").find("path").text
streams[int(i.attrib["width"])] = stream
test = select_quality(options, streams)
download_rtmp(options, test["path"])
|
class Hbo():
def handle(self, url):
return "hbo.com" in url
def get(self, options, url):
parse = urlparse(url)
try:
other = parse[5]
except KeyError:
log.error("Something wrong with that url")
sys.exit(2)
match = re.search("^/(.*).html", other)
if not match:
log.error("Cant find video file")
sys.exit(2)
url = "http://www.hbo.com/data/content/%s.xml" % match.group(1)
data = get_http_data(url)
xml = ET.XML(data)
videoid = xml.find("content")[1].find("videoId").text
url = "http://render.cdn.hbo.com/data/content/global/videos/data/%s.xml" % videoid
data = get_http_data(url)
xml = ET.XML(data)
ss = xml.find("videos")
if sys.version_info < (2, 7):
sa = list(ss.getiterator("size"))
else:
sa = list(ss.iter("size"))
streams = {}
for i in sa:
stream = {}
stream["path"] = i.find("tv14").find("path").text
streams[int(i.attrib["width"])] = stream
test = select_quality(options, streams)
download_rtmp(options, test["path"])
|
Add options argument to get()
|
HBO: Add options argument to get()
|
Python
|
mit
|
qnorsten/svtplay-dl,dalgr/svtplay-dl,olof/svtplay-dl,leakim/svtplay-dl,spaam/svtplay-dl,selepo/svtplay-dl,OakNinja/svtplay-dl,iwconfig/svtplay-dl,dalgr/svtplay-dl,spaam/svtplay-dl,OakNinja/svtplay-dl,leakim/svtplay-dl,iwconfig/svtplay-dl,qnorsten/svtplay-dl,selepo/svtplay-dl,OakNinja/svtplay-dl,leakim/svtplay-dl,olof/svtplay-dl
|
class Hbo():
def handle(self, url):
return "hbo.com" in url
def get(self, url):
parse = urlparse(url)
try:
other = parse[5]
except KeyError:
log.error("Something wrong with that url")
sys.exit(2)
match = re.search("^/(.*).html", other)
if not match:
log.error("Cant find video file")
sys.exit(2)
url = "http://www.hbo.com/data/content/%s.xml" % match.group(1)
data = get_http_data(url)
xml = ET.XML(data)
videoid = xml.find("content")[1].find("videoId").text
url = "http://render.cdn.hbo.com/data/content/global/videos/data/%s.xml" % videoid
data = get_http_data(url)
xml = ET.XML(data)
ss = xml.find("videos")
if sys.version_info < (2, 7):
sa = list(ss.getiterator("size"))
else:
sa = list(ss.iter("size"))
streams = {}
for i in sa:
stream = {}
stream["path"] = i.find("tv14").find("path").text
streams[int(i.attrib["width"])] = stream
test = select_quality(options, streams)
download_rtmp(options, test["path"])
HBO: Add options argument to get()
|
class Hbo():
def handle(self, url):
return "hbo.com" in url
def get(self, options, url):
parse = urlparse(url)
try:
other = parse[5]
except KeyError:
log.error("Something wrong with that url")
sys.exit(2)
match = re.search("^/(.*).html", other)
if not match:
log.error("Cant find video file")
sys.exit(2)
url = "http://www.hbo.com/data/content/%s.xml" % match.group(1)
data = get_http_data(url)
xml = ET.XML(data)
videoid = xml.find("content")[1].find("videoId").text
url = "http://render.cdn.hbo.com/data/content/global/videos/data/%s.xml" % videoid
data = get_http_data(url)
xml = ET.XML(data)
ss = xml.find("videos")
if sys.version_info < (2, 7):
sa = list(ss.getiterator("size"))
else:
sa = list(ss.iter("size"))
streams = {}
for i in sa:
stream = {}
stream["path"] = i.find("tv14").find("path").text
streams[int(i.attrib["width"])] = stream
test = select_quality(options, streams)
download_rtmp(options, test["path"])
|
<commit_before>class Hbo():
def handle(self, url):
return "hbo.com" in url
def get(self, url):
parse = urlparse(url)
try:
other = parse[5]
except KeyError:
log.error("Something wrong with that url")
sys.exit(2)
match = re.search("^/(.*).html", other)
if not match:
log.error("Cant find video file")
sys.exit(2)
url = "http://www.hbo.com/data/content/%s.xml" % match.group(1)
data = get_http_data(url)
xml = ET.XML(data)
videoid = xml.find("content")[1].find("videoId").text
url = "http://render.cdn.hbo.com/data/content/global/videos/data/%s.xml" % videoid
data = get_http_data(url)
xml = ET.XML(data)
ss = xml.find("videos")
if sys.version_info < (2, 7):
sa = list(ss.getiterator("size"))
else:
sa = list(ss.iter("size"))
streams = {}
for i in sa:
stream = {}
stream["path"] = i.find("tv14").find("path").text
streams[int(i.attrib["width"])] = stream
test = select_quality(options, streams)
download_rtmp(options, test["path"])
<commit_msg>HBO: Add options argument to get()<commit_after>
|
class Hbo():
def handle(self, url):
return "hbo.com" in url
def get(self, options, url):
parse = urlparse(url)
try:
other = parse[5]
except KeyError:
log.error("Something wrong with that url")
sys.exit(2)
match = re.search("^/(.*).html", other)
if not match:
log.error("Cant find video file")
sys.exit(2)
url = "http://www.hbo.com/data/content/%s.xml" % match.group(1)
data = get_http_data(url)
xml = ET.XML(data)
videoid = xml.find("content")[1].find("videoId").text
url = "http://render.cdn.hbo.com/data/content/global/videos/data/%s.xml" % videoid
data = get_http_data(url)
xml = ET.XML(data)
ss = xml.find("videos")
if sys.version_info < (2, 7):
sa = list(ss.getiterator("size"))
else:
sa = list(ss.iter("size"))
streams = {}
for i in sa:
stream = {}
stream["path"] = i.find("tv14").find("path").text
streams[int(i.attrib["width"])] = stream
test = select_quality(options, streams)
download_rtmp(options, test["path"])
|
class Hbo():
def handle(self, url):
return "hbo.com" in url
def get(self, url):
parse = urlparse(url)
try:
other = parse[5]
except KeyError:
log.error("Something wrong with that url")
sys.exit(2)
match = re.search("^/(.*).html", other)
if not match:
log.error("Cant find video file")
sys.exit(2)
url = "http://www.hbo.com/data/content/%s.xml" % match.group(1)
data = get_http_data(url)
xml = ET.XML(data)
videoid = xml.find("content")[1].find("videoId").text
url = "http://render.cdn.hbo.com/data/content/global/videos/data/%s.xml" % videoid
data = get_http_data(url)
xml = ET.XML(data)
ss = xml.find("videos")
if sys.version_info < (2, 7):
sa = list(ss.getiterator("size"))
else:
sa = list(ss.iter("size"))
streams = {}
for i in sa:
stream = {}
stream["path"] = i.find("tv14").find("path").text
streams[int(i.attrib["width"])] = stream
test = select_quality(options, streams)
download_rtmp(options, test["path"])
HBO: Add options argument to get()class Hbo():
def handle(self, url):
return "hbo.com" in url
def get(self, options, url):
parse = urlparse(url)
try:
other = parse[5]
except KeyError:
log.error("Something wrong with that url")
sys.exit(2)
match = re.search("^/(.*).html", other)
if not match:
log.error("Cant find video file")
sys.exit(2)
url = "http://www.hbo.com/data/content/%s.xml" % match.group(1)
data = get_http_data(url)
xml = ET.XML(data)
videoid = xml.find("content")[1].find("videoId").text
url = "http://render.cdn.hbo.com/data/content/global/videos/data/%s.xml" % videoid
data = get_http_data(url)
xml = ET.XML(data)
ss = xml.find("videos")
if sys.version_info < (2, 7):
sa = list(ss.getiterator("size"))
else:
sa = list(ss.iter("size"))
streams = {}
for i in sa:
stream = {}
stream["path"] = i.find("tv14").find("path").text
streams[int(i.attrib["width"])] = stream
test = select_quality(options, streams)
download_rtmp(options, test["path"])
|
<commit_before>class Hbo():
def handle(self, url):
return "hbo.com" in url
def get(self, url):
parse = urlparse(url)
try:
other = parse[5]
except KeyError:
log.error("Something wrong with that url")
sys.exit(2)
match = re.search("^/(.*).html", other)
if not match:
log.error("Cant find video file")
sys.exit(2)
url = "http://www.hbo.com/data/content/%s.xml" % match.group(1)
data = get_http_data(url)
xml = ET.XML(data)
videoid = xml.find("content")[1].find("videoId").text
url = "http://render.cdn.hbo.com/data/content/global/videos/data/%s.xml" % videoid
data = get_http_data(url)
xml = ET.XML(data)
ss = xml.find("videos")
if sys.version_info < (2, 7):
sa = list(ss.getiterator("size"))
else:
sa = list(ss.iter("size"))
streams = {}
for i in sa:
stream = {}
stream["path"] = i.find("tv14").find("path").text
streams[int(i.attrib["width"])] = stream
test = select_quality(options, streams)
download_rtmp(options, test["path"])
<commit_msg>HBO: Add options argument to get()<commit_after>class Hbo():
def handle(self, url):
return "hbo.com" in url
def get(self, options, url):
parse = urlparse(url)
try:
other = parse[5]
except KeyError:
log.error("Something wrong with that url")
sys.exit(2)
match = re.search("^/(.*).html", other)
if not match:
log.error("Cant find video file")
sys.exit(2)
url = "http://www.hbo.com/data/content/%s.xml" % match.group(1)
data = get_http_data(url)
xml = ET.XML(data)
videoid = xml.find("content")[1].find("videoId").text
url = "http://render.cdn.hbo.com/data/content/global/videos/data/%s.xml" % videoid
data = get_http_data(url)
xml = ET.XML(data)
ss = xml.find("videos")
if sys.version_info < (2, 7):
sa = list(ss.getiterator("size"))
else:
sa = list(ss.iter("size"))
streams = {}
for i in sa:
stream = {}
stream["path"] = i.find("tv14").find("path").text
streams[int(i.attrib["width"])] = stream
test = select_quality(options, streams)
download_rtmp(options, test["path"])
|
c2817959fea63db83a328a8ce61b95031b04c4bd
|
mopidy/frontends/mpd/__init__.py
|
mopidy/frontends/mpd/__init__.py
|
from mopidy.frontends.mpd.dispatcher import MpdDispatcher
from mopidy.frontends.mpd.process import MpdProcess
class MpdFrontend(object):
"""
The MPD frontend.
"""
def __init__(self):
self.server = None
self.dispatcher = None
def start_server(self, core_queue):
"""
Starts the MPD server.
:param core_queue: the core queue
:type core_queue: :class:`multiprocessing.Queue`
"""
self.process = MpdProcess(core_queue)
self.process.start()
def create_dispatcher(self, backend):
"""
Creates a dispatcher for MPD requests.
:param backend: the backend
:type backend: :class:`mopidy.backends.base.BaseBackend`
:rtype: :class:`mopidy.frontends.mpd.dispatcher.MpdDispatcher`
"""
self.dispatcher = MpdDispatcher(backend)
return self.dispatcher
|
from mopidy.frontends.mpd.dispatcher import MpdDispatcher
from mopidy.frontends.mpd.process import MpdProcess
class MpdFrontend(object):
"""
The MPD frontend.
"""
def __init__(self):
self.process = None
self.dispatcher = None
def start_server(self, core_queue):
"""
Starts the MPD server.
:param core_queue: the core queue
:type core_queue: :class:`multiprocessing.Queue`
"""
self.process = MpdProcess(core_queue)
self.process.start()
def create_dispatcher(self, backend):
"""
Creates a dispatcher for MPD requests.
:param backend: the backend
:type backend: :class:`mopidy.backends.base.BaseBackend`
:rtype: :class:`mopidy.frontends.mpd.dispatcher.MpdDispatcher`
"""
self.dispatcher = MpdDispatcher(backend)
return self.dispatcher
|
Initialize the correct variable in init
|
Initialize the correct variable in init
|
Python
|
apache-2.0
|
dbrgn/mopidy,dbrgn/mopidy,ali/mopidy,diandiankan/mopidy,kingosticks/mopidy,SuperStarPL/mopidy,abarisain/mopidy,pacificIT/mopidy,kingosticks/mopidy,tkem/mopidy,bacontext/mopidy,pacificIT/mopidy,SuperStarPL/mopidy,ali/mopidy,vrs01/mopidy,quartz55/mopidy,quartz55/mopidy,tkem/mopidy,tkem/mopidy,mopidy/mopidy,dbrgn/mopidy,glogiotatidis/mopidy,jcass77/mopidy,glogiotatidis/mopidy,bacontext/mopidy,tkem/mopidy,jmarsik/mopidy,mokieyue/mopidy,liamw9534/mopidy,rawdlite/mopidy,hkariti/mopidy,glogiotatidis/mopidy,ali/mopidy,mokieyue/mopidy,diandiankan/mopidy,pacificIT/mopidy,adamcik/mopidy,ali/mopidy,ZenithDK/mopidy,diandiankan/mopidy,mokieyue/mopidy,rawdlite/mopidy,bencevans/mopidy,quartz55/mopidy,swak/mopidy,kingosticks/mopidy,mopidy/mopidy,woutervanwijk/mopidy,jodal/mopidy,priestd09/mopidy,liamw9534/mopidy,jcass77/mopidy,bencevans/mopidy,abarisain/mopidy,quartz55/mopidy,vrs01/mopidy,jmarsik/mopidy,rawdlite/mopidy,mopidy/mopidy,jodal/mopidy,swak/mopidy,jcass77/mopidy,dbrgn/mopidy,adamcik/mopidy,bencevans/mopidy,rawdlite/mopidy,bacontext/mopidy,ZenithDK/mopidy,hkariti/mopidy,priestd09/mopidy,woutervanwijk/mopidy,hkariti/mopidy,ZenithDK/mopidy,pacificIT/mopidy,vrs01/mopidy,swak/mopidy,bacontext/mopidy,jmarsik/mopidy,ZenithDK/mopidy,glogiotatidis/mopidy,priestd09/mopidy,SuperStarPL/mopidy,vrs01/mopidy,hkariti/mopidy,jodal/mopidy,jmarsik/mopidy,swak/mopidy,mokieyue/mopidy,diandiankan/mopidy,adamcik/mopidy,bencevans/mopidy,SuperStarPL/mopidy
|
from mopidy.frontends.mpd.dispatcher import MpdDispatcher
from mopidy.frontends.mpd.process import MpdProcess
class MpdFrontend(object):
"""
The MPD frontend.
"""
def __init__(self):
self.server = None
self.dispatcher = None
def start_server(self, core_queue):
"""
Starts the MPD server.
:param core_queue: the core queue
:type core_queue: :class:`multiprocessing.Queue`
"""
self.process = MpdProcess(core_queue)
self.process.start()
def create_dispatcher(self, backend):
"""
Creates a dispatcher for MPD requests.
:param backend: the backend
:type backend: :class:`mopidy.backends.base.BaseBackend`
:rtype: :class:`mopidy.frontends.mpd.dispatcher.MpdDispatcher`
"""
self.dispatcher = MpdDispatcher(backend)
return self.dispatcher
Initialize the correct variable in init
|
from mopidy.frontends.mpd.dispatcher import MpdDispatcher
from mopidy.frontends.mpd.process import MpdProcess
class MpdFrontend(object):
"""
The MPD frontend.
"""
def __init__(self):
self.process = None
self.dispatcher = None
def start_server(self, core_queue):
"""
Starts the MPD server.
:param core_queue: the core queue
:type core_queue: :class:`multiprocessing.Queue`
"""
self.process = MpdProcess(core_queue)
self.process.start()
def create_dispatcher(self, backend):
"""
Creates a dispatcher for MPD requests.
:param backend: the backend
:type backend: :class:`mopidy.backends.base.BaseBackend`
:rtype: :class:`mopidy.frontends.mpd.dispatcher.MpdDispatcher`
"""
self.dispatcher = MpdDispatcher(backend)
return self.dispatcher
|
<commit_before>from mopidy.frontends.mpd.dispatcher import MpdDispatcher
from mopidy.frontends.mpd.process import MpdProcess
class MpdFrontend(object):
"""
The MPD frontend.
"""
def __init__(self):
self.server = None
self.dispatcher = None
def start_server(self, core_queue):
"""
Starts the MPD server.
:param core_queue: the core queue
:type core_queue: :class:`multiprocessing.Queue`
"""
self.process = MpdProcess(core_queue)
self.process.start()
def create_dispatcher(self, backend):
"""
Creates a dispatcher for MPD requests.
:param backend: the backend
:type backend: :class:`mopidy.backends.base.BaseBackend`
:rtype: :class:`mopidy.frontends.mpd.dispatcher.MpdDispatcher`
"""
self.dispatcher = MpdDispatcher(backend)
return self.dispatcher
<commit_msg>Initialize the correct variable in init<commit_after>
|
from mopidy.frontends.mpd.dispatcher import MpdDispatcher
from mopidy.frontends.mpd.process import MpdProcess
class MpdFrontend(object):
"""
The MPD frontend.
"""
def __init__(self):
self.process = None
self.dispatcher = None
def start_server(self, core_queue):
"""
Starts the MPD server.
:param core_queue: the core queue
:type core_queue: :class:`multiprocessing.Queue`
"""
self.process = MpdProcess(core_queue)
self.process.start()
def create_dispatcher(self, backend):
"""
Creates a dispatcher for MPD requests.
:param backend: the backend
:type backend: :class:`mopidy.backends.base.BaseBackend`
:rtype: :class:`mopidy.frontends.mpd.dispatcher.MpdDispatcher`
"""
self.dispatcher = MpdDispatcher(backend)
return self.dispatcher
|
from mopidy.frontends.mpd.dispatcher import MpdDispatcher
from mopidy.frontends.mpd.process import MpdProcess
class MpdFrontend(object):
"""
The MPD frontend.
"""
def __init__(self):
self.server = None
self.dispatcher = None
def start_server(self, core_queue):
"""
Starts the MPD server.
:param core_queue: the core queue
:type core_queue: :class:`multiprocessing.Queue`
"""
self.process = MpdProcess(core_queue)
self.process.start()
def create_dispatcher(self, backend):
"""
Creates a dispatcher for MPD requests.
:param backend: the backend
:type backend: :class:`mopidy.backends.base.BaseBackend`
:rtype: :class:`mopidy.frontends.mpd.dispatcher.MpdDispatcher`
"""
self.dispatcher = MpdDispatcher(backend)
return self.dispatcher
Initialize the correct variable in initfrom mopidy.frontends.mpd.dispatcher import MpdDispatcher
from mopidy.frontends.mpd.process import MpdProcess
class MpdFrontend(object):
"""
The MPD frontend.
"""
def __init__(self):
self.process = None
self.dispatcher = None
def start_server(self, core_queue):
"""
Starts the MPD server.
:param core_queue: the core queue
:type core_queue: :class:`multiprocessing.Queue`
"""
self.process = MpdProcess(core_queue)
self.process.start()
def create_dispatcher(self, backend):
"""
Creates a dispatcher for MPD requests.
:param backend: the backend
:type backend: :class:`mopidy.backends.base.BaseBackend`
:rtype: :class:`mopidy.frontends.mpd.dispatcher.MpdDispatcher`
"""
self.dispatcher = MpdDispatcher(backend)
return self.dispatcher
|
<commit_before>from mopidy.frontends.mpd.dispatcher import MpdDispatcher
from mopidy.frontends.mpd.process import MpdProcess
class MpdFrontend(object):
"""
The MPD frontend.
"""
def __init__(self):
self.server = None
self.dispatcher = None
def start_server(self, core_queue):
"""
Starts the MPD server.
:param core_queue: the core queue
:type core_queue: :class:`multiprocessing.Queue`
"""
self.process = MpdProcess(core_queue)
self.process.start()
def create_dispatcher(self, backend):
"""
Creates a dispatcher for MPD requests.
:param backend: the backend
:type backend: :class:`mopidy.backends.base.BaseBackend`
:rtype: :class:`mopidy.frontends.mpd.dispatcher.MpdDispatcher`
"""
self.dispatcher = MpdDispatcher(backend)
return self.dispatcher
<commit_msg>Initialize the correct variable in init<commit_after>from mopidy.frontends.mpd.dispatcher import MpdDispatcher
from mopidy.frontends.mpd.process import MpdProcess
class MpdFrontend(object):
"""
The MPD frontend.
"""
def __init__(self):
self.process = None
self.dispatcher = None
def start_server(self, core_queue):
"""
Starts the MPD server.
:param core_queue: the core queue
:type core_queue: :class:`multiprocessing.Queue`
"""
self.process = MpdProcess(core_queue)
self.process.start()
def create_dispatcher(self, backend):
"""
Creates a dispatcher for MPD requests.
:param backend: the backend
:type backend: :class:`mopidy.backends.base.BaseBackend`
:rtype: :class:`mopidy.frontends.mpd.dispatcher.MpdDispatcher`
"""
self.dispatcher = MpdDispatcher(backend)
return self.dispatcher
|
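The one-line fix above matters because other methods usually probe the attribute that __init__ promises to create. A tiny generic illustration of why initialising it to None up front avoids an AttributeError; the stop_server() method here is hypothetical and not part of Mopidy's API:

class Frontend(object):
    """Minimal stand-in showing why __init__ should create every attribute it owns."""

    def __init__(self):
        self.process = None  # created for real by start_server()

    def start_server(self):
        self.process = object()  # placeholder for spawning the real process

    def stop_server(self):
        # Safe even if start_server() never ran, because __init__ set the name.
        if self.process is not None:
            self.process = None


frontend = Frontend()
frontend.stop_server()  # no AttributeError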
df810f11409212edce61263205288cfbb7221a2d
|
scripts/evaluation/process_api_performance_files.py
|
scripts/evaluation/process_api_performance_files.py
|
import sys
import os
output_dir = sys.argv[1]
def filename(num_duplications):
return os.path.join(output_dir, "output_%s.csv" % (str(num_duplications),))
def time_for_method(fname):
with open(fname) as f:
lines = f.readlines()[1:]
lines = [line.replace("\n", "").split(", ") for line in lines]
lines = [(l[0].strip(), float(l[1].strip())) for l in lines]
return lines
times_for_method = {}
for num_duplications in (1, 5, 10, 20, 40, 60, 80, 100, 140, 160, 180, 250, 300, 350, 400):
fname = filename(num_duplications)
if os.path.isfile(fname):
for (method, time) in time_for_method(fname):
if method not in times_for_method:
times_for_method[method] = []
times_for_method[method].append((num_duplications, time))
for (method, times) in times_for_method.iteritems():
for (num_duplications, time) in times:
print(method + ", " + str(num_duplications) + ", " + str(time))
|
Write script to parse API performance data
|
Write script to parse API performance data
|
Python
|
mit
|
mitdbg/modeldb,mitdbg/modeldb,mitdbg/modeldb,mitdbg/modeldb,mitdbg/modeldb
|
Write script to parse API performance data
|
import sys
import os
output_dir = sys.argv[1]
def filename(num_duplications):
return os.path.join(output_dir, "output_%s.csv" % (str(num_duplications),))
def time_for_method(fname):
with open(fname) as f:
lines = f.readlines()[1:]
lines = [line.replace("\n", "").split(", ") for line in lines]
lines = [(l[0].strip(), float(l[1].strip())) for l in lines]
return lines
times_for_method = {}
for num_duplications in (1, 5, 10, 20, 40, 60, 80, 100, 140, 160, 180, 250, 300, 350, 400):
fname = filename(num_duplications)
if os.path.isfile(fname):
for (method, time) in time_for_method(fname):
if method not in times_for_method:
times_for_method[method] = []
times_for_method[method].append((num_duplications, time))
for (method, times) in times_for_method.iteritems():
for (num_duplications, time) in times:
print(method + ", " + str(num_duplications) + ", " + str(time))
|
<commit_before><commit_msg>Write script to parse API performance data<commit_after>
|
import sys
import os
output_dir = sys.argv[1]
def filename(num_duplications):
return os.path.join(output_dir, "output_%s.csv" % (str(num_duplications),))
def time_for_method(fname):
with open(fname) as f:
lines = f.readlines()[1:]
lines = [line.replace("\n", "").split(", ") for line in lines]
lines = [(l[0].strip(), float(l[1].strip())) for l in lines]
return lines
times_for_method = {}
for num_duplications in (1, 5, 10, 20, 40, 60, 80, 100, 140, 160, 180, 250, 300, 350, 400):
fname = filename(num_duplications)
if os.path.isfile(fname):
for (method, time) in time_for_method(fname):
if method not in times_for_method:
times_for_method[method] = []
times_for_method[method].append((num_duplications, time))
for (method, times) in times_for_method.iteritems():
for (num_duplications, time) in times:
print(method + ", " + str(num_duplications) + ", " + str(time))
|
Write script to parse API performance dataimport sys
import os
output_dir = sys.argv[1]
def filename(num_duplications):
return os.path.join(output_dir, "output_%s.csv" % (str(num_duplications),))
def time_for_method(fname):
with open(fname) as f:
lines = f.readlines()[1:]
lines = [line.replace("\n", "").split(", ") for line in lines]
lines = [(l[0].strip(), float(l[1].strip())) for l in lines]
return lines
times_for_method = {}
for num_duplications in (1, 5, 10, 20, 40, 60, 80, 100, 140, 160, 180, 250, 300, 350, 400):
fname = filename(num_duplications)
if os.path.isfile(fname):
for (method, time) in time_for_method(fname):
if method not in times_for_method:
times_for_method[method] = []
times_for_method[method].append((num_duplications, time))
for (method, times) in times_for_method.iteritems():
for (num_duplications, time) in times:
print(method + ", " + str(num_duplications) + ", " + str(time))
|
<commit_before><commit_msg>Write script to parse API performance data<commit_after>import sys
import os
output_dir = sys.argv[1]
def filename(num_duplications):
return os.path.join(output_dir, "output_%s.csv" % (str(num_duplications),))
def time_for_method(fname):
with open(fname) as f:
lines = f.readlines()[1:]
lines = [line.replace("\n", "").split(", ") for line in lines]
lines = [(l[0].strip(), float(l[1].strip())) for l in lines]
return lines
times_for_method = {}
for num_duplications in (1, 5, 10, 20, 40, 60, 80, 100, 140, 160, 180, 250, 300, 350, 400):
fname = filename(num_duplications)
if os.path.isfile(fname):
for (method, time) in time_for_method(fname):
if method not in times_for_method:
times_for_method[method] = []
times_for_method[method].append((num_duplications, time))
for (method, times) in times_for_method.iteritems():
for (num_duplications, time) in times:
print(method + ", " + str(num_duplications) + ", " + str(time))
|
|
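One portability note on the script above: dict.iteritems() exists only on Python 2, and the hand-built comma strings can be replaced by the csv module. A rough Python 3 equivalent of the output loop, fed with made-up sample data rather than real measurements:

import csv
import sys


def emit_rows(times_for_method, out=sys.stdout):
    """Write one (method, num_duplications, seconds) row per measurement."""
    writer = csv.writer(out)
    for method, times in times_for_method.items():  # .items() replaces .iteritems()
        for num_duplications, seconds in times:
            writer.writerow([method, num_duplications, seconds])


if __name__ == '__main__':
    emit_rows({'getProject': [(1, 0.12), (5, 0.48)]})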
d44dd85a1020da3971a50ceb1c0c8150b05d5334
|
scripts/utils/shape-to-csv.py
|
scripts/utils/shape-to-csv.py
|
#!/usr/bin/env python3
import csv
import ogr
import osr
import sys
import unicodedata
def usage():
print('shape-to-csv.py PATH_TO_INPUT_SHAPE PATH_TO_OUTPUT_CSV')
def main():
# Inspired by http://gis.stackexchange.com/a/19178
    # This is designed to work with the places.shp from Switzerland.
shpfile = sys.argv[1]
csvfile = sys.argv[2]
# Open files
csvfile = open(csvfile,'w')
ds = ogr.Open(shpfile)
lyr = ds.GetLayer()
# Get field names
fields = ['num', 'weight', 'search_string', 'label', 'origin', 'geom_quadindex', 'geom_st_box2d', 'rank', 'x', 'y', 'lat', 'lon']
csvwriter = csv.DictWriter(csvfile, fields)
csvwriter.writeheader()
# Write attributes and kml out to csv
source = osr.SpatialReference()
source.ImportFromEPSG(4326)
target = osr.SpatialReference()
target.ImportFromEPSG(2056)
transform = osr.CoordinateTransformation(source, target)
for i, feat in enumerate(lyr):
attributes = feat.items()
if attributes['name'] is None:
continue
geom = feat.GetGeometryRef()
geom_2056 = geom.Clone()
geom_2056.Transform(transform)
attributes['num'] = attributes.pop('osm_id')
attributes['weight'] = i + 1
# Remove accents in search string. See http://stackoverflow.com/a/15261831
attributes['search_string'] = ''.join((c for c in unicodedata.normalize('NFD', attributes['name']) if unicodedata.category(c) != 'Mn'))
attributes['label'] = attributes.pop('name')
attributes['origin'] = 'places'
attributes['geom_quadindex'] = 0
attributes['geom_st_box2d'] = 'BOX({y} {x}, {y} {x})'.format(y=geom_2056.GetX(), x=geom_2056.GetY())
attributes['rank'] = 10
attributes['x'] = geom_2056.GetY()
attributes['y'] = geom_2056.GetX()
attributes['lat'] = geom.GetY()
attributes['lon'] = geom.GetX()
del attributes['population']
del attributes['type']
csvwriter.writerow(attributes)
#clean up
csvfile.close()
if __name__ == '__main__':
if len(sys.argv) != 3:
usage()
sys.exit(1)
main()
|
Add script to convert ShapeFile to CSV for search
|
Add script to convert ShapeFile to CSV for search
Used by customer infra to provide search.
|
Python
|
agpl-3.0
|
ioda-net/geo-infra,ioda-net/geo-infra,ioda-net/geo-infra,ioda-net/geo-infra
|
Add script to convert ShapeFile to CSV for search
Used by customer infra to provide search.
|
#!/usr/bin/env python3
import csv
import ogr
import osr
import sys
import unicodedata
def usage():
print('shape-to-csv.py PATH_TO_INPUT_SHAPE PATH_TO_OUTPUT_CSV')
def main():
# Inspired by http://gis.stackexchange.com/a/19178
    # This is designed to work with the places.shp from Switzerland.
shpfile = sys.argv[1]
csvfile = sys.argv[2]
# Open files
csvfile = open(csvfile,'w')
ds = ogr.Open(shpfile)
lyr = ds.GetLayer()
# Get field names
fields = ['num', 'weight', 'search_string', 'label', 'origin', 'geom_quadindex', 'geom_st_box2d', 'rank', 'x', 'y', 'lat', 'lon']
csvwriter = csv.DictWriter(csvfile, fields)
csvwriter.writeheader()
# Write attributes and kml out to csv
source = osr.SpatialReference()
source.ImportFromEPSG(4326)
target = osr.SpatialReference()
target.ImportFromEPSG(2056)
transform = osr.CoordinateTransformation(source, target)
for i, feat in enumerate(lyr):
attributes = feat.items()
if attributes['name'] is None:
continue
geom = feat.GetGeometryRef()
geom_2056 = geom.Clone()
geom_2056.Transform(transform)
attributes['num'] = attributes.pop('osm_id')
attributes['weight'] = i + 1
# Remove accents in search string. See http://stackoverflow.com/a/15261831
attributes['search_string'] = ''.join((c for c in unicodedata.normalize('NFD', attributes['name']) if unicodedata.category(c) != 'Mn'))
attributes['label'] = attributes.pop('name')
attributes['origin'] = 'places'
attributes['geom_quadindex'] = 0
attributes['geom_st_box2d'] = 'BOX({y} {x}, {y} {x})'.format(y=geom_2056.GetX(), x=geom_2056.GetY())
attributes['rank'] = 10
attributes['x'] = geom_2056.GetY()
attributes['y'] = geom_2056.GetX()
attributes['lat'] = geom.GetY()
attributes['lon'] = geom.GetX()
del attributes['population']
del attributes['type']
csvwriter.writerow(attributes)
#clean up
csvfile.close()
if __name__ == '__main__':
if len(sys.argv) != 3:
usage()
sys.exit(1)
main()
|
<commit_before><commit_msg>Add script to convert ShapeFile to CSV for search
Used by customer infra to provide search.<commit_after>
|
#!/usr/bin/env python3
import csv
import ogr
import osr
import sys
import unicodedata
def usage():
print('shape-to-csv.py PATH_TO_INPUT_SHAPE PATH_TO_OUTPUT_CSV')
def main():
# Inspired by http://gis.stackexchange.com/a/19178
    # This is designed to work with the places.shp from Switzerland.
shpfile = sys.argv[1]
csvfile = sys.argv[2]
# Open files
csvfile = open(csvfile,'w')
ds = ogr.Open(shpfile)
lyr = ds.GetLayer()
# Get field names
fields = ['num', 'weight', 'search_string', 'label', 'origin', 'geom_quadindex', 'geom_st_box2d', 'rank', 'x', 'y', 'lat', 'lon']
csvwriter = csv.DictWriter(csvfile, fields)
csvwriter.writeheader()
# Write attributes and kml out to csv
source = osr.SpatialReference()
source.ImportFromEPSG(4326)
target = osr.SpatialReference()
target.ImportFromEPSG(2056)
transform = osr.CoordinateTransformation(source, target)
for i, feat in enumerate(lyr):
attributes = feat.items()
if attributes['name'] is None:
continue
geom = feat.GetGeometryRef()
geom_2056 = geom.Clone()
geom_2056.Transform(transform)
attributes['num'] = attributes.pop('osm_id')
attributes['weight'] = i + 1
# Remove accents in search string. See http://stackoverflow.com/a/15261831
attributes['search_string'] = ''.join((c for c in unicodedata.normalize('NFD', attributes['name']) if unicodedata.category(c) != 'Mn'))
attributes['label'] = attributes.pop('name')
attributes['origin'] = 'places'
attributes['geom_quadindex'] = 0
attributes['geom_st_box2d'] = 'BOX({y} {x}, {y} {x})'.format(y=geom_2056.GetX(), x=geom_2056.GetY())
attributes['rank'] = 10
attributes['x'] = geom_2056.GetY()
attributes['y'] = geom_2056.GetX()
attributes['lat'] = geom.GetY()
attributes['lon'] = geom.GetX()
del attributes['population']
del attributes['type']
csvwriter.writerow(attributes)
#clean up
csvfile.close()
if __name__ == '__main__':
if len(sys.argv) != 3:
usage()
sys.exit(1)
main()
|
Add script to convert ShapeFile to CSV for search
Used by customer infra to provide search.#!/usr/bin/env python3
import csv
import ogr
import osr
import sys
import unicodedata
def usage():
print('shape-to-csv.py PATH_TO_INPUT_SHAPE PATH_TO_OUTPUT_CSV')
def main():
# Inspired by http://gis.stackexchange.com/a/19178
    # This is designed to work with the places.shp from Switzerland.
shpfile = sys.argv[1]
csvfile = sys.argv[2]
# Open files
csvfile = open(csvfile,'w')
ds = ogr.Open(shpfile)
lyr = ds.GetLayer()
# Get field names
fields = ['num', 'weight', 'search_string', 'label', 'origin', 'geom_quadindex', 'geom_st_box2d', 'rank', 'x', 'y', 'lat', 'lon']
csvwriter = csv.DictWriter(csvfile, fields)
csvwriter.writeheader()
# Write attributes and kml out to csv
source = osr.SpatialReference()
source.ImportFromEPSG(4326)
target = osr.SpatialReference()
target.ImportFromEPSG(2056)
transform = osr.CoordinateTransformation(source, target)
for i, feat in enumerate(lyr):
attributes = feat.items()
if attributes['name'] is None:
continue
geom = feat.GetGeometryRef()
geom_2056 = geom.Clone()
geom_2056.Transform(transform)
attributes['num'] = attributes.pop('osm_id')
attributes['weight'] = i + 1
# Remove accents in search string. See http://stackoverflow.com/a/15261831
attributes['search_string'] = ''.join((c for c in unicodedata.normalize('NFD', attributes['name']) if unicodedata.category(c) != 'Mn'))
attributes['label'] = attributes.pop('name')
attributes['origin'] = 'places'
attributes['geom_quadindex'] = 0
attributes['geom_st_box2d'] = 'BOX({y} {x}, {y} {x})'.format(y=geom_2056.GetX(), x=geom_2056.GetY())
attributes['rank'] = 10
attributes['x'] = geom_2056.GetY()
attributes['y'] = geom_2056.GetX()
attributes['lat'] = geom.GetY()
attributes['lon'] = geom.GetX()
del attributes['population']
del attributes['type']
csvwriter.writerow(attributes)
#clean up
csvfile.close()
if __name__ == '__main__':
if len(sys.argv) != 3:
usage()
sys.exit(1)
main()
|
<commit_before><commit_msg>Add script to convert ShapeFile to CSV for search
Used by customer infra to provide search.<commit_after>#!/usr/bin/env python3
import csv
import ogr
import osr
import sys
import unicodedata
def usage():
print('shape-to-csv.py PATH_TO_INPUT_SHAPE PATH_TO_OUTPUT_CSV')
def main():
# Inspired by http://gis.stackexchange.com/a/19178
    # This is designed to work with the places.shp from Switzerland.
shpfile = sys.argv[1]
csvfile = sys.argv[2]
# Open files
csvfile = open(csvfile,'w')
ds = ogr.Open(shpfile)
lyr = ds.GetLayer()
# Get field names
fields = ['num', 'weight', 'search_string', 'label', 'origin', 'geom_quadindex', 'geom_st_box2d', 'rank', 'x', 'y', 'lat', 'lon']
csvwriter = csv.DictWriter(csvfile, fields)
csvwriter.writeheader()
# Write attributes and kml out to csv
source = osr.SpatialReference()
source.ImportFromEPSG(4326)
target = osr.SpatialReference()
target.ImportFromEPSG(2056)
transform = osr.CoordinateTransformation(source, target)
for i, feat in enumerate(lyr):
attributes = feat.items()
if attributes['name'] is None:
continue
geom = feat.GetGeometryRef()
geom_2056 = geom.Clone()
geom_2056.Transform(transform)
attributes['num'] = attributes.pop('osm_id')
attributes['weight'] = i + 1
# Remove accents in search string. See http://stackoverflow.com/a/15261831
attributes['search_string'] = ''.join((c for c in unicodedata.normalize('NFD', attributes['name']) if unicodedata.category(c) != 'Mn'))
attributes['label'] = attributes.pop('name')
attributes['origin'] = 'places'
attributes['geom_quadindex'] = 0
attributes['geom_st_box2d'] = 'BOX({y} {x}, {y} {x})'.format(y=geom_2056.GetX(), x=geom_2056.GetY())
attributes['rank'] = 10
attributes['x'] = geom_2056.GetY()
attributes['y'] = geom_2056.GetX()
attributes['lat'] = geom.GetY()
attributes['lon'] = geom.GetX()
del attributes['population']
del attributes['type']
csvwriter.writerow(attributes)
#clean up
csvfile.close()
if __name__ == '__main__':
if len(sys.argv) != 3:
usage()
sys.exit(1)
main()
|
|
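The core of the script above is the osr reprojection from WGS84 into the Swiss LV95 frame (EPSG:2056). Isolated and run on a made-up coordinate it looks like the sketch below. The bare import ogr / import osr mirrors the script (newer GDAL installs expose the same modules as from osgeo import ogr, osr), and the lon/lat order passed to AddPoint is an assumption to check, since the EPSG:4326 axis order changed between GDAL 2 and 3:

import ogr
import osr

source = osr.SpatialReference()
source.ImportFromEPSG(4326)   # WGS84
target = osr.SpatialReference()
target.ImportFromEPSG(2056)   # LV95, the Swiss national grid
transform = osr.CoordinateTransformation(source, target)

point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(7.44, 46.95)   # roughly Bern, given as lon, lat
point.Transform(transform)
print(point.GetX(), point.GetY())  # easting/northing in metres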
ddc0b056af46bf50f3e5c4213f7598d645d378f7
|
openprescribing/matrixstore/tests/test_cachelib.py
|
openprescribing/matrixstore/tests/test_cachelib.py
|
from mock import Mock
import warnings
from django.core.cache import CacheKeyWarning
from django.test import SimpleTestCase, override_settings
from matrixstore.cachelib import memoize
# The local memory cache backend we use in testing warns that our binary cache
# keys won't be compatible with memcached, but we really don't care
warnings.simplefilter("ignore", CacheKeyWarning)
class MyTestObject:
cache_key = None
value = "hello"
@override_settings(
CACHES={"default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"}}
)
class MemoizeDecoratorTest(SimpleTestCase):
def test_cached_function_with_basic_arguments(self):
test_func = Mock(
side_effect=lambda *args, **kwargs: (args, kwargs), __qualname__="test_func"
)
cached_func = memoize()(test_func)
result = cached_func("bar", foo=12, flag=True)
self.assertEqual(result, (("bar",), {"foo": 12, "flag": True}))
result2 = cached_func("bar", foo=12, flag=True)
self.assertEqual(result2, result)
test_func.assert_called_once_with("bar", foo=12, flag=True)
def test_non_basic_arguments_with_cache_key_attr(self):
test_func = Mock(side_effect=lambda arg: arg.value, __qualname__="test_func2")
cached_func = memoize()(test_func)
# Make an object to use as an argument and give it a cache key
test_arg = MyTestObject()
test_arg.cache_key = b"123556789"
result = cached_func(test_arg)
self.assertEqual(result, "hello")
result2 = cached_func(test_arg)
self.assertEqual(result2, result)
test_func.assert_called_once_with(test_arg)
# Make a new argument with a different cache_key
new_test_arg = MyTestObject()
new_test_arg.cache_key = b"987654321"
cached_func(new_test_arg)
# Check that this results in a new call to the wrapped function
test_func.assert_called_with(new_test_arg)
self.assertEqual(test_func.call_count, 2)
def test_non_basic_arguments_without_cache_key_raise_error(self):
def test_func(arg):
return "foo"
cached_func = memoize()(test_func)
some_dict_arg = {}
with self.assertRaises(ValueError):
cached_func(some_dict_arg)
# This object should have a cache_key attribute but without a value so
# it should still raise an error
test_arg = MyTestObject()
with self.assertRaises(ValueError):
cached_func(test_arg)
|
Add tests for cachelib decorator
|
Add tests for cachelib decorator
|
Python
|
mit
|
ebmdatalab/openprescribing,annapowellsmith/openpresc,annapowellsmith/openpresc,annapowellsmith/openpresc,annapowellsmith/openpresc,ebmdatalab/openprescribing,ebmdatalab/openprescribing,ebmdatalab/openprescribing
|
Add tests for cachelib decorator
|
from mock import Mock
import warnings
from django.core.cache import CacheKeyWarning
from django.test import SimpleTestCase, override_settings
from matrixstore.cachelib import memoize
# The local memory cache backend we use in testing warns that our binary cache
# keys won't be compatible with memcached, but we really don't care
warnings.simplefilter("ignore", CacheKeyWarning)
class MyTestObject:
cache_key = None
value = "hello"
@override_settings(
CACHES={"default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"}}
)
class MemoizeDecoratorTest(SimpleTestCase):
def test_cached_function_with_basic_arguments(self):
test_func = Mock(
side_effect=lambda *args, **kwargs: (args, kwargs), __qualname__="test_func"
)
cached_func = memoize()(test_func)
result = cached_func("bar", foo=12, flag=True)
self.assertEqual(result, (("bar",), {"foo": 12, "flag": True}))
result2 = cached_func("bar", foo=12, flag=True)
self.assertEqual(result2, result)
test_func.assert_called_once_with("bar", foo=12, flag=True)
def test_non_basic_arguments_with_cache_key_attr(self):
test_func = Mock(side_effect=lambda arg: arg.value, __qualname__="test_func2")
cached_func = memoize()(test_func)
# Make an object to use as an argument and give it a cache key
test_arg = MyTestObject()
test_arg.cache_key = b"123556789"
result = cached_func(test_arg)
self.assertEqual(result, "hello")
result2 = cached_func(test_arg)
self.assertEqual(result2, result)
test_func.assert_called_once_with(test_arg)
# Make a new argument with a different cache_key
new_test_arg = MyTestObject()
new_test_arg.cache_key = b"987654321"
cached_func(new_test_arg)
# Check that this results in a new call to the wrapped function
test_func.assert_called_with(new_test_arg)
self.assertEqual(test_func.call_count, 2)
def test_non_basic_arguments_without_cache_key_raise_error(self):
def test_func(arg):
return "foo"
cached_func = memoize()(test_func)
some_dict_arg = {}
with self.assertRaises(ValueError):
cached_func(some_dict_arg)
# This object should have a cache_key attribute but without a value so
# it should still raise an error
test_arg = MyTestObject()
with self.assertRaises(ValueError):
cached_func(test_arg)
|
<commit_before><commit_msg>Add tests for cachelib decorator<commit_after>
|
from mock import Mock
import warnings
from django.core.cache import CacheKeyWarning
from django.test import SimpleTestCase, override_settings
from matrixstore.cachelib import memoize
# The local memory cache backend we use in testing warns that our binary cache
# keys won't be compatible with memcached, but we really don't care
warnings.simplefilter("ignore", CacheKeyWarning)
class MyTestObject:
cache_key = None
value = "hello"
@override_settings(
CACHES={"default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"}}
)
class MemoizeDecoratorTest(SimpleTestCase):
def test_cached_function_with_basic_arguments(self):
test_func = Mock(
side_effect=lambda *args, **kwargs: (args, kwargs), __qualname__="test_func"
)
cached_func = memoize()(test_func)
result = cached_func("bar", foo=12, flag=True)
self.assertEqual(result, (("bar",), {"foo": 12, "flag": True}))
result2 = cached_func("bar", foo=12, flag=True)
self.assertEqual(result2, result)
test_func.assert_called_once_with("bar", foo=12, flag=True)
def test_non_basic_arguments_with_cache_key_attr(self):
test_func = Mock(side_effect=lambda arg: arg.value, __qualname__="test_func2")
cached_func = memoize()(test_func)
# Make an object to use as an argument and give it a cache key
test_arg = MyTestObject()
test_arg.cache_key = b"123556789"
result = cached_func(test_arg)
self.assertEqual(result, "hello")
result2 = cached_func(test_arg)
self.assertEqual(result2, result)
test_func.assert_called_once_with(test_arg)
# Make a new argument with a different cache_key
new_test_arg = MyTestObject()
new_test_arg.cache_key = b"987654321"
cached_func(new_test_arg)
# Check that this results in a new call to the wrapped function
test_func.assert_called_with(new_test_arg)
self.assertEqual(test_func.call_count, 2)
def test_non_basic_arguments_without_cache_key_raise_error(self):
def test_func(arg):
return "foo"
cached_func = memoize()(test_func)
some_dict_arg = {}
with self.assertRaises(ValueError):
cached_func(some_dict_arg)
# This object should have a cache_key attribute but without a value so
# it should still raise an error
test_arg = MyTestObject()
with self.assertRaises(ValueError):
cached_func(test_arg)
|
Add tests for cachelib decoratorfrom mock import Mock
import warnings
from django.core.cache import CacheKeyWarning
from django.test import SimpleTestCase, override_settings
from matrixstore.cachelib import memoize
# The local memory cache backend we use in testing warns that our binary cache
# keys won't be compatible with memcached, but we really don't care
warnings.simplefilter("ignore", CacheKeyWarning)
class MyTestObject:
cache_key = None
value = "hello"
@override_settings(
CACHES={"default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"}}
)
class MemoizeDecoratorTest(SimpleTestCase):
def test_cached_function_with_basic_arguments(self):
test_func = Mock(
side_effect=lambda *args, **kwargs: (args, kwargs), __qualname__="test_func"
)
cached_func = memoize()(test_func)
result = cached_func("bar", foo=12, flag=True)
self.assertEqual(result, (("bar",), {"foo": 12, "flag": True}))
result2 = cached_func("bar", foo=12, flag=True)
self.assertEqual(result2, result)
test_func.assert_called_once_with("bar", foo=12, flag=True)
def test_non_basic_arguments_with_cache_key_attr(self):
test_func = Mock(side_effect=lambda arg: arg.value, __qualname__="test_func2")
cached_func = memoize()(test_func)
# Make an object to use as an argument and give it a cache key
test_arg = MyTestObject()
test_arg.cache_key = b"123556789"
result = cached_func(test_arg)
self.assertEqual(result, "hello")
result2 = cached_func(test_arg)
self.assertEqual(result2, result)
test_func.assert_called_once_with(test_arg)
# Make a new argument with a different cache_key
new_test_arg = MyTestObject()
new_test_arg.cache_key = b"987654321"
cached_func(new_test_arg)
# Check that this results in a new call to the wrapped function
test_func.assert_called_with(new_test_arg)
self.assertEqual(test_func.call_count, 2)
def test_non_basic_arguments_without_cache_key_raise_error(self):
def test_func(arg):
return "foo"
cached_func = memoize()(test_func)
some_dict_arg = {}
with self.assertRaises(ValueError):
cached_func(some_dict_arg)
# This object should have a cache_key attribute but without a value so
# it should still raise an error
test_arg = MyTestObject()
with self.assertRaises(ValueError):
cached_func(test_arg)
|
<commit_before><commit_msg>Add tests for cachelib decorator<commit_after>from mock import Mock
import warnings
from django.core.cache import CacheKeyWarning
from django.test import SimpleTestCase, override_settings
from matrixstore.cachelib import memoize
# The local memory cache backend we use in testing warns that our binary cache
# keys won't be compatible with memcached, but we really don't care
warnings.simplefilter("ignore", CacheKeyWarning)
class MyTestObject:
cache_key = None
value = "hello"
@override_settings(
CACHES={"default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"}}
)
class MemoizeDecoratorTest(SimpleTestCase):
def test_cached_function_with_basic_arguments(self):
test_func = Mock(
side_effect=lambda *args, **kwargs: (args, kwargs), __qualname__="test_func"
)
cached_func = memoize()(test_func)
result = cached_func("bar", foo=12, flag=True)
self.assertEqual(result, (("bar",), {"foo": 12, "flag": True}))
result2 = cached_func("bar", foo=12, flag=True)
self.assertEqual(result2, result)
test_func.assert_called_once_with("bar", foo=12, flag=True)
def test_non_basic_arguments_with_cache_key_attr(self):
test_func = Mock(side_effect=lambda arg: arg.value, __qualname__="test_func2")
cached_func = memoize()(test_func)
# Make an object to use as an argument and give it a cache key
test_arg = MyTestObject()
test_arg.cache_key = b"123556789"
result = cached_func(test_arg)
self.assertEqual(result, "hello")
result2 = cached_func(test_arg)
self.assertEqual(result2, result)
test_func.assert_called_once_with(test_arg)
# Make a new argument with a different cache_key
new_test_arg = MyTestObject()
new_test_arg.cache_key = b"987654321"
cached_func(new_test_arg)
# Check that this results in a new call to the wrapped function
test_func.assert_called_with(new_test_arg)
self.assertEqual(test_func.call_count, 2)
def test_non_basic_arguments_without_cache_key_raise_error(self):
def test_func(arg):
return "foo"
cached_func = memoize()(test_func)
some_dict_arg = {}
with self.assertRaises(ValueError):
cached_func(some_dict_arg)
# This object should have a cache_key attribute but without a value so
# it should still raise an error
test_arg = MyTestObject()
with self.assertRaises(ValueError):
cached_func(test_arg)
|
|
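The record above only ships the tests; matrixstore.cachelib itself is not included. For orientation, a minimal sketch of a decorator that would satisfy the tested contract (hash basic arguments into the key, require a non-empty cache_key on anything else, raise ValueError otherwise) could look like the following. The key layout and helper names are assumptions, not the project's actual implementation, and a configured Django cache backend is assumed.

# Sketch only: an assumed implementation matching the behaviour exercised by
# the tests above; it is NOT the actual matrixstore.cachelib code.
import hashlib
from django.core.cache import cache

BASIC_TYPES = (str, bytes, int, float, bool, type(None))
_SENTINEL = object()

def _key_fragment(value):
    # Basic values are keyed on their repr(); anything else must expose a
    # non-empty cache_key attribute, mirroring the ValueError cases tested.
    if isinstance(value, BASIC_TYPES):
        return repr(value).encode("utf8")
    cache_key = getattr(value, "cache_key", None)
    if not cache_key:
        raise ValueError("Argument %r has no usable cache_key" % (value,))
    return cache_key

def memoize():
    def decorator(func):
        def wrapper(*args, **kwargs):
            fragments = [func.__qualname__.encode("utf8")]
            fragments.extend(_key_fragment(a) for a in args)
            for name in sorted(kwargs):
                fragments.append(name.encode("utf8"))
                fragments.append(_key_fragment(kwargs[name]))
            key = hashlib.sha1(b"\x00".join(fragments)).hexdigest()
            result = cache.get(key, _SENTINEL)
            if result is _SENTINEL:
                result = func(*args, **kwargs)
                cache.set(key, result)
            return result
        return wrapper
    return decorator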
e02679577219cea30896532be9ba6aea457e2454
|
mixpanel_query/paginator.py
|
mixpanel_query/paginator.py
|
import math
import itertools
from multiprocessing.pool import ThreadPool
class ConcurrentPaginator(object):
"""
Concurrently fetches all pages in a paginated collection.
Currently, only the people API (`/api/2.0/engage`) supports pagination.
This class is designed to support the people API's implementation of
pagination.
"""
def __init__(self, get_func, concurrency=20):
"""
Initialize with a function that fetches a page of results.
`concurrency` controls the number of threads used to fetch pages.
Example:
client = MixpanelQueryClient(...)
ConcurrentPaginator(client.get_engage, concurrency=10)
"""
self.get_func = get_func
self.concurrency = concurrency
def fetch_all(self, params=None):
"""
Fetch all results from all pages, and return as a list.
If params need to be sent with each request (in addition to the
pagination) params, they may be passed in via the `params` kwarg.
"""
params = params and params.copy() or {}
first_page = self.get_func(**params)
results = first_page['results']
params['session_id'] = first_page['session_id']
start, end = self._remaining_page_range(first_page)
fetcher = self._results_fetcher(params)
return results + self._concurrent_flatmap(fetcher, range(start, end))
def _fetch(self, method, params=None):
return self.client.get_engage(params)
def _results_fetcher(self, params):
def _fetcher_func(page):
req_params = dict(params.items() + [('page', page)])
return self.get_func(**req_params)['results']
return _fetcher_func
def _concurrent_flatmap(self, func, iterable):
pool = ThreadPool(processes=self.concurrency)
return list(itertools.chain(*pool.map(func, iterable)))
def _remaining_page_range(self, response):
num_pages = math.ceil(response['total'] / float(response['page_size']))
return (response['page'] + 1, int(num_pages))
|
Add ConcurrentPaginator (for the engage API)
|
Add ConcurrentPaginator (for the engage API)
|
Python
|
mit
|
cooncesean/mixpanel-query-py
|
Add ConcurrentPaginator (for the engage API)
|
import math
import itertools
from multiprocessing.pool import ThreadPool
class ConcurrentPaginator(object):
"""
Concurrently fetches all pages in a paginated collection.
Currently, only the people API (`/api/2.0/engage`) supports pagination.
This class is designed to support the people API's implementation of
pagination.
"""
def __init__(self, get_func, concurrency=20):
"""
Initialize with a function that fetches a page of results.
`concurrency` controls the number of threads used to fetch pages.
Example:
client = MixpanelQueryClient(...)
ConcurrentPaginator(client.get_engage, concurrency=10)
"""
self.get_func = get_func
self.concurrency = concurrency
def fetch_all(self, params=None):
"""
Fetch all results from all pages, and return as a list.
If params need to be sent with each request (in addition to the
pagination) params, they may be passed in via the `params` kwarg.
"""
params = params and params.copy() or {}
first_page = self.get_func(**params)
results = first_page['results']
params['session_id'] = first_page['session_id']
start, end = self._remaining_page_range(first_page)
fetcher = self._results_fetcher(params)
return results + self._concurrent_flatmap(fetcher, range(start, end))
def _fetch(self, method, params=None):
return self.client.get_engage(params)
def _results_fetcher(self, params):
def _fetcher_func(page):
req_params = dict(params.items() + [('page', page)])
return self.get_func(**req_params)['results']
return _fetcher_func
def _concurrent_flatmap(self, func, iterable):
pool = ThreadPool(processes=self.concurrency)
return list(itertools.chain(*pool.map(func, iterable)))
def _remaining_page_range(self, response):
num_pages = math.ceil(response['total'] / float(response['page_size']))
return (response['page'] + 1, int(num_pages))
|
<commit_before><commit_msg>Add ConcurrentPaginator (for the engage API)<commit_after>
|
import math
import itertools
from multiprocessing.pool import ThreadPool
class ConcurrentPaginator(object):
"""
Concurrently fetches all pages in a paginated collection.
Currently, only the people API (`/api/2.0/engage`) supports pagination.
This class is designed to support the people API's implementation of
pagination.
"""
def __init__(self, get_func, concurrency=20):
"""
Initialize with a function that fetches a page of results.
`concurrency` controls the number of threads used to fetch pages.
Example:
client = MixpanelQueryClient(...)
ConcurrentPaginator(client.get_engage, concurrency=10)
"""
self.get_func = get_func
self.concurrency = concurrency
def fetch_all(self, params=None):
"""
Fetch all results from all pages, and return as a list.
If params need to be sent with each request (in addition to the
pagination) params, they may be passed in via the `params` kwarg.
"""
params = params and params.copy() or {}
first_page = self.get_func(**params)
results = first_page['results']
params['session_id'] = first_page['session_id']
start, end = self._remaining_page_range(first_page)
fetcher = self._results_fetcher(params)
return results + self._concurrent_flatmap(fetcher, range(start, end))
def _fetch(self, method, params=None):
return self.client.get_engage(params)
def _results_fetcher(self, params):
def _fetcher_func(page):
req_params = dict(params.items() + [('page', page)])
return self.get_func(**req_params)['results']
return _fetcher_func
def _concurrent_flatmap(self, func, iterable):
pool = ThreadPool(processes=self.concurrency)
return list(itertools.chain(*pool.map(func, iterable)))
def _remaining_page_range(self, response):
num_pages = math.ceil(response['total'] / float(response['page_size']))
return (response['page'] + 1, int(num_pages))
|
Add ConcurrentPaginator (for the engage API)
import math
import itertools
from multiprocessing.pool import ThreadPool
class ConcurrentPaginator(object):
"""
Concurrently fetches all pages in a paginated collection.
Currently, only the people API (`/api/2.0/engage`) supports pagination.
This class is designed to support the people API's implementation of
pagination.
"""
def __init__(self, get_func, concurrency=20):
"""
Initialize with a function that fetches a page of results.
`concurrency` controls the number of threads used to fetch pages.
Example:
client = MixpanelQueryClient(...)
ConcurrentPaginator(client.get_engage, concurrency=10)
"""
self.get_func = get_func
self.concurrency = concurrency
def fetch_all(self, params=None):
"""
Fetch all results from all pages, and return as a list.
If params need to be sent with each request (in addition to the
pagination) params, they may be passed in via the `params` kwarg.
"""
params = params and params.copy() or {}
first_page = self.get_func(**params)
results = first_page['results']
params['session_id'] = first_page['session_id']
start, end = self._remaining_page_range(first_page)
fetcher = self._results_fetcher(params)
return results + self._concurrent_flatmap(fetcher, range(start, end))
def _fetch(self, method, params=None):
return self.client.get_engage(params)
def _results_fetcher(self, params):
def _fetcher_func(page):
req_params = dict(params.items() + [('page', page)])
return self.get_func(**req_params)['results']
return _fetcher_func
def _concurrent_flatmap(self, func, iterable):
pool = ThreadPool(processes=self.concurrency)
return list(itertools.chain(*pool.map(func, iterable)))
def _remaining_page_range(self, response):
num_pages = math.ceil(response['total'] / float(response['page_size']))
return (response['page'] + 1, int(num_pages))
|
<commit_before><commit_msg>Add ConcurrentPaginator (for the engage API)<commit_after>import math
import itertools
from multiprocessing.pool import ThreadPool
class ConcurrentPaginator(object):
"""
Concurrently fetches all pages in a paginated collection.
Currently, only the people API (`/api/2.0/engage`) supports pagination.
This class is designed to support the people API's implementation of
pagination.
"""
def __init__(self, get_func, concurrency=20):
"""
Initialize with a function that fetches a page of results.
`concurrency` controls the number of threads used to fetch pages.
Example:
client = MixpanelQueryClient(...)
ConcurrentPaginator(client.get_engage, concurrency=10)
"""
self.get_func = get_func
self.concurrency = concurrency
def fetch_all(self, params=None):
"""
Fetch all results from all pages, and return as a list.
If params need to be sent with each request (in addition to the
pagination) params, they may be passed in via the `params` kwarg.
"""
params = params and params.copy() or {}
first_page = self.get_func(**params)
results = first_page['results']
params['session_id'] = first_page['session_id']
start, end = self._remaining_page_range(first_page)
fetcher = self._results_fetcher(params)
return results + self._concurrent_flatmap(fetcher, range(start, end))
def _fetch(self, method, params=None):
return self.client.get_engage(params)
def _results_fetcher(self, params):
def _fetcher_func(page):
req_params = dict(params.items() + [('page', page)])
return self.get_func(**req_params)['results']
return _fetcher_func
def _concurrent_flatmap(self, func, iterable):
pool = ThreadPool(processes=self.concurrency)
return list(itertools.chain(*pool.map(func, iterable)))
def _remaining_page_range(self, response):
num_pages = math.ceil(response['total'] / float(response['page_size']))
return (response['page'] + 1, int(num_pages))
|
|
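The paginator above targets Python 2 (dict(params.items() + ...) would fail on Python 3), so rather than importing it directly, the self-contained sketch below reproduces the same fetch-the-first-page-then-flat-map-the-rest pattern against a fake in-memory endpoint. fake_engage and the record layout are invented for illustration and stand in for MixpanelQueryClient.get_engage.

# Self-contained illustration of the pagination pattern used above.
import itertools
import math
from multiprocessing.pool import ThreadPool

RECORDS = [{"id": i} for i in range(23)]
PAGE_SIZE = 10

def fake_engage(page=0, session_id=None, **params):
    start = page * PAGE_SIZE
    return {
        "results": RECORDS[start:start + PAGE_SIZE],
        "page": page,
        "page_size": PAGE_SIZE,
        "total": len(RECORDS),
        "session_id": "abc123",
    }

def fetch_all(get_func, concurrency=4):
    first = get_func()
    params = {"session_id": first["session_id"]}
    num_pages = int(math.ceil(first["total"] / float(first["page_size"])))
    remaining = range(first["page"] + 1, num_pages)
    pool = ThreadPool(processes=concurrency)
    pages = pool.map(lambda p: get_func(page=p, **params)["results"], remaining)
    return first["results"] + list(itertools.chain(*pages))

assert len(fetch_all(fake_engage)) == len(RECORDS)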
b224a281a11cdc4d0c632e451c217be34dccadcc
|
app/main/forms/suppliers.py
|
app/main/forms/suppliers.py
|
from flask.ext.wtf import Form
from wtforms import IntegerField, StringField, FieldList
from wtforms.validators import DataRequired, Email
class EditSupplierForm(Form):
description = StringField()
clients = FieldList(StringField(), max_entries=10)
class EditContactInformationForm(Form):
id = IntegerField()
address1 = StringField('Business address')
address2 = StringField('Business address')
city = StringField('Town or city')
country = StringField()
postcode = StringField(validators=[
DataRequired(message="Postcode can not be empty"),
])
website = StringField()
phoneNumber = StringField('Phone number')
email = StringField('Email address', validators=[
DataRequired(message="Email can not be empty"),
Email(message="Please enter a valid email address")
])
contactName = StringField('Contact name', validators=[
DataRequired(message="Contact name can not be empty"),
])
|
Add forms for supplier and contact information
|
Add forms for supplier and contact information
Forms allow us to validate basic field requirements before making
a request to the API, since the API doesn't return per-field errors
at the moment.
Supplier and contact information have separate forms, since they
require separate API requests to save the data and might end up
with separate views in the future.
|
Python
|
mit
|
mtekel/digitalmarketplace-supplier-frontend,mtekel/digitalmarketplace-supplier-frontend,mtekel/digitalmarketplace-supplier-frontend,mtekel/digitalmarketplace-supplier-frontend,alphagov/digitalmarketplace-supplier-frontend,alphagov/digitalmarketplace-supplier-frontend,alphagov/digitalmarketplace-supplier-frontend,alphagov/digitalmarketplace-supplier-frontend
|
Add forms for supplier and contact information
Forms allow us to validate basic field requirements before making
a request to the API, since the API doesn't return per-field errors
at the moment.
Supplier and contact information have separate forms, since they
require separate API requests to save the data and might end up
with separate views in the future.
|
from flask.ext.wtf import Form
from wtforms import IntegerField, StringField, FieldList
from wtforms.validators import DataRequired, Email
class EditSupplierForm(Form):
description = StringField()
clients = FieldList(StringField(), max_entries=10)
class EditContactInformationForm(Form):
id = IntegerField()
address1 = StringField('Business address')
address2 = StringField('Business address')
city = StringField('Town or city')
country = StringField()
postcode = StringField(validators=[
DataRequired(message="Postcode can not be empty"),
])
website = StringField()
phoneNumber = StringField('Phone number')
email = StringField('Email address', validators=[
DataRequired(message="Email can not be empty"),
Email(message="Please enter a valid email address")
])
contactName = StringField('Contact name', validators=[
DataRequired(message="Contact name can not be empty"),
])
|
<commit_before><commit_msg>Add forms for supplier and contact information
Forms allow us to validate basic field requirements before making
a request to the API, since the API doesn't return per-field errors
at the moment.
Supplier and contact information have separate forms, since they
require separate API requests to save the data and might end up
with separate views in the future.<commit_after>
|
from flask.ext.wtf import Form
from wtforms import IntegerField, StringField, FieldList
from wtforms.validators import DataRequired, Email
class EditSupplierForm(Form):
description = StringField()
clients = FieldList(StringField(), max_entries=10)
class EditContactInformationForm(Form):
id = IntegerField()
address1 = StringField('Business address')
address2 = StringField('Business address')
city = StringField('Town or city')
country = StringField()
postcode = StringField(validators=[
DataRequired(message="Postcode can not be empty"),
])
website = StringField()
phoneNumber = StringField('Phone number')
email = StringField('Email address', validators=[
DataRequired(message="Email can not be empty"),
Email(message="Please enter a valid email address")
])
contactName = StringField('Contact name', validators=[
DataRequired(message="Contact name can not be empty"),
])
|
Add forms for supplier and contact information
Forms allow us to validate basic field requirements before making
a request to the API, since the API doesn't return per-field errors
at the moment.
Supplier and contact information have separate forms, since they
require separate API requests to save the data and might end up
with separate views in the future.
from flask.ext.wtf import Form
from wtforms import IntegerField, StringField, FieldList
from wtforms.validators import DataRequired, Email
class EditSupplierForm(Form):
description = StringField()
clients = FieldList(StringField(), max_entries=10)
class EditContactInformationForm(Form):
id = IntegerField()
address1 = StringField('Business address')
address2 = StringField('Business address')
city = StringField('Town or city')
country = StringField()
postcode = StringField(validators=[
DataRequired(message="Postcode can not be empty"),
])
website = StringField()
phoneNumber = StringField('Phone number')
email = StringField('Email address', validators=[
DataRequired(message="Email can not be empty"),
Email(message="Please enter a valid email address")
])
contactName = StringField('Contact name', validators=[
DataRequired(message="Contact name can not be empty"),
])
|
<commit_before><commit_msg>Add forms for supplier and contact information
Forms allow us to validate basic field requirements before making
a request to the API, since the API doesn't return per-field errors
at the moment.
Supplier and contact information have separate forms, since they
require separate API requests to save the data and might end up
with separate views in the future.<commit_after>from flask.ext.wtf import Form
from wtforms import IntegerField, StringField, FieldList
from wtforms.validators import DataRequired, Email
class EditSupplierForm(Form):
description = StringField()
clients = FieldList(StringField(), max_entries=10)
class EditContactInformationForm(Form):
id = IntegerField()
address1 = StringField('Business address')
address2 = StringField('Business address')
city = StringField('Town or city')
country = StringField()
postcode = StringField(validators=[
DataRequired(message="Postcode can not be empty"),
])
website = StringField()
phoneNumber = StringField('Phone number')
email = StringField('Email address', validators=[
DataRequired(message="Email can not be empty"),
Email(message="Please enter a valid email address")
])
contactName = StringField('Contact name', validators=[
DataRequired(message="Contact name can not be empty"),
])
|
|
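The forms above exist purely to validate fields before the API call. To see one of the validators in isolation, the sketch below uses plain wtforms rather than flask_wtf (which adds CSRF handling and needs an application context), so it is an approximation for demonstration, not how the frontend app itself runs the form.

# Standalone look at the DataRequired behaviour using plain wtforms.
from wtforms import Form, StringField
from wtforms.validators import DataRequired

class PostcodeForm(Form):
    postcode = StringField(validators=[
        DataRequired(message="Postcode can not be empty"),
    ])

form = PostcodeForm()           # no form data supplied
print(form.validate())          # False
print(form.errors["postcode"])  # ['Postcode can not be empty']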
48a03c6e593e647e52a96aabcfc8ac02d7c93773
|
tfx/examples/imdb/imdb_fetch_data.py
|
tfx/examples/imdb/imdb_fetch_data.py
|
import os
import pandas as pd
import tensorflow_datasets as tfds
# Example use in another file of this directory:
# import imdb_fetch_data as full_data
# full_data.fetch_data()
def fetch_data():
"""This downloads the full dataset to pwd/data/imdb.csv"""
ds = tfds.load('imdb_reviews', split='train+test')
numpy_ds = tfds.as_numpy(ds)
df = pd.DataFrame(numpy_ds)
df['text'] = df['text'].str.decode("utf-8")
dst_path = os.getcwd() + '/data/imdb.csv'
df.to_csv(dst_path, index=False)
if __name__ == '__main__':
fetch_data()
|
Add fetch data py script
|
Add fetch data py script
Add .py file with code to fetch data defined in function fetch_data()
|
Python
|
apache-2.0
|
tensorflow/tfx,tensorflow/tfx
|
Add fetch data py script
Add .py file with code to fetch data defined in function fetch_data()
|
import os
import pandas as pd
import tensorflow_datasets as tfds
# Example use in another file of this directory:
# import imdb_fetch_data as full_data
# full_data.fetch_data()
def fetch_data():
"""This downloads the full dataset to pwd/data/imdb.csv"""
ds = tfds.load('imdb_reviews', split='train+test')
numpy_ds = tfds.as_numpy(ds)
df = pd.DataFrame(numpy_ds)
df['text'] = df['text'].str.decode("utf-8")
dst_path = os.getcwd() + '/data/imdb.csv'
df.to_csv(dst_path, index=False)
if __name__ == '__main__':
fetch_data()
|
<commit_before><commit_msg>Add fetch data py script
Add .py file with code to fetch data defined in function fetch_data()<commit_after>
|
import os
import pandas as pd
import tensorflow_datasets as tfds
# Example use in another file of this directory:
# import imdb_fetch_data as full_data
# full_data.fetch_data()
def fetch_data():
"""This downloads the full dataset to pwd/data/imdb.csv"""
ds = tfds.load('imdb_reviews', split='train+test')
numpy_ds = tfds.as_numpy(ds)
df = pd.DataFrame(numpy_ds)
df['text'] = df['text'].str.decode("utf-8")
dst_path = os.getcwd() + '/data/imdb.csv'
df.to_csv(dst_path, index=False)
if __name__ == '__main__':
fetch_data()
|
Add fetch data py script
Add .py file with code to fetch data defined in function fetch_data()
import os
import pandas as pd
import tensorflow_datasets as tfds
# Example use in another file of this directory:
# import imdb_fetch_data as full_data
# full_data.fetch_data()
def fetch_data():
"""This downloads the full dataset to pwd/data/imdb.csv"""
ds = tfds.load('imdb_reviews', split='train+test')
numpy_ds = tfds.as_numpy(ds)
df = pd.DataFrame(numpy_ds)
df['text'] = df['text'].str.decode("utf-8")
dst_path = os.getcwd() + '/data/imdb.csv'
df.to_csv(dst_path, index=False)
if __name__ == '__main__':
fetch_data()
|
<commit_before><commit_msg>Add fetch data py script
Add .py file with code to fetch data defined in function fetch_data()<commit_after>import os
import pandas as pd
import tensorflow_datasets as tfds
# Example use in another file of this directory:
# import imdb_fetch_data as full_data
# full_data.fetch_data()
def fetch_data():
"""This downloads the full dataset to pwd/data/imdb.csv"""
ds = tfds.load('imdb_reviews', split='train+test')
numpy_ds = tfds.as_numpy(ds)
df = pd.DataFrame(numpy_ds)
df['text'] = df['text'].str.decode("utf-8")
dst_path = os.getcwd() + '/data/imdb.csv'
df.to_csv(dst_path, index=False)
if __name__ == '__main__':
fetch_data()
|
|
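The str.decode step in fetch_data is only needed because tfds.as_numpy yields byte strings. A tiny offline example of the same decode on a hand-built DataFrame (no TFDS download required):

# Offline illustration of the bytes-to-str step, independent of TFDS.
import pandas as pd

df = pd.DataFrame({"label": [0, 1], "text": [b"great movie", b"terrible"]})
df["text"] = df["text"].str.decode("utf-8")
assert df["text"].tolist() == ["great movie", "terrible"]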
4d14acb3f805c26f1d917639f82974a006df9d4a
|
2048/test_2048.py
|
2048/test_2048.py
|
from __future__ import print_function
import numpy as np
import math
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential, model_from_json
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
import csv
import sys
nb_train_samples = 48355
def load_data(csvf):
X=np.zeros((nb_train_samples, 1, 4, 4), dtype="uint16")
Y=[]
i=0
with open(csvf, 'rb') as f:
for l in csv.reader(f):
if len(l)<3: continue
Y.append(int(l[0]))
X[i,0,:,:] = np.reshape([int(j) for j in l[2:]], (4,4))
i+=1
Y=np.reshape(Y, (len(Y), 1))
return (X, Y)
# the data, shuffled and split between train and test sets
(X_train, y_train) = load_data(sys.argv[1])
ll=np.vectorize(lambda x:math.log(x+1))
#X_train = X_train.reshape(X_train.shape[0], 1, 4, 4)
X_train = ll(X_train.astype('float32'))
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
Y_train = y_train.astype('float32')
model = model_from_json(open('2048_model.json', 'rb').read())
model.load_weights('2048_weights.h5')
#score = model.evaluate(X_train, y_train, batch_size=16)
print(X_train)
score = model.predict(X_train, batch_size=1)
print(score)
|
Test qlearning4k result for 2048
|
Test qlearning4k result for 2048
|
Python
|
mit
|
choupi/NDHUDLWorkshop
|
Test qlearning4k result for 2048
|
from __future__ import print_function
import numpy as np
import math
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential, model_from_json
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
import csv
import sys
nb_train_samples = 48355
def load_data(csvf):
X=np.zeros((nb_train_samples, 1, 4, 4), dtype="uint16")
Y=[]
i=0
with open(csvf, 'rb') as f:
for l in csv.reader(f):
if len(l)<3: continue
Y.append(int(l[0]))
X[i,0,:,:] = np.reshape([int(j) for j in l[2:]], (4,4))
i+=1
Y=np.reshape(Y, (len(Y), 1))
return (X, Y)
# the data, shuffled and split between train and test sets
(X_train, y_train) = load_data(sys.argv[1])
ll=np.vectorize(lambda x:math.log(x+1))
#X_train = X_train.reshape(X_train.shape[0], 1, 4, 4)
X_train = ll(X_train.astype('float32'))
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
Y_train = y_train.astype('float32')
model = model_from_json(open('2048_model.json', 'rb').read())
model.load_weights('2048_weights.h5')
#score = model.evaluate(X_train, y_train, batch_size=16)
print(X_train)
score = model.predict(X_train, batch_size=1)
print(score)
|
<commit_before><commit_msg>Test qlearning4k result for 2048<commit_after>
|
from __future__ import print_function
import numpy as np
import math
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential, model_from_json
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
import csv
import sys
nb_train_samples = 48355
def load_data(csvf):
X=np.zeros((nb_train_samples, 1, 4, 4), dtype="uint16")
Y=[]
i=0
with open(csvf, 'rb') as f:
for l in csv.reader(f):
if len(l)<3: continue
Y.append(int(l[0]))
X[i,0,:,:] = np.reshape([int(j) for j in l[2:]], (4,4))
i+=1
Y=np.reshape(Y, (len(Y), 1))
return (X, Y)
# the data, shuffled and split between train and test sets
(X_train, y_train) = load_data(sys.argv[1])
ll=np.vectorize(lambda x:math.log(x+1))
#X_train = X_train.reshape(X_train.shape[0], 1, 4, 4)
X_train = ll(X_train.astype('float32'))
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
Y_train = y_train.astype('float32')
model = model_from_json(open('2048_model.json', 'rb').read())
model.load_weights('2048_weights.h5')
#score = model.evaluate(X_train, y_train, batch_size=16)
print(X_train)
score = model.predict(X_train, batch_size=1)
print(score)
|
Test qlearning4k result for 2048
from __future__ import print_function
import numpy as np
import math
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential, model_from_json
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
import csv
import sys
nb_train_samples = 48355
def load_data(csvf):
X=np.zeros((nb_train_samples, 1, 4, 4), dtype="uint16")
Y=[]
i=0
with open(csvf, 'rb') as f:
for l in csv.reader(f):
if len(l)<3: continue
Y.append(int(l[0]))
X[i,0,:,:] = np.reshape([int(j) for j in l[2:]], (4,4))
i+=1
Y=np.reshape(Y, (len(Y), 1))
return (X, Y)
# the data, shuffled and split between train and test sets
(X_train, y_train) = load_data(sys.argv[1])
ll=np.vectorize(lambda x:math.log(x+1))
#X_train = X_train.reshape(X_train.shape[0], 1, 4, 4)
X_train = ll(X_train.astype('float32'))
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
Y_train = y_train.astype('float32')
model = model_from_json(open('2048_model.json', 'rb').read())
model.load_weights('2048_weights.h5')
#score = model.evaluate(X_train, y_train, batch_size=16)
print(X_train)
score = model.predict(X_train, batch_size=1)
print(score)
|
<commit_before><commit_msg>Test qlearning4k result for 2048<commit_after>from __future__ import print_function
import numpy as np
import math
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential, model_from_json
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
import csv
import sys
nb_train_samples = 48355
def load_data(csvf):
X=np.zeros((nb_train_samples, 1, 4, 4), dtype="uint16")
Y=[]
i=0
with open(csvf, 'rb') as f:
for l in csv.reader(f):
if len(l)<3: continue
Y.append(int(l[0]))
X[i,0,:,:] = np.reshape([int(j) for j in l[2:]], (4,4))
i+=1
Y=np.reshape(Y, (len(Y), 1))
return (X, Y)
# the data, shuffled and split between train and test sets
(X_train, y_train) = load_data(sys.argv[1])
ll=np.vectorize(lambda x:math.log(x+1))
#X_train = X_train.reshape(X_train.shape[0], 1, 4, 4)
X_train = ll(X_train.astype('float32'))
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
Y_train = y_train.astype('float32')
model = model_from_json(open('2048_model.json', 'rb').read())
model.load_weights('2048_weights.h5')
#score = model.evaluate(X_train, y_train, batch_size=16)
print(X_train)
score = model.predict(X_train, batch_size=1)
print(score)
|
|
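The only preprocessing in the script above is the elementwise log(x + 1) applied to each 4x4 board. A standalone example of that transform on an invented board, using the same np.vectorize approach:

import math
import numpy as np

board = np.array([[0, 2, 4, 8],
                  [0, 0, 2, 2],
                  [0, 0, 0, 4],
                  [0, 0, 0, 0]], dtype="uint16")
ll = np.vectorize(lambda x: math.log(x + 1))
print(ll(board.astype("float32")).round(2))
# np.log1p(board) would give the same result without np.vectorize.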
70c3fbbd4aee9a5dfd75c6891fe479ba894ace38
|
strategic_form.py
|
strategic_form.py
|
from numpy import array
def transpose(payoff_matrix):
return array(payoff_matrix).transpose().tolist()
def get_best_responses(payoff_matrix):
# Select argmax from each row, and return the result as a list
return list(map(lambda x: (payoff_matrix.index(x), x.index(max(x))), payoff_matrix))
def solve_psne_2(payoff_matrix_p1, payoff_matrix_p2):
# Transpose payoff matrix for player 1, and get best responses
indices_p1 = get_best_responses(transpose(payoff_matrix_p1))
# Swap values in each pair of indices (i.e., reverse transposition)
indices_p1 = list(map(lambda x: (x[1], x[0]), indices_p1))
# Get best responses for player 2
indices_p2 = get_best_responses(payoff_matrix_p2)
# Return PSNE (if exist)
matched_responses = list(map(lambda x, y: x == y, indices_p1, indices_p2))
psne = [indices for indices in indices_p1 if matched_responses[indices_p1.index(indices)] == True]
return psne
def test(condition):
try:
assert condition
except AssertionError as e:
print("Test failed")
else:
print("Test successful")
if __name__ == '__main__':
### Test scenario1: Prisoner's dilemma
# Create payoff matrices for two players
p_matrix_p1 = [[-1, -10], [0, -3]]
p_matrix_p2 = [[-1, 0], [-10, -3]]
# Solve for PSNE
psne = solve_psne_2(p_matrix_p1, p_matrix_p2)
test(psne == [(1, 1)])
### Test scenario2: Matching pennies
# Create payoff matrices for two players
p_matrix_p1 = [[-1, 1], [1, -1]]
p_matrix_p2 = [[1, -1], [-1, 1]]
# Solve for PSNE
psne = solve_psne_2(p_matrix_p1, p_matrix_p2)
test(psne == [])
### Test scenario3: Example 4.16 from Carter's book
# Create payoff matrices for two players
p_matrix_p1 = [[1, 4, 2], [4, 0, 4], [2, 3, 5]]
p_matrix_p2 = [[3, 2, 2], [0, 3, 1], [5, 4, 6]]
# Solve for PSNE
psne = solve_psne_2(p_matrix_p1, p_matrix_p2)
test(psne == [(2, 2)])
|
Add algorithm for finding PSNE in two-player simultaneous-move games
|
Add algorithm for finding PSNE in two-player simultaneous-move games
|
Python
|
mit
|
kubkon/py-game-theory
|
Add algorithm for finding PSNE in two-player simultaneous-move games
|
from numpy import array
def transpose(payoff_matrix):
return array(payoff_matrix).transpose().tolist()
def get_best_responses(payoff_matrix):
# Select argmax from each row, and return the result as a list
return list(map(lambda x: (payoff_matrix.index(x), x.index(max(x))), payoff_matrix))
def solve_psne_2(payoff_matrix_p1, payoff_matrix_p2):
# Transpose payoff matrix for player 1, and get best responses
indices_p1 = get_best_responses(transpose(payoff_matrix_p1))
# Swap values in each pair of indices (i.e., reverse transposition)
indices_p1 = list(map(lambda x: (x[1], x[0]), indices_p1))
# Get best responses for player 2
indices_p2 = get_best_responses(payoff_matrix_p2)
# Return PSNE (if exist)
matched_responses = list(map(lambda x, y: x == y, indices_p1, indices_p2))
psne = [indices for indices in indices_p1 if matched_responses[indices_p1.index(indices)] == True]
return psne
def test(condition):
try:
assert condition
except AssertionError as e:
print("Test failed")
else:
print("Test successful")
if __name__ == '__main__':
### Test scenario1: Prisoner's dilemma
# Create payoff matrices for two players
p_matrix_p1 = [[-1, -10], [0, -3]]
p_matrix_p2 = [[-1, 0], [-10, -3]]
# Solve for PSNE
psne = solve_psne_2(p_matrix_p1, p_matrix_p2)
test(psne == [(1, 1)])
### Test scenario2: Matching pennies
# Create payoff matrices for two players
p_matrix_p1 = [[-1, 1], [1, -1]]
p_matrix_p2 = [[1, -1], [-1, 1]]
# Solve for PSNE
psne = solve_psne_2(p_matrix_p1, p_matrix_p2)
test(psne == [])
### Test scenario3: Example 4.16 from Carter's book
# Create payoff matrices for two players
p_matrix_p1 = [[1, 4, 2], [4, 0, 4], [2, 3, 5]]
p_matrix_p2 = [[3, 2, 2], [0, 3, 1], [5, 4, 6]]
# Solve for PSNE
psne = solve_psne_2(p_matrix_p1, p_matrix_p2)
test(psne == [(2, 2)])
|
<commit_before><commit_msg>Add algorithm for finding PSNE in two-player simultaneous-move games<commit_after>
|
from numpy import array
def transpose(payoff_matrix):
return array(payoff_matrix).transpose().tolist()
def get_best_responses(payoff_matrix):
# Select argmax from each row, and return the result as a list
return list(map(lambda x: (payoff_matrix.index(x), x.index(max(x))), payoff_matrix))
def solve_psne_2(payoff_matrix_p1, payoff_matrix_p2):
# Transpose payoff matrix for player 1, and get best responses
indices_p1 = get_best_responses(transpose(payoff_matrix_p1))
# Swap values in each pair of indices (i.e., reverse transposition)
indices_p1 = list(map(lambda x: (x[1], x[0]), indices_p1))
# Get best responses for player 2
indices_p2 = get_best_responses(payoff_matrix_p2)
# Return PSNE (if exist)
matched_responses = list(map(lambda x, y: x == y, indices_p1, indices_p2))
psne = [indices for indices in indices_p1 if matched_responses[indices_p1.index(indices)] == True]
return psne
def test(condition):
try:
assert condition
except AssertionError as e:
print("Test failed")
else:
print("Test successful")
if __name__ == '__main__':
### Test scenario1: Prisoner's dilemma
# Create payoff matrices for two players
p_matrix_p1 = [[-1, -10], [0, -3]]
p_matrix_p2 = [[-1, 0], [-10, -3]]
# Solve for PSNE
psne = solve_psne_2(p_matrix_p1, p_matrix_p2)
test(psne == [(1, 1)])
### Test scenario2: Matching pennies
# Create payoff matrices for two players
p_matrix_p1 = [[-1, 1], [1, -1]]
p_matrix_p2 = [[1, -1], [-1, 1]]
# Solve for PSNE
psne = solve_psne_2(p_matrix_p1, p_matrix_p2)
test(psne == [])
### Test scenario3: Example 4.16 from Carter's book
# Create payoff matrices for two players
p_matrix_p1 = [[1, 4, 2], [4, 0, 4], [2, 3, 5]]
p_matrix_p2 = [[3, 2, 2], [0, 3, 1], [5, 4, 6]]
# Solve for PSNE
psne = solve_psne_2(p_matrix_p1, p_matrix_p2)
test(psne == [(2, 2)])
|
Add algorithm for finding PSNE in two-player simultaneous-move games
from numpy import array
def transpose(payoff_matrix):
return array(payoff_matrix).transpose().tolist()
def get_best_responses(payoff_matrix):
# Select argmax from each row, and return the result as a list
return list(map(lambda x: (payoff_matrix.index(x), x.index(max(x))), payoff_matrix))
def solve_psne_2(payoff_matrix_p1, payoff_matrix_p2):
# Transpose payoff matrix for player 1, and get best responses
indices_p1 = get_best_responses(transpose(payoff_matrix_p1))
# Swap values in each pair of indices (i.e., reverse transposition)
indices_p1 = list(map(lambda x: (x[1], x[0]), indices_p1))
# Get best responses for player 2
indices_p2 = get_best_responses(payoff_matrix_p2)
# Return PSNE (if exist)
matched_responses = list(map(lambda x, y: x == y, indices_p1, indices_p2))
psne = [indices for indices in indices_p1 if matched_responses[indices_p1.index(indices)] == True]
return psne
def test(condition):
try:
assert condition
except AssertionError as e:
print("Test failed")
else:
print("Test successful")
if __name__ == '__main__':
### Test scenario1: Prisoner's dilemma
# Create payoff matrices for two players
p_matrix_p1 = [[-1, -10], [0, -3]]
p_matrix_p2 = [[-1, 0], [-10, -3]]
# Solve for PSNE
psne = solve_psne_2(p_matrix_p1, p_matrix_p2)
test(psne == [(1, 1)])
### Test scenario2: Matching pennies
# Create payoff matrices for two players
p_matrix_p1 = [[-1, 1], [1, -1]]
p_matrix_p2 = [[1, -1], [-1, 1]]
# Solve for PSNE
psne = solve_psne_2(p_matrix_p1, p_matrix_p2)
test(psne == [])
### Test scenario3: Example 4.16 from Carter's book
# Create payoff matrices for two players
p_matrix_p1 = [[1, 4, 2], [4, 0, 4], [2, 3, 5]]
p_matrix_p2 = [[3, 2, 2], [0, 3, 1], [5, 4, 6]]
# Solve for PSNE
psne = solve_psne_2(p_matrix_p1, p_matrix_p2)
test(psne == [(2, 2)])
|
<commit_before><commit_msg>Add algorithm for finding PSNE in two-player simultaneous-move games<commit_after>from numpy import array
def transpose(payoff_matrix):
return array(payoff_matrix).transpose().tolist()
def get_best_responses(payoff_matrix):
# Select argmax from each row, and return the result as a list
return list(map(lambda x: (payoff_matrix.index(x), x.index(max(x))), payoff_matrix))
def solve_psne_2(payoff_matrix_p1, payoff_matrix_p2):
# Transpose payoff matrix for player 1, and get best responses
indices_p1 = get_best_responses(transpose(payoff_matrix_p1))
# Swap values in each pair of indices (i.e., reverse transposition)
indices_p1 = list(map(lambda x: (x[1], x[0]), indices_p1))
# Get best responses for player 2
indices_p2 = get_best_responses(payoff_matrix_p2)
# Return PSNE (if exist)
matched_responses = list(map(lambda x, y: x == y, indices_p1, indices_p2))
psne = [indices for indices in indices_p1 if matched_responses[indices_p1.index(indices)] == True]
return psne
def test(condition):
try:
assert condition
except AssertionError as e:
print("Test failed")
else:
print("Test successful")
if __name__ == '__main__':
### Test scenario1: Prisoner's dilemma
# Create payoff matrices for two players
p_matrix_p1 = [[-1, -10], [0, -3]]
p_matrix_p2 = [[-1, 0], [-10, -3]]
# Solve for PSNE
psne = solve_psne_2(p_matrix_p1, p_matrix_p2)
test(psne == [(1, 1)])
### Test scenario2: Matching pennies
# Create payoff matrices for two players
p_matrix_p1 = [[-1, 1], [1, -1]]
p_matrix_p2 = [[1, -1], [-1, 1]]
# Solve for PSNE
psne = solve_psne_2(p_matrix_p1, p_matrix_p2)
test(psne == [])
### Test scenario3: Example 4.16 from Carter's book
# Create payoff matrices for two players
p_matrix_p1 = [[1, 4, 2], [4, 0, 4], [2, 3, 5]]
p_matrix_p2 = [[3, 2, 2], [0, 3, 1], [5, 4, 6]]
# Solve for PSNE
psne = solve_psne_2(p_matrix_p1, p_matrix_p2)
test(psne == [(2, 2)])
|
|
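To make the best-response bookkeeping concrete, the prisoner's dilemma case can be recomputed by hand with plain argmax calls. The snippet below mirrors what solve_psne_2 does for test scenario 1, but it is an independent reimplementation written for this note, not the module's code.

# Prisoner's dilemma (test scenario 1) worked by hand.
p1 = [[-1, -10], [0, -3]]   # row player's payoffs
p2 = [[-1, 0], [-10, -3]]   # column player's payoffs
# Player 1 best-responds within each column of p1; player 2 within each row of p2.
br_p1 = {col: max(range(2), key=lambda row: p1[row][col]) for col in range(2)}
br_p2 = {row: max(range(2), key=lambda col: p2[row][col]) for row in range(2)}
# A cell (row, col) is a PSNE when both players are best-responding there.
psne = [(row, col) for col, row in br_p1.items() if br_p2[row] == col]
assert psne == [(1, 1)]     # mutual defection, as in the test above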
1c10d27733c5448aaf9aa47c19ff3b279b3f0174
|
yahoo_historical/tests/test_fetch.py
|
yahoo_historical/tests/test_fetch.py
|
from yahoo_historical import Fetcher
def test_get_historical():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_historical()
assert len(data) > 0
def test_get_dividends():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_dividends()
assert len(data) > 0
def test_get_splits():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_splits()
assert len(data) > 0
def test_get_date_price():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_date_price()
assert len(data) > 0
def test_get_date_volume():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_date_volume()
assert len(data) > 0
|
Add basic unit tests for CI job
|
Add basic unit tests for CI job
|
Python
|
mit
|
AndrewRPorter/yahoo-historical
|
Add basic unit tests for CI job
|
from yahoo_historical import Fetcher
def test_get_historical():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_historical()
assert len(data) > 0
def test_get_dividends():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_dividends()
assert len(data) > 0
def test_get_splits():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_splits()
assert len(data) > 0
def test_get_date_price():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_date_price()
assert len(data) > 0
def test_get_date_volume():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_date_volume()
assert len(data) > 0
|
<commit_before><commit_msg>Add basic unit tests for CI job<commit_after>
|
from yahoo_historical import Fetcher
def test_get_historical():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_historical()
assert len(data) > 0
def test_get_dividends():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_dividends()
assert len(data) > 0
def test_get_splits():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_splits()
assert len(data) > 0
def test_get_date_price():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_date_price()
assert len(data) > 0
def test_get_date_volume():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_date_volume()
assert len(data) > 0
|
Add basic unit tests for CI job
from yahoo_historical import Fetcher
def test_get_historical():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_historical()
assert len(data) > 0
def test_get_dividends():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_dividends()
assert len(data) > 0
def test_get_splits():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_splits()
assert len(data) > 0
def test_get_date_price():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_date_price()
assert len(data) > 0
def test_get_date_volume():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_date_volume()
assert len(data) > 0
|
<commit_before><commit_msg>Add basic unit tests for CI job<commit_after>from yahoo_historical import Fetcher
def test_get_historical():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_historical()
assert len(data) > 0
def test_get_dividends():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_dividends()
assert len(data) > 0
def test_get_splits():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_splits()
assert len(data) > 0
def test_get_date_price():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_date_price()
assert len(data) > 0
def test_get_date_volume():
data = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1]).get_date_volume()
assert len(data) > 0
|
|
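If the suite grew, the five near-identical tests above could be collapsed with pytest's parametrize. The sketch below is one possible shape for that; like the originals, it still calls out to Yahoo's endpoint when executed, and it only uses the Fetcher accessors already shown in this record.

# Possible parametrized variant of the same checks (sketch only).
import pytest
from yahoo_historical import Fetcher

ACCESSORS = ["get_historical", "get_dividends", "get_splits",
             "get_date_price", "get_date_volume"]

@pytest.mark.parametrize("accessor", ACCESSORS)
def test_fetcher_accessors(accessor):
    fetcher = Fetcher("AAPL", [2007, 1, 1], [2017, 1, 1])
    data = getattr(fetcher, accessor)()
    assert len(data) > 0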
da005126cdc6ac78a464ac55ae50f15c99c90b4a
|
bin/crosswalk_msa_county.py
|
bin/crosswalk_msa_county.py
|
"""build_2000_msa.py
Extract a usable crosswalk between 2000 Metropolitan areas and counties.
Reconstitute a shapefile for the 2000 MSAs from the counties.
Parameters
----------
99mfips.txt: Delineation of the 2000 Metropolitan Statistical Areas
Returns
-------
crosswalk_msa_county.csv
msa.shp
"""
#
# Parse the delineations provided by the OMB
#
begin_file = 22
end_file = 2173
msa = {}
with open('data/gz/99mfips.txt', 'r') as source:
line = source.readline()
l = 1
while line:
## Skip the non-data lines
if l < begin_file or l > end_file or line == "\n":
line = source.readline()
l+=1
continue
## Read the data
msa_fips = line[0:4].replace(" ", "")
pmsa_fips = line[8:12].replace(" ", "")
county_fips = line[24:29].replace(" ", "")
entity_fips = line[40:45].replace(" ", "")
name = line[48:88].replace("\n", "")
if pmsa_fips != "":
if county_fips == "":
msa[pmsa_fips] = {'name':name,
'counties':[]}
else:
if entity_fips == "":
msa[pmsa_fips]['counties'].append(county_fips)
else:
if county_fips == "":
msa[msa_fips] = {'name':name,
'counties':[]}
else:
if entity_fips == "":
msa[msa_fips]['counties'].append(county_fips)
## Iterate
line = source.readline()
l+=1
## Remove the (empty) CMSA
msa = {fip:data for fip, data in msa.iteritems()
if len(data['counties']) > 0}
#
# Save the crosswalk
#
with open("data/crosswalks/msa_county.csv", "w") as output:
output.write("MSA FIPS CODE\tCOUNTY FIPS CODE\n")
for city in msa:
for county in msa[city]['counties']:
output.write("%s\t%s\n"%(city, county))
|
Add script to extract the 2000 MSA to county correspondence
|
Add script to extract the 2000 MSA to county correspondence
|
Python
|
bsd-3-clause
|
rlouf/patterns-of-segregation
|
Add script to extract the 2000 MSA to county correspondence
|
"""build_2000_msa.py
Extract a usable crosswalk between 2000 Metropolitan areas and counties.
Reconstitute a shapefile for the 2000 MSAs from the counties.
Parameters
----------
99mfips.txt: Delineation of the 2000 Metropolitan Statistical Areas
Returns
-------
crosswalk_msa_county.csv
msa.shp
"""
#
# Parse the delineations provided by the OMB
#
begin_file = 22
end_file = 2173
msa = {}
with open('data/gz/99mfips.txt', 'r') as source:
line = source.readline()
l = 1
while line:
## Skip the non-data lines
if l < begin_file or l > end_file or line == "\n":
line = source.readline()
l+=1
continue
## Read the data
msa_fips = line[0:4].replace(" ", "")
pmsa_fips = line[8:12].replace(" ", "")
county_fips = line[24:29].replace(" ", "")
entity_fips = line[40:45].replace(" ", "")
name = line[48:88].replace("\n", "")
if pmsa_fips != "":
if county_fips == "":
msa[pmsa_fips] = {'name':name,
'counties':[]}
else:
if entity_fips == "":
msa[pmsa_fips]['counties'].append(county_fips)
else:
if county_fips == "":
msa[msa_fips] = {'name':name,
'counties':[]}
else:
if entity_fips == "":
msa[msa_fips]['counties'].append(county_fips)
## Iterate
line = source.readline()
l+=1
## Remove the (empty) CMSA
msa = {fip:data for fip, data in msa.iteritems()
if len(data['counties']) > 0}
#
# Save the crosswalk
#
with open("data/crosswalks/msa_county.csv", "w") as output:
output.write("MSA FIPS CODE\tCOUNTY FIPS CODE\n")
for city in msa:
for county in msa[city]['counties']:
output.write("%s\t%s\n"%(city, county))
|
<commit_before><commit_msg>Add script to extract the 2000 MSA to county correspondence<commit_after>
|
"""build_2000_msa.py
Extract a usable crosswalk between 2000 Metropolitan areas and counties.
Reconstitute a shapefile for the 2000 MSAs from the counties.
Parameters
----------
99mfips.txt: Delineation of the 2000 Metropolitan Statistical Areas
Returns
-------
crosswalk_msa_county.csv
msa.shp
"""
#
# Parse the delineations provided by the OMB
#
begin_file = 22
end_file = 2173
msa = {}
with open('data/gz/99mfips.txt', 'r') as source:
line = source.readline()
l = 1
while line:
## Skip the non-data lines
if l < begin_file or l > end_file or line == "\n":
line = source.readline()
l+=1
continue
## Read the data
msa_fips = line[0:4].replace(" ", "")
pmsa_fips = line[8:12].replace(" ", "")
county_fips = line[24:29].replace(" ", "")
entity_fips = line[40:45].replace(" ", "")
name = line[48:88].replace("\n", "")
if pmsa_fips != "":
if county_fips == "":
msa[pmsa_fips] = {'name':name,
'counties':[]}
else:
if entity_fips == "":
msa[pmsa_fips]['counties'].append(county_fips)
else:
if county_fips == "":
msa[msa_fips] = {'name':name,
'counties':[]}
else:
if entity_fips == "":
msa[msa_fips]['counties'].append(county_fips)
## Iterate
line = source.readline()
l+=1
## Remove the (empty) CMSA
msa = {fip:data for fip, data in msa.iteritems()
if len(data['counties']) > 0}
#
# Save the crosswalk
#
with open("data/crosswalks/msa_county.csv", "w") as output:
output.write("MSA FIPS CODE\tCOUNTY FIPS CODE\n")
for city in msa:
for county in msa[city]['counties']:
output.write("%s\t%s\n"%(city, county))
|
Add script to extract the 2000 MSA to county correspondence
"""build_2000_msa.py
Extract a usable crosswalk between 2000 Metropolitan areas and counties.
Reconstitute a shapefile for the 2000 MSAs from the counties.
Parameters
----------
99mfips.txt: Delineation of the 2000 Metropolitan Statistical Areas
Returns
-------
crosswalk_msa_county.csv
msa.shp
"""
#
# Parse the delineations provided by the OMB
#
begin_file = 22
end_file = 2173
msa = {}
with open('data/gz/99mfips.txt', 'r') as source:
line = source.readline()
l = 1
while line:
## Skip the non-data lines
if l < begin_file or l > end_file or line == "\n":
line = source.readline()
l+=1
continue
## Read the data
msa_fips = line[0:4].replace(" ", "")
pmsa_fips = line[8:12].replace(" ", "")
county_fips = line[24:29].replace(" ", "")
entity_fips = line[40:45].replace(" ", "")
name = line[48:88].replace("\n", "")
if pmsa_fips != "":
if county_fips == "":
msa[pmsa_fips] = {'name':name,
'counties':[]}
else:
if entity_fips == "":
msa[pmsa_fips]['counties'].append(county_fips)
else:
if county_fips == "":
msa[msa_fips] = {'name':name,
'counties':[]}
else:
if entity_fips == "":
msa[msa_fips]['counties'].append(county_fips)
## Iterate
line = source.readline()
l+=1
## Remove the (empty) CMSA
msa = {fip:data for fip, data in msa.iteritems()
if len(data['counties']) > 0}
#
# Save the crosswalk
#
with open("data/crosswalks/msa_county.csv", "w") as output:
output.write("MSA FIPS CODE\tCOUNTY FIPS CODE\n")
for city in msa:
for county in msa[city]['counties']:
output.write("%s\t%s\n"%(city, county))
|
<commit_before><commit_msg>Add script to extract the 2000 MSA to county correspondence<commit_after>"""build_2000_msa.py
Extract a usable crosswalk between 2000 Metropolitan areas and counties.
Reconstitute a shapefile for the 2000 MSAs from the counties.
Parameters
----------
99mfips.txt: Delineation of the 2000 Metropolitan Statistical Areas
Returns
-------
crosswalk_msa_county.csv
msa.shp
"""
#
# Parse the delineations provided by the OMB
#
begin_file = 22
end_file = 2173
msa = {}
with open('data/gz/99mfips.txt', 'r') as source:
line = source.readline()
l = 1
while line:
## Skip the non-data lines
if l < begin_file or l > end_file or line == "\n":
line = source.readline()
l+=1
continue
## Read the data
msa_fips = line[0:4].replace(" ", "")
pmsa_fips = line[8:12].replace(" ", "")
county_fips = line[24:29].replace(" ", "")
entity_fips = line[40:45].replace(" ", "")
name = line[48:88].replace("\n", "")
if pmsa_fips != "":
if county_fips == "":
msa[pmsa_fips] = {'name':name,
'counties':[]}
else:
if entity_fips == "":
msa[pmsa_fips]['counties'].append(county_fips)
else:
if county_fips == "":
msa[msa_fips] = {'name':name,
'counties':[]}
else:
if entity_fips == "":
msa[msa_fips]['counties'].append(county_fips)
## Iterate
line = source.readline()
l+=1
## Remove the (empty) CMSA
msa = {fip:data for fip, data in msa.iteritems()
if len(data['counties']) > 0}
#
# Save the crosswalk
#
with open("data/crosswalks/msa_county.csv", "w") as output:
output.write("MSA FIPS CODE\tCOUNTY FIPS CODE\n")
for city in msa:
for county in msa[city]['counties']:
output.write("%s\t%s\n"%(city, county))
|
|
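The parser relies on fixed column offsets in the 1999 delineation file. The snippet below runs an invented line (not a real row of 99mfips.txt) through the same slices to show which field lands where.

# The slice offsets mirror the parser above; the sample line is constructed
# here and is NOT taken from the actual delineation file.
cols = list(" " * 90)
cols[0:4] = "1122"        # MSA FIPS
cols[8:12] = "2233"       # PMSA FIPS
cols[24:29] = "33001"     # county FIPS
cols[40:45] = "99999"     # entity FIPS
cols[48:48 + len("Example, AA MSA")] = "Example, AA MSA"
line = "".join(cols) + "\n"
print(line[0:4].replace(" ", ""),     # '1122'
      line[8:12].replace(" ", ""),    # '2233'
      line[24:29].replace(" ", ""),   # '33001'
      line[40:45].replace(" ", ""),   # '99999'
      line[48:88].replace("\n", "").strip())  # 'Example, AA MSA'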
8054c9e06511041c0834f901e2e515e58100e8f7
|
bindings/const_generator.py
|
bindings/const_generator.py
|
import sys, re
INCL_DIR = '../include'
include = [
('/arm.h', 'ARM_'),
('/arm64.h', 'ARM64_'),
('/x86.h', 'X86_'),
('/mips.h', 'MIPS_'),
]
template = {
'java': {
'header': "// AUTOGENRATED FILE, DO NOT EDIT\npackage capstone;\n\npublic class %sconst {\n",
'footer': "}",
'line_format': '\tpublic static final int %s = %s;\n',
'out_file': 'java/capstone/%sconst.java',
}
}
def gen(templ):
global include, INCL_DIR
for target in include:
prefix = target[1];
outfile = open(templ['out_file'] %(prefix.capitalize()), 'w')
outfile.write(templ['header'] % (prefix.capitalize()))
lines = open(INCL_DIR + target[0]).readlines()
count = 0
for line in lines:
line = line.strip()
if line == '' or line.startswith('//'):
continue
if not line.startswith(prefix):
continue
tmp = line.strip().split(',')
for t in tmp:
t = t.strip()
if not t or t.startswith('//'): continue
f = re.split('\s+', t)
if f[0].startswith(prefix):
if len(f) > 1 and f[1] not in '//=':
print "Error: Unable to convert %s" % f
continue
elif len(f) > 1 and f[1] == '=':
rhs = f[2]
else:
rhs = str(count)
count += 1
if rhs == '0':
outfile.write("\n")
count = 1
outfile.write(templ['line_format'] %(f[0].strip(), rhs))
outfile.write(templ['footer'])
outfile.close()
def main():
if (sys.argv[1] == 'java'):
gen(template['java'])
else:
raise RuntimeError("Unsupported binding")
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage:", sys.argv[0], " <bindings: java|python>"
sys.exit(1)
main()
|
Add a script to generate constant for binding
|
Add a script to generate constant for binding
|
Python
|
bsd-3-clause
|
dynm/capstone,07151129/capstone,nplanel/capstone,nplanel/capstone,bughoho/capstone,krytarowski/capstone,angelabier1/capstone,nplanel/capstone,07151129/capstone,bigendiansmalls/capstone,bowlofstew/capstone,pranith/capstone,code4bones/capstone,sigma-random/capstone,sephiroth99/capstone,NeilBryant/capstone,fvrmatteo/capstone,code4bones/capstone,nplanel/capstone,bughoho/capstone,bigendiansmalls/capstone,techvoltage/capstone,fvrmatteo/capstone,capturePointer/capstone,xia0pin9/capstone,zuloloxi/capstone,sigma-random/capstone,pyq881120/capstone,capturePointer/capstone,pyq881120/capstone,dynm/capstone,8l/capstone,zuloloxi/capstone,NeilBryant/capstone,pranith/capstone,07151129/capstone,pranith/capstone,zneak/capstone,techvoltage/capstone,pyq881120/capstone,pombredanne/capstone,xia0pin9/capstone,techvoltage/capstone,angelabier1/capstone,zneak/capstone,sephiroth99/capstone,code4bones/capstone,fvrmatteo/capstone,krytarowski/capstone,pranith/capstone,pyq881120/capstone,bigendiansmalls/capstone,bigendiansmalls/capstone,xia0pin9/capstone,zuloloxi/capstone,bughoho/capstone,pyq881120/capstone,zuloloxi/capstone,pombredanne/capstone,techvoltage/capstone,zneak/capstone,sigma-random/capstone,NeilBryant/capstone,AmesianX/capstone,krytarowski/capstone,krytarowski/capstone,AmesianX/capstone,sigma-random/capstone,nplanel/capstone,code4bones/capstone,sephiroth99/capstone,8l/capstone,bughoho/capstone,xia0pin9/capstone,xia0pin9/capstone,AmesianX/capstone,bSr43/capstone,bowlofstew/capstone,NeilBryant/capstone,dynm/capstone,dynm/capstone,fvrmatteo/capstone,bughoho/capstone,sephiroth99/capstone,pranith/capstone,8l/capstone,zuloloxi/capstone,pombredanne/capstone,AmesianX/capstone,angelabier1/capstone,capturePointer/capstone,bigendiansmalls/capstone,sigma-random/capstone,angelabier1/capstone,zuloloxi/capstone,AmesianX/capstone,pombredanne/capstone,sigma-random/capstone,capturePointer/capstone,bSr43/capstone,pombredanne/capstone,NeilBryant/capstone,NeilBryant/capstone,techvoltage/capstone,8l/capstone,sephiroth99/capstone,zneak/capstone,bughoho/capstone,bigendiansmalls/capstone,pyq881120/capstone,fvrmatteo/capstone,nplanel/capstone,xia0pin9/capstone,bowlofstew/capstone,code4bones/capstone,8l/capstone,AmesianX/capstone,bowlofstew/capstone,angelabier1/capstone,capturePointer/capstone,pranith/capstone,angelabier1/capstone,8l/capstone,bSr43/capstone,bSr43/capstone,zneak/capstone,bowlofstew/capstone,pranith/capstone,sigma-random/capstone,pyq881120/capstone,dynm/capstone,bSr43/capstone,07151129/capstone,code4bones/capstone,angelabier1/capstone,zneak/capstone,pombredanne/capstone,07151129/capstone,zuloloxi/capstone,pombredanne/capstone,07151129/capstone,NeilBryant/capstone,dynm/capstone,nplanel/capstone,capturePointer/capstone,code4bones/capstone,sephiroth99/capstone,AmesianX/capstone,bowlofstew/capstone,dynm/capstone,xia0pin9/capstone,techvoltage/capstone,capturePointer/capstone,bSr43/capstone,07151129/capstone,krytarowski/capstone,techvoltage/capstone,bSr43/capstone,zneak/capstone,krytarowski/capstone,fvrmatteo/capstone,bughoho/capstone,fvrmatteo/capstone,bowlofstew/capstone,bigendiansmalls/capstone,8l/capstone,krytarowski/capstone,sephiroth99/capstone
|
Add a script to generate constant for binding
|
import sys, re
INCL_DIR = '../include'
include = [
('/arm.h', 'ARM_'),
('/arm64.h', 'ARM64_'),
('/x86.h', 'X86_'),
('/mips.h', 'MIPS_'),
]
template = {
'java': {
'header': "// AUTOGENRATED FILE, DO NOT EDIT\npackage capstone;\n\npublic class %sconst {\n",
'footer': "}",
'line_format': '\tpublic static final int %s = %s;\n',
'out_file': 'java/capstone/%sconst.java',
}
}
def gen(templ):
global include, INCL_DIR
for target in include:
prefix = target[1];
outfile = open(templ['out_file'] %(prefix.capitalize()), 'w')
outfile.write(templ['header'] % (prefix.capitalize()))
lines = open(INCL_DIR + target[0]).readlines()
count = 0
for line in lines:
line = line.strip()
if line == '' or line.startswith('//'):
continue
if not line.startswith(prefix):
continue
tmp = line.strip().split(',')
for t in tmp:
t = t.strip()
if not t or t.startswith('//'): continue
f = re.split('\s+', t)
if f[0].startswith(prefix):
if len(f) > 1 and f[1] not in '//=':
print "Error: Unable to convert %s" % f
continue
elif len(f) > 1 and f[1] == '=':
rhs = f[2]
else:
rhs = str(count)
count += 1
if rhs == '0':
outfile.write("\n")
count = 1
outfile.write(templ['line_format'] %(f[0].strip(), rhs))
outfile.write(templ['footer'])
outfile.close()
def main():
if (sys.argv[1] == 'java'):
gen(template['java'])
else:
raise RuntimeError("Unsupported binding")
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage:", sys.argv[0], " <bindings: java|python>"
sys.exit(1)
main()
|
<commit_before><commit_msg>Add a script to generate constant for binding<commit_after>
|
import sys, re
INCL_DIR = '../include'
include = [
('/arm.h', 'ARM_'),
('/arm64.h', 'ARM64_'),
('/x86.h', 'X86_'),
('/mips.h', 'MIPS_'),
]
template = {
'java': {
'header': "// AUTOGENRATED FILE, DO NOT EDIT\npackage capstone;\n\npublic class %sconst {\n",
'footer': "}",
'line_format': '\tpublic static final int %s = %s;\n',
'out_file': 'java/capstone/%sconst.java',
}
}
def gen(templ):
global include, INCL_DIR
for target in include:
prefix = target[1];
outfile = open(templ['out_file'] %(prefix.capitalize()), 'w')
outfile.write(templ['header'] % (prefix.capitalize()))
lines = open(INCL_DIR + target[0]).readlines()
count = 0
for line in lines:
line = line.strip()
if line == '' or line.startswith('//'):
continue
if not line.startswith(prefix):
continue
tmp = line.strip().split(',')
for t in tmp:
t = t.strip()
if not t or t.startswith('//'): continue
f = re.split('\s+', t)
if f[0].startswith(prefix):
if len(f) > 1 and f[1] not in '//=':
print "Error: Unable to convert %s" % f
continue
elif len(f) > 1 and f[1] == '=':
rhs = f[2]
else:
rhs = str(count)
count += 1
if rhs == '0':
outfile.write("\n")
count = 1
outfile.write(templ['line_format'] %(f[0].strip(), rhs))
outfile.write(templ['footer'])
outfile.close()
def main():
if (sys.argv[1] == 'java'):
gen(template['java'])
else:
raise RuntimeError("Unsupported binding")
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage:", sys.argv[0], " <bindings: java|python>"
sys.exit(1)
main()
|
Add a script to generate constant for binding
import sys, re
INCL_DIR = '../include'
include = [
('/arm.h', 'ARM_'),
('/arm64.h', 'ARM64_'),
('/x86.h', 'X86_'),
('/mips.h', 'MIPS_'),
]
template = {
'java': {
'header': "// AUTOGENRATED FILE, DO NOT EDIT\npackage capstone;\n\npublic class %sconst {\n",
'footer': "}",
'line_format': '\tpublic static final int %s = %s;\n',
'out_file': 'java/capstone/%sconst.java',
}
}
def gen(templ):
global include, INCL_DIR
for target in include:
prefix = target[1];
outfile = open(templ['out_file'] %(prefix.capitalize()), 'w')
outfile.write(templ['header'] % (prefix.capitalize()))
lines = open(INCL_DIR + target[0]).readlines()
count = 0
for line in lines:
line = line.strip()
if line == '' or line.startswith('//'):
continue
if not line.startswith(prefix):
continue
tmp = line.strip().split(',')
for t in tmp:
t = t.strip()
if not t or t.startswith('//'): continue
f = re.split('\s+', t)
if f[0].startswith(prefix):
if len(f) > 1 and f[1] not in '//=':
print "Error: Unable to convert %s" % f
continue
elif len(f) > 1 and f[1] == '=':
rhs = f[2]
else:
rhs = str(count)
count += 1
if rhs == '0':
outfile.write("\n")
count = 1
outfile.write(templ['line_format'] %(f[0].strip(), rhs))
outfile.write(templ['footer'])
outfile.close()
def main():
if (sys.argv[1] == 'java'):
gen(template['java'])
else:
raise RuntimeError("Unsupported binding")
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage:", sys.argv[0], " <bindings: java|python>"
sys.exit(1)
main()
|
<commit_before><commit_msg>Add a script to generate constant for binding<commit_after>import sys, re
INCL_DIR = '../include'
include = [
('/arm.h', 'ARM_'),
('/arm64.h', 'ARM64_'),
('/x86.h', 'X86_'),
('/mips.h', 'MIPS_'),
]
template = {
'java': {
'header': "// AUTOGENRATED FILE, DO NOT EDIT\npackage capstone;\n\npublic class %sconst {\n",
'footer': "}",
'line_format': '\tpublic static final int %s = %s;\n',
'out_file': 'java/capstone/%sconst.java',
}
}
def gen(templ):
global include, INCL_DIR
for target in include:
prefix = target[1];
outfile = open(templ['out_file'] %(prefix.capitalize()), 'w')
outfile.write(templ['header'] % (prefix.capitalize()))
lines = open(INCL_DIR + target[0]).readlines()
count = 0
for line in lines:
line = line.strip()
if line == '' or line.startswith('//'):
continue
if not line.startswith(prefix):
continue
tmp = line.strip().split(',')
for t in tmp:
t = t.strip()
if not t or t.startswith('//'): continue
f = re.split('\s+', t)
if f[0].startswith(prefix):
if len(f) > 1 and f[1] not in '//=':
print "Error: Unable to convert %s" % f
continue
elif len(f) > 1 and f[1] == '=':
rhs = f[2]
else:
rhs = str(count)
count += 1
if rhs == '0':
outfile.write("\n")
count = 1
outfile.write(templ['line_format'] %(f[0].strip(), rhs))
outfile.write(templ['footer'])
outfile.close()
def main():
if (sys.argv[1] == 'java'):
gen(template['java'])
else:
raise RuntimeError("Unsupported binding")
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage:", sys.argv[0], " <bindings: java|python>"
sys.exit(1)
main()
|
|
0eac761535f959a07acb0e611b415e2cbccd9a97
|
tests/sentry/web/frontend/test_organization_settings.py
|
tests/sentry/web/frontend/test_organization_settings.py
|
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import Organization
from sentry.testutils import TestCase
class OrganizationSettingsTest(TestCase):
def test_renders_with_context(self):
organization = self.create_organization(name='foo', owner=self.user)
team = self.create_team(organization=organization)
project = self.create_project(team=team)
path = reverse('sentry-organization-settings', args=[organization.id])
self.login_as(self.user)
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/organization-settings.html')
assert resp.context['organization'] == organization
assert resp.context['form']
def test_saves(self):
organization = self.create_organization(name='foo', owner=self.user)
team = self.create_team(organization=organization)
project = self.create_project(team=team)
path = reverse('sentry-organization-settings', args=[organization.id])
self.login_as(self.user)
resp = self.client.post(path, {'name': 'bar'})
assert resp.status_code == 302
organization = Organization.objects.get(id=organization.id)
assert organization.name == 'bar'
|
Add test for organization settings
|
Add test for organization settings
|
Python
|
bsd-3-clause
|
songyi199111/sentry,JackDanger/sentry,mvaled/sentry,jokey2k/sentry,felixbuenemann/sentry,drcapulet/sentry,Kryz/sentry,fotinakis/sentry,kevinastone/sentry,gencer/sentry,looker/sentry,kevinlondon/sentry,boneyao/sentry,mvaled/sentry,jean/sentry,llonchj/sentry,BuildingLink/sentry,gencer/sentry,korealerts1/sentry,vperron/sentry,argonemyth/sentry,wong2/sentry,JTCunning/sentry,TedaLIEz/sentry,beeftornado/sentry,BayanGroup/sentry,songyi199111/sentry,pauloschilling/sentry,llonchj/sentry,gg7/sentry,ifduyue/sentry,fuziontech/sentry,ifduyue/sentry,kevinastone/sentry,camilonova/sentry,boneyao/sentry,ngonzalvez/sentry,zenefits/sentry,kevinastone/sentry,alexm92/sentry,daevaorn/sentry,TedaLIEz/sentry,mitsuhiko/sentry,mvaled/sentry,wujuguang/sentry,jean/sentry,ngonzalvez/sentry,kevinlondon/sentry,BayanGroup/sentry,boneyao/sentry,pauloschilling/sentry,zenefits/sentry,songyi199111/sentry,jokey2k/sentry,ifduyue/sentry,argonemyth/sentry,Kryz/sentry,argonemyth/sentry,1tush/sentry,BuildingLink/sentry,hongliang5623/sentry,llonchj/sentry,looker/sentry,alexm92/sentry,fotinakis/sentry,JTCunning/sentry,gencer/sentry,camilonova/sentry,drcapulet/sentry,looker/sentry,JamesMura/sentry,fotinakis/sentry,mvaled/sentry,zenefits/sentry,Natim/sentry,gg7/sentry,BuildingLink/sentry,wong2/sentry,daevaorn/sentry,Natim/sentry,fuziontech/sentry,Kryz/sentry,JamesMura/sentry,1tush/sentry,wujuguang/sentry,JamesMura/sentry,looker/sentry,BuildingLink/sentry,mitsuhiko/sentry,drcapulet/sentry,wujuguang/sentry,JTCunning/sentry,ewdurbin/sentry,daevaorn/sentry,jean/sentry,alexm92/sentry,JackDanger/sentry,zenefits/sentry,korealerts1/sentry,vperron/sentry,camilonova/sentry,JamesMura/sentry,daevaorn/sentry,imankulov/sentry,imankulov/sentry,zenefits/sentry,nicholasserra/sentry,JamesMura/sentry,gencer/sentry,ewdurbin/sentry,hongliang5623/sentry,ewdurbin/sentry,TedaLIEz/sentry,BayanGroup/sentry,jokey2k/sentry,mvaled/sentry,BuildingLink/sentry,vperron/sentry,beeftornado/sentry,hongliang5623/sentry,looker/sentry,felixbuenemann/sentry,fuziontech/sentry,jean/sentry,beeftornado/sentry,nicholasserra/sentry,Natim/sentry,pauloschilling/sentry,mvaled/sentry,jean/sentry,ngonzalvez/sentry,fotinakis/sentry,JackDanger/sentry,ifduyue/sentry,gg7/sentry,wong2/sentry,imankulov/sentry,nicholasserra/sentry,1tush/sentry,kevinlondon/sentry,felixbuenemann/sentry,korealerts1/sentry,ifduyue/sentry,gencer/sentry
|
Add test for organization settings
|
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import Organization
from sentry.testutils import TestCase
class OrganizationSettingsTest(TestCase):
def test_renders_with_context(self):
organization = self.create_organization(name='foo', owner=self.user)
team = self.create_team(organization=organization)
project = self.create_project(team=team)
path = reverse('sentry-organization-settings', args=[organization.id])
self.login_as(self.user)
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/organization-settings.html')
assert resp.context['organization'] == organization
assert resp.context['form']
def test_saves(self):
organization = self.create_organization(name='foo', owner=self.user)
team = self.create_team(organization=organization)
project = self.create_project(team=team)
path = reverse('sentry-organization-settings', args=[organization.id])
self.login_as(self.user)
resp = self.client.post(path, {'name': 'bar'})
assert resp.status_code == 302
organization = Organization.objects.get(id=organization.id)
assert organization.name == 'bar'
|
<commit_before><commit_msg>Add test for organization settings<commit_after>
|
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import Organization
from sentry.testutils import TestCase
class OrganizationSettingsTest(TestCase):
def test_renders_with_context(self):
organization = self.create_organization(name='foo', owner=self.user)
team = self.create_team(organization=organization)
project = self.create_project(team=team)
path = reverse('sentry-organization-settings', args=[organization.id])
self.login_as(self.user)
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/organization-settings.html')
assert resp.context['organization'] == organization
assert resp.context['form']
def test_saves(self):
organization = self.create_organization(name='foo', owner=self.user)
team = self.create_team(organization=organization)
project = self.create_project(team=team)
path = reverse('sentry-organization-settings', args=[organization.id])
self.login_as(self.user)
resp = self.client.post(path, {'name': 'bar'})
assert resp.status_code == 302
organization = Organization.objects.get(id=organization.id)
assert organization.name == 'bar'
|
Add test for organization settingsfrom __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import Organization
from sentry.testutils import TestCase
class OrganizationSettingsTest(TestCase):
def test_renders_with_context(self):
organization = self.create_organization(name='foo', owner=self.user)
team = self.create_team(organization=organization)
project = self.create_project(team=team)
path = reverse('sentry-organization-settings', args=[organization.id])
self.login_as(self.user)
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/organization-settings.html')
assert resp.context['organization'] == organization
assert resp.context['form']
def test_saves(self):
organization = self.create_organization(name='foo', owner=self.user)
team = self.create_team(organization=organization)
project = self.create_project(team=team)
path = reverse('sentry-organization-settings', args=[organization.id])
self.login_as(self.user)
resp = self.client.post(path, {'name': 'bar'})
assert resp.status_code == 302
organization = Organization.objects.get(id=organization.id)
assert organization.name == 'bar'
|
<commit_before><commit_msg>Add test for organization settings<commit_after>from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import Organization
from sentry.testutils import TestCase
class OrganizationSettingsTest(TestCase):
def test_renders_with_context(self):
organization = self.create_organization(name='foo', owner=self.user)
team = self.create_team(organization=organization)
project = self.create_project(team=team)
path = reverse('sentry-organization-settings', args=[organization.id])
self.login_as(self.user)
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/organization-settings.html')
assert resp.context['organization'] == organization
assert resp.context['form']
def test_saves(self):
organization = self.create_organization(name='foo', owner=self.user)
team = self.create_team(organization=organization)
project = self.create_project(team=team)
path = reverse('sentry-organization-settings', args=[organization.id])
self.login_as(self.user)
resp = self.client.post(path, {'name': 'bar'})
assert resp.status_code == 302
organization = Organization.objects.get(id=organization.id)
assert organization.name == 'bar'
|
|
e29962ede32106984b41454b7cbfbf0217bef7fe
|
tools/rebuild_all.py
|
tools/rebuild_all.py
|
import os
import shutil
import subprocess
import sys
from contextlib import contextmanager
@contextmanager
def ignore_no_file():
try:
yield
except FileNotFoundError:
pass
os.chdir(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
with ignore_no_file():
print('Removing @Pythia.zip')
os.remove('@Pythia.zip')
with ignore_no_file():
print('Removing Pythia Python 64-bit installation')
shutil.rmtree(os.path.realpath(os.path.join('@Pythia', 'python-37-embed-amd64')))
with ignore_no_file():
print('Removing Pythia Python 32-bit installation')
shutil.rmtree(os.path.join('@Pythia', 'python-37-embed-win32'))
subprocess.run([sys.executable, os.path.join('tools', 'make_pbos.py')], check=True)
subprocess.run([sys.executable, os.path.join('tools', 'create_embedded_python.py'), '@Pythia'], check=True)
print('Packing the resulting mod to a zip file')
shutil.make_archive('@Pythia', 'zip', root_dir='.', base_dir='@Pythia')
# TODO: Use an empty directory to build
# TODO: Add building of the dlls
# TODO: Fix https://github.com/overfl0/Pythia/issues/41 to build the dlls
|
Add a script to simplify building releases
|
Add a script to simplify building releases
|
Python
|
mit
|
overfl0/Pythia,overfl0/Pythia,overfl0/Pythia,overfl0/Pythia
|
Add a script to simplify building releases
|
import os
import shutil
import subprocess
import sys
from contextlib import contextmanager
@contextmanager
def ignore_no_file():
try:
yield
except FileNotFoundError:
pass
os.chdir(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
with ignore_no_file():
print('Removing @Pythia.zip')
os.remove('@Pythia.zip')
with ignore_no_file():
print('Removing Pythia Python 64-bit installation')
shutil.rmtree(os.path.realpath(os.path.join('@Pythia', 'python-37-embed-amd64')))
with ignore_no_file():
print('Removing Pythia Python 32-bit installation')
shutil.rmtree(os.path.join('@Pythia', 'python-37-embed-win32'))
subprocess.run([sys.executable, os.path.join('tools', 'make_pbos.py')], check=True)
subprocess.run([sys.executable, os.path.join('tools', 'create_embedded_python.py'), '@Pythia'], check=True)
print('Packing the resulting mod to a zip file')
shutil.make_archive('@Pythia', 'zip', root_dir='.', base_dir='@Pythia')
# TODO: Use an empty directory to build
# TODO: Add building of the dlls
# TODO: Fix https://github.com/overfl0/Pythia/issues/41 to build the dlls
|
<commit_before><commit_msg>Add a script to simplify building releases<commit_after>
|
import os
import shutil
import subprocess
import sys
from contextlib import contextmanager
@contextmanager
def ignore_no_file():
try:
yield
except FileNotFoundError:
pass
os.chdir(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
with ignore_no_file():
print('Removing @Pythia.zip')
os.remove('@Pythia.zip')
with ignore_no_file():
print('Removing Pythia Python 64-bit installation')
shutil.rmtree(os.path.realpath(os.path.join('@Pythia', 'python-37-embed-amd64')))
with ignore_no_file():
print('Removing Pythia Python 32-bit installation')
shutil.rmtree(os.path.join('@Pythia', 'python-37-embed-win32'))
subprocess.run([sys.executable, os.path.join('tools', 'make_pbos.py')], check=True)
subprocess.run([sys.executable, os.path.join('tools', 'create_embedded_python.py'), '@Pythia'], check=True)
print('Packing the resulting mod to a zip file')
shutil.make_archive('@Pythia', 'zip', root_dir='.', base_dir='@Pythia')
# TODO: Use an empty directory to build
# TODO: Add building of the dlls
# TODO: Fix https://github.com/overfl0/Pythia/issues/41 to build the dlls
|
Add a script to simplify building releasesimport os
import shutil
import subprocess
import sys
from contextlib import contextmanager
@contextmanager
def ignore_no_file():
try:
yield
except FileNotFoundError:
pass
os.chdir(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
with ignore_no_file():
print('Removing @Pythia.zip')
os.remove('@Pythia.zip')
with ignore_no_file():
print('Removing Pythia Python 64-bit installation')
shutil.rmtree(os.path.realpath(os.path.join('@Pythia', 'python-37-embed-amd64')))
with ignore_no_file():
print('Removing Pythia Python 32-bit installation')
shutil.rmtree(os.path.join('@Pythia', 'python-37-embed-win32'))
subprocess.run([sys.executable, os.path.join('tools', 'make_pbos.py')], check=True)
subprocess.run([sys.executable, os.path.join('tools', 'create_embedded_python.py'), '@Pythia'], check=True)
print('Packing the resulting mod to a zip file')
shutil.make_archive('@Pythia', 'zip', root_dir='.', base_dir='@Pythia')
# TODO: Use an empty directory to build
# TODO: Add building of the dlls
# TODO: Fix https://github.com/overfl0/Pythia/issues/41 to build the dlls
|
<commit_before><commit_msg>Add a script to simplify building releases<commit_after>import os
import shutil
import subprocess
import sys
from contextlib import contextmanager
@contextmanager
def ignore_no_file():
try:
yield
except FileNotFoundError:
pass
os.chdir(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
with ignore_no_file():
print('Removing @Pythia.zip')
os.remove('@Pythia.zip')
with ignore_no_file():
print('Removing Pythia Python 64-bit installation')
shutil.rmtree(os.path.realpath(os.path.join('@Pythia', 'python-37-embed-amd64')))
with ignore_no_file():
print('Removing Pythia Python 32-bit installation')
shutil.rmtree(os.path.join('@Pythia', 'python-37-embed-win32'))
subprocess.run([sys.executable, os.path.join('tools', 'make_pbos.py')], check=True)
subprocess.run([sys.executable, os.path.join('tools', 'create_embedded_python.py'), '@Pythia'], check=True)
print('Packing the resulting mod to a zip file')
shutil.make_archive('@Pythia', 'zip', root_dir='.', base_dir='@Pythia')
# TODO: Use an empty directory to build
# TODO: Add building of the dlls
# TODO: Fix https://github.com/overfl0/Pythia/issues/41 to build the dlls
|
|
1621e19c5bf6deacc54a017115c47175a6e52765
|
tests/test_api.py
|
tests/test_api.py
|
"""Tests the isort API module"""
import pytest
from isort import api, exceptions
def test_sort_file_invalid_syntax(tmpdir) -> None:
"""Test to ensure file encoding is respected"""
tmp_file = tmpdir.join(f"test_bad_syntax.py")
tmp_file.write_text("""print('mismathing quotes")""", "utf8")
with pytest.warns(UserWarning):
api.sort_file(tmp_file, atomic=True)
|
Add initial testing module for api.py
|
Add initial testing module for api.py
|
Python
|
mit
|
PyCQA/isort,PyCQA/isort
|
Add initial testing module for api.py
|
"""Tests the isort API module"""
import pytest
from isort import api, exceptions
def test_sort_file_invalid_syntax(tmpdir) -> None:
"""Test to ensure file encoding is respected"""
tmp_file = tmpdir.join(f"test_bad_syntax.py")
tmp_file.write_text("""print('mismathing quotes")""", "utf8")
with pytest.warns(UserWarning):
api.sort_file(tmp_file, atomic=True)
|
<commit_before><commit_msg>Add initial testing module for api.py<commit_after>
|
"""Tests the isort API module"""
import pytest
from isort import api, exceptions
def test_sort_file_invalid_syntax(tmpdir) -> None:
"""Test to ensure file encoding is respected"""
tmp_file = tmpdir.join(f"test_bad_syntax.py")
tmp_file.write_text("""print('mismathing quotes")""", "utf8")
with pytest.warns(UserWarning):
api.sort_file(tmp_file, atomic=True)
|
Add initial testing module for api.py"""Tests the isort API module"""
import pytest
from isort import api, exceptions
def test_sort_file_invalid_syntax(tmpdir) -> None:
"""Test to ensure file encoding is respected"""
tmp_file = tmpdir.join(f"test_bad_syntax.py")
tmp_file.write_text("""print('mismathing quotes")""", "utf8")
with pytest.warns(UserWarning):
api.sort_file(tmp_file, atomic=True)
|
<commit_before><commit_msg>Add initial testing module for api.py<commit_after>"""Tests the isort API module"""
import pytest
from isort import api, exceptions
def test_sort_file_invalid_syntax(tmpdir) -> None:
"""Test to ensure file encoding is respected"""
tmp_file = tmpdir.join(f"test_bad_syntax.py")
tmp_file.write_text("""print('mismathing quotes")""", "utf8")
with pytest.warns(UserWarning):
api.sort_file(tmp_file, atomic=True)
|
|
51ab60836c58c7d0d10c637482919a9acbc31efe
|
scripts/create_database.py
|
scripts/create_database.py
|
#!/usr/bin/env python
"""Create the initial database structure.
Existing tables will be ignored, and those not existing will be created.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.database import db
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
@click.command()
def execute():
click.echo('Creating database ... ', nl=False)
db.create_all()
click.secho('done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
Add script to create initial database structure
|
Add script to create initial database structure
|
Python
|
bsd-3-clause
|
homeworkprod/byceps,m-ober/byceps,homeworkprod/byceps,homeworkprod/byceps,m-ober/byceps,m-ober/byceps
|
Add script to create initial database structure
|
#!/usr/bin/env python
"""Create the initial database structure.
Existing tables will be ignored, and those not existing will be created.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.database import db
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
@click.command()
def execute():
click.echo('Creating database ... ', nl=False)
db.create_all()
click.secho('done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
<commit_before><commit_msg>Add script to create initial database structure<commit_after>
|
#!/usr/bin/env python
"""Create the initial database structure.
Existing tables will be ignored, and those not existing will be created.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.database import db
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
@click.command()
def execute():
click.echo('Creating database ... ', nl=False)
db.create_all()
click.secho('done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
Add script to create initial database structure#!/usr/bin/env python
"""Create the initial database structure.
Existing tables will be ignored, and those not existing will be created.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.database import db
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
@click.command()
def execute():
click.echo('Creating database ... ', nl=False)
db.create_all()
click.secho('done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
<commit_before><commit_msg>Add script to create initial database structure<commit_after>#!/usr/bin/env python
"""Create the initial database structure.
Existing tables will be ignored, and those not existing will be created.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.database import db
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
@click.command()
def execute():
click.echo('Creating database ... ', nl=False)
db.create_all()
click.secho('done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
|
7855f7ca4f7ea96bf8bf85a5d3fb49a666ddc6cc
|
src/python/QueryParuetFile.py
|
src/python/QueryParuetFile.py
|
# Finds the names of people who like pandas from a parquet file
# consisting of name & favouriteAnimal.
# For input you can use the result of MakeParquetFile
from pyspark import SparkContext
from pyspark.sql import SQLContext
import json
import sys
if __name__ == "__main__":
if len(sys.argv) != 4:
print "Error usage: QueryParquetFile [sparkmaster] [parquetfile]"
sys.exit(-1)
master = sys.argv[1]
inputFile = sys.argv[2]
parquetFile = sys.argv[3]
sc = SparkContext(master, "MakeParquetFile")
sqlCtx = SQLContext(sc)
# Load some data into an RDD
rdd = sc.textFile(inputFile).map(lambda l: l.split(","))
namedRdd = rdd.map(lambda r: {"name": r[0], "favouriteAnimal": r[1]})
schemaNamedRdd = sqlCtx.inferSchema(namedRdd)
# Save it
schemaNamedRdd.saveAsParquetFile(parquetFile)
|
Add a python parquet file query example
|
Add a python parquet file query example
|
Python
|
mit
|
zaxliu/learning-spark,bhagatsingh/learning-spark,bhagatsingh/learning-spark,XiaoqingWang/learning-spark,kpraveen420/learning-spark,holdenk/learning-spark-examples,zaxliu/learning-spark,diogoaurelio/learning-spark,ellis429/learning-spark-examples,huixiang/learning-spark,concerned3rdparty/learning-spark,zaxliu/learning-spark,dsdinter/learning-spark-examples,tengteng/learning-spark,jaehyuk/learning-spark,qingkaikong/learning-spark-examples,ellis429/learning-spark,shimizust/learning-spark,diogoaurelio/learning-spark,zaxliu/learning-spark,concerned3rdparty/learning-spark,negokaz/learning-spark,NBSW/learning-spark,DINESHKUMARMURUGAN/learning-spark,DINESHKUMARMURUGAN/learning-spark,kpraveen420/learning-spark,jindalcastle/learning-spark,kod3r/learning-spark,huixiang/learning-spark,NBSW/learning-spark,kpraveen420/learning-spark,holdenk/learning-spark-examples,mmirolim/learning-spark,JerryTseng/learning-spark,ellis429/learning-spark-examples,junwucs/learning-spark,jaehyuk/learning-spark,mohitsh/learning-spark,feynman0825/learning-spark,ellis429/learning-spark-examples,negokaz/learning-spark,XiaoqingWang/learning-spark,NBSW/learning-spark,qingkaikong/learning-spark-examples,ellis429/learning-spark,ramyasrigangula/learning-spark,XiaoqingWang/learning-spark,obinsanni/learning-spark,JerryTseng/learning-spark,jaehyuk/learning-spark,junwucs/learning-spark,baokunguo/learning-spark-examples,coursera4ashok/learning-spark,anjuncc/learning-spark-examples,kod3r/learning-spark,SunGuo/learning-spark,kod3r/learning-spark,huydx/learning-spark,SunGuo/learning-spark,ramyasrigangula/learning-spark,tengteng/learning-spark,gaoxuesong/learning-spark,jaehyuk/learning-spark,coursera4ashok/learning-spark,negokaz/learning-spark,GatsbyNewton/learning-spark,concerned3rdparty/learning-spark,jaehyuk/learning-spark,SunGuo/learning-spark,shimizust/learning-spark,coursera4ashok/learning-spark,GatsbyNewton/learning-spark,databricks/learning-spark,kod3r/learning-spark,huydx/learning-spark,tengteng/learning-spark,JerryTseng/learning-spark,mmirolim/learning-spark,ellis429/learning-spark,junwucs/learning-spark,obinsanni/learning-spark,NBSW/learning-spark,holdenk/learning-spark-examples,asarraf/learning-spark,UsterNes/learning-spark,anjuncc/learning-spark-examples,kpraveen420/learning-spark,negokaz/learning-spark,diogoaurelio/learning-spark,mohitsh/learning-spark,jindalcastle/learning-spark,kpraveen420/learning-spark,GatsbyNewton/learning-spark,mohitsh/learning-spark,mmirolim/learning-spark,huydx/learning-spark,mmirolim/learning-spark,ellis429/learning-spark-examples,ramyasrigangula/learning-spark,junwucs/learning-spark,obinsanni/learning-spark,ellis429/learning-spark,anjuncc/learning-spark-examples,noprom/learning-spark,bhagatsingh/learning-spark,DINESHKUMARMURUGAN/learning-spark,mohitsh/learning-spark,asarraf/learning-spark,qingkaikong/learning-spark-examples,concerned3rdparty/learning-spark,gaoxuesong/learning-spark,jindalcastle/learning-spark,anjuncc/learning-spark-examples,noprom/learning-spark,coursera4ashok/learning-spark,DINESHKUMARMURUGAN/learning-spark,baokunguo/learning-spark-examples,feynman0825/learning-spark,DINESHKUMARMURUGAN/learning-spark,obinsanni/learning-spark,concerned3rdparty/learning-spark,UsterNes/learning-spark,qingkaikong/learning-spark-examples,asarraf/learning-spark,kod3r/learning-spark,bhagatsingh/learning-spark,holdenk/learning-spark-examples,coursera4ashok/learning-spark,ramyasrigangula/learning-spark,feynman0825/learning-spark,asarraf/learning-spark,dsdinter/learning-spark-examples,shimizust/learning-spark,JerryTseng/learning-spark,UsterNes/learning-spark,diogoaurelio/learning-spark,ellis429/learning-spark-examples,XiaoqingWang/learning-spark,noprom/learning-spark,databricks/learning-spark,huixiang/learning-spark,holdenk/learning-spark-examples,shimizust/learning-spark,jindalcastle/learning-spark,asarraf/learning-spark,baokunguo/learning-spark-examples,feynman0825/learning-spark,UsterNes/learning-spark,shimizust/learning-spark,zaxliu/learning-spark,huixiang/learning-spark,SunGuo/learning-spark,anjuncc/learning-spark-examples,rex1100/learning-spark,dsdinter/learning-spark-examples,databricks/learning-spark,databricks/learning-spark,negokaz/learning-spark,huixiang/learning-spark,NBSW/learning-spark,mmirolim/learning-spark,huydx/learning-spark,ramyasrigangula/learning-spark,dsdinter/learning-spark-examples,rex1100/learning-spark,junwucs/learning-spark,JerryTseng/learning-spark,tengteng/learning-spark,jindalcastle/learning-spark,SunGuo/learning-spark,gaoxuesong/learning-spark,bhagatsingh/learning-spark,diogoaurelio/learning-spark,baokunguo/learning-spark-examples,databricks/learning-spark,GatsbyNewton/learning-spark,noprom/learning-spark,XiaoqingWang/learning-spark,GatsbyNewton/learning-spark,noprom/learning-spark
|
Add a python parquet file query example
|
# Finds the names of people who like pandas from a parquet file
# consisting of name & favouriteAnimal.
# For input you can use the result of MakeParquetFile
from pyspark import SparkContext
from pyspark.sql import SQLContext
import json
import sys
if __name__ == "__main__":
if len(sys.argv) != 4:
print "Error usage: QueryParquetFile [sparkmaster] [parquetfile]"
sys.exit(-1)
master = sys.argv[1]
inputFile = sys.argv[2]
parquetFile = sys.argv[3]
sc = SparkContext(master, "MakeParquetFile")
sqlCtx = SQLContext(sc)
# Load some data into an RDD
rdd = sc.textFile(inputFile).map(lambda l: l.split(","))
namedRdd = rdd.map(lambda r: {"name": r[0], "favouriteAnimal": r[1]})
schemaNamedRdd = sqlCtx.inferSchema(namedRdd)
# Save it
schemaNamedRdd.saveAsParquetFile(parquetFile)
|
<commit_before><commit_msg>Add a python parquet file query example<commit_after>
|
# Finds the names of people who like pandas from a parquet file
# consisting of name & favouriteAnimal.
# For input you can use the result of MakeParquetFile
from pyspark import SparkContext
from pyspark.sql import SQLContext
import json
import sys
if __name__ == "__main__":
if len(sys.argv) != 4:
print "Error usage: QueryParquetFile [sparkmaster] [parquetfile]"
sys.exit(-1)
master = sys.argv[1]
inputFile = sys.argv[2]
parquetFile = sys.argv[3]
sc = SparkContext(master, "MakeParquetFile")
sqlCtx = SQLContext(sc)
# Load some data into an RDD
rdd = sc.textFile(inputFile).map(lambda l: l.split(","))
namedRdd = rdd.map(lambda r: {"name": r[0], "favouriteAnimal": r[1]})
schemaNamedRdd = sqlCtx.inferSchema(namedRdd)
# Save it
schemaNamedRdd.saveAsParquetFile(parquetFile)
|
Add a python parquet file query example# Finds the names of people who like pandas from a parquet file
# consisting of name & favouriteAnimal.
# For input you can use the result of MakeParquetFile
from pyspark import SparkContext
from pyspark.sql import SQLContext
import json
import sys
if __name__ == "__main__":
if len(sys.argv) != 4:
print "Error usage: QueryParquetFile [sparkmaster] [parquetfile]"
sys.exit(-1)
master = sys.argv[1]
inputFile = sys.argv[2]
parquetFile = sys.argv[3]
sc = SparkContext(master, "MakeParquetFile")
sqlCtx = SQLContext(sc)
# Load some data into an RDD
rdd = sc.textFile(inputFile).map(lambda l: l.split(","))
namedRdd = rdd.map(lambda r: {"name": r[0], "favouriteAnimal": r[1]})
schemaNamedRdd = sqlCtx.inferSchema(namedRdd)
# Save it
schemaNamedRdd.saveAsParquetFile(parquetFile)
|
<commit_before><commit_msg>Add a python parquet file query example<commit_after># Finds the names of people who like pandas from a parquet file
# consisting of name & favouriteAnimal.
# For input you can use the result of MakeParquetFile
from pyspark import SparkContext
from pyspark.sql import SQLContext
import json
import sys
if __name__ == "__main__":
if len(sys.argv) != 4:
print "Error usage: QueryParquetFile [sparkmaster] [parquetfile]"
sys.exit(-1)
master = sys.argv[1]
inputFile = sys.argv[2]
parquetFile = sys.argv[3]
sc = SparkContext(master, "MakeParquetFile")
sqlCtx = SQLContext(sc)
# Load some data into an RDD
rdd = sc.textFile(inputFile).map(lambda l: l.split(","))
namedRdd = rdd.map(lambda r: {"name": r[0], "favouriteAnimal": r[1]})
schemaNamedRdd = sqlCtx.inferSchema(namedRdd)
# Save it
schemaNamedRdd.saveAsParquetFile(parquetFile)
|
|
65eaffabe9927d17847a6a8ce9f39e92e130ab6b
|
Challenge2.py
|
Challenge2.py
|
#Program to find the n largest lines in a file
import operator
import sys
#Read the file from terminal
test_cases = open(sys_argv[1],'r')
#Remove white spaces and empty lines and make a list for all the test in test_cases
tests = [test_cases.strip() for test in test_cases if not test == '\n']
#The first element in the file is the number of lines to print
num = int(tests.pop(0))
#read every line and find the length, Create a dictionary with key as the line length
dict_for_lines = dict(zip([len(line) for line in tests], tests))
#Using the sorted function sort the lines
sorted_lines = sorted(dict_for_lines.iteritems(), key = operator.itemgetter(0), reverse = TRUE)
print '\n'.join([line for length, line in sorted_lines[:num]])
test_cases.close()
|
Read file and print n largest lines
|
Read file and print n largest lines
|
Python
|
mit
|
mailpraveens/Python-Experiments
|
Read file and print n largest lines
|
#Program to find the n largest lines in a file
import operator
import sys
#Read the file from terminal
test_cases = open(sys_argv[1],'r')
#Remove white spaces and empty lines and make a list for all the test in test_cases
tests = [test_cases.strip() for test in test_cases if not test == '\n']
#The first element in the file is the number of lines to print
num = int(tests.pop(0))
#read every line and find the length, Create a dictionary with key as the line length
dict_for_lines = dict(zip([len(line) for line in tests], tests))
#Using the sorted function sort the lines
sorted_lines = sorted(dict_for_lines.iteritems(), key = operator.itemgetter(0), reverse = TRUE)
print '\n'.join([line for length, line in sorted_lines[:num]])
test_cases.close()
|
<commit_before><commit_msg>Read file and print n largest lines<commit_after>
|
#Program to find the n largest lines in a file
import operator
import sys
#Read the file from terminal
test_cases = open(sys_argv[1],'r')
#Remove white spaces and empty lines and make a list for all the test in test_cases
tests = [test_cases.strip() for test in test_cases if not test == '\n']
#The first element in the file is the number of lines to print
num = int(tests.pop(0))
#read every line and find the length, Create a dictionary with key as the line length
dict_for_lines = dict(zip([len(line) for line in tests], tests))
#Using the sorted function sort the lines
sorted_lines = sorted(dict_for_lines.iteritems(), key = operator.itemgetter(0), reverse = TRUE)
print '\n'.join([line for length, line in sorted_lines[:num]])
test_cases.close()
|
Read file and print n largest lines#Program to find the n largest lines in a file
import operator
import sys
#Read the file from terminal
test_cases = open(sys_argv[1],'r')
#Remove white spaces and empty lines and make a list for all the test in test_cases
tests = [test_cases.strip() for test in test_cases if not test == '\n']
#The first element in the file is the number of lines to print
num = int(tests.pop(0))
#read every line and find the length, Create a dictionary with key as the line length
dict_for_lines = dict(zip([len(line) for line in tests], tests))
#Using the sorted function sort the lines
sorted_lines = sorted(dict_for_lines.iteritems(), key = operator.itemgetter(0), reverse = TRUE)
print '\n'.join([line for length, line in sorted_lines[:num]])
test_cases.close()
|
<commit_before><commit_msg>Read file and print n largest lines<commit_after>#Program to find the n largest lines in a file
import operator
import sys
#Read the file from terminal
test_cases = open(sys_argv[1],'r')
#Remove white spaces and empty lines and make a list for all the test in test_cases
tests = [test_cases.strip() for test in test_cases if not test == '\n']
#The first element in the file is the number of lines to print
num = int(tests.pop(0))
#read every line and find the length, Create a dictionary with key as the line length
dict_for_lines = dict(zip([len(line) for line in tests], tests))
#Using the sorted function sort the lines
sorted_lines = sorted(dict_for_lines.iteritems(), key = operator.itemgetter(0), reverse = TRUE)
print '\n'.join([line for length, line in sorted_lines[:num]])
test_cases.close()
|
|
09eea0de71ac2f2c4f9cee040a14874638dcb097
|
bin/neighbourhoods_categories.py
|
bin/neighbourhoods_categories.py
|
"""neighbourhoods_categories.py
Find the tracts where each category is over-represented or all cities in the
dataset.
"""
import csv
import marble as mb
#
# Import a list of MSA
#
msa = {}
with open('data/names/msa.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa[rows[0]] = rows[1]
#
# Extract neighbourhoods and save
#
for i, city in enumerate(msa):
print "Extract neighbourhoods for %s (%s/%s)"%(msa[city],
i+1,
len(msa))
## Import households data
households = {}
with open('data/income/msa/%s/income.csv'%city, 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
households[rows[0]] = {c:int(h) for c,h in enumerate(rows[1:])}
## Extract neighbourhoods
neigh = mb.overrepresented_units(households)
## Save the list of areal units per class
with open('extr/neighbourhoods/categories/msa/%s.csv'%city, 'w') as output:
for cat in sorted(neigh.iterkeys()):
for bkgp in neigh[cat]:
output.write("%s\t%s\n"%(cat, bkgp))
|
Add script to find areal units where each category is represented (and test marble)
|
Add script to find areal units where each category is represented (and test marble)
|
Python
|
bsd-3-clause
|
rlouf/patterns-of-segregation
|
Add script to find areal units where each category is represented (and test marble)
|
"""neighbourhoods_categories.py
Find the tracts where each category is over-represented or all cities in the
dataset.
"""
import csv
import marble as mb
#
# Import a list of MSA
#
msa = {}
with open('data/names/msa.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa[rows[0]] = rows[1]
#
# Extract neighbourhoods and save
#
for i, city in enumerate(msa):
print "Extract neighbourhoods for %s (%s/%s)"%(msa[city],
i+1,
len(msa))
## Import households data
households = {}
with open('data/income/msa/%s/income.csv'%city, 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
households[rows[0]] = {c:int(h) for c,h in enumerate(rows[1:])}
## Extract neighbourhoods
neigh = mb.overrepresented_units(households)
## Save the list of areal units per class
with open('extr/neighbourhoods/categories/msa/%s.csv'%city, 'w') as output:
for cat in sorted(neigh.iterkeys()):
for bkgp in neigh[cat]:
output.write("%s\t%s\n"%(cat, bkgp))
|
<commit_before><commit_msg>Add script to find areal units where each category is represented (and test marble)<commit_after>
|
"""neighbourhoods_categories.py
Find the tracts where each category is over-represented or all cities in the
dataset.
"""
import csv
import marble as mb
#
# Import a list of MSA
#
msa = {}
with open('data/names/msa.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa[rows[0]] = rows[1]
#
# Extract neighbourhoods and save
#
for i, city in enumerate(msa):
print "Extract neighbourhoods for %s (%s/%s)"%(msa[city],
i+1,
len(msa))
## Import households data
households = {}
with open('data/income/msa/%s/income.csv'%city, 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
households[rows[0]] = {c:int(h) for c,h in enumerate(rows[1:])}
## Extract neighbourhoods
neigh = mb.overrepresented_units(households)
## Save the list of areal units per class
with open('extr/neighbourhoods/categories/msa/%s.csv'%city, 'w') as output:
for cat in sorted(neigh.iterkeys()):
for bkgp in neigh[cat]:
output.write("%s\t%s\n"%(cat, bkgp))
|
Add script to find areal units where each category is represented (and test marble)"""neighbourhoods_categories.py
Find the tracts where each category is over-represented or all cities in the
dataset.
"""
import csv
import marble as mb
#
# Import a list of MSA
#
msa = {}
with open('data/names/msa.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa[rows[0]] = rows[1]
#
# Extract neighbourhoods and save
#
for i, city in enumerate(msa):
print "Extract neighbourhoods for %s (%s/%s)"%(msa[city],
i+1,
len(msa))
## Import households data
households = {}
with open('data/income/msa/%s/income.csv'%city, 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
households[rows[0]] = {c:int(h) for c,h in enumerate(rows[1:])}
## Extract neighbourhoods
neigh = mb.overrepresented_units(households)
## Save the list of areal units per class
with open('extr/neighbourhoods/categories/msa/%s.csv'%city, 'w') as output:
for cat in sorted(neigh.iterkeys()):
for bkgp in neigh[cat]:
output.write("%s\t%s\n"%(cat, bkgp))
|
<commit_before><commit_msg>Add script to find areal units where each category is represented (and test marble)<commit_after>"""neighbourhoods_categories.py
Find the tracts where each category is over-represented or all cities in the
dataset.
"""
import csv
import marble as mb
#
# Import a list of MSA
#
msa = {}
with open('data/names/msa.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa[rows[0]] = rows[1]
#
# Extract neighbourhoods and save
#
for i, city in enumerate(msa):
print "Extract neighbourhoods for %s (%s/%s)"%(msa[city],
i+1,
len(msa))
## Import households data
households = {}
with open('data/income/msa/%s/income.csv'%city, 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
households[rows[0]] = {c:int(h) for c,h in enumerate(rows[1:])}
## Extract neighbourhoods
neigh = mb.overrepresented_units(households)
## Save the list of areal units per class
with open('extr/neighbourhoods/categories/msa/%s.csv'%city, 'w') as output:
for cat in sorted(neigh.iterkeys()):
for bkgp in neigh[cat]:
output.write("%s\t%s\n"%(cat, bkgp))
|
|
3777358287dd4f2ec485d7d53c29d83bf6f56974
|
migrations/versions/0368_move_orgs_to_nhs_branding_.py
|
migrations/versions/0368_move_orgs_to_nhs_branding_.py
|
"""
Revision ID: 0368_move_orgs_to_nhs_branding
Revises: 0367_add_reach
Create Date: 2022-04-12 18:22:12.069016
"""
from alembic import op
revision = '0368_move_orgs_to_nhs_branding'
down_revision = '0367_add_reach'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("""
UPDATE
organisation
SET
email_branding_id = 'a7dc4e56-660b-4db7-8cff-12c37b12b5ea'
WHERE
organisation_type IN ('nhs_central', 'nhs_local', 'nhs_gp')
AND
email_branding_id IS NULL
""")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
Move existing nhs orgs without branding onto nhs branding
|
Move existing nhs orgs without branding onto nhs branding
This is done to make self-service branding easier to implement,
and also because NHS branding makes much more sense for services
in those orgs than GOV.UK branding.
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Move existing nhs orgs without branding onto nhs branding
This is done to make self-service branding easier to implement,
and also because NHS branding makes much more sense for services
in those orgs than GOV.UK branding.
|
"""
Revision ID: 0368_move_orgs_to_nhs_branding
Revises: 0367_add_reach
Create Date: 2022-04-12 18:22:12.069016
"""
from alembic import op
revision = '0368_move_orgs_to_nhs_branding'
down_revision = '0367_add_reach'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("""
UPDATE
organisation
SET
email_branding_id = 'a7dc4e56-660b-4db7-8cff-12c37b12b5ea'
WHERE
organisation_type IN ('nhs_central', 'nhs_local', 'nhs_gp')
AND
email_branding_id IS NULL
""")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
<commit_before><commit_msg>Move existing nhs orgs without branding onto nhs branding
This is done to make self-service branding easier to implement,
and also because NHS branding makes much more sense for services
in those orgs than GOV.UK branding.<commit_after>
|
"""
Revision ID: 0368_move_orgs_to_nhs_branding
Revises: 0367_add_reach
Create Date: 2022-04-12 18:22:12.069016
"""
from alembic import op
revision = '0368_move_orgs_to_nhs_branding'
down_revision = '0367_add_reach'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("""
UPDATE
organisation
SET
email_branding_id = 'a7dc4e56-660b-4db7-8cff-12c37b12b5ea'
WHERE
organisation_type IN ('nhs_central', 'nhs_local', 'nhs_gp')
AND
email_branding_id IS NULL
""")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
Move existing nhs orgs without branding onto nhs branding
This is done to make self-service branding easier to implement,
and also because NHS branding makes much more sense for services
in those orgs than GOV.UK branding."""
Revision ID: 0368_move_orgs_to_nhs_branding
Revises: 0367_add_reach
Create Date: 2022-04-12 18:22:12.069016
"""
from alembic import op
revision = '0368_move_orgs_to_nhs_branding'
down_revision = '0367_add_reach'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("""
UPDATE
organisation
SET
email_branding_id = 'a7dc4e56-660b-4db7-8cff-12c37b12b5ea'
WHERE
organisation_type IN ('nhs_central', 'nhs_local', 'nhs_gp')
AND
email_branding_id IS NULL
""")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
<commit_before><commit_msg>Move existing nhs orgs without branding onto nhs branding
This is done to make self-service branding easier to implement,
and also because NHS branding makes much more sense for services
in those orgs than GOV.UK branding.<commit_after>"""
Revision ID: 0368_move_orgs_to_nhs_branding
Revises: 0367_add_reach
Create Date: 2022-04-12 18:22:12.069016
"""
from alembic import op
revision = '0368_move_orgs_to_nhs_branding'
down_revision = '0367_add_reach'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("""
UPDATE
organisation
SET
email_branding_id = 'a7dc4e56-660b-4db7-8cff-12c37b12b5ea'
WHERE
organisation_type IN ('nhs_central', 'nhs_local', 'nhs_gp')
AND
email_branding_id IS NULL
""")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
|
87e3e6b4930b73563027cb0e125ddd9b9c787d6d
|
api_tests/requests/views/test_request_action_list.py
|
api_tests/requests/views/test_request_action_list.py
|
import pytest
from api.base.settings.defaults import API_BASE
from api_tests.requests.mixins import PreprintRequestTestMixin
@pytest.mark.django_db
class TestPreprintRequestActionList(PreprintRequestTestMixin):
def url(self, request):
return '/{}requests/{}/actions/'.format(API_BASE, request._id)
def test_nonmod_cannot_view(self, app, noncontrib, write_contrib, admin, pre_request, post_request, none_request):
for request in [pre_request, post_request, none_request]:
for user in [noncontrib, write_contrib, admin]:
res = app.get(self.url(request), auth=user.auth, expect_errors=True)
assert res.status_code == 403
def test_mod_can_view(self, app, moderator, pre_request, post_request, auto_approved_pre_request):
for request in [pre_request, post_request]:
res = app.get(self.url(request), auth=moderator.auth)
assert res.status_code == 200
assert len(res.json['data']) == 1
assert res.json['data'][0]['attributes']['auto'] is False
res = app.get(self.url(auto_approved_pre_request), auth=moderator.auth)
assert res.status_code == 200
assert len(res.json['data']) == 2
assert res.json['data'][0]['attributes']['auto'] is True
|
Add action list permissions tests
|
Add action list permissions tests
|
Python
|
apache-2.0
|
erinspace/osf.io,caseyrollins/osf.io,pattisdr/osf.io,erinspace/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,HalcyonChimera/osf.io,CenterForOpenScience/osf.io,baylee-d/osf.io,HalcyonChimera/osf.io,felliott/osf.io,Johnetordoff/osf.io,pattisdr/osf.io,brianjgeiger/osf.io,brianjgeiger/osf.io,baylee-d/osf.io,Johnetordoff/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,mattclark/osf.io,aaxelb/osf.io,felliott/osf.io,cslzchen/osf.io,mattclark/osf.io,baylee-d/osf.io,adlius/osf.io,cslzchen/osf.io,caseyrollins/osf.io,aaxelb/osf.io,saradbowman/osf.io,felliott/osf.io,mfraezz/osf.io,mfraezz/osf.io,saradbowman/osf.io,cslzchen/osf.io,adlius/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,mfraezz/osf.io,felliott/osf.io,aaxelb/osf.io,cslzchen/osf.io,mfraezz/osf.io,caseyrollins/osf.io,adlius/osf.io,pattisdr/osf.io,mattclark/osf.io,adlius/osf.io,brianjgeiger/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,erinspace/osf.io,HalcyonChimera/osf.io
|
Add action list permissions tests
|
import pytest
from api.base.settings.defaults import API_BASE
from api_tests.requests.mixins import PreprintRequestTestMixin
@pytest.mark.django_db
class TestPreprintRequestActionList(PreprintRequestTestMixin):
def url(self, request):
return '/{}requests/{}/actions/'.format(API_BASE, request._id)
def test_nonmod_cannot_view(self, app, noncontrib, write_contrib, admin, pre_request, post_request, none_request):
for request in [pre_request, post_request, none_request]:
for user in [noncontrib, write_contrib, admin]:
res = app.get(self.url(request), auth=user.auth, expect_errors=True)
assert res.status_code == 403
def test_mod_can_view(self, app, moderator, pre_request, post_request, auto_approved_pre_request):
for request in [pre_request, post_request]:
res = app.get(self.url(request), auth=moderator.auth)
assert res.status_code == 200
assert len(res.json['data']) == 1
assert res.json['data'][0]['attributes']['auto'] is False
res = app.get(self.url(auto_approved_pre_request), auth=moderator.auth)
assert res.status_code == 200
assert len(res.json['data']) == 2
assert res.json['data'][0]['attributes']['auto'] is True
|
<commit_before><commit_msg>Add action list permissions tests<commit_after>
|
import pytest
from api.base.settings.defaults import API_BASE
from api_tests.requests.mixins import PreprintRequestTestMixin
@pytest.mark.django_db
class TestPreprintRequestActionList(PreprintRequestTestMixin):
def url(self, request):
return '/{}requests/{}/actions/'.format(API_BASE, request._id)
def test_nonmod_cannot_view(self, app, noncontrib, write_contrib, admin, pre_request, post_request, none_request):
for request in [pre_request, post_request, none_request]:
for user in [noncontrib, write_contrib, admin]:
res = app.get(self.url(request), auth=user.auth, expect_errors=True)
assert res.status_code == 403
def test_mod_can_view(self, app, moderator, pre_request, post_request, auto_approved_pre_request):
for request in [pre_request, post_request]:
res = app.get(self.url(request), auth=moderator.auth)
assert res.status_code == 200
assert len(res.json['data']) == 1
assert res.json['data'][0]['attributes']['auto'] is False
res = app.get(self.url(auto_approved_pre_request), auth=moderator.auth)
assert res.status_code == 200
assert len(res.json['data']) == 2
assert res.json['data'][0]['attributes']['auto'] is True
|
Add action list permissions testsimport pytest
from api.base.settings.defaults import API_BASE
from api_tests.requests.mixins import PreprintRequestTestMixin
@pytest.mark.django_db
class TestPreprintRequestActionList(PreprintRequestTestMixin):
def url(self, request):
return '/{}requests/{}/actions/'.format(API_BASE, request._id)
def test_nonmod_cannot_view(self, app, noncontrib, write_contrib, admin, pre_request, post_request, none_request):
for request in [pre_request, post_request, none_request]:
for user in [noncontrib, write_contrib, admin]:
res = app.get(self.url(request), auth=user.auth, expect_errors=True)
assert res.status_code == 403
def test_mod_can_view(self, app, moderator, pre_request, post_request, auto_approved_pre_request):
for request in [pre_request, post_request]:
res = app.get(self.url(request), auth=moderator.auth)
assert res.status_code == 200
assert len(res.json['data']) == 1
assert res.json['data'][0]['attributes']['auto'] is False
res = app.get(self.url(auto_approved_pre_request), auth=moderator.auth)
assert res.status_code == 200
assert len(res.json['data']) == 2
assert res.json['data'][0]['attributes']['auto'] is True
|
<commit_before><commit_msg>Add action list permissions tests<commit_after>import pytest
from api.base.settings.defaults import API_BASE
from api_tests.requests.mixins import PreprintRequestTestMixin
@pytest.mark.django_db
class TestPreprintRequestActionList(PreprintRequestTestMixin):
def url(self, request):
return '/{}requests/{}/actions/'.format(API_BASE, request._id)
def test_nonmod_cannot_view(self, app, noncontrib, write_contrib, admin, pre_request, post_request, none_request):
for request in [pre_request, post_request, none_request]:
for user in [noncontrib, write_contrib, admin]:
res = app.get(self.url(request), auth=user.auth, expect_errors=True)
assert res.status_code == 403
def test_mod_can_view(self, app, moderator, pre_request, post_request, auto_approved_pre_request):
for request in [pre_request, post_request]:
res = app.get(self.url(request), auth=moderator.auth)
assert res.status_code == 200
assert len(res.json['data']) == 1
assert res.json['data'][0]['attributes']['auto'] is False
res = app.get(self.url(auto_approved_pre_request), auth=moderator.auth)
assert res.status_code == 200
assert len(res.json['data']) == 2
assert res.json['data'][0]['attributes']['auto'] is True
|
|
8aa9ea4d0c19a0ec5eb0312bc29a0fa00439c335
|
tests/formatter/test_xmler.py
|
tests/formatter/test_xmler.py
|
import unittest, argparse
from echolalia.formatter.xmler import Formatter
class XmlerTestCase(unittest.TestCase):
def setUp(self):
self.parser = argparse.ArgumentParser()
self.data = [{'char': chr(i), 'order': i - 96} for i in xrange(97, 100)]
self.formatter = Formatter()
def test_add_args(self):
new_parser = self.formatter.add_args(self.parser)
self.assertEqual(new_parser, self.parser)
args = new_parser.parse_args(['--root', 'channel'])
self.assertEqual(args.root, 'channel')
args = new_parser.parse_args([])
self.assertEqual(args.root, 'document')
def test_marshall_default_root(self):
new_parser = self.formatter.add_args(self.parser)
args = new_parser.parse_args([])
result = self.formatter.marshall(args, self.data)
expect = u'<?xml version="1.0" encoding="utf-8"?>\n<document>\n <char>a</char>\n <order>1</order>\n</document>\n<document>\n <char>b</char>\n <order>2</order>\n</document>\n<document>\n <char>c</char>\n <order>3</order>\n</document>'
self.assertEqual(result, expect)
def test_marshall_custom_root(self):
new_parser = self.formatter.add_args(self.parser)
args = new_parser.parse_args(['--root', 'channel'])
result = self.formatter.marshall(args, self.data)
expect = u'<?xml version="1.0" encoding="utf-8"?>\n<channel>\n <char>a</char>\n <order>1</order>\n</channel>\n<channel>\n <char>b</char>\n <order>2</order>\n</channel>\n<channel>\n <char>c</char>\n <order>3</order>\n</channel>'
self.assertEqual(result, expect)
|
Add tests for formatter xml
|
Add tests for formatter xml
|
Python
|
mit
|
eiri/echolalia-prototype
|
Add tests for formatter xml
|
import unittest, argparse
from echolalia.formatter.xmler import Formatter
class XmlerTestCase(unittest.TestCase):
def setUp(self):
self.parser = argparse.ArgumentParser()
self.data = [{'char': chr(i), 'order': i - 96} for i in xrange(97, 100)]
self.formatter = Formatter()
def test_add_args(self):
new_parser = self.formatter.add_args(self.parser)
self.assertEqual(new_parser, self.parser)
args = new_parser.parse_args(['--root', 'channel'])
self.assertEqual(args.root, 'channel')
args = new_parser.parse_args([])
self.assertEqual(args.root, 'document')
def test_marshall_default_root(self):
new_parser = self.formatter.add_args(self.parser)
args = new_parser.parse_args([])
result = self.formatter.marshall(args, self.data)
expect = u'<?xml version="1.0" encoding="utf-8"?>\n<document>\n <char>a</char>\n <order>1</order>\n</document>\n<document>\n <char>b</char>\n <order>2</order>\n</document>\n<document>\n <char>c</char>\n <order>3</order>\n</document>'
self.assertEqual(result, expect)
def test_marshall_custom_root(self):
new_parser = self.formatter.add_args(self.parser)
args = new_parser.parse_args(['--root', 'channel'])
result = self.formatter.marshall(args, self.data)
expect = u'<?xml version="1.0" encoding="utf-8"?>\n<channel>\n <char>a</char>\n <order>1</order>\n</channel>\n<channel>\n <char>b</char>\n <order>2</order>\n</channel>\n<channel>\n <char>c</char>\n <order>3</order>\n</channel>'
self.assertEqual(result, expect)
|
<commit_before><commit_msg>Add tests for formatter xml<commit_after>
|
import unittest, argparse
from echolalia.formatter.xmler import Formatter
class XmlerTestCase(unittest.TestCase):
def setUp(self):
self.parser = argparse.ArgumentParser()
self.data = [{'char': chr(i), 'order': i - 96} for i in xrange(97, 100)]
self.formatter = Formatter()
def test_add_args(self):
new_parser = self.formatter.add_args(self.parser)
self.assertEqual(new_parser, self.parser)
args = new_parser.parse_args(['--root', 'channel'])
self.assertEqual(args.root, 'channel')
args = new_parser.parse_args([])
self.assertEqual(args.root, 'document')
def test_marshall_default_root(self):
new_parser = self.formatter.add_args(self.parser)
args = new_parser.parse_args([])
result = self.formatter.marshall(args, self.data)
expect = u'<?xml version="1.0" encoding="utf-8"?>\n<document>\n <char>a</char>\n <order>1</order>\n</document>\n<document>\n <char>b</char>\n <order>2</order>\n</document>\n<document>\n <char>c</char>\n <order>3</order>\n</document>'
self.assertEqual(result, expect)
def test_marshall_custom_root(self):
new_parser = self.formatter.add_args(self.parser)
args = new_parser.parse_args(['--root', 'channel'])
result = self.formatter.marshall(args, self.data)
expect = u'<?xml version="1.0" encoding="utf-8"?>\n<channel>\n <char>a</char>\n <order>1</order>\n</channel>\n<channel>\n <char>b</char>\n <order>2</order>\n</channel>\n<channel>\n <char>c</char>\n <order>3</order>\n</channel>'
self.assertEqual(result, expect)
|
Add tests for formatter xmlimport unittest, argparse
from echolalia.formatter.xmler import Formatter
class XmlerTestCase(unittest.TestCase):
def setUp(self):
self.parser = argparse.ArgumentParser()
self.data = [{'char': chr(i), 'order': i - 96} for i in xrange(97, 100)]
self.formatter = Formatter()
def test_add_args(self):
new_parser = self.formatter.add_args(self.parser)
self.assertEqual(new_parser, self.parser)
args = new_parser.parse_args(['--root', 'channel'])
self.assertEqual(args.root, 'channel')
args = new_parser.parse_args([])
self.assertEqual(args.root, 'document')
def test_marshall_default_root(self):
new_parser = self.formatter.add_args(self.parser)
args = new_parser.parse_args([])
result = self.formatter.marshall(args, self.data)
expect = u'<?xml version="1.0" encoding="utf-8"?>\n<document>\n <char>a</char>\n <order>1</order>\n</document>\n<document>\n <char>b</char>\n <order>2</order>\n</document>\n<document>\n <char>c</char>\n <order>3</order>\n</document>'
self.assertEqual(result, expect)
def test_marshall_custom_root(self):
new_parser = self.formatter.add_args(self.parser)
args = new_parser.parse_args(['--root', 'channel'])
result = self.formatter.marshall(args, self.data)
expect = u'<?xml version="1.0" encoding="utf-8"?>\n<channel>\n <char>a</char>\n <order>1</order>\n</channel>\n<channel>\n <char>b</char>\n <order>2</order>\n</channel>\n<channel>\n <char>c</char>\n <order>3</order>\n</channel>'
self.assertEqual(result, expect)
|
<commit_before><commit_msg>Add tests for formatter xml<commit_after>import unittest, argparse
from echolalia.formatter.xmler import Formatter
class XmlerTestCase(unittest.TestCase):
def setUp(self):
self.parser = argparse.ArgumentParser()
self.data = [{'char': chr(i), 'order': i - 96} for i in xrange(97, 100)]
self.formatter = Formatter()
def test_add_args(self):
new_parser = self.formatter.add_args(self.parser)
self.assertEqual(new_parser, self.parser)
args = new_parser.parse_args(['--root', 'channel'])
self.assertEqual(args.root, 'channel')
args = new_parser.parse_args([])
self.assertEqual(args.root, 'document')
def test_marshall_default_root(self):
new_parser = self.formatter.add_args(self.parser)
args = new_parser.parse_args([])
result = self.formatter.marshall(args, self.data)
expect = u'<?xml version="1.0" encoding="utf-8"?>\n<document>\n <char>a</char>\n <order>1</order>\n</document>\n<document>\n <char>b</char>\n <order>2</order>\n</document>\n<document>\n <char>c</char>\n <order>3</order>\n</document>'
self.assertEqual(result, expect)
def test_marshall_custom_root(self):
new_parser = self.formatter.add_args(self.parser)
args = new_parser.parse_args(['--root', 'channel'])
result = self.formatter.marshall(args, self.data)
expect = u'<?xml version="1.0" encoding="utf-8"?>\n<channel>\n <char>a</char>\n <order>1</order>\n</channel>\n<channel>\n <char>b</char>\n <order>2</order>\n</channel>\n<channel>\n <char>c</char>\n <order>3</order>\n</channel>'
self.assertEqual(result, expect)
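For orientation, a minimal, hedged sketch of running the suite above on its own follows; it assumes the file is importable as tests.formatter.test_xmler (matching the path recorded for this commit) and is not part of the original change.

# Hypothetical standalone runner; `python -m unittest tests.formatter.test_xmler`
# from the project root would do the same thing.
import unittest
from tests.formatter.test_xmler import XmlerTestCase

suite = unittest.defaultTestLoader.loadTestsFromTestCase(XmlerTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)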
|
|
96d556edf9c736f6c7e9deb70753762230933bc0
|
kargtom/twodim/LongestChain/longestChain_001.py
|
kargtom/twodim/LongestChain/longestChain_001.py
|
def longestChain(words):
if words == []:
return 0
lendict = {}
lenlist = []
    # build the dictionary where
    # the key is the length of a word,
    # and the value maps each word of that length to its best chain length so far
for word in words:
l = len(word)
if len(word) in lendict:
lendict[l][word] = 1
else:
lendict[l] = {word : 1}
lenlist.append(l)
lenlist.sort()
maxsize = 1
for i in range(1, len(lenlist)):
l = lenlist[i]
if l - 1 in lendict:
group = lendict[l]
for word in group:
for j in range(len(word)):
nextword = word[:j] + word[j + 1:]
if nextword in lendict[l - 1]:
tmpsize = lendict[l - 1][nextword] + 1
if tmpsize > lendict[l][word]:
lendict[l][word] = tmpsize
if tmpsize > maxsize:
maxsize = tmpsize
return maxsize
words = ['a', 'abcd', 'bcd', 'abd', 'cd', 'c']
words = ['a', 'ab', 'abc', 'abcd', 'eeeeeeee', 'e']
print longestChain(words)
|
Create the bottom-up version of longestPath
|
Create the bottom-up version of longestPath
|
Python
|
mit
|
Chasego/codi,cc13ny/algo,cc13ny/Allin,Chasego/cod,Chasego/codirit,Chasego/cod,cc13ny/algo,Chasego/cod,Chasego/codi,Chasego/codirit,cc13ny/Allin,Chasego/cod,Chasego/cod,cc13ny/Allin,cc13ny/algo,Chasego/codi,Chasego/codi,cc13ny/Allin,cc13ny/algo,Chasego/codirit,Chasego/codirit,Chasego/codi,cc13ny/algo,cc13ny/Allin,Chasego/codirit
|
Create the bottom-up version of longestPath
|
def longestChain(words):
if words == []:
return 0
lendict = {}
lenlist = []
    # build the dictionary where
    # the key is the length of a word,
    # and the value maps each word of that length to its best chain length so far
for word in words:
l = len(word)
if len(word) in lendict:
lendict[l][word] = 1
else:
lendict[l] = {word : 1}
lenlist.append(l)
lenlist.sort()
maxsize = 1
for i in range(1, len(lenlist)):
l = lenlist[i]
if l - 1 in lendict:
group = lendict[l]
for word in group:
for j in range(len(word)):
nextword = word[:j] + word[j + 1:]
if nextword in lendict[l - 1]:
tmpsize = lendict[l - 1][nextword] + 1
if tmpsize > lendict[l][word]:
lendict[l][word] = tmpsize
if tmpsize > maxsize:
maxsize = tmpsize
return maxsize
words = ['a', 'abcd', 'bcd', 'abd', 'cd', 'c']
words = ['a', 'ab', 'abc', 'abcd', 'eeeeeeee', 'e']
print longestChain(words)
|
<commit_before><commit_msg>Create the bottom-up version of longestPath<commit_after>
|
def longestChain(words):
if words == []:
return 0
lendict = {}
lenlist = []
    # build the dictionary where
    # the key is the length of a word,
    # and the value maps each word of that length to its best chain length so far
for word in words:
l = len(word)
if len(word) in lendict:
lendict[l][word] = 1
else:
lendict[l] = {word : 1}
lenlist.append(l)
lenlist.sort()
maxsize = 1
for i in range(1, len(lenlist)):
l = lenlist[i]
if l - 1 in lendict:
group = lendict[l]
for word in group:
for j in range(len(word)):
nextword = word[:j] + word[j + 1:]
if nextword in lendict[l - 1]:
tmpsize = lendict[l - 1][nextword] + 1
if tmpsize > lendict[l][word]:
lendict[l][word] = tmpsize
if tmpsize > maxsize:
maxsize = tmpsize
return maxsize
words = ['a', 'abcd', 'bcd', 'abd', 'cd', 'c']
words = ['a', 'ab', 'abc', 'abcd', 'eeeeeeee', 'e']
print longestChain(words)
|
Create the bottom-up version of longestPathdef longestChain(words):
if words == []:
return 0
lendict = {}
lenlist = []
    # build the dictionary where
    # the key is the length of a word,
    # and the value maps each word of that length to its best chain length so far
for word in words:
l = len(word)
if len(word) in lendict:
lendict[l][word] = 1
else:
lendict[l] = {word : 1}
lenlist.append(l)
lenlist.sort()
maxsize = 1
for i in range(1, len(lenlist)):
l = lenlist[i]
if l - 1 in lendict:
group = lendict[l]
for word in group:
for j in range(len(word)):
nextword = word[:j] + word[j + 1:]
if nextword in lendict[l - 1]:
tmpsize = lendict[l - 1][nextword] + 1
if tmpsize > lendict[l][word]:
lendict[l][word] = tmpsize
if tmpsize > maxsize:
maxsize = tmpsize
return maxsize
words = ['a', 'abcd', 'bcd', 'abd', 'cd', 'c']
words = ['a', 'ab', 'abc', 'abcd', 'eeeeeeee', 'e']
print longestChain(words)
|
<commit_before><commit_msg>Create the bottom-up version of longestPath<commit_after>def longestChain(words):
if words == []:
return 0
lendict = {}
lenlist = []
    # build the dictionary where
    # the key is the length of a word,
    # and the value maps each word of that length to its best chain length so far
for word in words:
l = len(word)
if len(word) in lendict:
lendict[l][word] = 1
else:
lendict[l] = {word : 1}
lenlist.append(l)
lenlist.sort()
maxsize = 1
for i in range(1, len(lenlist)):
l = lenlist[i]
if l - 1 in lendict:
group = lendict[l]
for word in group:
for j in range(len(word)):
nextword = word[:j] + word[j + 1:]
if nextword in lendict[l - 1]:
tmpsize = lendict[l - 1][nextword] + 1
if tmpsize > lendict[l][word]:
lendict[l][word] = tmpsize
if tmpsize > maxsize:
maxsize = tmpsize
return maxsize
words = ['a', 'abcd', 'bcd', 'abd', 'cd', 'c']
words = ['a', 'ab', 'abc', 'abcd', 'eeeeeeee', 'e']
print longestChain(words)
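As an independent sanity check on the bottom-up table above, the exponential brute force below recomputes the answer for the same sample input; it only assumes the usual chain rule (delete one character at a time, every intermediate word must appear in the list).

# Brute force by recursion; prints 4 for ['a', 'ab', 'abc', 'abcd', 'eeeeeeee', 'e']
# via the chain a -> ab -> abc -> abcd.
def longest_chain_brute(words):
    wordset = set(words)
    def chain_from(word):
        best = 1
        for j in range(len(word)):
            shorter = word[:j] + word[j + 1:]
            if shorter in wordset:
                best = max(best, 1 + chain_from(shorter))
        return best
    return max(chain_from(w) for w in wordset) if wordset else 0

print(longest_chain_brute(['a', 'ab', 'abc', 'abcd', 'eeeeeeee', 'e']))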
|
|
f3c4cb70d1f9fab3b7cb497faa2cbe2350e2f116
|
axelrod/tests/unit/test_deterministic_cache.py
|
axelrod/tests/unit/test_deterministic_cache.py
|
import unittest
class TestDeterministicCache(unittest.TestCase):
def test_init(self):
pass
def test_setitem(self):
pass
def test_save(self):
pass
def test_load(self):
pass
|
Add stubs for deterministic cache tests
|
Add stubs for deterministic cache tests
|
Python
|
mit
|
marcharper/Axelrod,ranjinidas/Axelrod,marcharper/Axelrod,ranjinidas/Axelrod
|
Add stubs for deterministic cache tests
|
import unittest
class TestDeterministicCache(unittest.TestCase):
def test_init(self):
pass
def test_setitem(self):
pass
def test_save(self):
pass
def test_load(self):
pass
|
<commit_before><commit_msg>Add stubs for deterministic cache tests<commit_after>
|
import unittest
class TestDeterministicCache(unittest.TestCase):
def test_init(self):
pass
def test_setitem(self):
pass
def test_save(self):
pass
def test_load(self):
pass
|
Add stubs for deterministic cache testsimport unittest
class TestDeterministicCache(unittest.TestCase):
def test_init(self):
pass
def test_setitem(self):
pass
def test_save(self):
pass
def test_load(self):
pass
|
<commit_before><commit_msg>Add stubs for deterministic cache tests<commit_after>import unittest
class TestDeterministicCache(unittest.TestCase):
def test_init(self):
pass
def test_setitem(self):
pass
def test_save(self):
pass
def test_load(self):
pass
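These stubs only pin down the intended test surface (init, setitem, save, load). As a hedged illustration of the shape they typically grow into, here is a self-contained round-trip test against a stand-in cache class; it deliberately does not use the real axelrod.DeterministicCache API.

import pickle
import tempfile
import unittest

class FakeCache(dict):
    """Stand-in with the same save/load idea as a deterministic cache."""
    def save(self, path):
        with open(path, 'wb') as f:
            pickle.dump(dict(self), f)
    def load(self, path):
        with open(path, 'rb') as f:
            self.update(pickle.load(f))

class TestFakeCache(unittest.TestCase):
    def test_save_then_load(self):
        cache = FakeCache({('Cooperator', 'Defector'): [('C', 'D')]})
        with tempfile.NamedTemporaryFile(suffix='.cache', delete=False) as handle:
            path = handle.name
        cache.save(path)
        restored = FakeCache()
        restored.load(path)
        self.assertEqual(restored, cache)

if __name__ == '__main__':
    unittest.main()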
|
|
cb2c7126e1682c38a4efc92a1b89c04498d1926f
|
clsimplex.py
|
clsimplex.py
|
import pyopencl as cl
import numpy
import sys
import datetime
class NoiseGenerator(object):
def __init__(self, block_dim=None):
self.ctx = cl.create_some_context()
self.queue = cl.CommandQueue(self.ctx)
if block_dim: self.block_dim = block_dim
else: self.block_dim = 256, 256, 256
def load_program(self, filename):
with open(filename, 'r') as f:
fstr = "".join(f.readlines())
self.program = cl.Program(self.ctx, fstr).build()
def noise3d(self, xoff=0, yoff=0, zoff=0):
self.load_program('simplex.cl')
chunk_size = self.block_dim[0] * self.block_dim[1] * self.block_dim[2]
global_ws = (chunk_size,)
local_ws = None
mf = cl.mem_flags
res = numpy.empty(shape=global_ws, dtype=numpy.float32)
res_d = cl.Buffer(self.ctx, mf.WRITE_ONLY, numpy.float32(1).nbytes*chunk_size)
event = self.program.sdnoise3(
self.queue, global_ws, local_ws,
numpy.float32(xoff), numpy.float32(yoff), numpy.float32(zoff),
numpy.uint32(self.block_dim[0]), numpy.uint32(self.block_dim[1]), numpy.uint32(self.block_dim[2]),
res_d
)
cl.enqueue_read_buffer(self.queue, res_d, res).wait()
return res
def print_chunk(chunk, xdim, ydim, zdim):
for z in range(zdim):
sys.stdout.write('\n\n')
for x in range(xdim):
sys.stdout.write('\n')
for y in range(ydim):
val = chunk[x + y*ydim + z*(zdim**2)]
if val > 0: sys.stdout.write(' # ')
else: sys.stdout.write(' . ')
if __name__ == '__main__':
gen = NoiseGenerator()
noise = gen.noise3d()
print_chunk(noise, 32, 32, 32)
|
Add python host code using pyopencl
|
Add python host code using pyopencl
|
Python
|
mit
|
jakogut/clsimplex,jakogut/clsimplex
|
Add python host code using pyopencl
|
import pyopencl as cl
import numpy
import sys
import datetime
class NoiseGenerator(object):
def __init__(self, block_dim=None):
self.ctx = cl.create_some_context()
self.queue = cl.CommandQueue(self.ctx)
if block_dim: self.block_dim = block_dim
else: self.block_dim = 256, 256, 256
def load_program(self, filename):
with open(filename, 'r') as f:
fstr = "".join(f.readlines())
self.program = cl.Program(self.ctx, fstr).build()
def noise3d(self, xoff=0, yoff=0, zoff=0):
self.load_program('simplex.cl')
chunk_size = self.block_dim[0] * self.block_dim[1] * self.block_dim[2]
global_ws = (chunk_size,)
local_ws = None
mf = cl.mem_flags
res = numpy.empty(shape=global_ws, dtype=numpy.float32)
res_d = cl.Buffer(self.ctx, mf.WRITE_ONLY, numpy.float32(1).nbytes*chunk_size)
event = self.program.sdnoise3(
self.queue, global_ws, local_ws,
numpy.float32(xoff), numpy.float32(yoff), numpy.float32(zoff),
numpy.uint32(self.block_dim[0]), numpy.uint32(self.block_dim[1]), numpy.uint32(self.block_dim[2]),
res_d
)
cl.enqueue_read_buffer(self.queue, res_d, res).wait()
return res
def print_chunk(chunk, xdim, ydim, zdim):
for z in range(zdim):
sys.stdout.write('\n\n')
for x in range(xdim):
sys.stdout.write('\n')
for y in range(ydim):
val = chunk[x + y*ydim + z*(zdim**2)]
if val > 0: sys.stdout.write(' # ')
else: sys.stdout.write(' . ')
if __name__ == '__main__':
gen = NoiseGenerator()
noise = gen.noise3d()
print_chunk(noise, 32, 32, 32)
|
<commit_before><commit_msg>Add python host code using pyopencl<commit_after>
|
import pyopencl as cl
import numpy
import sys
import datetime
class NoiseGenerator(object):
def __init__(self, block_dim=None):
self.ctx = cl.create_some_context()
self.queue = cl.CommandQueue(self.ctx)
if block_dim: self.block_dim = block_dim
else: self.block_dim = 256, 256, 256
def load_program(self, filename):
with open(filename, 'r') as f:
fstr = "".join(f.readlines())
self.program = cl.Program(self.ctx, fstr).build()
def noise3d(self, xoff=0, yoff=0, zoff=0):
self.load_program('simplex.cl')
chunk_size = self.block_dim[0] * self.block_dim[1] * self.block_dim[2]
global_ws = (chunk_size,)
local_ws = None
mf = cl.mem_flags
res = numpy.empty(shape=global_ws, dtype=numpy.float32)
res_d = cl.Buffer(self.ctx, mf.WRITE_ONLY, numpy.float32(1).nbytes*chunk_size)
event = self.program.sdnoise3(
self.queue, global_ws, local_ws,
numpy.float32(xoff), numpy.float32(yoff), numpy.float32(zoff),
numpy.uint32(self.block_dim[0]), numpy.uint32(self.block_dim[1]), numpy.uint32(self.block_dim[2]),
res_d
)
cl.enqueue_read_buffer(self.queue, res_d, res).wait()
return res
def print_chunk(chunk, xdim, ydim, zdim):
for z in range(zdim):
sys.stdout.write('\n\n')
for x in range(xdim):
sys.stdout.write('\n')
for y in range(ydim):
val = chunk[x + y*ydim + z*(zdim**2)]
if val > 0: sys.stdout.write(' # ')
else: sys.stdout.write(' . ')
if __name__ == '__main__':
gen = NoiseGenerator()
noise = gen.noise3d()
print_chunk(noise, 32, 32, 32)
|
Add python host code using pyopenclimport pyopencl as cl
import numpy
import sys
import datetime
class NoiseGenerator(object):
def __init__(self, block_dim=None):
self.ctx = cl.create_some_context()
self.queue = cl.CommandQueue(self.ctx)
if block_dim: self.block_dim = block_dim
else: self.block_dim = 256, 256, 256
def load_program(self, filename):
with open(filename, 'r') as f:
fstr = "".join(f.readlines())
self.program = cl.Program(self.ctx, fstr).build()
def noise3d(self, xoff=0, yoff=0, zoff=0):
self.load_program('simplex.cl')
chunk_size = self.block_dim[0] * self.block_dim[1] * self.block_dim[2]
global_ws = (chunk_size,)
local_ws = None
mf = cl.mem_flags
res = numpy.empty(shape=global_ws, dtype=numpy.float32)
res_d = cl.Buffer(self.ctx, mf.WRITE_ONLY, numpy.float32(1).nbytes*chunk_size)
event = self.program.sdnoise3(
self.queue, global_ws, local_ws,
numpy.float32(xoff), numpy.float32(yoff), numpy.float32(zoff),
numpy.uint32(self.block_dim[0]), numpy.uint32(self.block_dim[1]), numpy.uint32(self.block_dim[2]),
res_d
)
cl.enqueue_read_buffer(self.queue, res_d, res).wait()
return res
def print_chunk(chunk, xdim, ydim, zdim):
for z in range(zdim):
sys.stdout.write('\n\n')
for x in range(xdim):
sys.stdout.write('\n')
for y in range(ydim):
val = chunk[x + y*ydim + z*(zdim**2)]
if val > 0: sys.stdout.write(' # ')
else: sys.stdout.write(' . ')
if __name__ == '__main__':
gen = NoiseGenerator()
noise = gen.noise3d()
print_chunk(noise, 32, 32, 32)
|
<commit_before><commit_msg>Add python host code using pyopencl<commit_after>import pyopencl as cl
import numpy
import sys
import datetime
class NoiseGenerator(object):
def __init__(self, block_dim=None):
self.ctx = cl.create_some_context()
self.queue = cl.CommandQueue(self.ctx)
if block_dim: self.block_dim = block_dim
else: self.block_dim = 256, 256, 256
def load_program(self, filename):
with open(filename, 'r') as f:
fstr = "".join(f.readlines())
self.program = cl.Program(self.ctx, fstr).build()
def noise3d(self, xoff=0, yoff=0, zoff=0):
self.load_program('simplex.cl')
chunk_size = self.block_dim[0] * self.block_dim[1] * self.block_dim[2]
global_ws = (chunk_size,)
local_ws = None
mf = cl.mem_flags
res = numpy.empty(shape=global_ws, dtype=numpy.float32)
res_d = cl.Buffer(self.ctx, mf.WRITE_ONLY, numpy.float32(1).nbytes*chunk_size)
event = self.program.sdnoise3(
self.queue, global_ws, local_ws,
numpy.float32(xoff), numpy.float32(yoff), numpy.float32(zoff),
numpy.uint32(self.block_dim[0]), numpy.uint32(self.block_dim[1]), numpy.uint32(self.block_dim[2]),
res_d
)
cl.enqueue_read_buffer(self.queue, res_d, res).wait()
return res
def print_chunk(chunk, xdim, ydim, zdim):
for z in range(zdim):
sys.stdout.write('\n\n')
for x in range(xdim):
sys.stdout.write('\n')
for y in range(ydim):
val = chunk[x + y*ydim + z*(zdim**2)]
if val > 0: sys.stdout.write(' # ')
else: sys.stdout.write(' . ')
if __name__ == '__main__':
gen = NoiseGenerator()
noise = gen.noise3d()
print_chunk(noise, 32, 32, 32)
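Usage note (hedged): the default block_dim of 256x256x256 launches roughly 16.7 million work items while print_chunk only inspects a 32x32x32 corner, so a smaller run is friendlier for experiments. The sketch below assumes the listing is saved as clsimplex.py, that simplex.cl sits next to it, and that an OpenCL platform is configured; the offsets are made up.

# Hypothetical smaller run of the generator above.
from clsimplex import NoiseGenerator, print_chunk

gen = NoiseGenerator(block_dim=(32, 32, 32))
chunk = gen.noise3d(xoff=1.5, yoff=0.0, zoff=2.5)
print_chunk(chunk, 32, 32, 32)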
|
|
16b6a285e933313625da0b3c71455a3506081d0b
|
qthread.py
|
qthread.py
|
import sys
from queue import Queue
from PyQt4.QtCore import *
from PyQt4.QtGui import *
# The new Stream Object which replaces the default stream associated with sys.stdout
# This object just puts data in a queue!
class WriteStream(object):
def __init__(self,queue):
self.queue = queue
def write(self, text):
self.queue.put(text)
# A QObject (to be run in a QThread) which sits waiting for data to come through a Queue.Queue().
# It blocks until data is available, and once it has got something from the queue, it sends
# it to the "MainThread" by emitting a Qt Signal
class MyReceiver(QObject):
mysignal = pyqtSignal(str)
def __init__(self,queue,*args,**kwargs):
QObject.__init__(self,*args,**kwargs)
self.queue = queue
@pyqtSlot()
def run(self):
while True:
text = self.queue.get()
self.mysignal.emit(text)
# An example QObject (to be run in a QThread) which outputs information with print
class LongRunningThing(QObject):
@pyqtSlot()
def run(self):
for i in range(1000):
print(i)
# An Example application QWidget containing the textedit to redirect stdout to
class MyApp(QWidget):
def __init__(self,*args,**kwargs):
QWidget.__init__(self,*args,**kwargs)
self.layout = QVBoxLayout(self)
self.textedit = QTextEdit()
self.button = QPushButton('start long running thread')
self.button.clicked.connect(self.start_thread)
self.layout.addWidget(self.textedit)
self.layout.addWidget(self.button)
@pyqtSlot(str)
def append_text(self,text):
self.textedit.moveCursor(QTextCursor.End)
self.textedit.insertPlainText( text )
@pyqtSlot()
def start_thread(self):
self.thread = QThread()
self.long_running_thing = LongRunningThing()
self.long_running_thing.moveToThread(self.thread)
self.thread.started.connect(self.long_running_thing.run)
self.thread.start()
# Create Queue and redirect sys.stdout to this queue
queue = Queue()
sys.stdout = WriteStream(queue)
# Create QApplication and QWidget
qapp = QApplication(sys.argv)
app = MyApp()
app.show()
# Create thread that will listen on the other end of the queue, and send the text to the textedit in our application
thread = QThread()
my_receiver = MyReceiver(queue)
my_receiver.mysignal.connect(app.append_text)
my_receiver.moveToThread(thread)
thread.started.connect(my_receiver.run)
thread.start()
qapp.exec_()
|
Copy QThread code from stackoverflow
|
Copy QThread code from stackoverflow
|
Python
|
bsd-3-clause
|
torebutlin/cued_datalogger
|
Copy QThread code from stackoverflow
|
import sys
from queue import Queue
from PyQt4.QtCore import *
from PyQt4.QtGui import *
# The new Stream Object which replaces the default stream associated with sys.stdout
# This object just puts data in a queue!
class WriteStream(object):
def __init__(self,queue):
self.queue = queue
def write(self, text):
self.queue.put(text)
# A QObject (to be run in a QThread) which sits waiting for data to come through a Queue.Queue().
# It blocks until data is available, and once it has got something from the queue, it sends
# it to the "MainThread" by emitting a Qt Signal
class MyReceiver(QObject):
mysignal = pyqtSignal(str)
def __init__(self,queue,*args,**kwargs):
QObject.__init__(self,*args,**kwargs)
self.queue = queue
@pyqtSlot()
def run(self):
while True:
text = self.queue.get()
self.mysignal.emit(text)
# An example QObject (to be run in a QThread) which outputs information with print
class LongRunningThing(QObject):
@pyqtSlot()
def run(self):
for i in range(1000):
print(i)
# An Example application QWidget containing the textedit to redirect stdout to
class MyApp(QWidget):
def __init__(self,*args,**kwargs):
QWidget.__init__(self,*args,**kwargs)
self.layout = QVBoxLayout(self)
self.textedit = QTextEdit()
self.button = QPushButton('start long running thread')
self.button.clicked.connect(self.start_thread)
self.layout.addWidget(self.textedit)
self.layout.addWidget(self.button)
@pyqtSlot(str)
def append_text(self,text):
self.textedit.moveCursor(QTextCursor.End)
self.textedit.insertPlainText( text )
@pyqtSlot()
def start_thread(self):
self.thread = QThread()
self.long_running_thing = LongRunningThing()
self.long_running_thing.moveToThread(self.thread)
self.thread.started.connect(self.long_running_thing.run)
self.thread.start()
# Create Queue and redirect sys.stdout to this queue
queue = Queue()
sys.stdout = WriteStream(queue)
# Create QApplication and QWidget
qapp = QApplication(sys.argv)
app = MyApp()
app.show()
# Create thread that will listen on the other end of the queue, and send the text to the textedit in our application
thread = QThread()
my_receiver = MyReceiver(queue)
my_receiver.mysignal.connect(app.append_text)
my_receiver.moveToThread(thread)
thread.started.connect(my_receiver.run)
thread.start()
qapp.exec_()
|
<commit_before><commit_msg>Copy QThread code from stackoverflow<commit_after>
|
import sys
from queue import Queue
from PyQt4.QtCore import *
from PyQt4.QtGui import *
# The new Stream Object which replaces the default stream associated with sys.stdout
# This object just puts data in a queue!
class WriteStream(object):
def __init__(self,queue):
self.queue = queue
def write(self, text):
self.queue.put(text)
# A QObject (to be run in a QThread) which sits waiting for data to come through a Queue.Queue().
# It blocks until data is available, and once it has got something from the queue, it sends
# it to the "MainThread" by emitting a Qt Signal
class MyReceiver(QObject):
mysignal = pyqtSignal(str)
def __init__(self,queue,*args,**kwargs):
QObject.__init__(self,*args,**kwargs)
self.queue = queue
@pyqtSlot()
def run(self):
while True:
text = self.queue.get()
self.mysignal.emit(text)
# An example QObject (to be run in a QThread) which outputs information with print
class LongRunningThing(QObject):
@pyqtSlot()
def run(self):
for i in range(1000):
print(i)
# An Example application QWidget containing the textedit to redirect stdout to
class MyApp(QWidget):
def __init__(self,*args,**kwargs):
QWidget.__init__(self,*args,**kwargs)
self.layout = QVBoxLayout(self)
self.textedit = QTextEdit()
self.button = QPushButton('start long running thread')
self.button.clicked.connect(self.start_thread)
self.layout.addWidget(self.textedit)
self.layout.addWidget(self.button)
@pyqtSlot(str)
def append_text(self,text):
self.textedit.moveCursor(QTextCursor.End)
self.textedit.insertPlainText( text )
@pyqtSlot()
def start_thread(self):
self.thread = QThread()
self.long_running_thing = LongRunningThing()
self.long_running_thing.moveToThread(self.thread)
self.thread.started.connect(self.long_running_thing.run)
self.thread.start()
# Create Queue and redirect sys.stdout to this queue
queue = Queue()
sys.stdout = WriteStream(queue)
# Create QApplication and QWidget
qapp = QApplication(sys.argv)
app = MyApp()
app.show()
# Create thread that will listen on the other end of the queue, and send the text to the textedit in our application
thread = QThread()
my_receiver = MyReceiver(queue)
my_receiver.mysignal.connect(app.append_text)
my_receiver.moveToThread(thread)
thread.started.connect(my_receiver.run)
thread.start()
qapp.exec_()
|
Copy QThread code from stackoverflowimport sys
from queue import Queue
from PyQt4.QtCore import *
from PyQt4.QtGui import *
# The new Stream Object which replaces the default stream associated with sys.stdout
# This object just puts data in a queue!
class WriteStream(object):
def __init__(self,queue):
self.queue = queue
def write(self, text):
self.queue.put(text)
# A QObject (to be run in a QThread) which sits waiting for data to come through a Queue.Queue().
# It blocks until data is available, and once it has got something from the queue, it sends
# it to the "MainThread" by emitting a Qt Signal
class MyReceiver(QObject):
mysignal = pyqtSignal(str)
def __init__(self,queue,*args,**kwargs):
QObject.__init__(self,*args,**kwargs)
self.queue = queue
@pyqtSlot()
def run(self):
while True:
text = self.queue.get()
self.mysignal.emit(text)
# An example QObject (to be run in a QThread) which outputs information with print
class LongRunningThing(QObject):
@pyqtSlot()
def run(self):
for i in range(1000):
print(i)
# An Example application QWidget containing the textedit to redirect stdout to
class MyApp(QWidget):
def __init__(self,*args,**kwargs):
QWidget.__init__(self,*args,**kwargs)
self.layout = QVBoxLayout(self)
self.textedit = QTextEdit()
self.button = QPushButton('start long running thread')
self.button.clicked.connect(self.start_thread)
self.layout.addWidget(self.textedit)
self.layout.addWidget(self.button)
@pyqtSlot(str)
def append_text(self,text):
self.textedit.moveCursor(QTextCursor.End)
self.textedit.insertPlainText( text )
@pyqtSlot()
def start_thread(self):
self.thread = QThread()
self.long_running_thing = LongRunningThing()
self.long_running_thing.moveToThread(self.thread)
self.thread.started.connect(self.long_running_thing.run)
self.thread.start()
# Create Queue and redirect sys.stdout to this queue
queue = Queue()
sys.stdout = WriteStream(queue)
# Create QApplication and QWidget
qapp = QApplication(sys.argv)
app = MyApp()
app.show()
# Create thread that will listen on the other end of the queue, and send the text to the textedit in our application
thread = QThread()
my_receiver = MyReceiver(queue)
my_receiver.mysignal.connect(app.append_text)
my_receiver.moveToThread(thread)
thread.started.connect(my_receiver.run)
thread.start()
qapp.exec_()
|
<commit_before><commit_msg>Copy QThread code from stackoverflow<commit_after>import sys
from queue import Queue
from PyQt4.QtCore import *
from PyQt4.QtGui import *
# The new Stream Object which replaces the default stream associated with sys.stdout
# This object just puts data in a queue!
class WriteStream(object):
def __init__(self,queue):
self.queue = queue
def write(self, text):
self.queue.put(text)
# A QObject (to be run in a QThread) which sits waiting for data to come through a Queue.Queue().
# It blocks until data is available, and once it has got something from the queue, it sends
# it to the "MainThread" by emitting a Qt Signal
class MyReceiver(QObject):
mysignal = pyqtSignal(str)
def __init__(self,queue,*args,**kwargs):
QObject.__init__(self,*args,**kwargs)
self.queue = queue
@pyqtSlot()
def run(self):
while True:
text = self.queue.get()
self.mysignal.emit(text)
# An example QObject (to be run in a QThread) which outputs information with print
class LongRunningThing(QObject):
@pyqtSlot()
def run(self):
for i in range(1000):
print(i)
# An Example application QWidget containing the textedit to redirect stdout to
class MyApp(QWidget):
def __init__(self,*args,**kwargs):
QWidget.__init__(self,*args,**kwargs)
self.layout = QVBoxLayout(self)
self.textedit = QTextEdit()
self.button = QPushButton('start long running thread')
self.button.clicked.connect(self.start_thread)
self.layout.addWidget(self.textedit)
self.layout.addWidget(self.button)
@pyqtSlot(str)
def append_text(self,text):
self.textedit.moveCursor(QTextCursor.End)
self.textedit.insertPlainText( text )
@pyqtSlot()
def start_thread(self):
self.thread = QThread()
self.long_running_thing = LongRunningThing()
self.long_running_thing.moveToThread(self.thread)
self.thread.started.connect(self.long_running_thing.run)
self.thread.start()
# Create Queue and redirect sys.stdout to this queue
queue = Queue()
sys.stdout = WriteStream(queue)
# Create QApplication and QWidget
qapp = QApplication(sys.argv)
app = MyApp()
app.show()
# Create thread that will listen on the other end of the queue, and send the text to the textedit in our application
thread = QThread()
my_receiver = MyReceiver(queue)
my_receiver.mysignal.connect(app.append_text)
my_receiver.moveToThread(thread)
thread.started.connect(my_receiver.run)
thread.start()
qapp.exec_()
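Stripped of Qt, the load-bearing trick above is simply that sys.stdout can be swapped for any object exposing write(); the self-contained, console-only sketch below shows that core idea without threads or signals.

# Minimal stdout-redirect demo: prints land in a queue instead of the console.
import sys
from queue import Queue

class QueueStream(object):
    def __init__(self, queue):
        self.queue = queue
    def write(self, text):
        self.queue.put(text)
    def flush(self):
        pass  # some libraries call flush(), so keep it harmless

q = Queue()
old_stdout, sys.stdout = sys.stdout, QueueStream(q)
print("hello from the redirected stream")
sys.stdout = old_stdout

captured = []
while not q.empty():
    captured.append(q.get())
print("captured:", "".join(captured))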
|
|
15c799f29652d85fd6957ac0166ffe8590680f33
|
froide/account/migrations/0027_auto_20210412_1518.py
|
froide/account/migrations/0027_auto_20210412_1518.py
|
# Generated by Django 3.1.8 on 2021-04-12 13:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0026_auto_20201221_1953'),
]
operations = [
migrations.AddField(
model_name='application',
name='algorithm',
field=models.CharField(blank=True, choices=[('', 'No OIDC support'), ('RS256', 'RSA with SHA-2 256'), ('HS256', 'HMAC with SHA-2 256')], default='', max_length=5),
),
migrations.AlterField(
model_name='application',
name='authorization_grant_type',
field=models.CharField(choices=[('authorization-code', 'Authorization code'), ('implicit', 'Implicit'), ('password', 'Resource owner password-based'), ('client-credentials', 'Client credentials'), ('openid-hybrid', 'OpenID connect hybrid')], max_length=32),
),
]
|
Add migration for oauth application
|
Add migration for oauth application
|
Python
|
mit
|
fin/froide,fin/froide,fin/froide,fin/froide
|
Add migration for oauth application
|
# Generated by Django 3.1.8 on 2021-04-12 13:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0026_auto_20201221_1953'),
]
operations = [
migrations.AddField(
model_name='application',
name='algorithm',
field=models.CharField(blank=True, choices=[('', 'No OIDC support'), ('RS256', 'RSA with SHA-2 256'), ('HS256', 'HMAC with SHA-2 256')], default='', max_length=5),
),
migrations.AlterField(
model_name='application',
name='authorization_grant_type',
field=models.CharField(choices=[('authorization-code', 'Authorization code'), ('implicit', 'Implicit'), ('password', 'Resource owner password-based'), ('client-credentials', 'Client credentials'), ('openid-hybrid', 'OpenID connect hybrid')], max_length=32),
),
]
|
<commit_before><commit_msg>Add migration for oauth application<commit_after>
|
# Generated by Django 3.1.8 on 2021-04-12 13:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0026_auto_20201221_1953'),
]
operations = [
migrations.AddField(
model_name='application',
name='algorithm',
field=models.CharField(blank=True, choices=[('', 'No OIDC support'), ('RS256', 'RSA with SHA-2 256'), ('HS256', 'HMAC with SHA-2 256')], default='', max_length=5),
),
migrations.AlterField(
model_name='application',
name='authorization_grant_type',
field=models.CharField(choices=[('authorization-code', 'Authorization code'), ('implicit', 'Implicit'), ('password', 'Resource owner password-based'), ('client-credentials', 'Client credentials'), ('openid-hybrid', 'OpenID connect hybrid')], max_length=32),
),
]
|
Add migration for oauth application# Generated by Django 3.1.8 on 2021-04-12 13:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0026_auto_20201221_1953'),
]
operations = [
migrations.AddField(
model_name='application',
name='algorithm',
field=models.CharField(blank=True, choices=[('', 'No OIDC support'), ('RS256', 'RSA with SHA-2 256'), ('HS256', 'HMAC with SHA-2 256')], default='', max_length=5),
),
migrations.AlterField(
model_name='application',
name='authorization_grant_type',
field=models.CharField(choices=[('authorization-code', 'Authorization code'), ('implicit', 'Implicit'), ('password', 'Resource owner password-based'), ('client-credentials', 'Client credentials'), ('openid-hybrid', 'OpenID connect hybrid')], max_length=32),
),
]
|
<commit_before><commit_msg>Add migration for oauth application<commit_after># Generated by Django 3.1.8 on 2021-04-12 13:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0026_auto_20201221_1953'),
]
operations = [
migrations.AddField(
model_name='application',
name='algorithm',
field=models.CharField(blank=True, choices=[('', 'No OIDC support'), ('RS256', 'RSA with SHA-2 256'), ('HS256', 'HMAC with SHA-2 256')], default='', max_length=5),
),
migrations.AlterField(
model_name='application',
name='authorization_grant_type',
field=models.CharField(choices=[('authorization-code', 'Authorization code'), ('implicit', 'Implicit'), ('password', 'Resource owner password-based'), ('client-credentials', 'Client credentials'), ('openid-hybrid', 'OpenID connect hybrid')], max_length=32),
),
]
|
|
173aba72cad0c6c3602b2ae4e1b8bd4e5773bd3b
|
pyservice/context.py
|
pyservice/context.py
|
"""
RequestContext stores state relevant to the current request, as well as
keeping track of the plugin execution order and providing a simple method
`advance` for calling the next plugin in the chain.
"""
import collections
class Container(collections.defaultdict):
DEFAULT_FACTORY = lambda: None
def __init__(self):
super().__init__(self, Container.DEFAULT_FACTORY)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self[name] = value
class RequestContext(object):
def __init__(self, service, operation):
self.service = service
self.operation = operation
def execute(self):
self.service.continue_execution(self)
|
Add building blocks Container, Context
|
Add building blocks Container, Context
These will be used for request/response dicts, as well as
containers for plugins to pass functions and values to the
operation function or other plugins.
|
Python
|
mit
|
numberoverzero/pyservice
|
Add building blocks Container, Context
These will be used for request/response dicts, as well as
containers for plugins to pass functions and values to the
operation function or other plugins.
|
"""
RequestContext stores state relevant to the current request, as well as
keeping track of the plugin execution order and providing a simple method
`advance` for calling the next plugin in the chain.
"""
import collections
class Container(collections.defaultdict):
DEFAULT_FACTORY = lambda: None
def __init__(self):
super().__init__(self, Container.DEFAULT_FACTORY)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self[name] = value
class RequestContext(object):
def __init__(self, service, operation):
self.service = service
self.operation = operation
def execute(self):
self.service.continue_execution(self)
|
<commit_before><commit_msg>Add building blocks Container, Context
These will be used for request/response dicts, as well as
containers for plugins to pass functions and values to the
operation function or other plugins.<commit_after>
|
"""
RequestContext stores state relevant to the current request, as well as
keeping track of the plugin execution order and providing a simple method
`advance` for calling the next plugin in the chain.
"""
import collections
class Container(collections.defaultdict):
DEFAULT_FACTORY = lambda: None
def __init__(self):
super().__init__(self, Container.DEFAULT_FACTORY)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self[name] = value
class RequestContext(object):
def __init__(self, service, operation):
self.service = service
self.operation = operation
def execute(self):
self.service.continue_execution(self)
|
Add building blocks Container, Context
These will be used for request/response dicts, as well as
containers for plugins to pass functions and values to the
operation function or other plugins."""
RequestContext stores state relevant to the current request, as well as
keeping track of the plugin execution order and providing a simple method
`advance` for calling the next plugin in the chain.
"""
import collections
class Container(collections.defaultdict):
DEFAULT_FACTORY = lambda: None
def __init__(self):
super().__init__(self, Container.DEFAULT_FACTORY)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self[name] = value
class RequestContext(object):
def __init__(self, service, operation):
self.service = service
self.operation = operation
def execute(self):
self.service.continue_execution(self)
|
<commit_before><commit_msg>Add building blocks Container, Context
These will be used for request/response dicts, as well as
containers for plugins to pass functions and values to the
operation function or other plugins.<commit_after>"""
RequestContext stores state relevant to the current request, as well as
keeping track of the plugin execution order and providing a simple method
`advance` for calling the next plugin in the chain.
"""
import collections
class Container(collections.defaultdict):
DEFAULT_FACTORY = lambda: None
def __init__(self):
super().__init__(self, Container.DEFAULT_FACTORY)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self[name] = value
class RequestContext(object):
def __init__(self, service, operation):
self.service = service
self.operation = operation
def execute(self):
self.service.continue_execution(self)
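A quick, self-contained check of the attribute-style access Container is meant to provide; note the sketch passes only the factory to defaultdict.__init__ (without the extra self seen above) so that it runs as-is.

# Stand-in mirroring the Container idea: missing keys default to None and
# attribute access is forwarded to item access.
import collections

class Container(collections.defaultdict):
    def __init__(self):
        super().__init__(lambda: None)
    def __getattr__(self, name):
        return self[name]
    def __setattr__(self, name, value):
        self[name] = value

ctx = Container()
ctx.user = "alice"            # equivalent to ctx["user"] = "alice"
print(ctx.user, ctx.missing)  # -> alice None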
|
|
b98fe743844973b2b03dd128fc906cfd4cfa9728
|
solutions/uri/1036/1036.py
|
solutions/uri/1036/1036.py
|
import math
a, b, c = map(float, input().split())
delta = math.pow(b, 2) - 4 * a * c
if delta >= 0 and a != 0:
print(f'R1 = {((b * -1) + math.sqrt(delta)) / (2 * a):.5f}')
print(f'R2 = {((b * -1) - math.sqrt(delta)) / (2 * a):.5f}')
else:
print('Impossivel calcular')
|
Solve Bhaskara's Formula in python
|
Solve Bhaskara's Formula in python
|
Python
|
mit
|
deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground
|
Solve Bhaskara's Formula in python
|
import math
a, b, c = map(float, input().split())
delta = math.pow(b, 2) - 4 * a * c
if delta >= 0 and a != 0:
print(f'R1 = {((b * -1) + math.sqrt(delta)) / (2 * a):.5f}')
print(f'R2 = {((b * -1) - math.sqrt(delta)) / (2 * a):.5f}')
else:
print('Impossivel calcular')
|
<commit_before><commit_msg>Solve Bhaskara's Formula in python<commit_after>
|
import math
a, b, c = map(float, input().split())
delta = math.pow(b, 2) - 4 * a * c
if delta >= 0 and a != 0:
print(f'R1 = {((b * -1) + math.sqrt(delta)) / (2 * a):.5f}')
print(f'R2 = {((b * -1) - math.sqrt(delta)) / (2 * a):.5f}')
else:
print('Impossivel calcular')
|
Solve Bhaskara's Formula in pythonimport math
a, b, c = map(float, input().split())
delta = math.pow(b, 2) - 4 * a * c
if delta >= 0 and a != 0:
print(f'R1 = {((b * -1) + math.sqrt(delta)) / (2 * a):.5f}')
print(f'R2 = {((b * -1) - math.sqrt(delta)) / (2 * a):.5f}')
else:
print('Impossivel calcular')
|
<commit_before><commit_msg>Solve Bhaskara's Formula in python<commit_after>import math
a, b, c = map(float, input().split())
delta = math.pow(b, 2) - 4 * a * c
if delta >= 0 and a != 0:
print(f'R1 = {((b * -1) + math.sqrt(delta)) / (2 * a):.5f}')
print(f'R2 = {((b * -1) - math.sqrt(delta)) / (2 * a):.5f}')
else:
print('Impossivel calcular')
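A quick spot check of the formula as coded above, independent of the submission: for x^2 - 5x + 6 = 0 the roots are 3 and 2.

# delta = b^2 - 4ac = 25 - 24 = 1, so R1 = (5 + 1) / 2 = 3 and R2 = (5 - 1) / 2 = 2.
import math
a, b, c = 1.0, -5.0, 6.0
delta = math.pow(b, 2) - 4 * a * c
print(f'R1 = {(-b + math.sqrt(delta)) / (2 * a):.5f}')  # R1 = 3.00000
print(f'R2 = {(-b - math.sqrt(delta)) / (2 * a):.5f}')  # R2 = 2.00000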
|
|
dba4c700d0fbc68e853718d27471b4dd7ed21253
|
scripts/set_product_info.py
|
scripts/set_product_info.py
|
#!/usr/bin/python
#
# Copyright (c) SAS Institute, Inc.
#
"""
Script to set product info on a group.
"""
import sys
import epdb
sys.excepthook = epdb.excepthook()
import json
from conary import trove
from conary import conarycfg
from conary import conaryclient
from conary.conaryclient import cmdline
def setProductInfo(trvSpec, info):
trvSpec = cmdline.parseTroveSpec(trvSpec)
cfg = conarycfg.ConaryConfiguration(True)
client = conaryclient.ConaryClient(cfg)
repos = client.getRepos()
nvfs = repos.findTrove(None, trvSpec)
if not len(nvfs):
print >>sys.stderr, 'did not find any troves matching %s' % trvSpec
return 1
nvf = nvfs[0]
trv = repos.getTrove(*nvf)
md = trv.troveInfo.metadata
keyValue = md.get(1).get('keyValue')
if not keyValue:
mi = trove.MetadataItem()
md.addItem(mi)
keyValue = mi.keyValue
keyValue['product_info'] = json.dumps(info)
repos.setTroveInfo([(nvf, trv.troveInfo), ])
def usage(args):
print >>sys.stderr, 'usage: %s <trove spec> <display name>' % args[0]
return 1
def main(args):
if len(args) != 3:
return usage(args)
troveSpec = args[1]
displayName = args[2]
setProductInfo(troveSpec, {
'displayName': displayName,
})
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Add metadata example code APPENG-3048
|
Add metadata example code APPENG-3048
|
Python
|
apache-2.0
|
sassoftware/mirrorball,sassoftware/mirrorball
|
Add metadata example code APPENG-3048
|
#!/usr/bin/python
#
# Copyright (c) SAS Institute, Inc.
#
"""
Script to set product info on a group.
"""
import sys
import epdb
sys.excepthook = epdb.excepthook()
import json
from conary import trove
from conary import conarycfg
from conary import conaryclient
from conary.conaryclient import cmdline
def setProductInfo(trvSpec, info):
trvSpec = cmdline.parseTroveSpec(trvSpec)
cfg = conarycfg.ConaryConfiguration(True)
client = conaryclient.ConaryClient(cfg)
repos = client.getRepos()
nvfs = repos.findTrove(None, trvSpec)
if not len(nvfs):
print >>sys.stderr, 'did not find any troves matching %s' % trvSpec
return 1
nvf = nvfs[0]
trv = repos.getTrove(*nvf)
md = trv.troveInfo.metadata
keyValue = md.get(1).get('keyValue')
if not keyValue:
mi = trove.MetadataItem()
md.addItem(mi)
keyValue = mi.keyValue
keyValue['product_info'] = json.dumps(info)
repos.setTroveInfo([(nvf, trv.troveInfo), ])
def usage(args):
print >>sys.stderr, 'usage: %s <trove spec> <display name>' % args[0]
return 1
def main(args):
if len(args) != 3:
return usage(args)
troveSpec = args[1]
displayName = args[2]
setProductInfo(troveSpec, {
'displayName': displayName,
})
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add metadata example code APPENG-3048<commit_after>
|
#!/usr/bin/python
#
# Copyright (c) SAS Institute, Inc.
#
"""
Script to set product info on a group.
"""
import sys
import epdb
sys.excepthook = epdb.excepthook()
import json
from conary import trove
from conary import conarycfg
from conary import conaryclient
from conary.conaryclient import cmdline
def setProductInfo(trvSpec, info):
trvSpec = cmdline.parseTroveSpec(trvSpec)
cfg = conarycfg.ConaryConfiguration(True)
client = conaryclient.ConaryClient(cfg)
repos = client.getRepos()
nvfs = repos.findTrove(None, trvSpec)
if not len(nvfs):
print >>sys.stderr, 'did not find any troves matching %s' % trvSpec
return 1
nvf = nvfs[0]
trv = repos.getTrove(*nvf)
md = trv.troveInfo.metadata
keyValue = md.get(1).get('keyValue')
if not keyValue:
mi = trove.MetadataItem()
md.addItem(mi)
keyValue = mi.keyValue
keyValue['product_info'] = json.dumps(info)
repos.setTroveInfo([(nvf, trv.troveInfo), ])
def usage(args):
print >>sys.stderr, 'usage: %s <trove spec> <display name>' % args[0]
return 1
def main(args):
if len(args) != 3:
return usage(args)
troveSpec = args[1]
displayName = args[2]
setProductInfo(troveSpec, {
'displayName': displayName,
})
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Add metadata example code APPENG-3048#!/usr/bin/python
#
# Copyright (c) SAS Institute, Inc.
#
"""
Script to set product info on a group.
"""
import sys
import epdb
sys.excepthook = epdb.excepthook()
import json
from conary import trove
from conary import conarycfg
from conary import conaryclient
from conary.conaryclient import cmdline
def setProductInfo(trvSpec, info):
trvSpec = cmdline.parseTroveSpec(trvSpec)
cfg = conarycfg.ConaryConfiguration(True)
client = conaryclient.ConaryClient(cfg)
repos = client.getRepos()
nvfs = repos.findTrove(None, trvSpec)
if not len(nvfs):
print >>sys.stderr, 'did not find any troves matching %s' % trvSpec
return 1
nvf = nvfs[0]
trv = repos.getTrove(*nvf)
md = trv.troveInfo.metadata
keyValue = md.get(1).get('keyValue')
if not keyValue:
mi = trove.MetadataItem()
md.addItem(mi)
keyValue = mi.keyValue
keyValue['product_info'] = json.dumps(info)
repos.setTroveInfo([(nvf, trv.troveInfo), ])
def usage(args):
print >>sys.stderr, 'usage: %s <trove spec> <display name>' % args[0]
return 1
def main(args):
if len(args) != 3:
return usage(args)
troveSpec = args[1]
displayName = args[2]
setProductInfo(troveSpec, {
'displayName': displayName,
})
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add metadata example code APPENG-3048<commit_after>#!/usr/bin/python
#
# Copyright (c) SAS Institute, Inc.
#
"""
Script to set product info on a group.
"""
import sys
import epdb
sys.excepthook = epdb.excepthook()
import json
from conary import trove
from conary import conarycfg
from conary import conaryclient
from conary.conaryclient import cmdline
def setProductInfo(trvSpec, info):
trvSpec = cmdline.parseTroveSpec(trvSpec)
cfg = conarycfg.ConaryConfiguration(True)
client = conaryclient.ConaryClient(cfg)
repos = client.getRepos()
nvfs = repos.findTrove(None, trvSpec)
if not len(nvfs):
print >>sys.stderr, 'did not find any troves matching %s' % trvSpec
return 1
nvf = nvfs[0]
trv = repos.getTrove(*nvf)
md = trv.troveInfo.metadata
keyValue = md.get(1).get('keyValue')
if not keyValue:
mi = trove.MetadataItem()
md.addItem(mi)
keyValue = mi.keyValue
keyValue['product_info'] = json.dumps(info)
repos.setTroveInfo([(nvf, trv.troveInfo), ])
def usage(args):
print >>sys.stderr, 'usage: %s <trove spec> <display name>' % args[0]
return 1
def main(args):
if len(args) != 3:
return usage(args)
troveSpec = args[1]
displayName = args[2]
setProductInfo(troveSpec, {
'displayName': displayName,
})
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
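A hedged example of driving the helper above from another script; the trove spec and display name are invented, and a reachable Conary repository plus a valid Conary configuration are assumed.

# Hypothetical call; equivalent to
#   ./set_product_info.py 'group-foo-appliance=foo.example.com@corp:devel' 'Foo Appliance'
from set_product_info import setProductInfo

setProductInfo('group-foo-appliance=foo.example.com@corp:devel',
               {'displayName': 'Foo Appliance'})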
|
|
1c856409149ce1523dbe080717465e1985f778f7
|
scripts/find-skeletons-with-no-treenodes.py
|
scripts/find-skeletons-with-no-treenodes.py
|
#!/usr/bin/python
# This script checks your database for some common
# inconsistencies or errors that may have arisen from
# past bugs.
# You may need to install psycopg2, e.g. with:
# sudo apt-get install python-psycopg2
# Requires the file .catmaid-db to be present in your
# home directory, with the following format:
#
# host: localhost
# database: catmaid
# username: catmaid_user
# password: password_of_your_catmaid_user
import sys, os
from common import db_connection
from subprocess import check_call
if len(sys.argv) != 2:
print >> sys.stderr, "Usage: %s <PROJECT-ID>" % (sys.argv[0],)
sys.exit(1)
project_id = sys.argv[1]
c = db_connection.cursor()
# This could be done with a single LEFT OUTER JOIN, but I'm too tired
# to figure that out at the moment...
# Find all the skeletons in the project:
c.execute("""
SELECT ci.id
FROM (class_instance ci INNER JOIN class c ON c.id = ci.class_id AND c.class_name = 'skeleton')
WHERE c.class_name = 'skeleton' AND ci.project_id = %s""",
project_id)
all_skeletons = set(x[0] for x in c.fetchall())
# Now find all skeletons that have at least one treenode:
c.execute("""
SELECT DISTINCT tci.class_instance_id
FROM (class_instance ci INNER JOIN class c ON c.id = ci.class_id AND c.class_name = 'skeleton')
INNER JOIN treenode_class_instance tci ON tci.class_instance_id = ci.id
INNER JOIN relation r ON tci.relation_id = r.id AND r.relation_name = 'element_of'""")
skeletons_with_at_least_one_treenode = set(x[0] for x in c.fetchall())
for skeleton_id in sorted((all_skeletons - skeletons_with_at_least_one_treenode)):
print skeleton_id
|
Add a script to find "empty" skeletons
|
Add a script to find "empty" skeletons
|
Python
|
agpl-3.0
|
htem/CATMAID,fzadow/CATMAID,fzadow/CATMAID,fzadow/CATMAID,htem/CATMAID,fzadow/CATMAID,htem/CATMAID,htem/CATMAID
|
Add a script to find "empty" skeletons
|
#!/usr/bin/python
# This script checks your database for some common
# inconsistencies or errors that may have arisen from
# past bugs.
# You may need to install psycopg2, e.g. with:
# sudo apt-get install python-psycopg2
# Requires the file .catmaid-db to be present in your
# home directory, with the following format:
#
# host: localhost
# database: catmaid
# username: catmaid_user
# password: password_of_your_catmaid_user
import sys, os
from common import db_connection
from subprocess import check_call
if len(sys.argv) != 2:
print >> sys.stderr, "Usage: %s <PROJECT-ID>" % (sys.argv[0],)
sys.exit(1)
project_id = sys.argv[1]
c = db_connection.cursor()
# This could be done with a single LEFT OUTER JOIN, but I'm too tired
# to figure that out at the moment...
# Find all the skeletons in the project:
c.execute("""
SELECT ci.id
FROM (class_instance ci INNER JOIN class c ON c.id = ci.class_id AND c.class_name = 'skeleton')
WHERE c.class_name = 'skeleton' AND ci.project_id = %s""",
project_id)
all_skeletons = set(x[0] for x in c.fetchall())
# Now find all skeletons that have at least one treenode:
c.execute("""
SELECT DISTINCT tci.class_instance_id
FROM (class_instance ci INNER JOIN class c ON c.id = ci.class_id AND c.class_name = 'skeleton')
INNER JOIN treenode_class_instance tci ON tci.class_instance_id = ci.id
INNER JOIN relation r ON tci.relation_id = r.id AND r.relation_name = 'element_of'""")
skeletons_with_at_least_one_treenode = set(x[0] for x in c.fetchall())
for skeleton_id in sorted((all_skeletons - skeletons_with_at_least_one_treenode)):
print skeleton_id
|
<commit_before><commit_msg>Add a script to find "empty" skeletons<commit_after>
|
#!/usr/bin/python
# This script checks your database for some common
# inconsistencies or errors that may have arisen from
# past bugs.
# You may need to install psycopg2, e.g. with:
# sudo apt-get install python-psycopg2
# Requires the file .catmaid-db to be present in your
# home directory, with the following format:
#
# host: localhost
# database: catmaid
# username: catmaid_user
# password: password_of_your_catmaid_user
import sys, os
from common import db_connection
from subprocess import check_call
if len(sys.argv) != 2:
print >> sys.stderr, "Usage: %s <PROJECT-ID>" % (sys.argv[0],)
sys.exit(1)
project_id = sys.argv[1]
c = db_connection.cursor()
# This could be done with a single LEFT OUTER JOIN, but I'm too tired
# to figure that out at the moment...
# Find all the skeletons in the project:
c.execute("""
SELECT ci.id
FROM (class_instance ci INNER JOIN class c ON c.id = ci.class_id AND c.class_name = 'skeleton')
WHERE c.class_name = 'skeleton' AND ci.project_id = %s""",
project_id)
all_skeletons = set(x[0] for x in c.fetchall())
# Now find all skeletons that have at least one treenode:
c.execute("""
SELECT DISTINCT tci.class_instance_id
FROM (class_instance ci INNER JOIN class c ON c.id = ci.class_id AND c.class_name = 'skeleton')
INNER JOIN treenode_class_instance tci ON tci.class_instance_id = ci.id
INNER JOIN relation r ON tci.relation_id = r.id AND r.relation_name = 'element_of'""")
skeletons_with_at_least_one_treenode = set(x[0] for x in c.fetchall())
for skeleton_id in sorted((all_skeletons - skeletons_with_at_least_one_treenode)):
print skeleton_id
|
Add a script to find "empty" skeletons#!/usr/bin/python
# This script checks your database for some common
# inconsistencies or errors that may have arisen from
# past bugs.
# You may need to install psycopg2, e.g. with:
# sudo apt-get install python-psycopg2
# Requires the file .catmaid-db to be present in your
# home directory, with the following format:
#
# host: localhost
# database: catmaid
# username: catmaid_user
# password: password_of_your_catmaid_user
import sys, os
from common import db_connection
from subprocess import check_call
if len(sys.argv) != 2:
print >> sys.stderr, "Usage: %s <PROJECT-ID>" % (sys.argv[0],)
sys.exit(1)
project_id = sys.argv[1]
c = db_connection.cursor()
# This could be done with a single LEFT OUTER JOIN, but I'm too tired
# to figure that out at the moment...
# Find all the skeletons in the project:
c.execute("""
SELECT ci.id
FROM (class_instance ci INNER JOIN class c ON c.id = ci.class_id AND c.class_name = 'skeleton')
WHERE c.class_name = 'skeleton' AND ci.project_id = %s""",
project_id)
all_skeletons = set(x[0] for x in c.fetchall())
# Now find all skeletons that have at least one skeleton:
c.execute("""
SELECT DISTINCT tci.class_instance_id
FROM (class_instance ci INNER JOIN class c ON c.id = ci.class_id AND c.class_name = 'skeleton')
INNER JOIN treenode_class_instance tci ON tci.class_instance_id = ci.id
INNER JOIN relation r ON tci.relation_id = r.id AND r.relation_name = 'element_of'""")
skeletons_with_at_least_one_treenode = set(x[0] for x in c.fetchall())
for skeleton_id in sorted((all_skeletons - skeletons_with_at_least_one_treenode)):
print skeleton_id
|
<commit_before><commit_msg>Add a script to find "empty" skeletons<commit_after>#!/usr/bin/python
# This script checks your database for some common
# inconsistencies or errors that may have arisen from
# past bugs.
# You may need to install psycopg2, e.g. with:
# sudo apt-get install python-psycopg2
# Requires the file .catmaid-db to be present in your
# home directory, with the following format:
#
# host: localhost
# database: catmaid
# username: catmaid_user
# password: password_of_your_catmaid_user
import sys, os
from common import db_connection
from subprocess import check_call
if len(sys.argv) != 2:
print >> sys.stderr, "Usage: %s <PROJECT-ID>" % (sys.argv[0],)
sys.exit(1)
project_id = sys.argv[1]
c = db_connection.cursor()
# This could be done with a single LEFT OUTER JOIN, but I'm too tired
# to figure that out at the moment...
# Find all the skeletons in the project:
c.execute("""
SELECT ci.id
FROM (class_instance ci INNER JOIN class c ON c.id = ci.class_id AND c.class_name = 'skeleton')
WHERE c.class_name = 'skeleton' AND ci.project_id = %s""",
project_id)
all_skeletons = set(x[0] for x in c.fetchall())
# Now find all skeletons that have at least one skeleton:
c.execute("""
SELECT DISTINCT tci.class_instance_id
FROM (class_instance ci INNER JOIN class c ON c.id = ci.class_id AND c.class_name = 'skeleton')
INNER JOIN treenode_class_instance tci ON tci.class_instance_id = ci.id
INNER JOIN relation r ON tci.relation_id = r.id AND r.relation_name = 'element_of'""")
skeletons_with_at_least_one_treenode = set(x[0] for x in c.fetchall())
for skeleton_id in sorted((all_skeletons - skeletons_with_at_least_one_treenode)):
print skeleton_id
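The inline comment above concedes the two queries could be folded into a single LEFT OUTER JOIN; a hedged sketch of that query follows. It is untested against a real CATMAID schema, so the two-query version above remains authoritative.

# Skeleton class instances with zero element_of treenode links, in one query.
EMPTY_SKELETONS_SQL = """
    SELECT ci.id
      FROM class_instance ci
      JOIN class c ON c.id = ci.class_id AND c.class_name = 'skeleton'
 LEFT JOIN treenode_class_instance tci ON tci.class_instance_id = ci.id
 LEFT JOIN relation r ON r.id = tci.relation_id AND r.relation_name = 'element_of'
     WHERE ci.project_id = %s
  GROUP BY ci.id
    HAVING count(r.id) = 0"""
# c.execute(EMPTY_SKELETONS_SQL, (project_id,))
# would then yield the same skeleton ids as the set difference printed above.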
|
|
6276e1ad6e74874cdc2ab8e84608d02edbf6b587
|
unify/framework/jasyscript.py
|
unify/framework/jasyscript.py
|
# Unify project builder
# Copyright 2012 Sebastian Fastner, Mainz, Germany
import webbrowser
@task("Open help in browser")
def help():
# Clearing cache
webbrowser.open("http://unify-training.com/")
|
Add initial unify bould script
|
Add initial unify bould script
|
Python
|
mit
|
unify/unify,unify/unify,unify/unify,unify/unify,unify/unify,unify/unify
|
Add initial unify bould script
|
# Unify project builder
# Copyright 2012 Sebastian Fastner, Mainz, Germany
import webbrowser
@task("Open help in browser")
def help():
# Clearing cache
webbrowser.open("http://unify-training.com/")
|
<commit_before><commit_msg>Add initial unify bould script<commit_after>
|
# Unify project builder
# Copyright 2012 Sebastian Fastner, Mainz, Germany
import webbrowser
@task("Open help in browser")
def help():
# Clearing cache
webbrowser.open("http://unify-training.com/")
|
Add initial unify bould script# Unify project builder
# Copyright 2012 Sebastian Fastner, Mainz, Germany
import webbrowser
@task("Open help in browser")
def help():
# Clearing cache
webbrowser.open("http://unify-training.com/")
|
<commit_before><commit_msg>Add initial unify bould script<commit_after># Unify project builder
# Copyright 2012 Sebastian Fastner, Mainz, Germany
import webbrowser
@task("Open help in browser")
def help():
# Clearing cache
webbrowser.open("http://unify-training.com/")
|
|
be1430571bb043bc397e73873a61908d40a2bda1
|
scratchpad/nano-i2c-test.py
|
scratchpad/nano-i2c-test.py
|
#!/usr/bin/env python3
import smbus
import time
# for RPI version 1, use “bus = smbus.SMBus(0)”
bus = smbus.SMBus(1)
# This is the address we setup in the Arduino Program
address = 0x04
cmd_read_analog = 1
while True:
number = bus.read_word_data(address, cmd_read_analog)
print("analog value =", "{0:4X}".format(number))
# wait for a bit for next reading
time.sleep(1)
|
Read value from nano via i2c
|
Read value from nano via i2c
|
Python
|
mit
|
gizmo-cda/g2x,gizmo-cda/g2x,gizmo-cda/g2x,thelonious/g2x,gizmo-cda/g2x,thelonious/g2x
|
Read value from nano via i2c
|
#!/usr/bin/env python3
import smbus
import time
# for RPI version 1, use “bus = smbus.SMBus(0)”
bus = smbus.SMBus(1)
# This is the address we setup in the Arduino Program
address = 0x04
cmd_read_analog = 1
while True:
number = bus.read_word_data(address, cmd_read_analog)
print("analog value =", "{0:4X}".format(number))
# wait for a bit for next reading
time.sleep(1)
|
<commit_before><commit_msg>Read value from nano via i2c<commit_after>
|
#!/usr/bin/env python3
import smbus
import time
# for RPI version 1, use “bus = smbus.SMBus(0)”
bus = smbus.SMBus(1)
# This is the address we setup in the Arduino Program
address = 0x04
cmd_read_analog = 1
while True:
number = bus.read_word_data(address, cmd_read_analog)
print("analog value =", "{0:4X}".format(number))
# wait for a bit for next reading
time.sleep(1)
|
Read value from nano via i2c#!/usr/bin/env python3
import smbus
import time
# for RPI version 1, use “bus = smbus.SMBus(0)”
bus = smbus.SMBus(1)
# This is the address we setup in the Arduino Program
address = 0x04
cmd_read_analog = 1
while True:
number = bus.read_word_data(address, cmd_read_analog)
print("analog value =", "{0:4X}".format(number))
# wait for a bit for next reading
time.sleep(1)
|
<commit_before><commit_msg>Read value from nano via i2c<commit_after>#!/usr/bin/env python3
import smbus
import time
# for RPI version 1, use “bus = smbus.SMBus(0)”
bus = smbus.SMBus(1)
# This is the address we setup in the Arduino Program
address = 0x04
cmd_read_analog = 1
while True:
number = bus.read_word_data(address, cmd_read_analog)
print("analog value =", "{0:4X}".format(number))
# wait for a bit for next reading
time.sleep(1)
|
|
89f4f16e3ec8b6018c9ef487ae4059271e1829df
|
tests/functional/test_new_resolver.py
|
tests/functional/test_new_resolver.py
|
import json
from tests.lib import create_basic_wheel_for_package
def assert_installed(script, **kwargs):
ret = script.pip('list', '--format=json')
installed = set(
(val['name'], val['version'])
for val in json.loads(ret.stdout)
)
assert set(kwargs.items()) <= installed
def test_new_resolver_can_install(script):
package = create_basic_wheel_for_package(
script,
"simple",
"0.1.0",
)
script.pip(
"install", "--unstable-feature=resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"simple"
)
assert_installed(script, simple="0.1.0")
def test_new_resolver_can_install_with_version(script):
package = create_basic_wheel_for_package(
script,
"simple",
"0.1.0",
)
script.pip(
"install", "--unstable-feature=resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"simple==0.1.0"
)
assert_installed(script, simple="0.1.0")
def test_new_resolver_picks_latest_version(script):
package = create_basic_wheel_for_package(
script,
"simple",
"0.1.0",
)
package = create_basic_wheel_for_package(
script,
"simple",
"0.2.0",
)
script.pip(
"install", "--unstable-feature=resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"simple"
)
assert_installed(script, simple="0.2.0")
def test_new_resolver_installs_dependencies(script):
package = create_basic_wheel_for_package(
script,
"base",
"0.1.0",
depends=["dep"],
)
package = create_basic_wheel_for_package(
script,
"dep",
"0.1.0",
)
script.pip(
"install", "--unstable-feature=resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"base"
)
assert_installed(script, base="0.1.0", dep="0.1.0")
|
Add some functional tests for the new resolver
|
Add some functional tests for the new resolver
|
Python
|
mit
|
pfmoore/pip,pypa/pip,sbidoul/pip,pypa/pip,pfmoore/pip,pradyunsg/pip,sbidoul/pip,pradyunsg/pip
|
Add some functional tests for the new resolver
|
import json
from tests.lib import create_basic_wheel_for_package
def assert_installed(script, **kwargs):
ret = script.pip('list', '--format=json')
installed = set(
(val['name'], val['version'])
for val in json.loads(ret.stdout)
)
assert set(kwargs.items()) <= installed
def test_new_resolver_can_install(script):
package = create_basic_wheel_for_package(
script,
"simple",
"0.1.0",
)
script.pip(
"install", "--unstable-feature=resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"simple"
)
assert_installed(script, simple="0.1.0")
def test_new_resolver_can_install_with_version(script):
package = create_basic_wheel_for_package(
script,
"simple",
"0.1.0",
)
script.pip(
"install", "--unstable-feature=resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"simple==0.1.0"
)
assert_installed(script, simple="0.1.0")
def test_new_resolver_picks_latest_version(script):
package = create_basic_wheel_for_package(
script,
"simple",
"0.1.0",
)
package = create_basic_wheel_for_package(
script,
"simple",
"0.2.0",
)
script.pip(
"install", "--unstable-feature=resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"simple"
)
assert_installed(script, simple="0.2.0")
def test_new_resolver_installs_dependencies(script):
package = create_basic_wheel_for_package(
script,
"base",
"0.1.0",
depends=["dep"],
)
package = create_basic_wheel_for_package(
script,
"dep",
"0.1.0",
)
script.pip(
"install", "--unstable-feature=resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"base"
)
assert_installed(script, base="0.1.0", dep="0.1.0")
|
<commit_before><commit_msg>Add some functional tests for the new resolver<commit_after>
|
import json
from tests.lib import create_basic_wheel_for_package
def assert_installed(script, **kwargs):
ret = script.pip('list', '--format=json')
installed = set(
(val['name'], val['version'])
for val in json.loads(ret.stdout)
)
assert set(kwargs.items()) <= installed
def test_new_resolver_can_install(script):
package = create_basic_wheel_for_package(
script,
"simple",
"0.1.0",
)
script.pip(
"install", "--unstable-feature=resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"simple"
)
assert_installed(script, simple="0.1.0")
def test_new_resolver_can_install_with_version(script):
package = create_basic_wheel_for_package(
script,
"simple",
"0.1.0",
)
script.pip(
"install", "--unstable-feature=resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"simple==0.1.0"
)
assert_installed(script, simple="0.1.0")
def test_new_resolver_picks_latest_version(script):
package = create_basic_wheel_for_package(
script,
"simple",
"0.1.0",
)
package = create_basic_wheel_for_package(
script,
"simple",
"0.2.0",
)
script.pip(
"install", "--unstable-feature=resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"simple"
)
assert_installed(script, simple="0.2.0")
def test_new_resolver_installs_dependencies(script):
package = create_basic_wheel_for_package(
script,
"base",
"0.1.0",
depends=["dep"],
)
package = create_basic_wheel_for_package(
script,
"dep",
"0.1.0",
)
script.pip(
"install", "--unstable-feature=resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"base"
)
assert_installed(script, base="0.1.0", dep="0.1.0")
|
Add some functional tests for the new resolverimport json
from tests.lib import create_basic_wheel_for_package
def assert_installed(script, **kwargs):
ret = script.pip('list', '--format=json')
installed = set(
(val['name'], val['version'])
for val in json.loads(ret.stdout)
)
assert set(kwargs.items()) <= installed
def test_new_resolver_can_install(script):
package = create_basic_wheel_for_package(
script,
"simple",
"0.1.0",
)
script.pip(
"install", "--unstable-feature=resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"simple"
)
assert_installed(script, simple="0.1.0")
def test_new_resolver_can_install_with_version(script):
package = create_basic_wheel_for_package(
script,
"simple",
"0.1.0",
)
script.pip(
"install", "--unstable-feature=resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"simple==0.1.0"
)
assert_installed(script, simple="0.1.0")
def test_new_resolver_picks_latest_version(script):
package = create_basic_wheel_for_package(
script,
"simple",
"0.1.0",
)
package = create_basic_wheel_for_package(
script,
"simple",
"0.2.0",
)
script.pip(
"install", "--unstable-feature=resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"simple"
)
assert_installed(script, simple="0.2.0")
def test_new_resolver_installs_dependencies(script):
package = create_basic_wheel_for_package(
script,
"base",
"0.1.0",
depends=["dep"],
)
package = create_basic_wheel_for_package(
script,
"dep",
"0.1.0",
)
script.pip(
"install", "--unstable-feature=resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"base"
)
assert_installed(script, base="0.1.0", dep="0.1.0")
|
<commit_before><commit_msg>Add some functional tests for the new resolver<commit_after>import json
from tests.lib import create_basic_wheel_for_package
def assert_installed(script, **kwargs):
ret = script.pip('list', '--format=json')
installed = set(
(val['name'], val['version'])
for val in json.loads(ret.stdout)
)
assert set(kwargs.items()) <= installed
def test_new_resolver_can_install(script):
package = create_basic_wheel_for_package(
script,
"simple",
"0.1.0",
)
script.pip(
"install", "--unstable-feature=resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"simple"
)
assert_installed(script, simple="0.1.0")
def test_new_resolver_can_install_with_version(script):
package = create_basic_wheel_for_package(
script,
"simple",
"0.1.0",
)
script.pip(
"install", "--unstable-feature=resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"simple==0.1.0"
)
assert_installed(script, simple="0.1.0")
def test_new_resolver_picks_latest_version(script):
package = create_basic_wheel_for_package(
script,
"simple",
"0.1.0",
)
package = create_basic_wheel_for_package(
script,
"simple",
"0.2.0",
)
script.pip(
"install", "--unstable-feature=resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"simple"
)
assert_installed(script, simple="0.2.0")
def test_new_resolver_installs_dependencies(script):
package = create_basic_wheel_for_package(
script,
"base",
"0.1.0",
depends=["dep"],
)
package = create_basic_wheel_for_package(
script,
"dep",
"0.1.0",
)
script.pip(
"install", "--unstable-feature=resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"base"
)
assert_installed(script, base="0.1.0", dep="0.1.0")
|
|
5c45586ed1c90c72620738420d37a444647fe186
|
code/supervise_acronym_gene_mentions.py
|
code/supervise_acronym_gene_mentions.py
|
#! /usr/bin/env pyton3
import fileinput
import json
# Process the input
with fileinput.input() as input_files:
for line in input_files:
mention = json.loads(line)
mention.is_correct = False
print(json.dumps(mention))
|
Add script to supervise acronyms
|
Add script to supervise acronyms
|
Python
|
apache-2.0
|
HazyResearch/dd-genomics,HazyResearch/dd-genomics,amwenger/dd-genomics,rionda/dd-genomics,rionda/dd-genomics,HazyResearch/dd-genomics,HazyResearch/dd-genomics,HazyResearch/dd-genomics,amwenger/dd-genomics,amwenger/dd-genomics
|
Add script to supervise acronyms
|
#! /usr/bin/env pyton3
import fileinput
import json
# Process the input
with fileinput.input() as input_files:
for line in input_files:
mention = json.loads(line)
mention.is_correct = False
print(json.dumps(mention))
|
<commit_before><commit_msg>Add script to supervise acronyms<commit_after>
|
#! /usr/bin/env pyton3
import fileinput
import json
# Process the input
with fileinput.input() as input_files:
for line in input_files:
mention = json.loads(line)
mention.is_correct = False
print(json.dumps(mention))
|
Add script to supervise acronyms#! /usr/bin/env pyton3
import fileinput
import json
# Process the input
with fileinput.input() as input_files:
for line in input_files:
mention = json.loads(line)
mention.is_correct = False
print(json.dumps(mention))
|
<commit_before><commit_msg>Add script to supervise acronyms<commit_after>#! /usr/bin/env pyton3
import fileinput
import json
# Process the input
with fileinput.input() as input_files:
for line in input_files:
mention = json.loads(line)
mention.is_correct = False
print(json.dumps(mention))
|
|
fef62c34be24ac2027d89ffc0c3926b89ad7625d
|
tools/data/change_window_file_nums.py
|
tools/data/change_window_file_nums.py
|
#!/usr/bin/env python
import argparse
import scipy.io as sio
import os
import os.path as osp
import numpy as np
from vdetlib.vdet.dataset import index_det_to_vdet
if __name__ == '__main__':
parser = argparse.ArgumentParser('Change window file numbers.')
parser.add_argument('window_file')
parser.add_argument('start', type=int)
parser.add_argument('save_window_file')
args = parser.parse_args()
f = open(args.window_file, 'r')
save_file = open(args.save_window_file, 'w')
boxes = []
image_ind = 0
count = 0
while 1:
# read number line
number_line = f.readline().strip()
if len(number_line) == 0: break # end of the file
assert number_line[0] == '#'
save_file.write('# {}\n'.format(image_ind + args.start))
# read image line and image specs
for __ in xrange(5):
save_file.write(f.readline())
num = int(f.readline().strip())
save_file.write('{}\n'.format(num))
for i in xrange(num):
save_file.write(f.readline())
image_ind += 1
if image_ind % 1000 == 0:
print "Processed {} files.".format(image_ind)
if image_ind % 1000 != 0:
print "Processed {} files.".format(image_ind)
f.close()
save_file.close()
|
Add script to change starting index in window file.
|
Add script to change starting index in window file.
|
Python
|
mit
|
myfavouritekk/TPN
|
Add script to change starting index in window file.
|
#!/usr/bin/env python
import argparse
import scipy.io as sio
import os
import os.path as osp
import numpy as np
from vdetlib.vdet.dataset import index_det_to_vdet
if __name__ == '__main__':
parser = argparse.ArgumentParser('Change window file numbers.')
parser.add_argument('window_file')
parser.add_argument('start', type=int)
parser.add_argument('save_window_file')
args = parser.parse_args()
f = open(args.window_file, 'r')
save_file = open(args.save_window_file, 'w')
boxes = []
image_ind = 0
count = 0
while 1:
# read number line
number_line = f.readline().strip()
if len(number_line) == 0: break # end of the file
assert number_line[0] == '#'
save_file.write('# {}\n'.format(image_ind + args.start))
# read image line and image specs
for __ in xrange(5):
save_file.write(f.readline())
num = int(f.readline().strip())
save_file.write('{}\n'.format(num))
for i in xrange(num):
save_file.write(f.readline())
image_ind += 1
if image_ind % 1000 == 0:
print "Processed {} files.".format(image_ind)
if image_ind % 1000 != 0:
print "Processed {} files.".format(image_ind)
f.close()
save_file.close()
|
<commit_before><commit_msg>Add script to change starting index in window file.<commit_after>
|
#!/usr/bin/env python
import argparse
import scipy.io as sio
import os
import os.path as osp
import numpy as np
from vdetlib.vdet.dataset import index_det_to_vdet
if __name__ == '__main__':
parser = argparse.ArgumentParser('Change window file numbers.')
parser.add_argument('window_file')
parser.add_argument('start', type=int)
parser.add_argument('save_window_file')
args = parser.parse_args()
f = open(args.window_file, 'r')
save_file = open(args.save_window_file, 'w')
boxes = []
image_ind = 0
count = 0
while 1:
# read number line
number_line = f.readline().strip()
if len(number_line) == 0: break # end of the file
assert number_line[0] == '#'
save_file.write('# {}\n'.format(image_ind + args.start))
# read image line and image specs
for __ in xrange(5):
save_file.write(f.readline())
num = int(f.readline().strip())
save_file.write('{}\n'.format(num))
for i in xrange(num):
save_file.write(f.readline())
image_ind += 1
if image_ind % 1000 == 0:
print "Processed {} files.".format(image_ind)
if image_ind % 1000 != 0:
print "Processed {} files.".format(image_ind)
f.close()
save_file.close()
|
Add script to change starting index in window file.#!/usr/bin/env python
import argparse
import scipy.io as sio
import os
import os.path as osp
import numpy as np
from vdetlib.vdet.dataset import index_det_to_vdet
if __name__ == '__main__':
parser = argparse.ArgumentParser('Change window file numbers.')
parser.add_argument('window_file')
parser.add_argument('start', type=int)
parser.add_argument('save_window_file')
args = parser.parse_args()
f = open(args.window_file, 'r')
save_file = open(args.save_window_file, 'w')
boxes = []
image_ind = 0
count = 0
while 1:
# read number line
number_line = f.readline().strip()
if len(number_line) == 0: break # end of the file
assert number_line[0] == '#'
save_file.write('# {}\n'.format(image_ind + args.start))
# read image line and image specs
for __ in xrange(5):
save_file.write(f.readline())
num = int(f.readline().strip())
save_file.write('{}\n'.format(num))
for i in xrange(num):
save_file.write(f.readline())
image_ind += 1
if image_ind % 1000 == 0:
print "Processed {} files.".format(image_ind)
if image_ind % 1000 != 0:
print "Processed {} files.".format(image_ind)
f.close()
save_file.close()
|
<commit_before><commit_msg>Add script to change starting index in window file.<commit_after>#!/usr/bin/env python
import argparse
import scipy.io as sio
import os
import os.path as osp
import numpy as np
from vdetlib.vdet.dataset import index_det_to_vdet
if __name__ == '__main__':
parser = argparse.ArgumentParser('Change window file numbers.')
parser.add_argument('window_file')
parser.add_argument('start', type=int)
parser.add_argument('save_window_file')
args = parser.parse_args()
f = open(args.window_file, 'r')
save_file = open(args.save_window_file, 'w')
boxes = []
image_ind = 0
count = 0
while 1:
# read number line
number_line = f.readline().strip()
if len(number_line) == 0: break # end of the file
assert number_line[0] == '#'
save_file.write('# {}\n'.format(image_ind + args.start))
# read image line and image specs
for __ in xrange(5):
save_file.write(f.readline())
num = int(f.readline().strip())
save_file.write('{}\n'.format(num))
for i in xrange(num):
save_file.write(f.readline())
image_ind += 1
if image_ind % 1000 == 0:
print "Processed {} files.".format(image_ind)
if image_ind % 1000 != 0:
print "Processed {} files.".format(image_ind)
f.close()
save_file.close()
|