| Column | Type | Length range / values |
|---|---|---|
| commit | stringlengths | 40-40 |
| old_file | stringlengths | 4-118 |
| new_file | stringlengths | 4-118 |
| old_contents | stringlengths | 0-2.94k |
| new_contents | stringlengths | 1-4.43k |
| subject | stringlengths | 15-444 |
| message | stringlengths | 16-3.45k |
| lang | stringclasses | 1 value |
| license | stringclasses | 13 values |
| repos | stringlengths | 5-43.2k |
| prompt | stringlengths | 17-4.58k |
| response | stringlengths | 1-4.43k |
| prompt_tagged | stringlengths | 58-4.62k |
| response_tagged | stringlengths | 1-4.43k |
| text | stringlengths | 132-7.29k |
| text_tagged | stringlengths | 173-7.33k |
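The schema above reads like the column summary of a Hugging Face-style dataset of git commits, so the rows below can presumably be loaded and inspected with the `datasets` library. The snippet is only a sketch: the dataset identifier `user/commit-dataset` is a placeholder, not the real repository name.

```python
from datasets import load_dataset  # pip install datasets

# Placeholder identifier; substitute the actual dataset repository name.
ds = load_dataset("user/commit-dataset", split="train")

# The columns should match the schema table above.
print(ds.column_names)

# Peek at one example.
row = ds[0]
print(row["commit"], row["old_file"], "->", row["new_file"])
print(row["subject"])
```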
1a54416928cd53a4af64c6705f200803751721c3
|
course_discovery/apps/course_metadata/migrations/0067_auto_20171108_1432.py
|
course_discovery/apps/course_metadata/migrations/0067_auto_20171108_1432.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-11-08 14:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0066_auto_20171107_1707'),
]
operations = [
migrations.AlterField(
model_name='organization',
name='key',
field=models.CharField(help_text='Only ascii characters allowed (a-zA-Z0-9)', max_length=255),
),
]
|
Add the missing migration left out from previous change
|
Add the missing migration left out from previous change
|
Python
|
agpl-3.0
|
edx/course-discovery,edx/course-discovery,edx/course-discovery,edx/course-discovery
|
Add the missing migration left out from previous change
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-11-08 14:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0066_auto_20171107_1707'),
]
operations = [
migrations.AlterField(
model_name='organization',
name='key',
field=models.CharField(help_text='Only ascii characters allowed (a-zA-Z0-9)', max_length=255),
),
]
|
<commit_before><commit_msg>Add the missing migration left out from previous change<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-11-08 14:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0066_auto_20171107_1707'),
]
operations = [
migrations.AlterField(
model_name='organization',
name='key',
field=models.CharField(help_text='Only ascii characters allowed (a-zA-Z0-9)', max_length=255),
),
]
|
Add the missing migration left out from previous change# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-11-08 14:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0066_auto_20171107_1707'),
]
operations = [
migrations.AlterField(
model_name='organization',
name='key',
field=models.CharField(help_text='Only ascii characters allowed (a-zA-Z0-9)', max_length=255),
),
]
|
<commit_before><commit_msg>Add the missing migration left out from previous change<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-11-08 14:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0066_auto_20171107_1707'),
]
operations = [
migrations.AlterField(
model_name='organization',
name='key',
field=models.CharField(help_text='Only ascii characters allowed (a-zA-Z0-9)', max_length=255),
),
]
|
|
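The derived columns in the row above repeat the base fields: `prompt` holds the commit message, `response` mirrors `new_contents`, `text` is the message followed directly by `new_contents`, and the `*_tagged` variants wrap the same pieces in `<commit_before>`, `<commit_msg>`, and `<commit_after>` sentinels (the pre-change file is empty here because the commit adds a new file). A rough sketch of that construction, using a hypothetical helper name, could look like this:

```python
def build_tagged_text(old_contents: str, message: str, new_contents: str) -> str:
    """Hypothetical helper mirroring the pattern visible in text_tagged:
    pre-change file, commit message, and post-change file joined by sentinel tags."""
    return (
        "<commit_before>" + old_contents
        + "<commit_msg>" + message
        + "<commit_after>" + new_contents
    )

# In the row above the old file is empty, so the tags appear back to back:
sample = build_tagged_text(
    "",
    "Add the missing migration left out from previous change",
    "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.3 on 2017-11-08 14:32\n...",
)
print(sample[:80])
```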
2e73f7c8d9219715ec76e1081080b63da1dc6d0d
|
src/excel_sheet_column_number.py
|
src/excel_sheet_column_number.py
|
"""
Source : https://oj.leetcode.com/problems/excel-sheet-column-number/
Author : Changxi Wu
Date : 2015-01-21
Given a column title as appear in an Excel sheet, return its corresponding column number.
For example:
A -> 1
B -> 2
C -> 3
...
Z -> 26
AA -> 27
AB -> 28
"""
def titleToNumber(s):
# @praram s, a string
# @return an integer
result = 0
length = len(s)
for i, c in enumerate(s):
if length == 1:
result += (ord(c)-ord('A')+1)
else:
result += (ord(c)-ord('A')+1)*(26**(length-i-1))
return result
if __name__ == '__main__':
test = {'A':1, 'B':2, 'C':3, 'D':4, 'AA':27, 'AB':28, 'ABC':731}
for s in test.keys():
result = titleToNumber(s)
if result != test[s]:
print 'Input: ', s
print 'Output:', result
print 'Expected:', test[s]
|
Add solution for excel sheet column number
|
Add solution for excel sheet column number
|
Python
|
mit
|
chancyWu/leetcode
|
Add solution for excel sheet column number
|
"""
Source : https://oj.leetcode.com/problems/excel-sheet-column-number/
Author : Changxi Wu
Date : 2015-01-21
Given a column title as appear in an Excel sheet, return its corresponding column number.
For example:
A -> 1
B -> 2
C -> 3
...
Z -> 26
AA -> 27
AB -> 28
"""
def titleToNumber(s):
# @praram s, a string
# @return an integer
result = 0
length = len(s)
for i, c in enumerate(s):
if length == 1:
result += (ord(c)-ord('A')+1)
else:
result += (ord(c)-ord('A')+1)*(26**(length-i-1))
return result
if __name__ == '__main__':
test = {'A':1, 'B':2, 'C':3, 'D':4, 'AA':27, 'AB':28, 'ABC':731}
for s in test.keys():
result = titleToNumber(s)
if result != test[s]:
print 'Input: ', s
print 'Output:', result
print 'Expected:', test[s]
|
<commit_before><commit_msg>Add solution for excel sheet column number<commit_after>
|
"""
Source : https://oj.leetcode.com/problems/excel-sheet-column-number/
Author : Changxi Wu
Date : 2015-01-21
Given a column title as appear in an Excel sheet, return its corresponding column number.
For example:
A -> 1
B -> 2
C -> 3
...
Z -> 26
AA -> 27
AB -> 28
"""
def titleToNumber(s):
# @praram s, a string
# @return an integer
result = 0
length = len(s)
for i, c in enumerate(s):
if length == 1:
result += (ord(c)-ord('A')+1)
else:
result += (ord(c)-ord('A')+1)*(26**(length-i-1))
return result
if __name__ == '__main__':
test = {'A':1, 'B':2, 'C':3, 'D':4, 'AA':27, 'AB':28, 'ABC':731}
for s in test.keys():
result = titleToNumber(s)
if result != test[s]:
print 'Input: ', s
print 'Output:', result
print 'Expected:', test[s]
|
Add solution for excel sheet column number"""
Source : https://oj.leetcode.com/problems/excel-sheet-column-number/
Author : Changxi Wu
Date : 2015-01-21
Given a column title as appear in an Excel sheet, return its corresponding column number.
For example:
A -> 1
B -> 2
C -> 3
...
Z -> 26
AA -> 27
AB -> 28
"""
def titleToNumber(s):
# @praram s, a string
# @return an integer
result = 0
length = len(s)
for i, c in enumerate(s):
if length == 1:
result += (ord(c)-ord('A')+1)
else:
result += (ord(c)-ord('A')+1)*(26**(length-i-1))
return result
if __name__ == '__main__':
test = {'A':1, 'B':2, 'C':3, 'D':4, 'AA':27, 'AB':28, 'ABC':731}
for s in test.keys():
result = titleToNumber(s)
if result != test[s]:
print 'Input: ', s
print 'Output:', result
print 'Expected:', test[s]
|
<commit_before><commit_msg>Add solution for excel sheet column number<commit_after>"""
Source : https://oj.leetcode.com/problems/excel-sheet-column-number/
Author : Changxi Wu
Date : 2015-01-21
Given a column title as appear in an Excel sheet, return its corresponding column number.
For example:
A -> 1
B -> 2
C -> 3
...
Z -> 26
AA -> 27
AB -> 28
"""
def titleToNumber(s):
# @praram s, a string
# @return an integer
result = 0
length = len(s)
for i, c in enumerate(s):
if length == 1:
result += (ord(c)-ord('A')+1)
else:
result += (ord(c)-ord('A')+1)*(26**(length-i-1))
return result
if __name__ == '__main__':
test = {'A':1, 'B':2, 'C':3, 'D':4, 'AA':27, 'AB':28, 'ABC':731}
for s in test.keys():
result = titleToNumber(s)
if result != test[s]:
print 'Input: ', s
print 'Output:', result
print 'Expected:', test[s]
|
|
34ebcb3bfd3c62bfd43c8144766ad8af56aa236a
|
buildbot/cbuildbot_config_unittest.py
|
buildbot/cbuildbot_config_unittest.py
|
#!/usr/bin/python
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for config. Needs to be run inside of chroot for mox."""
import mox
import sys
import unittest
import constants
sys.path.append(constants.SOURCE_ROOT)
import chromite.buildbot.cbuildbot_config as config
class CBuildBotTest(mox.MoxTestBase):
def setUp(self):
mox.MoxTestBase.setUp(self)
def testConfigUseflags(self):
""" Useflags must be lists.
Strings are interpreted as arrays of characters for this, which is not
useful.
"""
for x in config.config:
useflags = config.config[x].get('useflags')
if not useflags is None:
self.assertTrue(
isinstance(useflags, list),
'Config for %s: useflags should be a list.' % x)
if __name__ == '__main__':
unittest.main()
|
Add a simple unit test for the config.
|
Add a simple unit test for the config.
BUG=http://code.google.com/p/chromium-os/issues/detail?id=14837
TEST=It is a test. Ran it, with current and with a previous bad config.
Change-Id: Ib20c89b6169dbc80a5c49487d732ccd20b7ab7cb
|
Python
|
bsd-3-clause
|
zhang0137/chromite,chadversary/chromiumos.chromite,bpsinc-native/src_third_party_chromite,coreos/chromite,coreos/chromite,zhang0137/chromite,bpsinc-native/src_third_party_chromite,bpsinc-native/src_third_party_chromite,chadversary/chromiumos.chromite,coreos/chromite,zhang0137/chromite
|
Add a simple unit test for the config.
BUG=http://code.google.com/p/chromium-os/issues/detail?id=14837
TEST=It is a test. Ran it, with current and with a previous bad config.
Change-Id: Ib20c89b6169dbc80a5c49487d732ccd20b7ab7cb
|
#!/usr/bin/python
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for config. Needs to be run inside of chroot for mox."""
import mox
import sys
import unittest
import constants
sys.path.append(constants.SOURCE_ROOT)
import chromite.buildbot.cbuildbot_config as config
class CBuildBotTest(mox.MoxTestBase):
def setUp(self):
mox.MoxTestBase.setUp(self)
def testConfigUseflags(self):
""" Useflags must be lists.
Strings are interpreted as arrays of characters for this, which is not
useful.
"""
for x in config.config:
useflags = config.config[x].get('useflags')
if not useflags is None:
self.assertTrue(
isinstance(useflags, list),
'Config for %s: useflags should be a list.' % x)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a simple unit test for the config.
BUG=http://code.google.com/p/chromium-os/issues/detail?id=14837
TEST=It is a test. Ran it, with current and with a previous bad config.
Change-Id: Ib20c89b6169dbc80a5c49487d732ccd20b7ab7cb<commit_after>
|
#!/usr/bin/python
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for config. Needs to be run inside of chroot for mox."""
import mox
import sys
import unittest
import constants
sys.path.append(constants.SOURCE_ROOT)
import chromite.buildbot.cbuildbot_config as config
class CBuildBotTest(mox.MoxTestBase):
def setUp(self):
mox.MoxTestBase.setUp(self)
def testConfigUseflags(self):
""" Useflags must be lists.
Strings are interpreted as arrays of characters for this, which is not
useful.
"""
for x in config.config:
useflags = config.config[x].get('useflags')
if not useflags is None:
self.assertTrue(
isinstance(useflags, list),
'Config for %s: useflags should be a list.' % x)
if __name__ == '__main__':
unittest.main()
|
Add a simple unit test for the config.
BUG=http://code.google.com/p/chromium-os/issues/detail?id=14837
TEST=It is a test. Ran it, with current and with a previous bad config.
Change-Id: Ib20c89b6169dbc80a5c49487d732ccd20b7ab7cb#!/usr/bin/python
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for config. Needs to be run inside of chroot for mox."""
import mox
import sys
import unittest
import constants
sys.path.append(constants.SOURCE_ROOT)
import chromite.buildbot.cbuildbot_config as config
class CBuildBotTest(mox.MoxTestBase):
def setUp(self):
mox.MoxTestBase.setUp(self)
def testConfigUseflags(self):
""" Useflags must be lists.
Strings are interpreted as arrays of characters for this, which is not
useful.
"""
for x in config.config:
useflags = config.config[x].get('useflags')
if not useflags is None:
self.assertTrue(
isinstance(useflags, list),
'Config for %s: useflags should be a list.' % x)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a simple unit test for the config.
BUG=http://code.google.com/p/chromium-os/issues/detail?id=14837
TEST=It is a test. Ran it, with current and with a previous bad config.
Change-Id: Ib20c89b6169dbc80a5c49487d732ccd20b7ab7cb<commit_after>#!/usr/bin/python
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for config. Needs to be run inside of chroot for mox."""
import mox
import sys
import unittest
import constants
sys.path.append(constants.SOURCE_ROOT)
import chromite.buildbot.cbuildbot_config as config
class CBuildBotTest(mox.MoxTestBase):
def setUp(self):
mox.MoxTestBase.setUp(self)
def testConfigUseflags(self):
""" Useflags must be lists.
Strings are interpreted as arrays of characters for this, which is not
useful.
"""
for x in config.config:
useflags = config.config[x].get('useflags')
if not useflags is None:
self.assertTrue(
isinstance(useflags, list),
'Config for %s: useflags should be a list.' % x)
if __name__ == '__main__':
unittest.main()
|
|
ce12d292d96b589c67c8321efa23e1db8364bfe8
|
test/test_cascade.py
|
test/test_cascade.py
|
import os
import py.test
from tiddlyweb.config import config
from tiddlyweb.store import Store
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.tiddler import Tiddler
from tiddlywebplugins.mysql3 import (Base, sText, sTag, sTiddler,
sRevision, sField, Session)
def setup_module(module):
module.store = Store(
config['server_store'][0],
config['server_store'][1],
{'tiddlyweb.config': config}
)
# delete everything
Base.metadata.drop_all()
Base.metadata.create_all()
def test_cascade():
bag = Bag(u'holder')
store.put(bag)
tiddler = Tiddler(u'one', u'holder')
tiddler.text = u'text'
tiddler.tags = [u'tag']
tiddler.fields = {u'fieldone': u'valueone'}
store.put(tiddler)
def count_em(count, message):
text_count = store.storage.session.query(sText).count()
tag_count = store.storage.session.query(sTag).count()
tiddler_count = store.storage.session.query(sTiddler).count()
revision_count = store.storage.session.query(sRevision).count()
field_count = store.storage.session.query(sField).count()
store.storage.session.commit()
message = ('%s, but got: text: %s, tag: %s, tiddler: %s, '
'revision: %s, field: %s') % (message, text_count, tag_count,
tiddler_count, revision_count, field_count)
assert (text_count == tag_count == tiddler_count
== revision_count == field_count == count), message
count_em(1, '1 row for the tiddler everywhere')
store.delete(tiddler)
count_em(0, '0 rows for the tiddler everywhere')
|
Add a test to check that delete cascade is working
|
Add a test to check that delete cascade is working
This was a bit hard to get right, as there were lingering
sessions. The session.remove() in the test is _critical_.
|
Python
|
bsd-3-clause
|
tiddlyweb/tiddlywebplugins.mysql
|
Add a test to check that delete cascade is working
This was a bit hard to get right, as there were lingering
sessions. The session.remove() in the test is _critical_.
|
import os
import py.test
from tiddlyweb.config import config
from tiddlyweb.store import Store
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.tiddler import Tiddler
from tiddlywebplugins.mysql3 import (Base, sText, sTag, sTiddler,
sRevision, sField, Session)
def setup_module(module):
module.store = Store(
config['server_store'][0],
config['server_store'][1],
{'tiddlyweb.config': config}
)
# delete everything
Base.metadata.drop_all()
Base.metadata.create_all()
def test_cascade():
bag = Bag(u'holder')
store.put(bag)
tiddler = Tiddler(u'one', u'holder')
tiddler.text = u'text'
tiddler.tags = [u'tag']
tiddler.fields = {u'fieldone': u'valueone'}
store.put(tiddler)
def count_em(count, message):
text_count = store.storage.session.query(sText).count()
tag_count = store.storage.session.query(sTag).count()
tiddler_count = store.storage.session.query(sTiddler).count()
revision_count = store.storage.session.query(sRevision).count()
field_count = store.storage.session.query(sField).count()
store.storage.session.commit()
message = ('%s, but got: text: %s, tag: %s, tiddler: %s, '
'revision: %s, field: %s') % (message, text_count, tag_count,
tiddler_count, revision_count, field_count)
assert (text_count == tag_count == tiddler_count
== revision_count == field_count == count), message
count_em(1, '1 row for the tiddler everywhere')
store.delete(tiddler)
count_em(0, '0 rows for the tiddler everywhere')
|
<commit_before><commit_msg>Add a test to check that delete cascade is working
This was a bit hard to get right, as there were lingering
sessions. The session.remove() in the test is _critical_.<commit_after>
|
import os
import py.test
from tiddlyweb.config import config
from tiddlyweb.store import Store
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.tiddler import Tiddler
from tiddlywebplugins.mysql3 import (Base, sText, sTag, sTiddler,
sRevision, sField, Session)
def setup_module(module):
module.store = Store(
config['server_store'][0],
config['server_store'][1],
{'tiddlyweb.config': config}
)
# delete everything
Base.metadata.drop_all()
Base.metadata.create_all()
def test_cascade():
bag = Bag(u'holder')
store.put(bag)
tiddler = Tiddler(u'one', u'holder')
tiddler.text = u'text'
tiddler.tags = [u'tag']
tiddler.fields = {u'fieldone': u'valueone'}
store.put(tiddler)
def count_em(count, message):
text_count = store.storage.session.query(sText).count()
tag_count = store.storage.session.query(sTag).count()
tiddler_count = store.storage.session.query(sTiddler).count()
revision_count = store.storage.session.query(sRevision).count()
field_count = store.storage.session.query(sField).count()
store.storage.session.commit()
message = ('%s, but got: text: %s, tag: %s, tiddler: %s, '
'revision: %s, field: %s') % (message, text_count, tag_count,
tiddler_count, revision_count, field_count)
assert (text_count == tag_count == tiddler_count
== revision_count == field_count == count), message
count_em(1, '1 row for the tiddler everywhere')
store.delete(tiddler)
count_em(0, '0 rows for the tiddler everywhere')
|
Add a test to check that delete cascade is working
This was a bit hard to get right, as there were lingering
sessions. The session.remove() in the test is _critical_.
import os
import py.test
from tiddlyweb.config import config
from tiddlyweb.store import Store
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.tiddler import Tiddler
from tiddlywebplugins.mysql3 import (Base, sText, sTag, sTiddler,
sRevision, sField, Session)
def setup_module(module):
module.store = Store(
config['server_store'][0],
config['server_store'][1],
{'tiddlyweb.config': config}
)
# delete everything
Base.metadata.drop_all()
Base.metadata.create_all()
def test_cascade():
bag = Bag(u'holder')
store.put(bag)
tiddler = Tiddler(u'one', u'holder')
tiddler.text = u'text'
tiddler.tags = [u'tag']
tiddler.fields = {u'fieldone': u'valueone'}
store.put(tiddler)
def count_em(count, message):
text_count = store.storage.session.query(sText).count()
tag_count = store.storage.session.query(sTag).count()
tiddler_count = store.storage.session.query(sTiddler).count()
revision_count = store.storage.session.query(sRevision).count()
field_count = store.storage.session.query(sField).count()
store.storage.session.commit()
message = ('%s, but got: text: %s, tag: %s, tiddler: %s, '
'revision: %s, field: %s') % (message, text_count, tag_count,
tiddler_count, revision_count, field_count)
assert (text_count == tag_count == tiddler_count
== revision_count == field_count == count), message
count_em(1, '1 row for the tiddler everywhere')
store.delete(tiddler)
count_em(0, '0 rows for the tiddler everywhere')
|
<commit_before><commit_msg>Add a test to check that delete cascade is working
This was a bit hard to get right, as there were lingering
sessions. The session.remove() in the test is _critical_.<commit_after>
import os
import py.test
from tiddlyweb.config import config
from tiddlyweb.store import Store
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.tiddler import Tiddler
from tiddlywebplugins.mysql3 import (Base, sText, sTag, sTiddler,
sRevision, sField, Session)
def setup_module(module):
module.store = Store(
config['server_store'][0],
config['server_store'][1],
{'tiddlyweb.config': config}
)
# delete everything
Base.metadata.drop_all()
Base.metadata.create_all()
def test_cascade():
bag = Bag(u'holder')
store.put(bag)
tiddler = Tiddler(u'one', u'holder')
tiddler.text = u'text'
tiddler.tags = [u'tag']
tiddler.fields = {u'fieldone': u'valueone'}
store.put(tiddler)
def count_em(count, message):
text_count = store.storage.session.query(sText).count()
tag_count = store.storage.session.query(sTag).count()
tiddler_count = store.storage.session.query(sTiddler).count()
revision_count = store.storage.session.query(sRevision).count()
field_count = store.storage.session.query(sField).count()
store.storage.session.commit()
message = ('%s, but got: text: %s, tag: %s, tiddler: %s, '
'revision: %s, field: %s') % (message, text_count, tag_count,
tiddler_count, revision_count, field_count)
assert (text_count == tag_count == tiddler_count
== revision_count == field_count == count), message
count_em(1, '1 row for the tiddler everywhere')
store.delete(tiddler)
count_em(0, '0 rows for the tiddler everywhere')
|
|
7b0158277c4a4beb49c47ca7a50d22c4e2f1ec70
|
tempest/tests/test_waiters.py
|
tempest/tests/test_waiters.py
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import mock
from tempest.common import waiters
from tempest import exceptions
from tempest.tests import base
class TestImageWaiters(base.TestCase):
def setUp(self):
super(TestImageWaiters, self).setUp()
self.client = mock.MagicMock()
self.client.build_timeout = 1
self.client.build_interval = 1
def test_wait_for_image_status(self):
self.client.get_image.return_value = (None, {'status': 'active'})
start_time = int(time.time())
waiters.wait_for_image_status(self.client, 'fake_image_id', 'active')
end_time = int(time.time())
# Ensure waiter returns before build_timeout
self.assertTrue((end_time - start_time) < 10)
def test_wait_for_image_status_timeout(self):
self.client.get_image.return_value = (None, {'status': 'saving'})
self.assertRaises(exceptions.TimeoutException,
waiters.wait_for_image_status,
self.client, 'fake_image_id', 'active')
def test_wait_for_image_status_error_on_image_create(self):
self.client.get_image.return_value = (None, {'status': 'ERROR'})
self.assertRaises(exceptions.AddImageException,
waiters.wait_for_image_status,
self.client, 'fake_image_id', 'active')
|
Add unit tests for image waiter
|
Add unit tests for image waiter
This commit adds unit tests for the image waiter in
tempest.common.waiters. It adds tests for both the timeout case and
the success path.
Partially implements bp unit-tests
Co-Authored-With: Sean Dague <sean.dague@samsung.com>
Change-Id: Ib0501cd3bc323fd036444dacf884879963842e50
|
Python
|
apache-2.0
|
manasi24/tempest,vedujoshi/os_tempest,FujitsuEnablingSoftwareTechnologyGmbH/tempest,vmahuli/tempest,vedujoshi/tempest,Juraci/tempest,pandeyop/tempest,sebrandon1/tempest,masayukig/tempest,LIS/lis-tempest,roopali8/tempest,eggmaster/tempest,LIS/lis-tempest,Vaidyanath/tempest,cisco-openstack/tempest,JioCloud/tempest,Lilywei123/tempest,jamielennox/tempest,afaheem88/tempest_neutron,hayderimran7/tempest,akash1808/tempest,xbezdick/tempest,jaspreetw/tempest,vedujoshi/tempest,jaspreetw/tempest,zsoltdudas/lis-tempest,openstack/tempest,pandeyop/tempest,NexusIS/tempest,eggmaster/tempest,bigswitch/tempest,xbezdick/tempest,dkalashnik/tempest,redhat-cip/tempest,tudorvio/tempest,varunarya10/tempest,alinbalutoiu/tempest,CiscoSystems/tempest,flyingfish007/tempest,tudorvio/tempest,queria/my-tempest,alinbalutoiu/tempest,afaheem88/tempest,ebagdasa/tempest,danielmellado/tempest,hayderimran7/tempest,neerja28/Tempest,rakeshmi/tempest,rakeshmi/tempest,yamt/tempest,bigswitch/tempest,akash1808/tempest,afaheem88/tempest,Juniper/tempest,ebagdasa/tempest,FujitsuEnablingSoftwareTechnologyGmbH/tempest,cloudbase/lis-tempest,manasi24/jiocloud-tempest-qatempest,tonyli71/tempest,flyingfish007/tempest,roopali8/tempest,jamielennox/tempest,afaheem88/tempest_neutron,cisco-openstack/tempest,Lilywei123/tempest,varunarya10/tempest,JioCloud/tempest,nunogt/tempest,hpcloud-mon/tempest,Tesora/tesora-tempest,Juraci/tempest,nunogt/tempest,izadorozhna/tempest,izadorozhna/tempest,Tesora/tesora-tempest,tonyli71/tempest,queria/my-tempest,neerja28/Tempest,Mirantis/tempest,Vaidyanath/tempest,rzarzynski/tempest,Juniper/tempest,cloudbase/lis-tempest,Mirantis/tempest,manasi24/tempest,pczerkas/tempest,sebrandon1/tempest,NexusIS/tempest,zsoltdudas/lis-tempest,CiscoSystems/tempest,vedujoshi/os_tempest,danielmellado/tempest,dkalashnik/tempest,masayukig/tempest,yamt/tempest,openstack/tempest,manasi24/jiocloud-tempest-qatempest,pczerkas/tempest,vmahuli/tempest,redhat-cip/tempest,rzarzynski/tempest,hpcloud-mon/tempest
|
Add unit tests for image waiter
This commit adds unit tests for the image waiter in
tempest.common.waiters. It adds tests for both the timeout case and
the success path.
Partially implements bp unit-tests
Co-Authored-With: Sean Dague <sean.dague@samsung.com>
Change-Id: Ib0501cd3bc323fd036444dacf884879963842e50
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import mock
from tempest.common import waiters
from tempest import exceptions
from tempest.tests import base
class TestImageWaiters(base.TestCase):
def setUp(self):
super(TestImageWaiters, self).setUp()
self.client = mock.MagicMock()
self.client.build_timeout = 1
self.client.build_interval = 1
def test_wait_for_image_status(self):
self.client.get_image.return_value = (None, {'status': 'active'})
start_time = int(time.time())
waiters.wait_for_image_status(self.client, 'fake_image_id', 'active')
end_time = int(time.time())
# Ensure waiter returns before build_timeout
self.assertTrue((end_time - start_time) < 10)
def test_wait_for_image_status_timeout(self):
self.client.get_image.return_value = (None, {'status': 'saving'})
self.assertRaises(exceptions.TimeoutException,
waiters.wait_for_image_status,
self.client, 'fake_image_id', 'active')
def test_wait_for_image_status_error_on_image_create(self):
self.client.get_image.return_value = (None, {'status': 'ERROR'})
self.assertRaises(exceptions.AddImageException,
waiters.wait_for_image_status,
self.client, 'fake_image_id', 'active')
|
<commit_before><commit_msg>Add unit tests for image waiter
This commit adds unit tests for the image waiter in
tempest.common.waiters. It adds tests for both the timeout case and
the success path.
Partially implements bp unit-tests
Co-Authored-With: Sean Dague <sean.dague@samsung.com>
Change-Id: Ib0501cd3bc323fd036444dacf884879963842e50<commit_after>
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import mock
from tempest.common import waiters
from tempest import exceptions
from tempest.tests import base
class TestImageWaiters(base.TestCase):
def setUp(self):
super(TestImageWaiters, self).setUp()
self.client = mock.MagicMock()
self.client.build_timeout = 1
self.client.build_interval = 1
def test_wait_for_image_status(self):
self.client.get_image.return_value = (None, {'status': 'active'})
start_time = int(time.time())
waiters.wait_for_image_status(self.client, 'fake_image_id', 'active')
end_time = int(time.time())
# Ensure waiter returns before build_timeout
self.assertTrue((end_time - start_time) < 10)
def test_wait_for_image_status_timeout(self):
self.client.get_image.return_value = (None, {'status': 'saving'})
self.assertRaises(exceptions.TimeoutException,
waiters.wait_for_image_status,
self.client, 'fake_image_id', 'active')
def test_wait_for_image_status_error_on_image_create(self):
self.client.get_image.return_value = (None, {'status': 'ERROR'})
self.assertRaises(exceptions.AddImageException,
waiters.wait_for_image_status,
self.client, 'fake_image_id', 'active')
|
Add unit tests for image waiter
This commit adds unit tests for the image waiter in
tempest.common.waiters. It adds tests for both the timeout case and
the success path.
Partially implements bp unit-tests
Co-Authored-With: Sean Dague <sean.dague@samsung.com>
Change-Id: Ib0501cd3bc323fd036444dacf884879963842e50# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import mock
from tempest.common import waiters
from tempest import exceptions
from tempest.tests import base
class TestImageWaiters(base.TestCase):
def setUp(self):
super(TestImageWaiters, self).setUp()
self.client = mock.MagicMock()
self.client.build_timeout = 1
self.client.build_interval = 1
def test_wait_for_image_status(self):
self.client.get_image.return_value = (None, {'status': 'active'})
start_time = int(time.time())
waiters.wait_for_image_status(self.client, 'fake_image_id', 'active')
end_time = int(time.time())
# Ensure waiter returns before build_timeout
self.assertTrue((end_time - start_time) < 10)
def test_wait_for_image_status_timeout(self):
self.client.get_image.return_value = (None, {'status': 'saving'})
self.assertRaises(exceptions.TimeoutException,
waiters.wait_for_image_status,
self.client, 'fake_image_id', 'active')
def test_wait_for_image_status_error_on_image_create(self):
self.client.get_image.return_value = (None, {'status': 'ERROR'})
self.assertRaises(exceptions.AddImageException,
waiters.wait_for_image_status,
self.client, 'fake_image_id', 'active')
|
<commit_before><commit_msg>Add unit tests for image waiter
This commit adds unit tests for the image waiter in
tempest.common.waiters. It adds tests for both the timeout case and
the success path.
Partially implements bp unit-tests
Co-Authored-With: Sean Dague <sean.dague@samsung.com>
Change-Id: Ib0501cd3bc323fd036444dacf884879963842e50<commit_after># Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import mock
from tempest.common import waiters
from tempest import exceptions
from tempest.tests import base
class TestImageWaiters(base.TestCase):
def setUp(self):
super(TestImageWaiters, self).setUp()
self.client = mock.MagicMock()
self.client.build_timeout = 1
self.client.build_interval = 1
def test_wait_for_image_status(self):
self.client.get_image.return_value = (None, {'status': 'active'})
start_time = int(time.time())
waiters.wait_for_image_status(self.client, 'fake_image_id', 'active')
end_time = int(time.time())
# Ensure waiter returns before build_timeout
self.assertTrue((end_time - start_time) < 10)
def test_wait_for_image_status_timeout(self):
self.client.get_image.return_value = (None, {'status': 'saving'})
self.assertRaises(exceptions.TimeoutException,
waiters.wait_for_image_status,
self.client, 'fake_image_id', 'active')
def test_wait_for_image_status_error_on_image_create(self):
self.client.get_image.return_value = (None, {'status': 'ERROR'})
self.assertRaises(exceptions.AddImageException,
waiters.wait_for_image_status,
self.client, 'fake_image_id', 'active')
|
|
1702cca5bd207d90a796b5cc4fc61d4d574be929
|
tests/test_header.py
|
tests/test_header.py
|
"""
test_header
~~~~~~~~~~~
Contains tests for the :mod:`~adbwp.header` module.
"""
import pytest
from adbwp import header
@pytest.mark.xfail(reason='Not Implemented')
def test_stub():
assert False
|
Add test module for header module.
|
Add test module for header module.
|
Python
|
apache-2.0
|
adbpy/wire-protocol
|
Add test module for header module.
|
"""
test_header
~~~~~~~~~~~
Contains tests for the :mod:`~adbwp.header` module.
"""
import pytest
from adbwp import header
@pytest.mark.xfail(reason='Not Implemented')
def test_stub():
assert False
|
<commit_before><commit_msg>Add test module for header module.<commit_after>
|
"""
test_header
~~~~~~~~~~~
Contains tests for the :mod:`~adbwp.header` module.
"""
import pytest
from adbwp import header
@pytest.mark.xfail(reason='Not Implemented')
def test_stub():
assert False
|
Add test module for header module."""
test_header
~~~~~~~~~~~
Contains tests for the :mod:`~adbwp.header` module.
"""
import pytest
from adbwp import header
@pytest.mark.xfail(reason='Not Implemented')
def test_stub():
assert False
|
<commit_before><commit_msg>Add test module for header module.<commit_after>"""
test_header
~~~~~~~~~~~
Contains tests for the :mod:`~adbwp.header` module.
"""
import pytest
from adbwp import header
@pytest.mark.xfail(reason='Not Implemented')
def test_stub():
assert False
|
|
b7ff17c9b7de6860ec94cce8a516165a58b4b22e
|
aegea/lambda.py
|
aegea/lambda.py
|
"""
Manage AWS Lambda functions and their event sources
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, argparse, collections, random, string
from . import config, logger
from .ls import register_parser, register_listing_parser
from .util import Timestamp, paginate
from .util.printing import page_output, tabulate
from .util.aws import resources, clients
def _lambda(args):
lambda_parser.print_help()
lambda_parser = register_parser(_lambda, name="lambda", help=__doc__.strip())
def ls(args):
paginator = getattr(clients, "lambda").get_paginator("list_functions")
page_output(tabulate(paginate(paginator), args, cell_transforms={"LastModified": Timestamp}))
parser_ls = register_parser(ls, parent=lambda_parser)
|
Add file missed in 0c99863
|
Add file missed in 0c99863
|
Python
|
apache-2.0
|
kislyuk/aegea,kislyuk/aegea,wholebiome/aegea,wholebiome/aegea,kislyuk/aegea,wholebiome/aegea
|
Add file missed in 0c99863
|
"""
Manage AWS Lambda functions and their event sources
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, argparse, collections, random, string
from . import config, logger
from .ls import register_parser, register_listing_parser
from .util import Timestamp, paginate
from .util.printing import page_output, tabulate
from .util.aws import resources, clients
def _lambda(args):
lambda_parser.print_help()
lambda_parser = register_parser(_lambda, name="lambda", help=__doc__.strip())
def ls(args):
paginator = getattr(clients, "lambda").get_paginator("list_functions")
page_output(tabulate(paginate(paginator), args, cell_transforms={"LastModified": Timestamp}))
parser_ls = register_parser(ls, parent=lambda_parser)
|
<commit_before><commit_msg>Add file missed in 0c99863<commit_after>
|
"""
Manage AWS Lambda functions and their event sources
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, argparse, collections, random, string
from . import config, logger
from .ls import register_parser, register_listing_parser
from .util import Timestamp, paginate
from .util.printing import page_output, tabulate
from .util.aws import resources, clients
def _lambda(args):
lambda_parser.print_help()
lambda_parser = register_parser(_lambda, name="lambda", help=__doc__.strip())
def ls(args):
paginator = getattr(clients, "lambda").get_paginator("list_functions")
page_output(tabulate(paginate(paginator), args, cell_transforms={"LastModified": Timestamp}))
parser_ls = register_parser(ls, parent=lambda_parser)
|
Add file missed in 0c99863"""
Manage AWS Lambda functions and their event sources
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, argparse, collections, random, string
from . import config, logger
from .ls import register_parser, register_listing_parser
from .util import Timestamp, paginate
from .util.printing import page_output, tabulate
from .util.aws import resources, clients
def _lambda(args):
lambda_parser.print_help()
lambda_parser = register_parser(_lambda, name="lambda", help=__doc__.strip())
def ls(args):
paginator = getattr(clients, "lambda").get_paginator("list_functions")
page_output(tabulate(paginate(paginator), args, cell_transforms={"LastModified": Timestamp}))
parser_ls = register_parser(ls, parent=lambda_parser)
|
<commit_before><commit_msg>Add file missed in 0c99863<commit_after>"""
Manage AWS Lambda functions and their event sources
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, argparse, collections, random, string
from . import config, logger
from .ls import register_parser, register_listing_parser
from .util import Timestamp, paginate
from .util.printing import page_output, tabulate
from .util.aws import resources, clients
def _lambda(args):
lambda_parser.print_help()
lambda_parser = register_parser(_lambda, name="lambda", help=__doc__.strip())
def ls(args):
paginator = getattr(clients, "lambda").get_paginator("list_functions")
page_output(tabulate(paginate(paginator), args, cell_transforms={"LastModified": Timestamp}))
parser_ls = register_parser(ls, parent=lambda_parser)
|
|
16b1aa5d3d88f48dcc0047342501a8a52cd87c34
|
tools/export_wiki.py
|
tools/export_wiki.py
|
"""This script exports the wiki pages from Redmine.
All wiki pages on Redmine (https://cocomud.plan.io) are saved in the
'doc' directory.
Requirements:
This script needs 'python-redmine', which you can obtain with
pip install python-redmine
This script also needs BeautifulSoup:
pip install BeautifulSoup
"""
import os
import urllib2
from BeautifulSoup import BeautifulSoup
from redmine import Redmine
lang = "en"
doc = "../doc/{lang}".format(lang=lang)
# Connects to the REST API
redmine = Redmine("https://cocomud.plan.io")
# Gets the wiki pages
pages = redmine.wiki_page.filter(project_id="cocomud-client")
for page in pages:
url = "https://cocomud.plan.io/projects/cocomud-client/wiki/" \
"{title}.html".format(title=page.title)
print "Triggering URL", url
response = urllib2.urlopen(url)
content = response.read()
soup = BeautifulSoup(content)
# Find the links
for link in soup.findAll("a"):
try:
href = link["href"]
except KeyError:
continue
if href.startswith("/projects/cocomud-client/wiki/"):
link["href"] = href[30:] + ".html"
elif href.startswith("."):
link["href"] = "https://cocomud.plan.io" + href
# Write the exported HTML file
path = os.path.join(doc, page.title + ".html")
print "Writing", page.title, "in", path
file = open(path, "w")
file.write(str(soup))
file.close()
|
Add the tool to export the wiki pages from Planio
|
Add the tool to export the wiki pages from Planio
|
Python
|
bsd-3-clause
|
vlegoff/cocomud
|
Add the tool to export the wiki pages from Planio
|
"""This script exports the wiki pages from Redmine.
All wiki pages on Redmine (https://cocomud.plan.io) are saved in the
'doc' directory.
Requirements:
This script needs 'python-redmine', which you can obtain with
pip install python-redmine
This script also needs BeautifulSoup:
pip install BeautifulSoup
"""
import os
import urllib2
from BeautifulSoup import BeautifulSoup
from redmine import Redmine
lang = "en"
doc = "../doc/{lang}".format(lang=lang)
# Connects to the REST API
redmine = Redmine("https://cocomud.plan.io")
# Gets the wiki pages
pages = redmine.wiki_page.filter(project_id="cocomud-client")
for page in pages:
url = "https://cocomud.plan.io/projects/cocomud-client/wiki/" \
"{title}.html".format(title=page.title)
print "Triggering URL", url
response = urllib2.urlopen(url)
content = response.read()
soup = BeautifulSoup(content)
# Find the links
for link in soup.findAll("a"):
try:
href = link["href"]
except KeyError:
continue
if href.startswith("/projects/cocomud-client/wiki/"):
link["href"] = href[30:] + ".html"
elif href.startswith("."):
link["href"] = "https://cocomud.plan.io" + href
# Write the exported HTML file
path = os.path.join(doc, page.title + ".html")
print "Writing", page.title, "in", path
file = open(path, "w")
file.write(str(soup))
file.close()
|
<commit_before><commit_msg>Add the tool to export the wiki pages from Planio<commit_after>
|
"""This script exports the wiki pages from Redmine.
All wiki pages on Redmine (https://cocomud.plan.io) are saved in the
'doc' directory.
Requirements:
This script needs 'python-redmine', which you can obtain with
pip install python-redmine
This script also needs BeautifulSoup:
pip install BeautifulSoup
"""
import os
import urllib2
from BeautifulSoup import BeautifulSoup
from redmine import Redmine
lang = "en"
doc = "../doc/{lang}".format(lang=lang)
# Connects to the REST API
redmine = Redmine("https://cocomud.plan.io")
# Gets the wiki pages
pages = redmine.wiki_page.filter(project_id="cocomud-client")
for page in pages:
url = "https://cocomud.plan.io/projects/cocomud-client/wiki/" \
"{title}.html".format(title=page.title)
print "Triggering URL", url
response = urllib2.urlopen(url)
content = response.read()
soup = BeautifulSoup(content)
# Find the links
for link in soup.findAll("a"):
try:
href = link["href"]
except KeyError:
continue
if href.startswith("/projects/cocomud-client/wiki/"):
link["href"] = href[30:] + ".html"
elif href.startswith("."):
link["href"] = "https://cocomud.plan.io" + href
# Write the exported HTML file
path = os.path.join(doc, page.title + ".html")
print "Writing", page.title, "in", path
file = open(path, "w")
file.write(str(soup))
file.close()
|
Add the tool to export the wiki pages from Planio"""This script exports the wiki pages from Redmine.
All wiki pages on Redmine (https://cocomud.plan.io) are saved in the
'doc' directory.
Requirements:
This script needs 'python-redmine', which you can obtain with
pip install python-redmine
This script also needs BeautifulSoup:
pip install BeautifulSoup
"""
import os
import urllib2
from BeautifulSoup import BeautifulSoup
from redmine import Redmine
lang = "en"
doc = "../doc/{lang}".format(lang=lang)
# Connects to the REST API
redmine = Redmine("https://cocomud.plan.io")
# Gets the wiki pages
pages = redmine.wiki_page.filter(project_id="cocomud-client")
for page in pages:
url = "https://cocomud.plan.io/projects/cocomud-client/wiki/" \
"{title}.html".format(title=page.title)
print "Triggering URL", url
response = urllib2.urlopen(url)
content = response.read()
soup = BeautifulSoup(content)
# Find the links
for link in soup.findAll("a"):
try:
href = link["href"]
except KeyError:
continue
if href.startswith("/projects/cocomud-client/wiki/"):
link["href"] = href[30:] + ".html"
elif href.startswith("."):
link["href"] = "https://cocomud.plan.io" + href
# Write the exported HTML file
path = os.path.join(doc, page.title + ".html")
print "Writing", page.title, "in", path
file = open(path, "w")
file.write(str(soup))
file.close()
|
<commit_before><commit_msg>Add the tool to export the wiki pages from Planio<commit_after>"""This script exports the wiki pages from Redmine.
All wiki pages on Redmine (https://cocomud.plan.io) are saved in the
'doc' directory.
Requirements:
This script needs 'python-redmine', which you can obtain with
pip install python-redmine
This script also needs BeautifulSoup:
pip install BeautifulSoup
"""
import os
import urllib2
from BeautifulSoup import BeautifulSoup
from redmine import Redmine
lang = "en"
doc = "../doc/{lang}".format(lang=lang)
# Connects to the REST API
redmine = Redmine("https://cocomud.plan.io")
# Gets the wiki pages
pages = redmine.wiki_page.filter(project_id="cocomud-client")
for page in pages:
url = "https://cocomud.plan.io/projects/cocomud-client/wiki/" \
"{title}.html".format(title=page.title)
print "Triggering URL", url
response = urllib2.urlopen(url)
content = response.read()
soup = BeautifulSoup(content)
# Find the links
for link in soup.findAll("a"):
try:
href = link["href"]
except KeyError:
continue
if href.startswith("/projects/cocomud-client/wiki/"):
link["href"] = href[30:] + ".html"
elif href.startswith("."):
link["href"] = "https://cocomud.plan.io" + href
# Write the exported HTML file
path = os.path.join(doc, page.title + ".html")
print "Writing", page.title, "in", path
file = open(path, "w")
file.write(str(soup))
file.close()
|
|
a259b64d352d056f15354bac52436faaf7319456
|
tests/unit/test_eventlike_unit.py
|
tests/unit/test_eventlike_unit.py
|
from butter.eventfd import Eventfd
from butter.fanotify import Fanotify
from butter.inotify import Inotify
from butter.signalfd import Signalfd
from butter.timerfd import Timerfd
import pytest
import os
@pytest.fixture(params=[Eventfd, Fanotify, Inotify, Signalfd, Timerfd])
def obj(request):
Obj = request.param
o = Obj.__new__(Obj)
return o
@pytest.mark.eventlike
@pytest.mark.unit
def test_fd_closed(obj):
"""Ensure you cant close the same fd twice (as it may be reused"""
old_close = os.close
os.close = lambda fd: None
obj._fd = 1
obj.close()
with pytest.raises(ValueError):
obj.close()
os.close = old_close
|
Check closing the same file twice
|
Check closing the same file twice
|
Python
|
bsd-3-clause
|
wdv4758h/butter,dasSOZO/python-butter
|
Check closing the same file twice
|
from butter.eventfd import Eventfd
from butter.fanotify import Fanotify
from butter.inotify import Inotify
from butter.signalfd import Signalfd
from butter.timerfd import Timerfd
import pytest
import os
@pytest.fixture(params=[Eventfd, Fanotify, Inotify, Signalfd, Timerfd])
def obj(request):
Obj = request.param
o = Obj.__new__(Obj)
return o
@pytest.mark.eventlike
@pytest.mark.unit
def test_fd_closed(obj):
"""Ensure you cant close the same fd twice (as it may be reused"""
old_close = os.close
os.close = lambda fd: None
obj._fd = 1
obj.close()
with pytest.raises(ValueError):
obj.close()
os.close = old_close
|
<commit_before><commit_msg>Check closing the same file twice<commit_after>
|
from butter.eventfd import Eventfd
from butter.fanotify import Fanotify
from butter.inotify import Inotify
from butter.signalfd import Signalfd
from butter.timerfd import Timerfd
import pytest
import os
@pytest.fixture(params=[Eventfd, Fanotify, Inotify, Signalfd, Timerfd])
def obj(request):
Obj = request.param
o = Obj.__new__(Obj)
return o
@pytest.mark.eventlike
@pytest.mark.unit
def test_fd_closed(obj):
"""Ensure you cant close the same fd twice (as it may be reused"""
old_close = os.close
os.close = lambda fd: None
obj._fd = 1
obj.close()
with pytest.raises(ValueError):
obj.close()
os.close = old_close
|
Check closing the same file twicefrom butter.eventfd import Eventfd
from butter.fanotify import Fanotify
from butter.inotify import Inotify
from butter.signalfd import Signalfd
from butter.timerfd import Timerfd
import pytest
import os
@pytest.fixture(params=[Eventfd, Fanotify, Inotify, Signalfd, Timerfd])
def obj(request):
Obj = request.param
o = Obj.__new__(Obj)
return o
@pytest.mark.eventlike
@pytest.mark.unit
def test_fd_closed(obj):
"""Ensure you cant close the same fd twice (as it may be reused"""
old_close = os.close
os.close = lambda fd: None
obj._fd = 1
obj.close()
with pytest.raises(ValueError):
obj.close()
os.close = old_close
|
<commit_before><commit_msg>Check closing the same file twice<commit_after>from butter.eventfd import Eventfd
from butter.fanotify import Fanotify
from butter.inotify import Inotify
from butter.signalfd import Signalfd
from butter.timerfd import Timerfd
import pytest
import os
@pytest.fixture(params=[Eventfd, Fanotify, Inotify, Signalfd, Timerfd])
def obj(request):
Obj = request.param
o = Obj.__new__(Obj)
return o
@pytest.mark.eventlike
@pytest.mark.unit
def test_fd_closed(obj):
"""Ensure you cant close the same fd twice (as it may be reused"""
old_close = os.close
os.close = lambda fd: None
obj._fd = 1
obj.close()
with pytest.raises(ValueError):
obj.close()
os.close = old_close
|
|
55cd293695c5457df8168874734c668a6d028718
|
salt/_grains/digitalocean_metadata.py
|
salt/_grains/digitalocean_metadata.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: David Boucha
:copyright: © 2014 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
salt.grains.digitalocean_metadata.py
~~~~~~~~~~~~~~~~~~~~~~~
Create a DigitalOcean grain from the DigitalOcean metadata server.
See https://developers.digitalocean.com/metadata/#metadata-in-json
Note that not all datacenters were supported when this feature was first
released.
'''
# Import Python Libs
import requests
def digitalocean():
'''
Return DigitalOcean metadata.
'''
do_svr = 'http://169.254.169.254/metadata/v1.json'
metadata = {}
try:
response = requests.get(do_svr, timeout=0.2)
if response.status_code == 200:
metadata = response.json()
except requests.exceptions.RequestException:
pass
return {'digitalocean': metadata}
|
Add digital ocean metadata grains
|
Add digital ocean metadata grains
|
Python
|
mit
|
thusoy/salt-states,thusoy/salt-states,thusoy/salt-states,thusoy/salt-states
|
Add digital ocean metadata grains
|
# -*- coding: utf-8 -*-
'''
:codeauthor: David Boucha
:copyright: © 2014 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
salt.grains.digitalocean_metadata.py
~~~~~~~~~~~~~~~~~~~~~~~
Create a DigitalOcean grain from the DigitalOcean metadata server.
See https://developers.digitalocean.com/metadata/#metadata-in-json
Note that not all datacenters were supported when this feature was first
released.
'''
# Import Python Libs
import requests
def digitalocean():
'''
Return DigitalOcean metadata.
'''
do_svr = 'http://169.254.169.254/metadata/v1.json'
metadata = {}
try:
response = requests.get(do_svr, timeout=0.2)
if response.status_code == 200:
metadata = response.json()
except requests.exceptions.RequestException:
pass
return {'digitalocean': metadata}
|
<commit_before><commit_msg>Add digital ocean metadata grains<commit_after>
|
# -*- coding: utf-8 -*-
'''
:codeauthor: David Boucha
:copyright: © 2014 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
salt.grains.digitalocean_metadata.py
~~~~~~~~~~~~~~~~~~~~~~~
Create a DigitalOcean grain from the DigitalOcean metadata server.
See https://developers.digitalocean.com/metadata/#metadata-in-json
Note that not all datacenters were supported when this feature was first
released.
'''
# Import Python Libs
import requests
def digitalocean():
'''
Return DigitalOcean metadata.
'''
do_svr = 'http://169.254.169.254/metadata/v1.json'
metadata = {}
try:
response = requests.get(do_svr, timeout=0.2)
if response.status_code == 200:
metadata = response.json()
except requests.exceptions.RequestException:
pass
return {'digitalocean': metadata}
|
Add digital ocean metadata grains# -*- coding: utf-8 -*-
'''
:codeauthor: David Boucha
:copyright: © 2014 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
salt.grains.digitalocean_metadata.py
~~~~~~~~~~~~~~~~~~~~~~~
Create a DigitalOcean grain from the DigitalOcean metadata server.
See https://developers.digitalocean.com/metadata/#metadata-in-json
Note that not all datacenters were supported when this feature was first
released.
'''
# Import Python Libs
import requests
def digitalocean():
'''
Return DigitalOcean metadata.
'''
do_svr = 'http://169.254.169.254/metadata/v1.json'
metadata = {}
try:
response = requests.get(do_svr, timeout=0.2)
if response.status_code == 200:
metadata = response.json()
except requests.exceptions.RequestException:
pass
return {'digitalocean': metadata}
|
<commit_before><commit_msg>Add digital ocean metadata grains<commit_after># -*- coding: utf-8 -*-
'''
:codeauthor: David Boucha
:copyright: © 2014 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
salt.grains.digitalocean_metadata.py
~~~~~~~~~~~~~~~~~~~~~~~
Create a DigitalOcean grain from the DigitalOcean metadata server.
See https://developers.digitalocean.com/metadata/#metadata-in-json
Note that not all datacenters were supported when this feature was first
released.
'''
# Import Python Libs
import requests
def digitalocean():
'''
Return DigitalOcean metadata.
'''
do_svr = 'http://169.254.169.254/metadata/v1.json'
metadata = {}
try:
response = requests.get(do_svr, timeout=0.2)
if response.status_code == 200:
metadata = response.json()
except requests.exceptions.RequestException:
pass
return {'digitalocean': metadata}
|
|
bbd4c21c40b060b2577e3ea16443617d45a63b6c
|
src/nodeconductor_openstack/migrations/0019_remove_payable_mixin.py
|
src/nodeconductor_openstack/migrations/0019_remove_payable_mixin.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('openstack', '0018_replace_security_group'),
]
operations = [
migrations.RemoveField(
model_name='instance',
name='billing_backend_id',
),
migrations.RemoveField(
model_name='instance',
name='last_usage_update_time',
),
]
|
Remove payable mixin - db migrations
|
Remove payable mixin - db migrations
- nc-1554
|
Python
|
mit
|
opennode/nodeconductor-openstack
|
Remove payable mixin - db migrations
- nc-1554
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('openstack', '0018_replace_security_group'),
]
operations = [
migrations.RemoveField(
model_name='instance',
name='billing_backend_id',
),
migrations.RemoveField(
model_name='instance',
name='last_usage_update_time',
),
]
|
<commit_before><commit_msg>Remove payable mixin - db migrations
- nc-1554<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('openstack', '0018_replace_security_group'),
]
operations = [
migrations.RemoveField(
model_name='instance',
name='billing_backend_id',
),
migrations.RemoveField(
model_name='instance',
name='last_usage_update_time',
),
]
|
Remove payable mixin - db migrations
- nc-1554# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('openstack', '0018_replace_security_group'),
]
operations = [
migrations.RemoveField(
model_name='instance',
name='billing_backend_id',
),
migrations.RemoveField(
model_name='instance',
name='last_usage_update_time',
),
]
|
<commit_before><commit_msg>Remove payable mixin - db migrations
- nc-1554<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('openstack', '0018_replace_security_group'),
]
operations = [
migrations.RemoveField(
model_name='instance',
name='billing_backend_id',
),
migrations.RemoveField(
model_name='instance',
name='last_usage_update_time',
),
]
|
|
fc2aafecf45716067c5bf860a877be2dfca4b7d3
|
satsolver/hamilton.py
|
satsolver/hamilton.py
|
#!/usr/bin/python
"""
Conversion of the Hamiltonian cycle problem to SAT.
"""
from boolean import *
def hamiltonian_cycle(l):
"""
Convert a directed graph to an instance of SAT that is satisfiable
precisely when the graph has a Hamiltonian cycle.
The graph is given as a list of ordered tuples representing directed edges.
Parallel edges (in the same direction) are not supported. The vertices of
the graph are assumed to be the endpoints of the listed edges (e.g., no
isolated vertices can be specified).
The function returns a boolean expression whose literals are of two types:
- ("e", u, v), where (u, v) is a directed edge in the given graph, and
- ("v", u, i), where u is a vertex and i is an integer between 0 and n-1,
where n is the number of vertices of the graph.
The returned expression is satisfiable precisely when the graph has a
Hamiltonian cycle. If a satisfying valuation is found, a Hamiltonian
cycle can be retrieved as follows:
- the set of all literals ("e", u, v) whose value is true corresponds to
the set of directed edges (u, v) in the Hamiltonian cycle, or
- the set of all literals ("v", u_i, i) whose value is true corresponds to
the cyclically ordered sequence (u_0, u_1, ..., u_{n-1}) of vertices
visited by the Hamiltonian cycle.
"""
terms = []
vertices = set(sum([list(e) for e in l], []))
lin = {u: [] for u in vertices}
lout = {u: [] for u in vertices}
for u, v in l:
lin[v].append(u)
lout[u].append(v)
n = len(vertices)
terms.append(("v", next(iter(vertices)), 0))
for u in vertices:
terms.append(Or([("v", u, i) for i in range(n)]))
terms.append(Or([("e", v, u) for v in lin[u]]))
terms.append(Or([("e", u, v) for v in lout[u]]))
for i in range(n):
for j in range(i+1, n):
terms.append(Not(And(("v", u, i), ("v", u, j))))
ll = lin[u]
m = len(ll)
for i in range(m):
v = ll[i]
for j in range(i+1, m):
terms.append(Not(And(("e", v, u), ("e", ll[j], u))))
ll = lout[u]
m = len(ll)
for i in range(m):
v = ll[i]
for j in range(i+1, m):
terms.append(Not(And(("e", u, v), ("e", u, ll[j]))))
for i in range(n):
terms.append(Implies(And(("v", u, i), ("e", u, v)),
("v", v, (i+1) % n)))
return And(terms)
|
Add a conversion from the Hamiltonian cycle problem to SAT
|
Add a conversion from the Hamiltonian cycle problem to SAT
|
Python
|
mit
|
jaanos/LVR-2016,jaanos/LVR-2016
|
Add a conversion from the Hamiltonian cycle problem to SAT
|
#!/usr/bin/python
"""
Conversion of the Hamiltonian cycle problem to SAT.
"""
from boolean import *
def hamiltonian_cycle(l):
"""
Convert a directed graph to an instance of SAT that is satisfiable
precisely when the graph has a Hamiltonian cycle.
The graph is given as a list of ordered tuples representing directed edges.
Parallel edges (in the same direction) are not supported. The vertices of
the graph are assumed to be the endpoints of the listed edges (e.g., no
isolated vertices can be specified).
The function returns a boolean expression whose literals are of two types:
- ("e", u, v), where (u, v) is a directed edge in the given graph, and
- ("v", u, i), where u is a vertex and i is an integer between 0 and n-1,
where n is the number of vertices of the graph.
The returned expression is satisfiable precisely when the graph has a
Hamiltonian cycle. If a satisfying valuation is found, a Hamiltonian
cycle can be retrieved as follows:
- the set of all literals ("e", u, v) whose value is true corresponds to
the set of directed edges (u, v) in the Hamiltonian cycle, or
- the set of all literals ("v", u_i, i) whose value is true corresponds to
the cyclically ordered sequence (u_0, u_1, ..., u_{n-1}) of vertices
visited by the Hamiltonian cycle.
"""
terms = []
vertices = set(sum([list(e) for e in l], []))
lin = {u: [] for u in vertices}
lout = {u: [] for u in vertices}
for u, v in l:
lin[v].append(u)
lout[u].append(v)
n = len(vertices)
terms.append(("v", next(iter(vertices)), 0))
for u in vertices:
terms.append(Or([("v", u, i) for i in range(n)]))
terms.append(Or([("e", v, u) for v in lin[u]]))
terms.append(Or([("e", u, v) for v in lout[u]]))
for i in range(n):
for j in range(i+1, n):
terms.append(Not(And(("v", u, i), ("v", u, j))))
ll = lin[u]
m = len(ll)
for i in range(m):
v = ll[i]
for j in range(i+1, m):
terms.append(Not(And(("e", v, u), ("e", ll[j], u))))
ll = lout[u]
m = len(ll)
for i in range(m):
v = ll[i]
for j in range(i+1, m):
terms.append(Not(And(("e", u, v), ("e", u, ll[j]))))
for i in range(n):
terms.append(Implies(And(("v", u, i), ("e", u, v)),
("v", v, (i+1) % n)))
return And(terms)
|
<commit_before><commit_msg>Add a conversion from the Hamiltonian cycle problem to SAT<commit_after>
|
#!/usr/bin/python
"""
Conversion of the Hamiltonian cycle problem to SAT.
"""
from boolean import *
def hamiltonian_cycle(l):
"""
Convert a directed graph to an instance of SAT that is satisfiable
precisely when the graph has a Hamiltonian cycle.
The graph is given as a list of ordered tuples representing directed edges.
Parallel edges (in the same direction) are not supported. The vertices of
the graph are assumed to be the endpoints of the listed edges (e.g., no
isolated vertices can be specified).
The function returns a boolean expression whose literals are of two types:
- ("e", u, v), where (u, v) is a directed edge in the given graph, and
- ("v", u, i), where u is a vertex and i is an integer between 0 and n-1,
where n is the number of vertices of the graph.
The returned expression is satisfiable precisely when the graph has a
Hamiltonian cycle. If a satisfying valuation is found, a Hamiltonian
cycle can be retrieved as follows:
- the set of all literals ("e", u, v) whose value is true corresponds to
the set of directed edges (u, v) in the Hamiltonian cycle, or
- the set of all literals ("v", u_i, i) whose value is true corresponds to
the cyclically ordered sequence (u_0, u_1, ..., u_{n-1}) of vertices
visited by the Hamiltonian cycle.
"""
terms = []
vertices = set(sum([list(e) for e in l], []))
lin = {u: [] for u in vertices}
lout = {u: [] for u in vertices}
for u, v in l:
lin[v].append(u)
lout[u].append(v)
n = len(vertices)
terms.append(("v", next(iter(vertices)), 0))
for u in vertices:
terms.append(Or([("v", u, i) for i in range(n)]))
terms.append(Or([("e", v, u) for v in lin[u]]))
terms.append(Or([("e", u, v) for v in lout[u]]))
for i in range(n):
for j in range(i+1, n):
terms.append(Not(And(("v", u, i), ("v", u, j))))
ll = lin[u]
m = len(ll)
for i in range(m):
v = ll[i]
for j in range(i+1, m):
terms.append(Not(And(("e", v, u), ("e", ll[j], u))))
ll = lout[u]
m = len(ll)
for i in range(m):
v = ll[i]
for j in range(i+1, m):
terms.append(Not(And(("e", u, v), ("e", u, ll[j]))))
for i in range(n):
terms.append(Implies(And(("v", u, i), ("e", u, v)),
("v", v, (i+1) % n)))
return And(terms)
|
Add a conversion from the Hamiltonian cycle problem to SAT#!/usr/bin/python
"""
Conversion of the Hamiltonian cycle problem to SAT.
"""
from boolean import *
def hamiltonian_cycle(l):
"""
Convert a directed graph to an instance of SAT that is satisfiable
precisely when the graph has a Hamiltonian cycle.
The graph is given as a list of ordered tuples representing directed edges.
Parallel edges (in the same direction) are not supported. The vertices of
the graph are assumed to be the endpoints of the listed edges (e.g., no
isolated vertices can be specified).
The function returns a boolean expression whose literals are of two types:
- ("e", u, v), where (u, v) is a directed edge in the given graph, and
- ("v", u, i), where u is a vertex and i is an integer between 0 and n-1,
where n is the number of vertices of the graph.
The returned expression is satisfiable precisely when the graph has a
Hamiltonian cycle. If a satisfying valuation is found, a Hamiltonian
cycle can be retrieved as follows:
- the set of all literals ("e", u, v) whose value is true corresponds to
the set of directed edges (u, v) in the Hamiltonian cycle, or
- the set of all literals ("v", u_i, i) whose value is true corresponds to
the cyclically ordered sequence (u_0, u_1, ..., u_{n-1}) of vertices
visited by the Hamiltonian cycle.
"""
terms = []
vertices = set(sum([list(e) for e in l], []))
lin = {u: [] for u in vertices}
lout = {u: [] for u in vertices}
for u, v in l:
lin[v].append(u)
lout[u].append(v)
n = len(vertices)
terms.append(("v", next(iter(vertices)), 0))
for u in vertices:
terms.append(Or([("v", u, i) for i in range(n)]))
terms.append(Or([("e", v, u) for v in lin[u]]))
terms.append(Or([("e", u, v) for v in lout[u]]))
for i in range(n):
for j in range(i+1, n):
terms.append(Not(And(("v", u, i), ("v", u, j))))
ll = lin[u]
m = len(ll)
for i in range(m):
v = ll[i]
for j in range(i+1, m):
terms.append(Not(And(("e", v, u), ("e", ll[j], u))))
ll = lout[u]
m = len(ll)
for i in range(m):
v = ll[i]
for j in range(i+1, m):
terms.append(Not(And(("e", u, v), ("e", u, ll[j]))))
for i in range(n):
terms.append(Implies(And(("v", u, i), ("e", u, v)),
("v", v, (i+1) % n)))
return And(terms)
|
<commit_before><commit_msg>Add a conversion from the Hamiltonian cycle problem to SAT<commit_after>#!/usr/bin/python
"""
Conversion of the Hamiltonian cycle problem to SAT.
"""
from boolean import *
def hamiltonian_cycle(l):
"""
Convert a directed graph to an instance of SAT that is satisfiable
precisely when the graph has a Hamiltonian cycle.
The graph is given as a list of ordered tuples representing directed edges.
Parallel edges (in the same direction) are not supported. The vertices of
the graph are assumed to be the endpoints of the listed edges (e.g., no
isolated vertices can be specified).
The function returns a boolean expression whose literals are of two types:
- ("e", u, v), where (u, v) is a directed edge in the given graph, and
- ("v", u, i), where u is a vertex and i is an integer between 0 and n-1,
where n is the number of vertices of the graph.
The returned expression is satisfiable precisely when the graph has a
Hamiltonian cycle. If a satisfying valuation is found, a Hamiltonian
cycle can be retrieved as follows:
- the set of all literals ("e", u, v) whose value is true corresponds to
the set of directed edges (u, v) in the Hamiltonian cycle, or
- the set of all literals ("v", u_i, i) whose value is true corresponds to
the cyclically ordered sequence (u_0, u_1, ..., u_{n-1}) of vertices
visited by the Hamiltonian cycle.
"""
terms = []
vertices = set(sum([list(e) for e in l], []))
lin = {u: [] for u in vertices}
lout = {u: [] for u in vertices}
for u, v in l:
lin[v].append(u)
lout[u].append(v)
n = len(vertices)
terms.append(("v", next(iter(vertices)), 0))
for u in vertices:
terms.append(Or([("v", u, i) for i in range(n)]))
terms.append(Or([("e", v, u) for v in lin[u]]))
terms.append(Or([("e", u, v) for v in lout[u]]))
for i in range(n):
for j in range(i+1, n):
terms.append(Not(And(("v", u, i), ("v", u, j))))
ll = lin[u]
m = len(ll)
for i in range(m):
v = ll[i]
for j in range(i+1, m):
terms.append(Not(And(("e", v, u), ("e", ll[j], u))))
ll = lout[u]
m = len(ll)
for i in range(m):
v = ll[i]
for j in range(i+1, m):
terms.append(Not(And(("e", u, v), ("e", u, ll[j]))))
for i in range(n):
terms.append(Implies(And(("v", u, i), ("e", u, v)),
("v", v, (i+1) % n)))
return And(terms)
|
|
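A minimal usage sketch for the conversion above (the `hamilton` module path, and the idea of handing the result to an external SAT solver, are assumptions for illustration, not part of the commit):

from hamilton import hamiltonian_cycle

# A directed triangle 0 -> 1 -> 2 -> 0, which trivially has a Hamiltonian cycle.
edges = [(0, 1), (1, 2), (2, 0)]
expr = hamiltonian_cycle(edges)

# expr is a conjunction over literals ("e", u, v) and ("v", u, i); feeding it to
# a SAT solver for this boolean representation and collecting the true
# ("e", u, v) literals recovers the cycle's edge set, as described in the docstring.
print(expr)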
86fc6daa9e823370735de2061ad8765f44898aa8
|
unicode/check_utf8.py
|
unicode/check_utf8.py
|
#!/usr/bin/env python
# Check whether a file contains valid UTF-8
# From http://stackoverflow.com/a/3269323
import codecs
import sys
def checkFile(filename):
try:
with codecs.open(filename, encoding='utf-8', errors='strict') as f:
for line in f:
pass
return 0
except IOError as e:
sys.stderr.write('IO error: %s\n' % e)
return 2
except UnicodeDecodeError:
sys.stdout.write('%s contains invalid UTF-8\n' % filename)
return 1
if __name__ == '__main__':
if len(sys.argv) != 2:
p = sys.argv[0]
sys.stderr.write('Usage: ' + p[p.rfind('/') + 1:] + ' <filename>\n')
sys.exit(2)
r = checkFile(sys.argv[1])
sys.exit(r)
|
Add script for checking file is valid utf8
|
Add script for checking file is valid utf8
|
Python
|
mit
|
manics/shell-tools,manics/shell-tools
|
Add script for checking file is valid utf8
|
#!/usr/bin/env python
# Check whether a file contains valid UTF-8
# From http://stackoverflow.com/a/3269323
import codecs
import sys
def checkFile(filename):
try:
with codecs.open(filename, encoding='utf-8', errors='strict') as f:
for line in f:
pass
return 0
except IOError as e:
sys.stderr.write('IO error: %s\n' % e)
return 2
except UnicodeDecodeError:
sys.stdout.write('%s contains invalid UTF-8\n' % filename)
return 1
if __name__ == '__main__':
if len(sys.argv) != 2:
p = sys.argv[0]
sys.stderr.write('Usage: ' + p[p.rfind('/') + 1:] + ' <filename>\n')
sys.exit(2)
r = checkFile(sys.argv[1])
sys.exit(r)
|
<commit_before><commit_msg>Add script for checking file is valid utf8<commit_after>
|
#!/usr/bin/env python
# Check whether a file contains valid UTF-8
# From http://stackoverflow.com/a/3269323
import codecs
import sys
def checkFile(filename):
try:
with codecs.open(filename, encoding='utf-8', errors='strict') as f:
for line in f:
pass
return 0
except IOError as e:
sys.stderr.write('IO error: %s\n' % e)
return 2
except UnicodeDecodeError:
sys.stdout.write('%s contains invalid UTF-8\n' % filename)
return 1
if __name__ == '__main__':
if len(sys.argv) != 2:
p = sys.argv[0]
sys.stderr.write('Usage: ' + p[p.rfind('/') + 1:] + ' <filename>\n')
sys.exit(2)
r = checkFile(sys.argv[1])
sys.exit(r)
|
Add script for checking file is valid utf8#!/usr/bin/env python
# Check whether a file contains valid UTF-8
# From http://stackoverflow.com/a/3269323
import codecs
import sys
def checkFile(filename):
try:
with codecs.open(filename, encoding='utf-8', errors='strict') as f:
for line in f:
pass
return 0
except IOError as e:
sys.stderr.write('IO error: %s\n' % e)
return 2
except UnicodeDecodeError:
sys.stdout.write('%s contains invalid UTF-8\n' % filename)
return 1
if __name__ == '__main__':
if len(sys.argv) != 2:
p = sys.argv[0]
sys.stderr.write('Usage: ' + p[p.rfind('/') + 1:] + ' <filename>\n')
sys.exit(2)
r = checkFile(sys.argv[1])
sys.exit(r)
|
<commit_before><commit_msg>Add script for checking file is valid utf8<commit_after>#!/usr/bin/env python
# Check whether a file contains valid UTF-8
# From http://stackoverflow.com/a/3269323
import codecs
import sys
def checkFile(filename):
try:
with codecs.open(filename, encoding='utf-8', errors='strict') as f:
for line in f:
pass
return 0
except IOError as e:
sys.stderr.write('IO error: %s\n' % e)
return 2
except UnicodeDecodeError:
sys.stdout.write('%s contains invalid UTF-8\n' % filename)
return 1
if __name__ == '__main__':
if len(sys.argv) != 2:
p = sys.argv[0]
sys.stderr.write('Usage: ' + p[p.rfind('/') + 1:] + ' <filename>\n')
sys.exit(2)
r = checkFile(sys.argv[1])
sys.exit(r)
|
|
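A small usage sketch exercising checkFile() from the script above directly (the file name and byte content are invented for illustration):

from check_utf8 import checkFile

with open("latin1-sample.txt", "wb") as f:
    f.write(b"caf\xe9")                 # a lone 0xE9 byte is not valid UTF-8

assert checkFile("latin1-sample.txt") == 1   # also prints "... contains invalid UTF-8"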
b1dcd0edf943f1c849f103a355f4945c59467ca3
|
tuto-samples/arduino/stats.py
|
tuto-samples/arduino/stats.py
|
#!/usr/bin/python
# encoding: utf-8
# In order to use this script from shell:
# > ./build-samples >tempsizes
# > cat tempsizes | ./stats.py >sizes
# > rm tempsizes
# Then sizes file can be opened in LibreOffice Calc
from __future__ import with_statement
import argparse, re, sys
#TODO from example name, extract tutorial tag, index, and postfix
def filter(args):
bytes_extractor = re.compile(r"([0-9]+) bytes")
with args.output:
with args.input:
for line in args.input:
if line.find("Building... ") >= 0:
# Find example name (everything after last /)
example = line[line.find(" ") + 1:-1]
elif line.startswith("Sketch uses"):
# Find number of bytes of flash
matcher = bytes_extractor.search(line)
program = matcher.group(1)
elif line.startswith("Global variables use"):
# Find number of bytes of SRAM
matcher = bytes_extractor.search(line)
data = matcher.group(1)
# Write new line to output
args.output.write("%s\t%s\t%s\n" % (example, program, data))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'XXXXXXXX')
parser.add_argument('input', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
filter(args)
|
Add script to extract Arduino IDE program size.
|
Add script to extract Arduino IDE program size.
|
Python
|
lgpl-2.1
|
jfpoilpret/fast-arduino-lib,jfpoilpret/fast-arduino-lib,jfpoilpret/fast-arduino-lib,jfpoilpret/fast-arduino-lib
|
Add script to extract Arduino IDE program size.
|
#!/usr/bin/python
# encoding: utf-8
# In order to use this script from shell:
# > ./build-samples >tempsizes
# > cat tempsizes | ./stats.py >sizes
# > rm tempsizes
# Then sizes file can be opened in LibreOffice Calc
from __future__ import with_statement
import argparse, re, sys
#TODO from example name, extract tutorial tag, index, and postfix
def filter(args):
bytes_extractor = re.compile(r"([0-9]+) bytes")
with args.output:
with args.input:
for line in args.input:
if line.find("Building... ") >= 0:
# Find example name (everything after last /)
example = line[line.find(" ") + 1:-1]
elif line.startswith("Sketch uses"):
# Find number of bytes of flash
matcher = bytes_extractor.search(line)
program = matcher.group(1)
elif line.startswith("Global variables use"):
# Find number of bytes of SRAM
matcher = bytes_extractor.search(line)
data = matcher.group(1)
# Write new line to output
args.output.write("%s\t%s\t%s\n" % (example, program, data))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'XXXXXXXX')
parser.add_argument('input', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
filter(args)
|
<commit_before><commit_msg>Add script to extract Arduino IDE program size.<commit_after>
|
#!/usr/bin/python
# encoding: utf-8
# In order to use this script from shell:
# > ./build-samples >tempsizes
# > cat tempsizes | ./stats.py >sizes
# > rm tempsizes
# Then sizes file can be opened in LibreOffice Calc
from __future__ import with_statement
import argparse, re, sys
#TODO from example name, extract tutorial tag, index, and postfix
def filter(args):
bytes_extractor = re.compile(r"([0-9]+) bytes")
with args.output:
with args.input:
for line in args.input:
if line.find("Building... ") >= 0:
# Find example name (everything after last /)
example = line[line.find(" ") + 1:-1]
elif line.startswith("Sketch uses"):
# Find number of bytes of flash
matcher = bytes_extractor.search(line)
program = matcher.group(1)
elif line.startswith("Global variables use"):
# Find number of bytes of SRAM
matcher = bytes_extractor.search(line)
data = matcher.group(1)
# Write new line to output
args.output.write("%s\t%s\t%s\n" % (example, program, data))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'XXXXXXXX')
parser.add_argument('input', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
filter(args)
|
Add script to extract Arduino IDE program size.#!/usr/bin/python
# encoding: utf-8
# In order to use this script from shell:
# > ./build-samples >tempsizes
# > cat tempsizes | ./stats.py >sizes
# > rm tempsizes
# Then sizes file can be opened in LibreOffice Calc
from __future__ import with_statement
import argparse, re, sys
#TODO from example name, extract tutorial tag, index, and postfix
def filter(args):
bytes_extractor = re.compile(r"([0-9]+) bytes")
with args.output:
with args.input:
for line in args.input:
if line.find("Building... ") >= 0:
# Find example name (everything after last /)
example = line[line.find(" ") + 1:-1]
elif line.startswith("Sketch uses"):
# Find number of bytes of flash
matcher = bytes_extractor.search(line)
program = matcher.group(1)
elif line.startswith("Global variables use"):
# Find number of bytes of SRAM
matcher = bytes_extractor.search(line)
data = matcher.group(1)
# Write new line to output
args.output.write("%s\t%s\t%s\n" % (example, program, data))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'XXXXXXXX')
parser.add_argument('input', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
filter(args)
|
<commit_before><commit_msg>Add script to extract Arduino IDE program size.<commit_after>#!/usr/bin/python
# encoding: utf-8
# In order to use this script from shell:
# > ./build-samples >tempsizes
# > cat tempsizes | ./stats.py >sizes
# > rm tempsizes
# Then sizes file can be opened in LibreOffice Calc
from __future__ import with_statement
import argparse, re, sys
#TODO from example name, extract tutorial tag, index, and postfix
def filter(args):
bytes_extractor = re.compile(r"([0-9]+) bytes")
with args.output:
with args.input:
for line in args.input:
if line.find("Building... ") >= 0:
# Find example name (everything after last /)
example = line[line.find(" ") + 1:-1]
elif line.startswith("Sketch uses"):
# Find number of bytes of flash
matcher = bytes_extractor.search(line)
program = matcher.group(1)
elif line.startswith("Global variables use"):
# Find number of bytes of SRAM
matcher = bytes_extractor.search(line)
data = matcher.group(1)
# Write new line to output
args.output.write("%s\t%s\t%s\n" % (example, program, data))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'XXXXXXXX')
parser.add_argument('input', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
filter(args)
|
|
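A hedged illustration of what the filter extracts; the log lines are made-up Arduino IDE output, not taken from the commit:

import re
bytes_extractor = re.compile(r"([0-9]+) bytes")
line = "Sketch uses 1234 bytes (4%) of program storage space."   # invented IDE line
print(bytes_extractor.search(line).group(1))                     # -> 1234

# Fed a full build log on stdin, e.g.
#   Building... tutorial/step1_blink
#   Sketch uses 1234 bytes (4%) of program storage space.
#   Global variables use 56 bytes (2%) of dynamic memory.
# the script emits one tab-separated row per example: tutorial/step1_blink  1234  56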
c5caecc621107326813dc0193257810f530f7eb8
|
scripts/missing-qq.py
|
scripts/missing-qq.py
|
import os
import xml.etree.ElementTree as ET
RES_FOLDER = os.path.abspath(os.path.join(os.path.dirname(__file__), "../wikipedia/res"))
EN_STRINGS = os.path.join(RES_FOLDER, "values/strings.xml")
QQ_STRINGS = os.path.join(RES_FOLDER, "values-qq/strings.xml")
# Get ElementTree containing all message names in English
enroot = ET.parse(EN_STRINGS).getroot()
# Get ElementTree containing all documented messages
qqroot = ET.parse(QQ_STRINGS).getroot()
# Create a set to store all documented messages
qqmsgs = set()
# Add all documented messages to that set
for child in qqroot:
qqmsgs.add(child.attrib['name'])
# Iterate through all messages and check that they're documented
for child in enroot:
if child.attrib['name'] not in qqmsgs:
print child.attrib['name'] + " is undocumented!"
|
Add script to find undocumented translations.
|
Add script to find undocumented translations.
To my knowledge there's no convenient way to find out if a string is missing a
translation into a particular language. For us this means that it's not easy
to check all of our strings and make sure they have documentation for our
translators. This patch adds a Python script that, when run, will tell you
what messages are undocumented.
Change-Id: I33b51f314aea841dda9f11bbcaf22158ee73960b
|
Python
|
apache-2.0
|
Wikinaut/wikipedia-app,Duct-and-rice/KrswtkhrWiki4Android,reproio/apps-android-wikipedia,Wikinaut/wikipedia-app,parvez3019/apps-android-wikipedia,SAGROUP2/apps-android-wikipedia,Wikinaut/wikipedia-app,wikimedia/apps-android-wikipedia,wikimedia/apps-android-wikipedia,reproio/apps-android-wikipedia,carloshwa/apps-android-wikipedia,reproio/apps-android-wikipedia,anirudh24seven/apps-android-wikipedia,SAGROUP2/apps-android-wikipedia,carloshwa/apps-android-wikipedia,anirudh24seven/apps-android-wikipedia,dbrant/apps-android-wikipedia,SAGROUP2/apps-android-wikipedia,anirudh24seven/apps-android-wikipedia,parvez3019/apps-android-wikipedia,wikimedia/apps-android-wikipedia,Wikinaut/wikipedia-app,Duct-and-rice/KrswtkhrWiki4Android,Wikinaut/wikipedia-app,Duct-and-rice/KrswtkhrWiki4Android,dbrant/apps-android-wikipedia,carloshwa/apps-android-wikipedia,Duct-and-rice/KrswtkhrWiki4Android,reproio/apps-android-wikipedia,dbrant/apps-android-wikipedia,carloshwa/apps-android-wikipedia,anirudh24seven/apps-android-wikipedia,dbrant/apps-android-wikipedia,parvez3019/apps-android-wikipedia,dbrant/apps-android-wikipedia,SAGROUP2/apps-android-wikipedia,Duct-and-rice/KrswtkhrWiki4Android,carloshwa/apps-android-wikipedia,SAGROUP2/apps-android-wikipedia,parvez3019/apps-android-wikipedia,parvez3019/apps-android-wikipedia,reproio/apps-android-wikipedia,anirudh24seven/apps-android-wikipedia,wikimedia/apps-android-wikipedia
|
Add script to find undocumented translations.
To my knowledge there's no convenient way to find out if a string is missing a
translation into a particular language. For us this means that it's not easy
to check all of our strings and make sure they have documentation for our
translators. This patch adds a Python script that, when run, will tell you
what messages are undocumented.
Change-Id: I33b51f314aea841dda9f11bbcaf22158ee73960b
|
import os
import xml.etree.ElementTree as ET
RES_FOLDER = os.path.abspath(os.path.join(os.path.dirname(__file__), "../wikipedia/res"))
EN_STRINGS = os.path.join(RES_FOLDER, "values/strings.xml")
QQ_STRINGS = os.path.join(RES_FOLDER, "values-qq/strings.xml")
# Get ElementTree containing all message names in English
enroot = ET.parse(EN_STRINGS).getroot()
# Get ElementTree containing all documented messages
qqroot = ET.parse(QQ_STRINGS).getroot()
# Create a set to store all documented messages
qqmsgs = set()
# Add all documented messages to that set
for child in qqroot:
qqmsgs.add(child.attrib['name'])
# Iterate through all messages and check that they're documented
for child in enroot:
if child.attrib['name'] not in qqmsgs:
print child.attrib['name'] + " is undocumented!"
|
<commit_before><commit_msg>Add script to find undocumented translations.
To my knowledge there's no convenient way to find out if a string is missing a
translation into a particular language. For us this means that it's not easy
to check all of our strings and make sure they have documentation for our
translators. This patch adds a Python script that, when run, will tell you
what messages are undocumented.
Change-Id: I33b51f314aea841dda9f11bbcaf22158ee73960b<commit_after>
|
import os
import xml.etree.ElementTree as ET
RES_FOLDER = os.path.abspath(os.path.join(os.path.dirname(__file__), "../wikipedia/res"))
EN_STRINGS = os.path.join(RES_FOLDER, "values/strings.xml")
QQ_STRINGS = os.path.join(RES_FOLDER, "values-qq/strings.xml")
# Get ElementTree containing all message names in English
enroot = ET.parse(EN_STRINGS).getroot()
# Get ElementTree containing all documented messages
qqroot = ET.parse(QQ_STRINGS).getroot()
# Create a set to store all documented messages
qqmsgs = set()
# Add all documented messages to that set
for child in qqroot:
qqmsgs.add(child.attrib['name'])
# Iterate through all messages and check that they're documented
for child in enroot:
if child.attrib['name'] not in qqmsgs:
print child.attrib['name'] + " is undocumented!"
|
Add script to find undocumented translations.
To my knowledge there's no convenient way to find out if a string is missing a
translation into a particular language. For us this means that it's not easy
to check all of our strings and make sure they have documentation for our
translators. This patch adds a Python script that, when run, will tell you
what messages are undocumented.
Change-Id: I33b51f314aea841dda9f11bbcaf22158ee73960bimport os
import xml.etree.ElementTree as ET
RES_FOLDER = os.path.abspath(os.path.join(os.path.dirname(__file__), "../wikipedia/res"))
EN_STRINGS = os.path.join(RES_FOLDER, "values/strings.xml")
QQ_STRINGS = os.path.join(RES_FOLDER, "values-qq/strings.xml")
# Get ElementTree containing all message names in English
enroot = ET.parse(EN_STRINGS).getroot()
# Get ElementTree containing all documented messages
qqroot = ET.parse(QQ_STRINGS).getroot()
# Create a set to store all documented messages
qqmsgs = set()
# Add all documented messages to that set
for child in qqroot:
qqmsgs.add(child.attrib['name'])
# Iterate through all messages and check that they're documented
for child in enroot:
if child.attrib['name'] not in qqmsgs:
print child.attrib['name'] + " is undocumented!"
|
<commit_before><commit_msg>Add script to find undocumented translations.
To my knowledge there's no convenient way to find out if a string is missing a
translation into a particular language. For us this means that it's not easy
to check all of our strings and make sure they have documentation for our
translators. This patch adds a Python script that, when run, will tell you
what messages are undocumented.
Change-Id: I33b51f314aea841dda9f11bbcaf22158ee73960b<commit_after>import os
import xml.etree.ElementTree as ET
RES_FOLDER = os.path.abspath(os.path.join(os.path.dirname(__file__), "../wikipedia/res"))
EN_STRINGS = os.path.join(RES_FOLDER, "values/strings.xml")
QQ_STRINGS = os.path.join(RES_FOLDER, "values-qq/strings.xml")
# Get ElementTree containing all message names in English
enroot = ET.parse(EN_STRINGS).getroot()
# Get ElementTree containing all documented messages
qqroot = ET.parse(QQ_STRINGS).getroot()
# Create a set to store all documented messages
qqmsgs = set()
# Add all documented messages to that set
for child in qqroot:
qqmsgs.add(child.attrib['name'])
# Iterate through all messages and check that they're documented
for child in enroot:
if child.attrib['name'] not in qqmsgs:
print child.attrib['name'] + " is undocumented!"
|
|
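A hedged illustration of the check above, with invented message names:

# If values/strings.xml defines <string name="app_name"/> and
# <string name="menu_settings"/>, while values-qq/strings.xml only documents
# "app_name", running the script prints:
#   menu_settings is undocumented!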
a58b3b3cdecfffd7aa8c7fbaa38007fcbea3061a
|
st2common/tests/unit/test_db_rbac.py
|
st2common/tests/unit/test_db_rbac.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2common.models.db.rbac import RoleDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.models.db.rbac import PermissionGrantDB
from st2common.models.db.rbac import GroupToRoleMappingDB
from st2common.persistence.rbac import Role
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rbac import PermissionGrant
from st2common.persistence.rbac import GroupToRoleMapping
from st2tests import DbTestCase
from tests.unit.base import BaseDBModelCRUDTestCase
__all__ = [
'RoleDBModelCRUDTestCase',
'UserRoleAssignmentDBModelCRUDTestCase',
'PermissionGrantDBModelCRUDTestCase'
]
class RoleDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = RoleDB
persistance_class = Role
model_class_kwargs = {
'name': 'role_one',
'description': None,
'system': False,
'permission_grants': []
}
update_attribute_name = 'name'
class UserRoleAssignmentDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = UserRoleAssignmentDB
persistance_class = UserRoleAssignment
model_class_kwargs = {
'user': 'user_one',
'role': 'role_one',
'is_remote': True
}
update_attribute_name = 'role'
class PermissionGrantDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = PermissionGrantDB
persistance_class = PermissionGrant
model_class_kwargs = {
'resource_uid': 'pack:core',
'resource_type': 'pack',
'permission_types': []
}
update_attribute_name = 'resource_uid'
class GroupToRoleMappingDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = GroupToRoleMappingDB
persistance_class = GroupToRoleMapping
model_class_kwargs = {
'group': 'some group',
'roles': ['role_one', 'role_two'],
'description': 'desc',
'enabled': True
}
update_attribute_name = 'group'
|
Add CRUD model DB test cases for RBAC models.
|
Add CRUD model DB test cases for RBAC models.
|
Python
|
apache-2.0
|
StackStorm/st2,tonybaloney/st2,StackStorm/st2,tonybaloney/st2,nzlosh/st2,StackStorm/st2,Plexxi/st2,Plexxi/st2,Plexxi/st2,nzlosh/st2,tonybaloney/st2,StackStorm/st2,Plexxi/st2,nzlosh/st2,nzlosh/st2
|
Add CRUD model DB test cases for RBAC models.
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2common.models.db.rbac import RoleDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.models.db.rbac import PermissionGrantDB
from st2common.models.db.rbac import GroupToRoleMappingDB
from st2common.persistence.rbac import Role
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rbac import PermissionGrant
from st2common.persistence.rbac import GroupToRoleMapping
from st2tests import DbTestCase
from tests.unit.base import BaseDBModelCRUDTestCase
__all__ = [
'RoleDBModelCRUDTestCase',
'UserRoleAssignmentDBModelCRUDTestCase',
'PermissionGrantDBModelCRUDTestCase'
]
class RoleDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = RoleDB
persistance_class = Role
model_class_kwargs = {
'name': 'role_one',
'description': None,
'system': False,
'permission_grants': []
}
update_attribute_name = 'name'
class UserRoleAssignmentDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = UserRoleAssignmentDB
persistance_class = UserRoleAssignment
model_class_kwargs = {
'user': 'user_one',
'role': 'role_one',
'is_remote': True
}
update_attribute_name = 'role'
class PermissionGrantDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = PermissionGrantDB
persistance_class = PermissionGrant
model_class_kwargs = {
'resource_uid': 'pack:core',
'resource_type': 'pack',
'permission_types': []
}
update_attribute_name = 'resource_uid'
class GroupToRoleMappingDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = GroupToRoleMappingDB
persistance_class = GroupToRoleMapping
model_class_kwargs = {
'group': 'some group',
'roles': ['role_one', 'role_two'],
'description': 'desc',
'enabled': True
}
update_attribute_name = 'group'
|
<commit_before><commit_msg>Add CRUD model DB test cases for RBAC models.<commit_after>
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2common.models.db.rbac import RoleDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.models.db.rbac import PermissionGrantDB
from st2common.models.db.rbac import GroupToRoleMappingDB
from st2common.persistence.rbac import Role
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rbac import PermissionGrant
from st2common.persistence.rbac import GroupToRoleMapping
from st2tests import DbTestCase
from tests.unit.base import BaseDBModelCRUDTestCase
__all__ = [
'RoleDBModelCRUDTestCase',
'UserRoleAssignmentDBModelCRUDTestCase',
'PermissionGrantDBModelCRUDTestCase'
]
class RoleDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = RoleDB
persistance_class = Role
model_class_kwargs = {
'name': 'role_one',
'description': None,
'system': False,
'permission_grants': []
}
update_attribute_name = 'name'
class UserRoleAssignmentDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = UserRoleAssignmentDB
persistance_class = UserRoleAssignment
model_class_kwargs = {
'user': 'user_one',
'role': 'role_one',
'is_remote': True
}
update_attribute_name = 'role'
class PermissionGrantDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = PermissionGrantDB
persistance_class = PermissionGrant
model_class_kwargs = {
'resource_uid': 'pack:core',
'resource_type': 'pack',
'permission_types': []
}
update_attribute_name = 'resource_uid'
class GroupToRoleMappingDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = GroupToRoleMappingDB
persistance_class = GroupToRoleMapping
model_class_kwargs = {
'group': 'some group',
'roles': ['role_one', 'role_two'],
'description': 'desc',
'enabled': True
}
update_attribute_name = 'group'
|
Add CRUD model DB test cases for RBAC models.# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2common.models.db.rbac import RoleDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.models.db.rbac import PermissionGrantDB
from st2common.models.db.rbac import GroupToRoleMappingDB
from st2common.persistence.rbac import Role
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rbac import PermissionGrant
from st2common.persistence.rbac import GroupToRoleMapping
from st2tests import DbTestCase
from tests.unit.base import BaseDBModelCRUDTestCase
__all__ = [
'RoleDBModelCRUDTestCase',
'UserRoleAssignmentDBModelCRUDTestCase',
'PermissionGrantDBModelCRUDTestCase'
]
class RoleDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = RoleDB
persistance_class = Role
model_class_kwargs = {
'name': 'role_one',
'description': None,
'system': False,
'permission_grants': []
}
update_attribute_name = 'name'
class UserRoleAssignmentDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = UserRoleAssignmentDB
persistance_class = UserRoleAssignment
model_class_kwargs = {
'user': 'user_one',
'role': 'role_one',
'is_remote': True
}
update_attribute_name = 'role'
class PermissionGrantDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = PermissionGrantDB
persistance_class = PermissionGrant
model_class_kwargs = {
'resource_uid': 'pack:core',
'resource_type': 'pack',
'permission_types': []
}
update_attribute_name = 'resource_uid'
class GroupToRoleMappingDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = GroupToRoleMappingDB
persistance_class = GroupToRoleMapping
model_class_kwargs = {
'group': 'some group',
'roles': ['role_one', 'role_two'],
'description': 'desc',
'enabled': True
}
update_attribute_name = 'group'
|
<commit_before><commit_msg>Add CRUD model DB test cases for RBAC models.<commit_after># Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2common.models.db.rbac import RoleDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.models.db.rbac import PermissionGrantDB
from st2common.models.db.rbac import GroupToRoleMappingDB
from st2common.persistence.rbac import Role
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rbac import PermissionGrant
from st2common.persistence.rbac import GroupToRoleMapping
from st2tests import DbTestCase
from tests.unit.base import BaseDBModelCRUDTestCase
__all__ = [
'RoleDBModelCRUDTestCase',
'UserRoleAssignmentDBModelCRUDTestCase',
'PermissionGrantDBModelCRUDTestCase'
]
class RoleDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = RoleDB
persistance_class = Role
model_class_kwargs = {
'name': 'role_one',
'description': None,
'system': False,
'permission_grants': []
}
update_attribute_name = 'name'
class UserRoleAssignmentDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = UserRoleAssignmentDB
persistance_class = UserRoleAssignment
model_class_kwargs = {
'user': 'user_one',
'role': 'role_one',
'is_remote': True
}
update_attribute_name = 'role'
class PermissionGrantDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = PermissionGrantDB
persistance_class = PermissionGrant
model_class_kwargs = {
'resource_uid': 'pack:core',
'resource_type': 'pack',
'permission_types': []
}
update_attribute_name = 'resource_uid'
class GroupToRoleMappingDBModelCRUDTestCase(BaseDBModelCRUDTestCase, DbTestCase):
model_class = GroupToRoleMappingDB
persistance_class = GroupToRoleMapping
model_class_kwargs = {
'group': 'some group',
'roles': ['role_one', 'role_two'],
'description': 'desc',
'enabled': True
}
update_attribute_name = 'group'
|
|
c3b9972b3208ac6f484a3e496e5e64dc0fbe3b3d
|
scripts/kconfig-split.py
|
scripts/kconfig-split.py
|
#!/usr/bin/env python
# This is a slightly modified version of ChromiumOS' splitconfig
# https://chromium.googlesource.com/chromiumos/third_party/kernel/+/stabilize-5899.B-chromeos-3.14/chromeos/scripts/splitconfig
"""See this page for more details:
http://dev.chromium.org/chromium-os/how-tos-and-troubleshooting/kernel-configuration
"""
import os
import re
import sys
allconfigs = {}
# Parse config files
for config in sys.argv[1:]:
allconfigs[config] = set()
for line in open(config):
m = re.match("#*\s*CONFIG_(\w+)[\s=](.*)$", line)
if not m:
continue
option, value = m.groups()
allconfigs[config].add((option, value))
# Split out common config options
common = allconfigs.values()[0].copy()
for config in allconfigs.keys():
common &= allconfigs[config]
for config in allconfigs.keys():
allconfigs[config] -= common
allconfigs["common"] = common
# Generate new splitconfigs
for config in allconfigs.keys():
f = open("split-" + config, "w")
for option, value in sorted(list(allconfigs[config])):
if value == "is not set":
print >>f, "# CONFIG_%s %s" % (option, value)
else:
print >>f, "CONFIG_%s=%s" % (option, value)
f.close()
|
Add script to split kernel config files
|
scripts: Add script to split kernel config files
This script is slightly modified from the ChromiumOS splitconfig
It takes a number of kernel config files and prints the common
and specific kernel config options to separate files.
Signed-off-by: Rolf Neugebauer <6fdd1d8677ab4fbcb4df3eff6e190c3298f7f742@docker.com>
|
Python
|
apache-2.0
|
JohnnyLeone/linuxkit,t-koulouris/linuxkit,mor1/linuxkit,furious-luke/linuxkit,linuxkit/linuxkit,deitch/linuxkit,eyz/linuxkit,yankcrime/linuxkit,deitch/linuxkit,djs55/linuxkit,radu-matei/linuxkit,mor1/linuxkit,eyz/linuxkit,konstruktoid/linuxkit,radu-matei/linuxkit,YuPengZTE/linuxkit,davefreitag/linuxkit,konstruktoid/linuxkit,JohnnyLeone/linuxkit,deitch/linuxkit,ndauten/linuxkit,eyz/linuxkit,konstruktoid/linuxkit,davefreitag/linuxkit,thebsdbox/linuxkit,YuPengZTE/linuxkit,yankcrime/linuxkit,djs55/linuxkit,mor1/linuxkit,thebsdbox/linuxkit,ndauten/linuxkit,mor1/linuxkit,linuxkit/linuxkit,yankcrime/linuxkit,deitch/linuxkit,ndauten/linuxkit,t-koulouris/linuxkit,justincormack/linuxkit,eyz/linuxkit,yankcrime/linuxkit,zx2c4/linuxkit,furious-luke/linuxkit,YuPengZTE/linuxkit,yankcrime/linuxkit,thebsdbox/linuxkit,djs55/linuxkit,zx2c4/linuxkit,rn/linuxkit,konstruktoid/linuxkit,davefreitag/linuxkit,djs55/linuxkit,eyz/linuxkit,JohnnyLeone/linuxkit,rn/linuxkit,radu-matei/linuxkit,t-koulouris/linuxkit,rn/linuxkit,YuPengZTE/linuxkit,linuxkit/linuxkit,deitch/linuxkit,justincormack/linuxkit,zx2c4/linuxkit,yankcrime/linuxkit,thebsdbox/linuxkit,radu-matei/linuxkit,JohnnyLeone/linuxkit,linuxkit/linuxkit,ndauten/linuxkit,furious-luke/linuxkit,justincormack/linuxkit,ndauten/linuxkit,justincormack/linuxkit,t-koulouris/linuxkit,linuxkit/linuxkit,justincormack/linuxkit,zx2c4/linuxkit,konstruktoid/linuxkit,t-koulouris/linuxkit,JohnnyLeone/linuxkit,YuPengZTE/linuxkit,djs55/linuxkit,rn/linuxkit,mor1/linuxkit,ndauten/linuxkit,davefreitag/linuxkit,zx2c4/linuxkit,YuPengZTE/linuxkit,furious-luke/linuxkit,rn/linuxkit,konstruktoid/linuxkit,furious-luke/linuxkit,thebsdbox/linuxkit,davefreitag/linuxkit,radu-matei/linuxkit,thebsdbox/linuxkit,eyz/linuxkit,justincormack/linuxkit
|
scripts: Add script to split kernel config files
This script is slightly modified from the ChromiumOS splitconfig
It takes a number of kernel config files and prints the common
and specific kernel config options to separate files.
Signed-off-by: Rolf Neugebauer <6fdd1d8677ab4fbcb4df3eff6e190c3298f7f742@docker.com>
|
#!/usr/bin/env python
# This is a slightly modified version of ChromiumOS' splitconfig
# https://chromium.googlesource.com/chromiumos/third_party/kernel/+/stabilize-5899.B-chromeos-3.14/chromeos/scripts/splitconfig
"""See this page for more details:
http://dev.chromium.org/chromium-os/how-tos-and-troubleshooting/kernel-configuration
"""
import os
import re
import sys
allconfigs = {}
# Parse config files
for config in sys.argv[1:]:
allconfigs[config] = set()
for line in open(config):
m = re.match("#*\s*CONFIG_(\w+)[\s=](.*)$", line)
if not m:
continue
option, value = m.groups()
allconfigs[config].add((option, value))
# Split out common config options
common = allconfigs.values()[0].copy()
for config in allconfigs.keys():
common &= allconfigs[config]
for config in allconfigs.keys():
allconfigs[config] -= common
allconfigs["common"] = common
# Generate new splitconfigs
for config in allconfigs.keys():
f = open("split-" + config, "w")
for option, value in sorted(list(allconfigs[config])):
if value == "is not set":
print >>f, "# CONFIG_%s %s" % (option, value)
else:
print >>f, "CONFIG_%s=%s" % (option, value)
f.close()
|
<commit_before><commit_msg>scripts: Add script to split kernel config files
This script is slightly modified from the ChromiumOS splitconfig
It takes a number of kernel config files and prints the common
and specific kernel config options to separate files.
Signed-off-by: Rolf Neugebauer <6fdd1d8677ab4fbcb4df3eff6e190c3298f7f742@docker.com><commit_after>
|
#!/usr/bin/env python
# This is a slightly modified version of ChromiumOS' splitconfig
# https://chromium.googlesource.com/chromiumos/third_party/kernel/+/stabilize-5899.B-chromeos-3.14/chromeos/scripts/splitconfig
"""See this page for more details:
http://dev.chromium.org/chromium-os/how-tos-and-troubleshooting/kernel-configuration
"""
import os
import re
import sys
allconfigs = {}
# Parse config files
for config in sys.argv[1:]:
allconfigs[config] = set()
for line in open(config):
m = re.match("#*\s*CONFIG_(\w+)[\s=](.*)$", line)
if not m:
continue
option, value = m.groups()
allconfigs[config].add((option, value))
# Split out common config options
common = allconfigs.values()[0].copy()
for config in allconfigs.keys():
common &= allconfigs[config]
for config in allconfigs.keys():
allconfigs[config] -= common
allconfigs["common"] = common
# Generate new splitconfigs
for config in allconfigs.keys():
f = open("split-" + config, "w")
for option, value in sorted(list(allconfigs[config])):
if value == "is not set":
print >>f, "# CONFIG_%s %s" % (option, value)
else:
print >>f, "CONFIG_%s=%s" % (option, value)
f.close()
|
scripts: Add script to split kernel config files
This script is slightly modified from the ChromiumOS splitconfig
It takes a number of kernel config files and prints the common
and specific kernel config options to separate files.
Signed-off-by: Rolf Neugebauer <6fdd1d8677ab4fbcb4df3eff6e190c3298f7f742@docker.com>#!/usr/bin/env python
# This is a slightly modified version of ChromiumOS' splitconfig
# https://chromium.googlesource.com/chromiumos/third_party/kernel/+/stabilize-5899.B-chromeos-3.14/chromeos/scripts/splitconfig
"""See this page for more details:
http://dev.chromium.org/chromium-os/how-tos-and-troubleshooting/kernel-configuration
"""
import os
import re
import sys
allconfigs = {}
# Parse config files
for config in sys.argv[1:]:
allconfigs[config] = set()
for line in open(config):
m = re.match("#*\s*CONFIG_(\w+)[\s=](.*)$", line)
if not m:
continue
option, value = m.groups()
allconfigs[config].add((option, value))
# Split out common config options
common = allconfigs.values()[0].copy()
for config in allconfigs.keys():
common &= allconfigs[config]
for config in allconfigs.keys():
allconfigs[config] -= common
allconfigs["common"] = common
# Generate new splitconfigs
for config in allconfigs.keys():
f = open("split-" + config, "w")
for option, value in sorted(list(allconfigs[config])):
if value == "is not set":
print >>f, "# CONFIG_%s %s" % (option, value)
else:
print >>f, "CONFIG_%s=%s" % (option, value)
f.close()
|
<commit_before><commit_msg>scripts: Add script to split kernel config files
This script is slightly modified from the ChromiumOS splitconfig
It takes a number of kernel config files and prints the common
and specific kernel config options to separate files.
Signed-off-by: Rolf Neugebauer <6fdd1d8677ab4fbcb4df3eff6e190c3298f7f742@docker.com><commit_after>#!/usr/bin/env python
# This is a slightly modified version of ChromiumOS' splitconfig
# https://chromium.googlesource.com/chromiumos/third_party/kernel/+/stabilize-5899.B-chromeos-3.14/chromeos/scripts/splitconfig
"""See this page for more details:
http://dev.chromium.org/chromium-os/how-tos-and-troubleshooting/kernel-configuration
"""
import os
import re
import sys
allconfigs = {}
# Parse config files
for config in sys.argv[1:]:
allconfigs[config] = set()
for line in open(config):
m = re.match("#*\s*CONFIG_(\w+)[\s=](.*)$", line)
if not m:
continue
option, value = m.groups()
allconfigs[config].add((option, value))
# Split out common config options
common = allconfigs.values()[0].copy()
for config in allconfigs.keys():
common &= allconfigs[config]
for config in allconfigs.keys():
allconfigs[config] -= common
allconfigs["common"] = common
# Generate new splitconfigs
for config in allconfigs.keys():
f = open("split-" + config, "w")
for option, value in sorted(list(allconfigs[config])):
if value == "is not set":
print >>f, "# CONFIG_%s %s" % (option, value)
else:
print >>f, "CONFIG_%s=%s" % (option, value)
f.close()
|
|
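A hedged worked example for the split script (file names and options invented; the print >> syntax means the script expects Python 2):

# config-a:  CONFIG_FOO=y               config-b:  CONFIG_FOO=y
#            CONFIG_BAR=y                          # CONFIG_BAR is not set
#
# Running `kconfig-split.py config-a config-b` writes three files:
#   split-common    -> CONFIG_FOO=y               (options shared by every input)
#   split-config-a  -> CONFIG_BAR=y               (per-file deltas only)
#   split-config-b  -> # CONFIG_BAR is not set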
0b3a8b366853c40ff05b314e11ec1826f968e427
|
seacat/spdy/alx1_http.py
|
seacat/spdy/alx1_http.py
|
import struct
from .spdy import *
from .vle import spdy_add_vle_string, spdy_read_vle_string
def build_syn_stream_frame(frame, stream_id, host, method, path):
hdr_len = struct.calcsize('!HH4BIIBB')
assert((frame.position + hdr_len) <= frame.capacity)
struct.pack_into('!HH4BIIBB', frame.data, frame.position,
CNTL_FRAME_VERSION_ALX1, CNTL_FRAME_TYPE_SYN_STREAM,
0, # Flags
0xFF, 0xFE, 0xFD, # Placeholder for real length
stream_id,
0,
0b00100000, # Priority
0 # Slot
)
frame.position += hdr_len
spdy_add_vle_string(frame, host)
spdy_add_vle_string(frame, method)
spdy_add_vle_string(frame, path)
# Calculate length
lenb = struct.pack('!I', frame.position - SPDY_HEADER_SIZE)
frame.data[5:8] = lenb[1:]
frame.data[4] = SPDY_FLAG_FIN
def parse_rst_stream_frame(frame):
'''returns (stream_id, status_code) '''
assert(frame.limit == SPDY_HEADER_SIZE + 8)
return struct.unpack_from("!II", frame.data, frame.position + SPDY_HEADER_SIZE)
def parse_alx1_syn_reply_frame(frame):
stream_id, status_code, _ = struct.unpack_from("!Ihh", frame.data, frame.position + SPDY_HEADER_SIZE)
kv = []
frame.position = frame.position + SPDY_HEADER_SIZE + 8
while frame.position < frame.limit:
hname = spdy_read_vle_string(frame)
vname = spdy_read_vle_string(frame)
kv.append((hname.decode('utf-8'), vname.decode('utf-8')))
return stream_id, status_code, kv
|
Add SPDU build and parse functions related to HTTP.
|
Add SPDU build and parse functions related to HTTP.
|
Python
|
bsd-3-clause
|
TeskaLabs/SeaCat-Client-Python3
|
Add SPDU build and parse functions related to HTTP.
|
import struct
from .spdy import *
from .vle import spdy_add_vle_string, spdy_read_vle_string
def build_syn_stream_frame(frame, stream_id, host, method, path):
hdr_len = struct.calcsize('!HH4BIIBB')
assert((frame.position + hdr_len) <= frame.capacity)
struct.pack_into('!HH4BIIBB', frame.data, frame.position,
CNTL_FRAME_VERSION_ALX1, CNTL_FRAME_TYPE_SYN_STREAM,
0, # Flags
0xFF, 0xFE, 0xFD, # Placeholder for real length
stream_id,
0,
0b00100000, # Priority
0 # Slot
)
frame.position += hdr_len
spdy_add_vle_string(frame, host)
spdy_add_vle_string(frame, method)
spdy_add_vle_string(frame, path)
# Calculate length
lenb = struct.pack('!I', frame.position - SPDY_HEADER_SIZE)
frame.data[5:8] = lenb[1:]
frame.data[4] = SPDY_FLAG_FIN
def parse_rst_stream_frame(frame):
'''returns (stream_id, status_code) '''
assert(frame.limit == SPDY_HEADER_SIZE + 8)
return struct.unpack_from("!II", frame.data, frame.position + SPDY_HEADER_SIZE)
def parse_alx1_syn_reply_frame(frame):
stream_id, status_code, _ = struct.unpack_from("!Ihh", frame.data, frame.position + SPDY_HEADER_SIZE)
kv = []
frame.position = frame.position + SPDY_HEADER_SIZE + 8
while frame.position < frame.limit:
hname = spdy_read_vle_string(frame)
vname = spdy_read_vle_string(frame)
kv.append((hname.decode('utf-8'), vname.decode('utf-8')))
return stream_id, status_code, kv
|
<commit_before><commit_msg>Add SPDU build and parse functions related to HTTP.<commit_after>
|
import struct
from .spdy import *
from .vle import spdy_add_vle_string, spdy_read_vle_string
def build_syn_stream_frame(frame, stream_id, host, method, path):
hdr_len = struct.calcsize('!HH4BIIBB')
assert((frame.position + hdr_len) <= frame.capacity)
struct.pack_into('!HH4BIIBB', frame.data, frame.position,
CNTL_FRAME_VERSION_ALX1, CNTL_FRAME_TYPE_SYN_STREAM,
0, # Flags
0xFF, 0xFE, 0xFD, # Placeholder for real length
stream_id,
0,
0b00100000, # Priority
0 # Slot
)
frame.position += hdr_len
spdy_add_vle_string(frame, host)
spdy_add_vle_string(frame, method)
spdy_add_vle_string(frame, path)
# Calculate length
lenb = struct.pack('!I', frame.position - SPDY_HEADER_SIZE)
frame.data[5:8] = lenb[1:]
frame.data[4] = SPDY_FLAG_FIN
def parse_rst_stream_frame(frame):
'''returns (stream_id, status_code) '''
assert(frame.limit == SPDY_HEADER_SIZE + 8)
return struct.unpack_from("!II", frame.data, frame.position + SPDY_HEADER_SIZE)
def parse_alx1_syn_reply_frame(frame):
stream_id, status_code, _ = struct.unpack_from("!Ihh", frame.data, frame.position + SPDY_HEADER_SIZE)
kv = []
frame.position = frame.position + SPDY_HEADER_SIZE + 8
while frame.position < frame.limit:
hname = spdy_read_vle_string(frame)
vname = spdy_read_vle_string(frame)
kv.append((hname.decode('utf-8'), vname.decode('utf-8')))
return stream_id, status_code, kv
|
Add SPDU build and parse functions related to HTTP.import struct
from .spdy import *
from .vle import spdy_add_vle_string, spdy_read_vle_string
def build_syn_stream_frame(frame, stream_id, host, method, path):
hdr_len = struct.calcsize('!HH4BIIBB')
assert((frame.position + hdr_len) <= frame.capacity)
struct.pack_into('!HH4BIIBB', frame.data, frame.position,
CNTL_FRAME_VERSION_ALX1, CNTL_FRAME_TYPE_SYN_STREAM,
0, # Flags
0xFF, 0xFE, 0xFD, # Placeholder for real length
stream_id,
0,
0b00100000, # Priority
0 # Slot
)
frame.position += hdr_len
spdy_add_vle_string(frame, host)
spdy_add_vle_string(frame, method)
spdy_add_vle_string(frame, path)
# Calculate length
lenb = struct.pack('!I', frame.position - SPDY_HEADER_SIZE)
frame.data[5:8] = lenb[1:]
frame.data[4] = SPDY_FLAG_FIN
def parse_rst_stream_frame(frame):
'''returns (stream_id, status_code) '''
assert(frame.limit == SPDY_HEADER_SIZE + 8)
return struct.unpack_from("!II", frame.data, frame.position + SPDY_HEADER_SIZE)
def parse_alx1_syn_reply_frame(frame):
stream_id, status_code, _ = struct.unpack_from("!Ihh", frame.data, frame.position + SPDY_HEADER_SIZE)
kv = []
frame.position = frame.position + SPDY_HEADER_SIZE + 8
while frame.position < frame.limit:
hname = spdy_read_vle_string(frame)
vname = spdy_read_vle_string(frame)
kv.append((hname.decode('utf-8'), vname.decode('utf-8')))
return stream_id, status_code, kv
|
<commit_before><commit_msg>Add SPDU build and parse functions related to HTTP.<commit_after>import struct
from .spdy import *
from .vle import spdy_add_vle_string, spdy_read_vle_string
def build_syn_stream_frame(frame, stream_id, host, method, path):
hdr_len = struct.calcsize('!HH4BIIBB')
assert((frame.position + hdr_len) <= frame.capacity)
struct.pack_into('!HH4BIIBB', frame.data, frame.position,
CNTL_FRAME_VERSION_ALX1, CNTL_FRAME_TYPE_SYN_STREAM,
0, # Flags
0xFF, 0xFE, 0xFD, # Placeholder for real length
stream_id,
0,
0b00100000, # Priority
0 # Slot
)
frame.position += hdr_len
spdy_add_vle_string(frame, host)
spdy_add_vle_string(frame, method)
spdy_add_vle_string(frame, path)
# Calculate length
lenb = struct.pack('!I', frame.position - SPDY_HEADER_SIZE)
frame.data[5:8] = lenb[1:]
frame.data[4] = SPDY_FLAG_FIN
def parse_rst_stream_frame(frame):
'''returns (stream_id, status_code) '''
assert(frame.limit == SPDY_HEADER_SIZE + 8)
return struct.unpack_from("!II", frame.data, frame.position + SPDY_HEADER_SIZE)
def parse_alx1_syn_reply_frame(frame):
stream_id, status_code, _ = struct.unpack_from("!Ihh", frame.data, frame.position + SPDY_HEADER_SIZE)
kv = []
frame.position = frame.position + SPDY_HEADER_SIZE + 8
while frame.position < frame.limit:
hname = spdy_read_vle_string(frame)
vname = spdy_read_vle_string(frame)
kv.append((hname.decode('utf-8'), vname.decode('utf-8')))
return stream_id, status_code, kv
|
|
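The final three lines of build_syn_stream_frame in the record above patch the frame length in place: struct.pack('!I', n) yields four big-endian bytes, and keeping only the last three writes SPDY's 24-bit length field into bytes 5..7, leaving byte 4 for the flags. A minimal standalone illustration follows; the 8-byte header layout and the FIN flag value are assumptions read off the record, not taken from the SeaCat sources.

import struct

header = bytearray(8)                      # control-frame header sketch
payload_length = 300                       # example value
lenb = struct.pack('!I', payload_length)   # 4 bytes, big-endian
header[5:8] = lenb[1:]                     # keep only the low 24 bits
header[4] = 0x01                           # FIN flag, as in the record
assert header[5:8] == b'\x00\x01\x2c'      # 300 == 0x00012C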
b9903fb746dfac21a2b6387dd3ccd4e00a0562e8
|
test/test_bezier_direct.py
|
test/test_bezier_direct.py
|
from __future__ import division
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
from cocos.actions import Bezier
from cocos.sprite import Sprite
import pyglet
from cocos import path
def direct_bezier(p0, p1, p2, p3):
'''Given four points, returns a bezier path that goes through them.
It starts at p0, finishes at p3, and passes through p1 and p2 at t=0.4 and
t=0.6 respectively.
'''
def _one_dim(p0xy, B1xy, B2xy, p3xy):
'''Calculates the p1 and p2 to draw through B1 and B2 given p0 and p3.
p0: P sub 0 of bezier, it's also B(0)
B1: B(0.4)
B2: B(0.6)
p3: P sub 3 of bezier, it's also B(1)
'''
p2xy = (1.5 * B2xy - B1xy + 0.12 * p0xy - 0.26 * p3xy) / 0.36
p1xy = (B2xy - 0.064 * p0xy - 0.432 * p2xy - 0.216 * p3xy) / 0.288
return p1xy, p2xy
bp1x, bp2x = _one_dim(p0[0], p1[0], p2[0], p3[0])
bp1y, bp2y = _one_dim(p0[1], p1[1], p2[1], p3[1])
bp1 = bp1x, bp1y
bp2 = bp2x, bp2y
bezier_path = path.Bezier(p0, p3, bp1, bp2)
return bezier_path
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
go_through = [(100,300), (370,330), (430,270), (750,550)]
# visually spot through where it should go
for pos in go_through:
sprite = Sprite('fire.png')
sprite.position = pos
sprite.scale = .3
self.add(sprite)
# calculate the points
bezier_path = direct_bezier(*go_through)
sprite = Sprite('fire.png')
sprite.scale = .3
sprite.color = (0, 0, 255)
self.add(sprite)
sprite.do(Bezier(bezier_path, 5))
if __name__ == "__main__":
director.init(width=800, height=600)
test_layer = TestLayer ()
main_scene = cocos.scene.Scene (test_layer)
director.run (main_scene)
|
Test using bezier going through 4 specific points
|
Test using bezier going through 4 specific points
|
Python
|
bsd-3-clause
|
shujunqiao/cocos2d-python,shujunqiao/cocos2d-python,shujunqiao/cocos2d-python,vyscond/cocos,dangillet/cocos
|
Test using bezier going through 4 specific points
|
from __future__ import division
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
from cocos.actions import Bezier
from cocos.sprite import Sprite
import pyglet
from cocos import path
def direct_bezier(p0, p1, p2, p3):
'''Given four points, returns a bezier path that goes through them.
It starts at p0, finishes at p3, and passes through p1 and p2 at t=0.4 and
t=0.6 respectively.
'''
def _one_dim(p0xy, B1xy, B2xy, p3xy):
'''Calculates the p1 and p2 to draw through B1 and B2 given p0 and p3.
p0: P sub 0 of bezier, it's also B(0)
B1: B(0.4)
B2: B(0.6)
p3: P sub 3 of bezier, it's also B(1)
'''
p2xy = (1.5 * B2xy - B1xy + 0.12 * p0xy - 0.26 * p3xy) / 0.36
p1xy = (B2xy - 0.064 * p0xy - 0.432 * p2xy - 0.216 * p3xy) / 0.288
return p1xy, p2xy
bp1x, bp2x = _one_dim(p0[0], p1[0], p2[0], p3[0])
bp1y, bp2y = _one_dim(p0[1], p1[1], p2[1], p3[1])
bp1 = bp1x, bp1y
bp2 = bp2x, bp2y
bezier_path = path.Bezier(p0, p3, bp1, bp2)
return bezier_path
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
go_through = [(100,300), (370,330), (430,270), (750,550)]
# visually spot through where it should go
for pos in go_through:
sprite = Sprite('fire.png')
sprite.position = pos
sprite.scale = .3
self.add(sprite)
# calculate the points
bezier_path = direct_bezier(*go_through)
sprite = Sprite('fire.png')
sprite.scale = .3
sprite.color = (0, 0, 255)
self.add(sprite)
sprite.do(Bezier(bezier_path, 5))
if __name__ == "__main__":
director.init(width=800, height=600)
test_layer = TestLayer ()
main_scene = cocos.scene.Scene (test_layer)
director.run (main_scene)
|
<commit_before><commit_msg>Test using bezier going through 4 specific points<commit_after>
|
from __future__ import division
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
from cocos.actions import Bezier
from cocos.sprite import Sprite
import pyglet
from cocos import path
def direct_bezier(p0, p1, p2, p3):
'''Given four points, returns a bezier path that goes through them.
It starts at p0, finishes at p3, and passes through p1 and p2 at t=0.4 and
t=0.6 respectively.
'''
def _one_dim(p0xy, B1xy, B2xy, p3xy):
'''Calculates the p1 and p2 to draw through B1 and B2 given p0 and p3.
p0: P sub 0 of bezier, it's also B(0)
B1: B(0.4)
B2: B(0.6)
p3: P sub 3 of bezier, it's also B(1)
'''
p2xy = (1.5 * B2xy - B1xy + 0.12 * p0xy - 0.26 * p3xy) / 0.36
p1xy = (B2xy - 0.064 * p0xy - 0.432 * p2xy - 0.216 * p3xy) / 0.288
return p1xy, p2xy
bp1x, bp2x = _one_dim(p0[0], p1[0], p2[0], p3[0])
bp1y, bp2y = _one_dim(p0[1], p1[1], p2[1], p3[1])
bp1 = bp1x, bp1y
bp2 = bp2x, bp2y
bezier_path = path.Bezier(p0, p3, bp1, bp2)
return bezier_path
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
go_through = [(100,300), (370,330), (430,270), (750,550)]
# visually spot through where it should go
for pos in go_through:
sprite = Sprite('fire.png')
sprite.position = pos
sprite.scale = .3
self.add(sprite)
# calculate the points
bezier_path = direct_bezier(*go_through)
sprite = Sprite('fire.png')
sprite.scale = .3
sprite.color = (0, 0, 255)
self.add(sprite)
sprite.do(Bezier(bezier_path, 5))
if __name__ == "__main__":
director.init(width=800, height=600)
test_layer = TestLayer ()
main_scene = cocos.scene.Scene (test_layer)
director.run (main_scene)
|
Test using bezier going through 4 specific pointsfrom __future__ import division
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
from cocos.actions import Bezier
from cocos.sprite import Sprite
import pyglet
from cocos import path
def direct_bezier(p0, p1, p2, p3):
'''Given four points, returns a bezier path that goes through them.
It starts at p0, finishes at p3, and passes through p1 and p2 at t=0.4 and
t=0.6 respectively.
'''
def _one_dim(p0xy, B1xy, B2xy, p3xy):
'''Calculates the p1 and p2 to draw through B1 and B2 given p0 and p3.
p0: P sub 0 of bezier, it's also B(0)
B1: B(0.4)
B2: B(0.6)
p3: P sub 3 of bezier, it's also B(1)
'''
p2xy = (1.5 * B2xy - B1xy + 0.12 * p0xy - 0.26 * p3xy) / 0.36
p1xy = (B2xy - 0.064 * p0xy - 0.432 * p2xy - 0.216 * p3xy) / 0.288
return p1xy, p2xy
bp1x, bp2x = _one_dim(p0[0], p1[0], p2[0], p3[0])
bp1y, bp2y = _one_dim(p0[1], p1[1], p2[1], p3[1])
bp1 = bp1x, bp1y
bp2 = bp2x, bp2y
bezier_path = path.Bezier(p0, p3, bp1, bp2)
return bezier_path
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
go_through = [(100,300), (370,330), (430,270), (750,550)]
# visually spot through where it should go
for pos in go_through:
sprite = Sprite('fire.png')
sprite.position = pos
sprite.scale = .3
self.add(sprite)
# calculate the points
bezier_path = direct_bezier(*go_through)
sprite = Sprite('fire.png')
sprite.scale = .3
sprite.color = (0, 0, 255)
self.add(sprite)
sprite.do(Bezier(bezier_path, 5))
if __name__ == "__main__":
director.init(width=800, height=600)
test_layer = TestLayer ()
main_scene = cocos.scene.Scene (test_layer)
director.run (main_scene)
|
<commit_before><commit_msg>Test using bezier going through 4 specific points<commit_after>from __future__ import division
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
from cocos.actions import Bezier
from cocos.sprite import Sprite
import pyglet
from cocos import path
def direct_bezier(p0, p1, p2, p3):
'''Given four points, returns a bezier path that goes through them.
It starts at p0, finishes at p3, and passes through p1 and p2 at t=0.4 and
t=0.6 respectively.
'''
def _one_dim(p0xy, B1xy, B2xy, p3xy):
'''Calculates the p1 and p2 to draw through B1 and B2 given p0 and p3.
p0: P sub 0 of bezier, it's also B(0)
B1: B(0.4)
B2: B(0.6)
p3: P sub 3 of bezier, it's also B(1)
'''
p2xy = (1.5 * B2xy - B1xy + 0.12 * p0xy - 0.26 * p3xy) / 0.36
p1xy = (B2xy - 0.064 * p0xy - 0.432 * p2xy - 0.216 * p3xy) / 0.288
return p1xy, p2xy
bp1x, bp2x = _one_dim(p0[0], p1[0], p2[0], p3[0])
bp1y, bp2y = _one_dim(p0[1], p1[1], p2[1], p3[1])
bp1 = bp1x, bp1y
bp2 = bp2x, bp2y
bezier_path = path.Bezier(p0, p3, bp1, bp2)
return bezier_path
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
go_through = [(100,300), (370,330), (430,270), (750,550)]
# visually spot through where it should go
for pos in go_through:
sprite = Sprite('fire.png')
sprite.position = pos
sprite.scale = .3
self.add(sprite)
# calculate the points
bezier_path = direct_bezier(*go_through)
sprite = Sprite('fire.png')
sprite.scale = .3
sprite.color = (0, 0, 255)
self.add(sprite)
sprite.do(Bezier(bezier_path, 5))
if __name__ == "__main__":
director.init(width=800, height=600)
test_layer = TestLayer ()
main_scene = cocos.scene.Scene (test_layer)
director.run (main_scene)
|
|
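The control-point formulas in _one_dim above come from the cubic Bezier B(t) = (1-t)^3*p0 + 3(1-t)^2*t*p1 + 3(1-t)*t^2*p2 + t^3*p3: writing out B(0.4) and B(0.6) gives two linear equations in the unknown control points, and solving them produces the 0.288 and 0.36 divisors seen in the code. A small self-contained check (plain Python, no cocos required; the waypoints reuse the go_through values from the test):

def bezier_point(t, p0, p1, p2, p3):
    s = 1.0 - t
    return tuple(s**3 * a + 3*s**2*t * b + 3*s*t**2 * c + t**3 * d
                 for a, b, c, d in zip(p0, p1, p2, p3))

p0, b1, b2, p3 = (100, 300), (370, 330), (430, 270), (750, 550)
ctrl = []
for i in (0, 1):  # same arithmetic as _one_dim, per coordinate
    c2 = (1.5 * b2[i] - b1[i] + 0.12 * p0[i] - 0.26 * p3[i]) / 0.36
    c1 = (b2[i] - 0.064 * p0[i] - 0.432 * c2 - 0.216 * p3[i]) / 0.288
    ctrl.append((c1, c2))
cp1, cp2 = (ctrl[0][0], ctrl[1][0]), (ctrl[0][1], ctrl[1][1])
assert all(abs(x - y) < 1e-6 for x, y in zip(bezier_point(0.4, p0, cp1, cp2, p3), b1))
assert all(abs(x - y) < 1e-6 for x, y in zip(bezier_point(0.6, p0, cp1, cp2, p3), b2))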
ab76f87e013e330ad50f4a81ee0c72c36cb29681
|
thefuck/rules/sudo.py
|
thefuck/rules/sudo.py
|
patterns = ['permission denied',
'EACCES',
'pkg: Insufficient privileges',
'you cannot perform this operation unless you are root',
'non-root users cannot',
'Operation not permitted',
'root privilege',
'This command has to be run under the root user.',
'This operation requires root.',
'requested operation requires superuser privilege',
'must be run as root',
'must be superuser',
'must be root',
'need to be root',
'need root',
'only root can ',
'You don\'t have access to the history DB.',
'authentication is required']
def match(command, settings):
for pattern in patterns:
if pattern.lower() in command.stderr.lower()\
or pattern.lower() in command.stdout.lower():
return True
return False
def get_new_command(command, settings):
return u'sudo {}'.format(command.script)
|
patterns = ['permission denied',
'EACCES',
'pkg: Insufficient privileges',
'you cannot perform this operation unless you are root',
'non-root users cannot',
'Operation not permitted',
'root privilege',
'This command has to be run under the root user.',
'This operation requires root.',
'requested operation requires superuser privilege',
'must be run as root',
'must run as root',
'must be superuser',
'must be root',
'need to be root',
'need root',
'only root can ',
'You don\'t have access to the history DB.',
'authentication is required']
def match(command, settings):
for pattern in patterns:
if pattern.lower() in command.stderr.lower()\
or pattern.lower() in command.stdout.lower():
return True
return False
def get_new_command(command, settings):
return u'sudo {}'.format(command.script)
|
Add one more 'need root' phrase
|
Add one more 'need root' phrase
|
Python
|
mit
|
thinkerchan/thefuck,artiya4u/thefuck,subajat1/thefuck,scorphus/thefuck,princeofdarkness76/thefuck,mcarton/thefuck,BertieJim/thefuck,manashmndl/thefuck,ostree/thefuck,MJerty/thefuck,thinkerchan/thefuck,scorphus/thefuck,gogobebe2/thefuck,AntonChankin/thefuck,beni55/thefuck,Clpsplug/thefuck,ostree/thefuck,bigplus/thefuck,barneyElDinosaurio/thefuck,PLNech/thefuck,LawrenceHan/thefuck,beni55/thefuck,thesoulkiller/thefuck,AntonChankin/thefuck,sekaiamber/thefuck,Clpsplug/thefuck,princeofdarkness76/thefuck,hxddh/thefuck,vanita5/thefuck,nvbn/thefuck,redreamality/thefuck,SimenB/thefuck,subajat1/thefuck,lawrencebenson/thefuck,MJerty/thefuck,lawrencebenson/thefuck,roth1002/thefuck,zhangzhishan/thefuck,levythu/thefuck,NguyenHoaiNam/thefuck,thesoulkiller/thefuck,mlk/thefuck,roth1002/thefuck,BertieJim/thefuck,hxddh/thefuck,manashmndl/thefuck,mlk/thefuck,barneyElDinosaurio/thefuck,LawrenceHan/thefuck,SimenB/thefuck,nvbn/thefuck,Aeron/thefuck,PLNech/thefuck,redreamality/thefuck,levythu/thefuck,bigplus/thefuck,mcarton/thefuck,vanita5/thefuck
|
patterns = ['permission denied',
'EACCES',
'pkg: Insufficient privileges',
'you cannot perform this operation unless you are root',
'non-root users cannot',
'Operation not permitted',
'root privilege',
'This command has to be run under the root user.',
'This operation requires root.',
'requested operation requires superuser privilege',
'must be run as root',
'must be superuser',
'must be root',
'need to be root',
'need root',
'only root can ',
'You don\'t have access to the history DB.',
'authentication is required']
def match(command, settings):
for pattern in patterns:
if pattern.lower() in command.stderr.lower()\
or pattern.lower() in command.stdout.lower():
return True
return False
def get_new_command(command, settings):
return u'sudo {}'.format(command.script)
Add one more 'need root' phrase
|
patterns = ['permission denied',
'EACCES',
'pkg: Insufficient privileges',
'you cannot perform this operation unless you are root',
'non-root users cannot',
'Operation not permitted',
'root privilege',
'This command has to be run under the root user.',
'This operation requires root.',
'requested operation requires superuser privilege',
'must be run as root',
'must run as root',
'must be superuser',
'must be root',
'need to be root',
'need root',
'only root can ',
'You don\'t have access to the history DB.',
'authentication is required']
def match(command, settings):
for pattern in patterns:
if pattern.lower() in command.stderr.lower()\
or pattern.lower() in command.stdout.lower():
return True
return False
def get_new_command(command, settings):
return u'sudo {}'.format(command.script)
|
<commit_before>patterns = ['permission denied',
'EACCES',
'pkg: Insufficient privileges',
'you cannot perform this operation unless you are root',
'non-root users cannot',
'Operation not permitted',
'root privilege',
'This command has to be run under the root user.',
'This operation requires root.',
'requested operation requires superuser privilege',
'must be run as root',
'must be superuser',
'must be root',
'need to be root',
'need root',
'only root can ',
'You don\'t have access to the history DB.',
'authentication is required']
def match(command, settings):
for pattern in patterns:
if pattern.lower() in command.stderr.lower()\
or pattern.lower() in command.stdout.lower():
return True
return False
def get_new_command(command, settings):
return u'sudo {}'.format(command.script)
<commit_msg>Add one more 'need root' phrase<commit_after>
|
patterns = ['permission denied',
'EACCES',
'pkg: Insufficient privileges',
'you cannot perform this operation unless you are root',
'non-root users cannot',
'Operation not permitted',
'root privilege',
'This command has to be run under the root user.',
'This operation requires root.',
'requested operation requires superuser privilege',
'must be run as root',
'must run as root',
'must be superuser',
'must be root',
'need to be root',
'need root',
'only root can ',
'You don\'t have access to the history DB.',
'authentication is required']
def match(command, settings):
for pattern in patterns:
if pattern.lower() in command.stderr.lower()\
or pattern.lower() in command.stdout.lower():
return True
return False
def get_new_command(command, settings):
return u'sudo {}'.format(command.script)
|
patterns = ['permission denied',
'EACCES',
'pkg: Insufficient privileges',
'you cannot perform this operation unless you are root',
'non-root users cannot',
'Operation not permitted',
'root privilege',
'This command has to be run under the root user.',
'This operation requires root.',
'requested operation requires superuser privilege',
'must be run as root',
'must be superuser',
'must be root',
'need to be root',
'need root',
'only root can ',
'You don\'t have access to the history DB.',
'authentication is required']
def match(command, settings):
for pattern in patterns:
if pattern.lower() in command.stderr.lower()\
or pattern.lower() in command.stdout.lower():
return True
return False
def get_new_command(command, settings):
return u'sudo {}'.format(command.script)
Add one more 'need root' phrasepatterns = ['permission denied',
'EACCES',
'pkg: Insufficient privileges',
'you cannot perform this operation unless you are root',
'non-root users cannot',
'Operation not permitted',
'root privilege',
'This command has to be run under the root user.',
'This operation requires root.',
'requested operation requires superuser privilege',
'must be run as root',
'must run as root',
'must be superuser',
'must be root',
'need to be root',
'need root',
'only root can ',
'You don\'t have access to the history DB.',
'authentication is required']
def match(command, settings):
for pattern in patterns:
if pattern.lower() in command.stderr.lower()\
or pattern.lower() in command.stdout.lower():
return True
return False
def get_new_command(command, settings):
return u'sudo {}'.format(command.script)
|
<commit_before>patterns = ['permission denied',
'EACCES',
'pkg: Insufficient privileges',
'you cannot perform this operation unless you are root',
'non-root users cannot',
'Operation not permitted',
'root privilege',
'This command has to be run under the root user.',
'This operation requires root.',
'requested operation requires superuser privilege',
'must be run as root',
'must be superuser',
'must be root',
'need to be root',
'need root',
'only root can ',
'You don\'t have access to the history DB.',
'authentication is required']
def match(command, settings):
for pattern in patterns:
if pattern.lower() in command.stderr.lower()\
or pattern.lower() in command.stdout.lower():
return True
return False
def get_new_command(command, settings):
return u'sudo {}'.format(command.script)
<commit_msg>Add one more 'need root' phrase<commit_after>patterns = ['permission denied',
'EACCES',
'pkg: Insufficient privileges',
'you cannot perform this operation unless you are root',
'non-root users cannot',
'Operation not permitted',
'root privilege',
'This command has to be run under the root user.',
'This operation requires root.',
'requested operation requires superuser privilege',
'must be run as root',
'must run as root',
'must be superuser',
'must be root',
'need to be root',
'need root',
'only root can ',
'You don\'t have access to the history DB.',
'authentication is required']
def match(command, settings):
for pattern in patterns:
if pattern.lower() in command.stderr.lower()\
or pattern.lower() in command.stdout.lower():
return True
return False
def get_new_command(command, settings):
return u'sudo {}'.format(command.script)
|
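For context on how the rule above is driven, here is a stub exercising match and get_new_command. The Command object is a stand-in invented for illustration: the record only shows that it exposes .script, .stdout and .stderr, and that the settings argument is unused by this rule.

from collections import namedtuple

Command = namedtuple('Command', ['script', 'stdout', 'stderr'])
cmd = Command(script='apt-get install vim', stdout='',
              stderr='E: Could not open lock file (13: Permission denied)')
assert match(cmd, settings=None) is True
assert get_new_command(cmd, settings=None) == 'sudo apt-get install vim'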
f9c6b98794f6bed718ac924d28f6d9607c7c3e84
|
rxcalc/migrations/0010_medication_admin.py
|
rxcalc/migrations/0010_medication_admin.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-21 21:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rxcalc', '0009_auto_20160121_2107'),
]
operations = [
migrations.AddField(
model_name='medication',
name='admin',
field=models.CharField(blank=True, default='', max_length=140),
),
]
|
Add admin field to Medication model
|
Add admin field to Medication model
|
Python
|
mit
|
onnudilol/vetcalc,onnudilol/vetcalc,onnudilol/vetcalc
|
Add admin field to Medication model
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-21 21:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rxcalc', '0009_auto_20160121_2107'),
]
operations = [
migrations.AddField(
model_name='medication',
name='admin',
field=models.CharField(blank=True, default='', max_length=140),
),
]
|
<commit_before><commit_msg>Add admin field to Medication model<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-21 21:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rxcalc', '0009_auto_20160121_2107'),
]
operations = [
migrations.AddField(
model_name='medication',
name='admin',
field=models.CharField(blank=True, default='', max_length=140),
),
]
|
Add admin field to Medication model# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-21 21:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rxcalc', '0009_auto_20160121_2107'),
]
operations = [
migrations.AddField(
model_name='medication',
name='admin',
field=models.CharField(blank=True, default='', max_length=140),
),
]
|
<commit_before><commit_msg>Add admin field to Medication model<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-21 21:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rxcalc', '0009_auto_20160121_2107'),
]
operations = [
migrations.AddField(
model_name='medication',
name='admin',
field=models.CharField(blank=True, default='', max_length=140),
),
]
|
|
edbc13599fa3cecef123c148463b53019b16165e
|
analysis/normalize-program-path.py
|
analysis/normalize-program-path.py
|
#!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
"""
Strip prefix from "program" key. This
can be used if slightly different paths
were used to generate result sets and they
need to be made comparable
"""
import argparse
import os
import logging
import pprint
import sys
import yaml
try:
# Try to use libyaml which is faster
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
# fall back on python implementation
from yaml import Loader, Dumper
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('result_yml', type=argparse.FileType('r'), help='File to open, if \'-\' then use stdin')
parser.add_argument('prefix_to_strip', help='Prefix to strip')
parser.add_argument('output_yml', help='Output file')
parser.add_argument("-l","--log-level",type=str, default="info",
dest="log_level", choices=['debug','info','warning','error'])
pargs = parser.parse_args(args)
logLevel = getattr(logging, pargs.log_level.upper(),None)
logging.basicConfig(level=logLevel)
if os.path.exists(pargs.output_yml):
logging.error('Refusing to overwrite {}'.format(pargs.output_yml))
return 1
logging.info('Loading YAML')
results = yaml.load(pargs.result_yml, Loader=Loader)
logging.info('Finished loading YAML')
usedNames = set()
for r in results:
programName = r['program']
if not programName.startswith(pargs.prefix_to_strip):
logging.error('Path "{}" does not start with prefix {}'.format(programName, pargs.prefix_to_strip))
return 1
newProgramName = programName.replace(pargs.prefix_to_strip, '')
if newProgramName in usedNames:
logging.error('Error stripping prefix causes program name clash for {}'.format(programName))
return 1
usedNames.add(newProgramName)
r['program'] = newProgramName
r['original_program'] = programName
with open(pargs.output_yml, 'w') as f:
yamlString = yaml.dump(results, default_flow_style=False, Dumper=Dumper)
f.write(yamlString)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
Add script to normalise program paths.
|
Add script to normalise program paths.
|
Python
|
bsd-3-clause
|
symbooglix/boogie-runner,symbooglix/boogie-runner
|
Add script to normalise program paths.
|
#!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
"""
Strip prefix from "program" key. This
can be used if slightly different paths
were used to generate result sets and they
need to be made comparable
"""
import argparse
import os
import logging
import pprint
import sys
import yaml
try:
# Try to use libyaml which is faster
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
# fall back on python implementation
from yaml import Loader, Dumper
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('result_yml', type=argparse.FileType('r'), help='File to open, if \'-\' then use stdin')
parser.add_argument('prefix_to_strip', help='Prefix to strip')
parser.add_argument('output_yml', help='Output file')
parser.add_argument("-l","--log-level",type=str, default="info",
dest="log_level", choices=['debug','info','warning','error'])
pargs = parser.parse_args(args)
logLevel = getattr(logging, pargs.log_level.upper(),None)
logging.basicConfig(level=logLevel)
if os.path.exists(pargs.output_yml):
logging.error('Refusing to overwrite {}'.format(pargs.output_yml))
return 1
logging.info('Loading YAML')
results = yaml.load(pargs.result_yml, Loader=Loader)
logging.info('Finished loading YAML')
usedNames = set()
for r in results:
programName = r['program']
if not programName.startswith(pargs.prefix_to_strip):
logging.error('Path "{}" does not start with prefix {}'.format(programName, pargs.prefix_to_strip))
return 1
newProgramName = programName.replace(pargs.prefix_to_strip, '')
if newProgramName in usedNames:
logging.error('Error stripping prefix causes program name clash for {}'.format(programName))
return 1
usedNames.add(newProgramName)
r['program'] = newProgramName
r['original_program'] = programName
with open(pargs.output_yml, 'w') as f:
yamlString = yaml.dump(results, default_flow_style=False, Dumper=Dumper)
f.write(yamlString)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
<commit_before><commit_msg>Add script to normalise program paths.<commit_after>
|
#!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
"""
Strip prefix from "program" key. This
can be used if slightly different paths
were used to generate result sets and they
need to be made comparable
"""
import argparse
import os
import logging
import pprint
import sys
import yaml
try:
# Try to use libyaml which is faster
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
# fall back on python implementation
from yaml import Loader, Dumper
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('result_yml', type=argparse.FileType('r'), help='File to open, if \'-\' then use stdin')
parser.add_argument('prefix_to_strip', help='Prefix to strip')
parser.add_argument('output_yml', help='Output file')
parser.add_argument("-l","--log-level",type=str, default="info",
dest="log_level", choices=['debug','info','warning','error'])
pargs = parser.parse_args(args)
logLevel = getattr(logging, pargs.log_level.upper(),None)
logging.basicConfig(level=logLevel)
if os.path.exists(pargs.output_yml):
logging.error('Refusing to overwrite {}'.format(pargs.output_yml))
return 1
logging.info('Loading YAML')
results = yaml.load(pargs.result_yml, Loader=Loader)
logging.info('Finished loading YAML')
usedNames = set()
for r in results:
programName = r['program']
if not programName.startswith(pargs.prefix_to_strip):
logging.error('Path "{}" does not start with prefix {}'.format(programName, pargs.prefix_to_strip))
return 1
newProgramName = programName.replace(pargs.prefix_to_strip, '')
if newProgramName in usedNames:
logging.error('Error stripping prefix causes program name clash for {}'.format(programName))
return 1
usedNames.add(newProgramName)
r['program'] = newProgramName
r['original_program'] = programName
with open(pargs.output_yml, 'w') as f:
yamlString = yaml.dump(results, default_flow_style=False, Dumper=Dumper)
f.write(yamlString)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
Add script to normalise program paths.#!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
"""
Strip prefix from "program" key. This
can be used if slightly different paths
were used to generate result sets and they
need to be made comparable
"""
import argparse
import os
import logging
import pprint
import sys
import yaml
try:
# Try to use libyaml which is faster
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
# fall back on python implementation
from yaml import Loader, Dumper
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('result_yml', type=argparse.FileType('r'), help='File to open, if \'-\' then use stdin')
parser.add_argument('prefix_to_strip', help='Prefix to strip')
parser.add_argument('output_yml', help='Output file')
parser.add_argument("-l","--log-level",type=str, default="info",
dest="log_level", choices=['debug','info','warning','error'])
pargs = parser.parse_args(args)
logLevel = getattr(logging, pargs.log_level.upper(),None)
logging.basicConfig(level=logLevel)
if os.path.exists(pargs.output_yml):
logging.error('Refusing to overwrite {}'.format(pargs.output_yml))
return 1
logging.info('Loading YAML')
results = yaml.load(pargs.result_yml, Loader=Loader)
logging.info('Finished loading YAML')
usedNames = set()
for r in results:
programName = r['program']
if not programName.startswith(pargs.prefix_to_strip):
logging.error('Path "{}" does not start with prefix {}'.format(programName, pargs.prefix_to_strip))
return 1
newProgramName = programName.replace(pargs.prefix_to_strip, '')
if newProgramName in usedNames:
logging.error('Error stripping prefix causes program name clash for {}'.format(programName))
return 1
usedNames.add(newProgramName)
r['program'] = newProgramName
r['original_program'] = programName
with open(pargs.output_yml, 'w') as f:
yamlString = yaml.dump(results, default_flow_style=False, Dumper=Dumper)
f.write(yamlString)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
<commit_before><commit_msg>Add script to normalise program paths.<commit_after>#!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
"""
Strip prefix from "program" key. This
can be used if slightly different paths
were used to generate result sets and they
need to be made comparable
"""
import argparse
import os
import logging
import pprint
import sys
import yaml
try:
# Try to use libyaml which is faster
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
# fall back on python implementation
from yaml import Loader, Dumper
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('result_yml', type=argparse.FileType('r'), help='File to open, if \'-\' then use stdin')
parser.add_argument('prefix_to_strip', help='Prefix to strip')
parser.add_argument('output_yml', help='Output file')
parser.add_argument("-l","--log-level",type=str, default="info",
dest="log_level", choices=['debug','info','warning','error'])
pargs = parser.parse_args(args)
logLevel = getattr(logging, pargs.log_level.upper(),None)
logging.basicConfig(level=logLevel)
if os.path.exists(pargs.output_yml):
logging.error('Refusing to overwrite {}'.format(pargs.output_yml))
return 1
logging.info('Loading YAML')
results = yaml.load(pargs.result_yml, Loader=Loader)
logging.info('Finished loading YAML')
usedNames = set()
for r in results:
programName = r['program']
if not programName.startswith(pargs.prefix_to_strip):
logging.error('Path "{}" does not start with prefix {}'.format(programName, pargs.prefix_to_strip))
return 1
newProgramName = programName.replace(pargs.prefix_to_strip, '')
if newProgramName in usedNames:
logging.error('Error stripping prefix causes program name clash for {}'.format(programName))
return 1
usedNames.add(newProgramName)
r['program'] = newProgramName
r['original_program'] = programName
with open(pargs.output_yml, 'w') as f:
yamlString = yaml.dump(results, default_flow_style=False, Dumper=Dumper)
f.write(yamlString)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
|
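The heart of the script above is the prefix-stripping loop. A sketch of that transformation on an in-memory list with made-up paths (illustrative only; the real script round-trips YAML files and logs errors instead of raising):

results = [{'program': '/data/runs/a.bpl'}, {'program': '/data/runs/sub/b.bpl'}]
prefix = '/data/runs/'
used = set()
for r in results:
    assert r['program'].startswith(prefix)
    new_name = r['program'].replace(prefix, '')
    assert new_name not in used   # a name clash would abort the real script
    used.add(new_name)
    r['original_program'], r['program'] = r['program'], new_name
assert [r['program'] for r in results] == ['a.bpl', 'sub/b.bpl']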
742571108e4baa2a3e177bc95f44c98a26462c7b
|
django_redis/serializers/pickle.py
|
django_redis/serializers/pickle.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
# Import the fastest implementation of
# pickle package. This should be removed
# when python3 becomes the only supported
# python version
try:
import cPickle as pickle
except ImportError:
import pickle
from django.core.exceptions import ImproperlyConfigured
try:
from django.utils.encoding import force_bytes
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import smart_bytes as force_bytes
from django.utils.encoding import force_unicode as force_text
from .base import BaseSerializer
class PickleSerializer(BaseSerializer):
def __init__(self, options):
self._pickle_version = -1
self.setup_pickle_version(options)
def setup_pickle_version(self, options):
if "PICKLE_VERSION" in options:
try:
self._pickle_version = int(self._options["PICKLE_VERSION"])
except (ValueError, TypeError):
raise ImproperlyConfigured("PICKLE_VERSION value must be an integer")
def dumps(self, value):
return pickle.dumps(value, self._pickle_version)
def loads(self, value):
return pickle.loads(force_bytes(value))
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
# Import the fastest implementation of
# pickle package. This should be removed
# when python3 becomes the only supported
# python version
try:
import cPickle as pickle
except ImportError:
import pickle
from django.core.exceptions import ImproperlyConfigured
try:
from django.utils.encoding import force_bytes
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import smart_bytes as force_bytes
from django.utils.encoding import force_unicode as force_text
from .base import BaseSerializer
class PickleSerializer(BaseSerializer):
def __init__(self, options):
self._pickle_version = -1
self.setup_pickle_version(options)
def setup_pickle_version(self, options):
if "PICKLE_VERSION" in options:
try:
self._pickle_version = int(options["PICKLE_VERSION"])
except (ValueError, TypeError):
raise ImproperlyConfigured("PICKLE_VERSION value must be an integer")
def dumps(self, value):
return pickle.dumps(value, self._pickle_version)
def loads(self, value):
return pickle.loads(force_bytes(value))
|
Fix small mistake; options is an argument, not a member.
|
Fix small mistake; options is an argument, not a member.
|
Python
|
bsd-3-clause
|
smahs/django-redis,zl352773277/django-redis,lucius-feng/django-redis,GetAmbassador/django-redis,yanheng/django-redis
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
# Import the fastest implementation of
# pickle package. This should be removed
# when python3 becomes the only supported
# python version
try:
import cPickle as pickle
except ImportError:
import pickle
from django.core.exceptions import ImproperlyConfigured
try:
from django.utils.encoding import force_bytes
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import smart_bytes as force_bytes
from django.utils.encoding import force_unicode as force_text
from .base import BaseSerializer
class PickleSerializer(BaseSerializer):
def __init__(self, options):
self._pickle_version = -1
self.setup_pickle_version(options)
def setup_pickle_version(self, options):
if "PICKLE_VERSION" in options:
try:
self._pickle_version = int(self._options["PICKLE_VERSION"])
except (ValueError, TypeError):
raise ImproperlyConfigured("PICKLE_VERSION value must be an integer")
def dumps(self, value):
return pickle.dumps(value, self._pickle_version)
def loads(self, value):
return pickle.loads(force_bytes(value))
Fix small mistake; options is an argument, not a member.
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
# Import the fastest implementation of
# pickle package. This should be removed
# when python3 becomes the only supported
# python version
try:
import cPickle as pickle
except ImportError:
import pickle
from django.core.exceptions import ImproperlyConfigured
try:
from django.utils.encoding import force_bytes
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import smart_bytes as force_bytes
from django.utils.encoding import force_unicode as force_text
from .base import BaseSerializer
class PickleSerializer(BaseSerializer):
def __init__(self, options):
self._pickle_version = -1
self.setup_pickle_version(options)
def setup_pickle_version(self, options):
if "PICKLE_VERSION" in options:
try:
self._pickle_version = int(options["PICKLE_VERSION"])
except (ValueError, TypeError):
raise ImproperlyConfigured("PICKLE_VERSION value must be an integer")
def dumps(self, value):
return pickle.dumps(value, self._pickle_version)
def loads(self, value):
return pickle.loads(force_bytes(value))
|
<commit_before># -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
# Import the fastest implementation of
# pickle package. This should be removed
# when python3 becomes the only supported
# python version
try:
import cPickle as pickle
except ImportError:
import pickle
from django.core.exceptions import ImproperlyConfigured
try:
from django.utils.encoding import force_bytes
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import smart_bytes as force_bytes
from django.utils.encoding import force_unicode as force_text
from .base import BaseSerializer
class PickleSerializer(BaseSerializer):
def __init__(self, options):
self._pickle_version = -1
self.setup_pickle_version(options)
def setup_pickle_version(self, options):
if "PICKLE_VERSION" in options:
try:
self._pickle_version = int(self._options["PICKLE_VERSION"])
except (ValueError, TypeError):
raise ImproperlyConfigured("PICKLE_VERSION value must be an integer")
def dumps(self, value):
return pickle.dumps(value, self._pickle_version)
def loads(self, value):
return pickle.loads(force_bytes(value))
<commit_msg>Fix small mistake; options is an argument, not a member.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
# Import the fastest implementation of
# pickle package. This should be removed
# when python3 becomes the only supported
# python version
try:
import cPickle as pickle
except ImportError:
import pickle
from django.core.exceptions import ImproperlyConfigured
try:
from django.utils.encoding import force_bytes
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import smart_bytes as force_bytes
from django.utils.encoding import force_unicode as force_text
from .base import BaseSerializer
class PickleSerializer(BaseSerializer):
def __init__(self, options):
self._pickle_version = -1
self.setup_pickle_version(options)
def setup_pickle_version(self, options):
if "PICKLE_VERSION" in options:
try:
self._pickle_version = int(options["PICKLE_VERSION"])
except (ValueError, TypeError):
raise ImproperlyConfigured("PICKLE_VERSION value must be an integer")
def dumps(self, value):
return pickle.dumps(value, self._pickle_version)
def loads(self, value):
return pickle.loads(force_bytes(value))
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
# Import the fastest implementation of
# pickle package. This should be removed
# when python3 becomes the only supported
# python version
try:
import cPickle as pickle
except ImportError:
import pickle
from django.core.exceptions import ImproperlyConfigured
try:
from django.utils.encoding import force_bytes
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import smart_bytes as force_bytes
from django.utils.encoding import force_unicode as force_text
from .base import BaseSerializer
class PickleSerializer(BaseSerializer):
def __init__(self, options):
self._pickle_version = -1
self.setup_pickle_version(options)
def setup_pickle_version(self, options):
if "PICKLE_VERSION" in options:
try:
self._pickle_version = int(self._options["PICKLE_VERSION"])
except (ValueError, TypeError):
raise ImproperlyConfigured("PICKLE_VERSION value must be an integer")
def dumps(self, value):
return pickle.dumps(value, self._pickle_version)
def loads(self, value):
return pickle.loads(force_bytes(value))
Fix small mistake; options is an argument, not a member.# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
# Import the fastest implementation of
# pickle package. This should be removed
# when python3 becomes the only supported
# python version
try:
import cPickle as pickle
except ImportError:
import pickle
from django.core.exceptions import ImproperlyConfigured
try:
from django.utils.encoding import force_bytes
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import smart_bytes as force_bytes
from django.utils.encoding import force_unicode as force_text
from .base import BaseSerializer
class PickleSerializer(BaseSerializer):
def __init__(self, options):
self._pickle_version = -1
self.setup_pickle_version(options)
def setup_pickle_version(self, options):
if "PICKLE_VERSION" in options:
try:
self._pickle_version = int(options["PICKLE_VERSION"])
except (ValueError, TypeError):
raise ImproperlyConfigured("PICKLE_VERSION value must be an integer")
def dumps(self, value):
return pickle.dumps(value, self._pickle_version)
def loads(self, value):
return pickle.loads(force_bytes(value))
|
<commit_before># -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
# Import the fastest implementation of
# pickle package. This should be removed
# when python3 becomes the only supported
# python version
try:
import cPickle as pickle
except ImportError:
import pickle
from django.core.exceptions import ImproperlyConfigured
try:
from django.utils.encoding import force_bytes
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import smart_bytes as force_bytes
from django.utils.encoding import force_unicode as force_text
from .base import BaseSerializer
class PickleSerializer(BaseSerializer):
def __init__(self, options):
self._pickle_version = -1
self.setup_pickle_version(options)
def setup_pickle_version(self, options):
if "PICKLE_VERSION" in options:
try:
self._pickle_version = int(self._options["PICKLE_VERSION"])
except (ValueError, TypeError):
raise ImproperlyConfigured("PICKLE_VERSION value must be an integer")
def dumps(self, value):
return pickle.dumps(value, self._pickle_version)
def loads(self, value):
return pickle.loads(force_bytes(value))
<commit_msg>Fix small mistake; options is an argument, not a member.<commit_after># -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
# Import the fastest implementation of
# pickle package. This should be removed
# when python3 becomes the only supported
# python version
try:
import cPickle as pickle
except ImportError:
import pickle
from django.core.exceptions import ImproperlyConfigured
try:
from django.utils.encoding import force_bytes
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import smart_bytes as force_bytes
from django.utils.encoding import force_unicode as force_text
from .base import BaseSerializer
class PickleSerializer(BaseSerializer):
def __init__(self, options):
self._pickle_version = -1
self.setup_pickle_version(options)
def setup_pickle_version(self, options):
if "PICKLE_VERSION" in options:
try:
self._pickle_version = int(options["PICKLE_VERSION"])
except (ValueError, TypeError):
raise ImproperlyConfigured("PICKLE_VERSION value must be an integer")
def dumps(self, value):
return pickle.dumps(value, self._pickle_version)
def loads(self, value):
return pickle.loads(force_bytes(value))
|
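The one-word fix in the record above matters because the pre-fix body could never run: with the old line, any OPTIONS dict containing PICKLE_VERSION raised AttributeError (self._options is never assigned), and that exception is not caught by the (ValueError, TypeError) handler. A Django-free sketch of the difference, using simplified stand-in classes rather than the real serializer:

class OldStyle(object):
    def setup_pickle_version(self, options):
        if "PICKLE_VERSION" in options:
            self._pickle_version = int(self._options["PICKLE_VERSION"])  # bug: self._options undefined

class FixedStyle(object):
    def setup_pickle_version(self, options):
        if "PICKLE_VERSION" in options:
            self._pickle_version = int(options["PICKLE_VERSION"])

try:
    OldStyle().setup_pickle_version({"PICKLE_VERSION": 2})
except AttributeError:
    pass                      # the pre-fix behaviour
fixed = FixedStyle()
fixed.setup_pickle_version({"PICKLE_VERSION": 2})
assert fixed._pickle_version == 2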
849aaa0ec7107837d33cf1bf1f2d3b76c59e62c8
|
assign_binary_data.py
|
assign_binary_data.py
|
def assign_binary_data(variable_name, initial_indent, maximum_width, data_string):
"""
Assign :attr:`data_string` to :attr:`variable_name` using parentheses to wrap multiple lines as needed.
:param str variable_name: The name of the variable being defined
:param int initial_indent: The initial indentation, which should be a multiple of 4
:param int maximum_width: The maximum width for each line (often between 80 and 160)
:param str data_string: The binary data
:return str: The text of the entire variable definition
"""
variable_name = variable_name.strip()
data_buffer = StringIO()
data_buffer.write(" " * initial_indent) # start with initial indent
data_buffer.write(variable_name) # declare the variable
data_buffer.write(" = (") # opening parenthesis
line_indent = data_buffer.tell()
max_string_length = maximum_width - line_indent - 2
chars = [chr(i) for i in xrange(0, 256)]
reprs = [repr(ch).strip("\'") for ch in chars]
reprs[ord("\'")] = "\\\'"
lengths = [len(r) for r in reprs]
total = 0
data_buffer.write("\'") # start the first string
for i, ch in enumerate(compressedData):
next_total = total + lengths[ord(ch)]
if next_total > max_string_length:
data_buffer.write("\'\n") # end quote for current line, plus line separator
data_buffer.write(" " * line_indent)
data_buffer.write("\'")
total = 0
data_buffer.write(reprs[ord(ch)])
total += lengths[ord(ch)]
data_buffer.write("\')\n")
return data_buffer
|
Add script for assigning wrapped string variables (for storing binary data)
|
Add script for assigning wrapped string variables (for storing binary data)
|
Python
|
lgpl-2.1
|
achernet/pyscripts
|
Add script for assigning wrapped string variables (for storing binary data)
|
def assign_binary_data(variable_name, initial_indent, maximum_width, data_string):
"""
Assign :attr:`data_string` to :attr:`variable_name` using parentheses to wrap multiple lines as needed.
:param str variable_name: The name of the variable being defined
:param int initial_indent: The initial indentation, which should be a multiple of 4
:param int maximum_width: The maximum width for each line (often between 80 and 160)
:param str data_string: The binary data
:return str: The text of the entire variable definition
"""
variable_name = variable_name.strip()
data_buffer = StringIO()
data_buffer.write(" " * initial_indent) # start with initial indent
data_buffer.write(variable_name) # declare the variable
data_buffer.write(" = (") # opening parenthesis
line_indent = data_buffer.tell()
max_string_length = maximum_width - line_indent - 2
chars = [chr(i) for i in xrange(0, 256)]
reprs = [repr(ch).strip("\'") for ch in chars]
reprs[ord("\'")] = "\\\'"
lengths = [len(r) for r in reprs]
total = 0
data_buffer.write("\'") # start the first string
for i, ch in enumerate(compressedData):
next_total = total + lengths[ord(ch)]
if next_total > max_string_length:
data_buffer.write("\'\n") # end quote for current line, plus line separator
data_buffer.write(" " * line_indent)
data_buffer.write("\'")
total = 0
data_buffer.write(reprs[ord(ch)])
total += lengths[ord(ch)]
data_buffer.write("\')\n")
return data_buffer
|
<commit_before><commit_msg>Add script for assigning wrapped string variables (for storing binary data)<commit_after>
|
def assign_binary_data(variable_name, initial_indent, maximum_width, data_string):
"""
Assign :attr:`data_string` to :attr:`variable_name` using parentheses to wrap multiple lines as needed.
:param str variable_name: The name of the variable being defined
:param int initial_indent: The initial indentation, which should be a multiple of 4
:param int maximum_width: The maximum width for each line (often between 80 and 160)
:param str data_string: The binary data
:return str: The text of the entire variable definition
"""
variable_name = variable_name.strip()
data_buffer = StringIO()
data_buffer.write(" " * initial_indent) # start with initial indent
data_buffer.write(variable_name) # declare the variable
data_buffer.write(" = (") # opening parenthesis
line_indent = data_buffer.tell()
max_string_length = maximum_width - line_indent - 2
chars = [chr(i) for i in xrange(0, 256)]
reprs = [repr(ch).strip("\'") for ch in chars]
reprs[ord("\'")] = "\\\'"
lengths = [len(r) for r in reprs]
total = 0
data_buffer.write("\'") # start the first string
for i, ch in enumerate(compressedData):
next_total = total + lengths[ord(ch)]
if next_total > max_string_length:
data_buffer.write("\'\n") # end quote for current line, plus line separator
data_buffer.write(" " * line_indent)
data_buffer.write("\'")
total = 0
data_buffer.write(reprs[ord(ch)])
total += lengths[ord(ch)]
data_buffer.write("\')\n")
return data_buffer
|
Add script for assigning wrapped string variables (for storing binary data)
def assign_binary_data(variable_name, initial_indent, maximum_width, data_string):
"""
Assign :attr:`data_string` to :attr:`variable_name` using parentheses to wrap multiple lines as needed.
:param str variable_name: The name of the variable being defined
:param int initial_indent: The initial indentation, which should be a multiple of 4
:param int maximum_width: The maximum width for each line (often between 80 and 160)
:param str data_string: The binary data
:return str: The text of the entire variable definition
"""
variable_name = variable_name.strip()
data_buffer = StringIO()
data_buffer.write(" " * initial_indent) # start with initial indent
data_buffer.write(variable_name) # declare the variable
data_buffer.write(" = (") # opening parenthesis
line_indent = data_buffer.tell()
max_string_length = maximum_width - line_indent - 2
chars = [chr(i) for i in xrange(0, 256)]
reprs = [repr(ch).strip("\'") for ch in chars]
reprs[ord("\'")] = "\\\'"
lengths = [len(r) for r in reprs]
total = 0
data_buffer.write("\'") # start the first string
for i, ch in enumerate(compressedData):
next_total = total + lengths[ord(ch)]
if next_total > max_string_length:
data_buffer.write("\'\n") # end quote for current line, plus line separator
data_buffer.write(" " * line_indent)
data_buffer.write("\'")
total = 0
data_buffer.write(reprs[ord(ch)])
total += lengths[ord(ch)]
data_buffer.write("\')\n")
return data_buffer
|
<commit_before><commit_msg>Add script for assigning wrapped string variables (for storing binary data)<commit_after>
def assign_binary_data(variable_name, initial_indent, maximum_width, data_string):
"""
Assign :attr:`data_string` to :attr:`variable_name` using parentheses to wrap multiple lines as needed.
:param str variable_name: The name of the variable being defined
:param int initial_indent: The initial indentation, which should be a multiple of 4
:param int maximum_width: The maximum width for each line (often between 80 and 160)
:param str data_string: The binary data
:return str: The text of the entire variable definition
"""
variable_name = variable_name.strip()
data_buffer = StringIO()
data_buffer.write(" " * initial_indent) # start with initial indent
data_buffer.write(variable_name) # declare the variable
data_buffer.write(" = (") # opening parenthesis
line_indent = data_buffer.tell()
max_string_length = maximum_width - line_indent - 2
chars = [chr(i) for i in xrange(0, 256)]
reprs = [repr(ch).strip("\'") for ch in chars]
reprs[ord("\'")] = "\\\'"
lengths = [len(r) for r in reprs]
total = 0
data_buffer.write("\'") # start the first string
for i, ch in enumerate(compressedData):
next_total = total + lengths[ord(ch)]
if next_total > max_string_length:
data_buffer.write("\'\n") # end quote for current line, plus line separator
data_buffer.write(" " * line_indent)
data_buffer.write("\'")
total = 0
data_buffer.write(reprs[ord(ch)])
total += lengths[ord(ch)]
data_buffer.write("\')\n")
return data_buffer
|
|
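As committed, the snippet above has a few rough edges: StringIO is never imported, the loop iterates over an undefined compressedData instead of the data_string parameter, and the buffer object rather than its text is returned. A sketch of what a runnable Python 2 variant might look like; this is a reconstruction for illustration, not the repository's code:

from cStringIO import StringIO

def assign_binary_data(variable_name, initial_indent, maximum_width, data_string):
    variable_name = variable_name.strip()
    buf = StringIO()
    buf.write(" " * initial_indent)       # initial indentation
    buf.write(variable_name)              # declare the variable
    buf.write(" = (")                     # opening parenthesis
    line_indent = buf.tell()
    max_len = maximum_width - line_indent - 2
    reprs = [repr(chr(i)).strip("'") for i in xrange(256)]
    reprs[ord("'")] = "\\'"
    total = 0
    buf.write("'")                        # start the first string
    for ch in data_string:
        piece = reprs[ord(ch)]
        if total + len(piece) > max_len:
            buf.write("'\n" + " " * line_indent + "'")   # wrap to a new quoted line
            total = 0
        buf.write(piece)
        total += len(piece)
    buf.write("')\n")
    return buf.getvalue()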
995cbc69b41216b08639ecd549f0dbdf241e94fc
|
zerver/migrations/0383_revoke_invitations_from_deactivated_users.py
|
zerver/migrations/0383_revoke_invitations_from_deactivated_users.py
|
from typing import List
from django.db import migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.utils.timezone import now as timezone_now
def revoke_invitations(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Realm = apps.get_model("zerver", "Realm")
Confirmation = apps.get_model("confirmation", "Confirmation")
Confirmation.INVITATION = 2
Confirmation.MULTIUSE_INVITE = 6
PreregistrationUser = apps.get_model("zerver", "PreregistrationUser")
UserProfile = apps.get_model("zerver", "UserProfile")
MultiuseInvite = apps.get_model("zerver", "MultiuseInvite")
STATUS_REVOKED = 2
def get_valid_invite_confirmations_generated_by_users(
user_ids: List[int],
) -> List[int]:
prereg_user_ids = (
PreregistrationUser.objects.filter(referred_by_id__in=user_ids)
.exclude(status=STATUS_REVOKED)
.values_list("id", flat=True)
)
confirmation_ids = list(
Confirmation.objects.filter(
type=Confirmation.INVITATION,
object_id__in=prereg_user_ids,
expiry_date__gte=timezone_now(),
).values_list("id", flat=True)
)
multiuse_invite_ids = MultiuseInvite.objects.filter(
referred_by_id__in=user_ids
).values_list("id", flat=True)
confirmation_ids += list(
Confirmation.objects.filter(
type=Confirmation.MULTIUSE_INVITE,
expiry_date__gte=timezone_now(),
object_id__in=multiuse_invite_ids,
).values_list("id", flat=True)
)
return confirmation_ids
print("")
for realm_id in Realm.objects.values_list("id", flat=True):
deactivated_user_ids = UserProfile.objects.filter(
is_active=False, realm_id=realm_id
).values_list("id", flat=True)
confirmation_ids = get_valid_invite_confirmations_generated_by_users(deactivated_user_ids)
print(f"Revoking Confirmations for realm {realm_id}: {confirmation_ids}")
Confirmation.objects.filter(id__in=confirmation_ids).update(expiry_date=timezone_now())
class Migration(migrations.Migration):
"""
User deactivation used to *not* revoke invitations generated by the user.
This has been fixed in the implementation, but this migration is still needed
to ensure old invitations are revoked for users who were deactivated in the past.
"""
atomic = False
dependencies = [
("zerver", "0382_create_role_based_system_groups"),
]
operations = [
migrations.RunPython(
revoke_invitations,
reverse_code=migrations.RunPython.noop,
elidable=True,
)
]
|
Add migration to revoke invites from old deactivated users.
|
migrations: Add migration to revoke invites from old deactivated users.
This is a natural follow-up to
93e8740218bef0a7fea43197dc818393a5e43928 - invitations sent by users
deactivated before the commit still need to be revoked, via a
migration.
The logic for finding the Confirmations to deactivate is based on
get_valid_invite_confirmations_generated_by_user in actions.py.
|
Python
|
apache-2.0
|
rht/zulip,zulip/zulip,zulip/zulip,andersk/zulip,kou/zulip,zulip/zulip,kou/zulip,zulip/zulip,andersk/zulip,andersk/zulip,kou/zulip,rht/zulip,rht/zulip,kou/zulip,rht/zulip,kou/zulip,andersk/zulip,andersk/zulip,zulip/zulip,kou/zulip,andersk/zulip,zulip/zulip,kou/zulip,rht/zulip,rht/zulip,andersk/zulip,zulip/zulip,rht/zulip
|
migrations: Add migration to revoke invites from old deactivated users.
This is a natural follow-up to
93e8740218bef0a7fea43197dc818393a5e43928 - invitations sent by users
deactivated before the commit still need to be revoked, via a
migration.
The logic for finding the Confirmations to deactivate is based on
get_valid_invite_confirmations_generated_by_user in actions.py.
|
from typing import List
from django.db import migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.utils.timezone import now as timezone_now
def revoke_invitations(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Realm = apps.get_model("zerver", "Realm")
Confirmation = apps.get_model("confirmation", "Confirmation")
Confirmation.INVITATION = 2
Confirmation.MULTIUSE_INVITE = 6
PreregistrationUser = apps.get_model("zerver", "PreregistrationUser")
UserProfile = apps.get_model("zerver", "UserProfile")
MultiuseInvite = apps.get_model("zerver", "MultiuseInvite")
STATUS_REVOKED = 2
def get_valid_invite_confirmations_generated_by_users(
user_ids: List[int],
) -> List[int]:
prereg_user_ids = (
PreregistrationUser.objects.filter(referred_by_id__in=user_ids)
.exclude(status=STATUS_REVOKED)
.values_list("id", flat=True)
)
confirmation_ids = list(
Confirmation.objects.filter(
type=Confirmation.INVITATION,
object_id__in=prereg_user_ids,
expiry_date__gte=timezone_now(),
).values_list("id", flat=True)
)
multiuse_invite_ids = MultiuseInvite.objects.filter(
referred_by_id__in=user_ids
).values_list("id", flat=True)
confirmation_ids += list(
Confirmation.objects.filter(
type=Confirmation.MULTIUSE_INVITE,
expiry_date__gte=timezone_now(),
object_id__in=multiuse_invite_ids,
).values_list("id", flat=True)
)
return confirmation_ids
print("")
for realm_id in Realm.objects.values_list("id", flat=True):
deactivated_user_ids = UserProfile.objects.filter(
is_active=False, realm_id=realm_id
).values_list("id", flat=True)
confirmation_ids = get_valid_invite_confirmations_generated_by_users(deactivated_user_ids)
print(f"Revoking Confirmations for realm {realm_id}: {confirmation_ids}")
Confirmation.objects.filter(id__in=confirmation_ids).update(expiry_date=timezone_now())
class Migration(migrations.Migration):
"""
User deactivation used to *not* revoke invitations generated by the user.
This has been fixed in the implementation, but this migration is still needed
to ensure old invitations are revoked for users who were deactivated in the past.
"""
atomic = False
dependencies = [
("zerver", "0382_create_role_based_system_groups"),
]
operations = [
migrations.RunPython(
revoke_invitations,
reverse_code=migrations.RunPython.noop,
elidable=True,
)
]
|
<commit_before><commit_msg>migrations: Add migration to revoke invites from old deactivated users.
This is a natural follow-up to
93e8740218bef0a7fea43197dc818393a5e43928 - invitations sent by users
deactivated before the commit still need to be revoked, via a
migration.
The logic for finding the Confirmations to deactivate is based on
get_valid_invite_confirmations_generated_by_user in actions.py.<commit_after>
|
from typing import List
from django.db import migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.utils.timezone import now as timezone_now
def revoke_invitations(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Realm = apps.get_model("zerver", "Realm")
Confirmation = apps.get_model("confirmation", "Confirmation")
Confirmation.INVITATION = 2
Confirmation.MULTIUSE_INVITE = 6
PreregistrationUser = apps.get_model("zerver", "PreregistrationUser")
UserProfile = apps.get_model("zerver", "UserProfile")
MultiuseInvite = apps.get_model("zerver", "MultiuseInvite")
STATUS_REVOKED = 2
def get_valid_invite_confirmations_generated_by_users(
user_ids: List[int],
) -> List[int]:
prereg_user_ids = (
PreregistrationUser.objects.filter(referred_by_id__in=user_ids)
.exclude(status=STATUS_REVOKED)
.values_list("id", flat=True)
)
confirmation_ids = list(
Confirmation.objects.filter(
type=Confirmation.INVITATION,
object_id__in=prereg_user_ids,
expiry_date__gte=timezone_now(),
).values_list("id", flat=True)
)
multiuse_invite_ids = MultiuseInvite.objects.filter(
referred_by_id__in=user_ids
).values_list("id", flat=True)
confirmation_ids += list(
Confirmation.objects.filter(
type=Confirmation.MULTIUSE_INVITE,
expiry_date__gte=timezone_now(),
object_id__in=multiuse_invite_ids,
).values_list("id", flat=True)
)
return confirmation_ids
print("")
for realm_id in Realm.objects.values_list("id", flat=True):
deactivated_user_ids = UserProfile.objects.filter(
is_active=False, realm_id=realm_id
).values_list("id", flat=True)
confirmation_ids = get_valid_invite_confirmations_generated_by_users(deactivated_user_ids)
print(f"Revoking Confirmations for realm {realm_id}: {confirmation_ids}")
Confirmation.objects.filter(id__in=confirmation_ids).update(expiry_date=timezone_now())
class Migration(migrations.Migration):
"""
User deactivation used to *not* revoke invitations generated by the user.
This has been fixed in the implementation, but this migration is still needed
to ensure old invitations are revoked for users who were deactivated in the past.
"""
atomic = False
dependencies = [
("zerver", "0382_create_role_based_system_groups"),
]
operations = [
migrations.RunPython(
revoke_invitations,
reverse_code=migrations.RunPython.noop,
elidable=True,
)
]
|
migrations: Add migration to revoke invites from old deactivated users.
This is a natural follow-up to
93e8740218bef0a7fea43197dc818393a5e43928 - invitations sent by users
deactivated before the commit still need to be revoked, via a
migration.
The logic for finding the Confirmations to deactivate is based on
get_valid_invite_confirmations_generated_by_user in actions.py.from typing import List
from django.db import migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.utils.timezone import now as timezone_now
def revoke_invitations(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Realm = apps.get_model("zerver", "Realm")
Confirmation = apps.get_model("confirmation", "Confirmation")
Confirmation.INVITATION = 2
Confirmation.MULTIUSE_INVITE = 6
PreregistrationUser = apps.get_model("zerver", "PreregistrationUser")
UserProfile = apps.get_model("zerver", "UserProfile")
MultiuseInvite = apps.get_model("zerver", "MultiuseInvite")
STATUS_REVOKED = 2
def get_valid_invite_confirmations_generated_by_users(
user_ids: List[int],
) -> List[int]:
prereg_user_ids = (
PreregistrationUser.objects.filter(referred_by_id__in=user_ids)
.exclude(status=STATUS_REVOKED)
.values_list("id", flat=True)
)
confirmation_ids = list(
Confirmation.objects.filter(
type=Confirmation.INVITATION,
object_id__in=prereg_user_ids,
expiry_date__gte=timezone_now(),
).values_list("id", flat=True)
)
multiuse_invite_ids = MultiuseInvite.objects.filter(
referred_by_id__in=user_ids
).values_list("id", flat=True)
confirmation_ids += list(
Confirmation.objects.filter(
type=Confirmation.MULTIUSE_INVITE,
expiry_date__gte=timezone_now(),
object_id__in=multiuse_invite_ids,
).values_list("id", flat=True)
)
return confirmation_ids
print("")
for realm_id in Realm.objects.values_list("id", flat=True):
deactivated_user_ids = UserProfile.objects.filter(
is_active=False, realm_id=realm_id
).values_list("id", flat=True)
confirmation_ids = get_valid_invite_confirmations_generated_by_users(deactivated_user_ids)
print(f"Revoking Confirmations for realm {realm_id}: {confirmation_ids}")
Confirmation.objects.filter(id__in=confirmation_ids).update(expiry_date=timezone_now())
class Migration(migrations.Migration):
"""
User deactivation used to *not* revoke invitations generated by the user.
This has been fixed in the implementation, but this migration is still needed
to ensure old invitations are revoked for users who were deactivated in the past.
"""
atomic = False
dependencies = [
("zerver", "0382_create_role_based_system_groups"),
]
operations = [
migrations.RunPython(
revoke_invitations,
reverse_code=migrations.RunPython.noop,
elidable=True,
)
]
|
<commit_before><commit_msg>migrations: Add migration to revoke invites from old deactivated users.
This is a natural follow-up to
93e8740218bef0a7fea43197dc818393a5e43928 - invitations sent by users
deactivated before the commit still need to be revoked, via a
migration.
The logic for finding the Confirmations to deactivate is based on
get_valid_invite_confirmations_generated_by_user in actions.py.<commit_after>from typing import List
from django.db import migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.utils.timezone import now as timezone_now
def revoke_invitations(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Realm = apps.get_model("zerver", "Realm")
Confirmation = apps.get_model("confirmation", "Confirmation")
Confirmation.INVITATION = 2
Confirmation.MULTIUSE_INVITE = 6
PreregistrationUser = apps.get_model("zerver", "PreregistrationUser")
UserProfile = apps.get_model("zerver", "UserProfile")
MultiuseInvite = apps.get_model("zerver", "MultiuseInvite")
STATUS_REVOKED = 2
def get_valid_invite_confirmations_generated_by_users(
user_ids: List[int],
) -> List[int]:
prereg_user_ids = (
PreregistrationUser.objects.filter(referred_by_id__in=user_ids)
.exclude(status=STATUS_REVOKED)
.values_list("id", flat=True)
)
confirmation_ids = list(
Confirmation.objects.filter(
type=Confirmation.INVITATION,
object_id__in=prereg_user_ids,
expiry_date__gte=timezone_now(),
).values_list("id", flat=True)
)
multiuse_invite_ids = MultiuseInvite.objects.filter(
referred_by_id__in=user_ids
).values_list("id", flat=True)
confirmation_ids += list(
Confirmation.objects.filter(
type=Confirmation.MULTIUSE_INVITE,
expiry_date__gte=timezone_now(),
object_id__in=multiuse_invite_ids,
).values_list("id", flat=True)
)
return confirmation_ids
print("")
for realm_id in Realm.objects.values_list("id", flat=True):
deactivated_user_ids = UserProfile.objects.filter(
is_active=False, realm_id=realm_id
).values_list("id", flat=True)
confirmation_ids = get_valid_invite_confirmations_generated_by_users(deactivated_user_ids)
print(f"Revoking Confirmations for realm {realm_id}: {confirmation_ids}")
Confirmation.objects.filter(id__in=confirmation_ids).update(expiry_date=timezone_now())
class Migration(migrations.Migration):
"""
User deactivation used to *not* revoke invitations generated by the user.
This has been fixed in the implementation, but this migration is still needed
to ensure old invitations are revoked for users who were deactivated in the past.
"""
atomic = False
dependencies = [
("zerver", "0382_create_role_based_system_groups"),
]
operations = [
migrations.RunPython(
revoke_invitations,
reverse_code=migrations.RunPython.noop,
elidable=True,
)
]
|
|
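Aside on the zulip record above: it is built around Django's RunPython data-migration pattern, i.e. historical models fetched via apps.get_model, a no-op reverse, and elidable=True. A minimal sketch of that pattern for reference; the app, model, and field names below are placeholders, not anything from the zulip schema:

from django.db import migrations


def forwards(apps, schema_editor):
    # Use the historical model rather than a direct import, so the code matches
    # the schema as it existed at this point in the migration graph.
    Thing = apps.get_model("myapp", "Thing")
    Thing.objects.filter(active=False).update(note="archived")


class Migration(migrations.Migration):

    dependencies = [("myapp", "0001_initial")]

    operations = [
        migrations.RunPython(
            forwards,
            reverse_code=migrations.RunPython.noop,  # nothing sensible to undo
            elidable=True,  # safe to drop if these migrations are ever squashed
        ),
    ]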
e2fbe143f9df142688683c5d90981284cfb71c69
|
game.py
|
game.py
|
from collections import namedtuple
import itertools
# rows: list of lists for top, middle, bottom rows
# draw: whatever has been drawn
# remaining: set of remaining cards
PineappleGame1State = namedtuple('PineappleGame1State', ['rows', 'draw', 'remaining'])
CARD_VALUES = '23456789TJQKA'
def card_value(card):
return CARD_VALUES.index(card[0]) + 2
def compute_hand(cards):
# Sort cards descending
cards.sort(lambda x, y: card_value(y) - card_value(x))
if len(cards) > 3:
# TODO: Check flushes, straights
pass
mults = []
cur_streak = 1
for i in range(len(cards) - 1):
if cards[i][0] == cards[i+1][0]:
cur_streak += 1
else:
mults += [(cur_streak, card_value(cards[i]))]
cur_streak = 1
mults += [(cur_streak, card_value(cards[i]))]
mults.sort(lambda x, y: -cmp(x, y))
# Check [NUM] of a kind hands
if mults[0][0] == 4:
hand_name = "4"
elif mults[0][0] == 3:
if len(mults) > 1 and mults[1][0] == 2:
hand_name = "3+2"
else:
hand_name = "3"
elif mults[0][0] == 2:
if mults[1][0] == 2:
hand_name = "2+2"
else:
hand_name = "2"
else:
hand_name = "1"
return tuple([hand_name] + [x[1] for x in mults])
class PineappleGame1(object):
'''
A game of Pineapple with only one player and no opponent cards shown.
'''
def __init__(self):
self.cards = [a + b for a, b in itertools.product(CARD_VALUES, 'CDHS')]
assert len(self.cards) == 52
self.init_remaining = set(self.cards)
def is_end(self, state):
return len(self.rows[0]) == 3 and len(self.rows[1]) == 5 and len(self.rows[2]) == 5
# Only call when is_end is true
def utility(self, state):
# TODO: compute royalties
raise NotImplementedError
def actions(self, state):
open_spaces = [len(x) for x in rows]
raise NotImplementedError
game = PineappleGame1()
|
Add basic N of a kind evaluation function for final hand.
|
Add basic N of a kind evaluation function for final hand.
|
Python
|
mit
|
session-id/pineapple-ai
|
Add basic N of a kind evaluation function for final hand.
|
from collections import namedtuple
import itertools
# rows: list of lists for top, middle, bottom rows
# draw: whatever has been drawn
# remaining: set of remaining cards
PineappleGame1State = namedtuple('PineappleGame1State', ['rows', 'draw', 'remaining'])
CARD_VALUES = '23456789TJQKA'
def card_value(card):
return CARD_VALUES.index(card[0]) + 2
def compute_hand(cards):
# Sort cards descending
cards.sort(lambda x, y: card_value(y) - card_value(x))
if len(cards) > 3:
# TODO: Check flushes, straights
pass
mults = []
cur_streak = 1
for i in range(len(cards) - 1):
if cards[i][0] == cards[i+1][0]:
cur_streak += 1
else:
mults += [(cur_streak, card_value(cards[i]))]
cur_streak = 1
mults += [(cur_streak, card_value(cards[i]))]
mults.sort(lambda x, y: -cmp(x, y))
# Check [NUM] of a kind hands
if mults[0][0] == 4:
hand_name = "4"
elif mults[0][0] == 3:
if len(mults) > 1 and mults[1][0] == 2:
hand_name = "3+2"
else:
hand_name = "3"
elif mults[0][0] == 2:
if mults[1][0] == 2:
hand_name = "2+2"
else:
hand_name = "2"
else:
hand_name = "1"
return tuple([hand_name] + [x[1] for x in mults])
class PineappleGame1(object):
'''
A game of Pineapple with only one player and no opponent cards shown.
'''
def __init__(self):
self.cards = [a + b for a, b in itertools.product(CARD_VALUES, 'CDHS')]
assert len(self.cards) == 52
self.init_remaining = set(self.cards)
def is_end(self, state):
return len(self.rows[0]) == 3 and len(self.rows[1]) == 5 and len(self.rows[2]) == 5
# Only call when is_end is true
def utility(self, state):
# TODO: compute royalties
raise NotImplementedError
def actions(self, state):
open_spaces = [len(x) for x in rows]
raise NotImplementedError
game = PineappleGame1()
|
<commit_before><commit_msg>Add basic N of a kind evaluation function for final hand.<commit_after>
|
from collections import namedtuple
import itertools
# rows: list of lists for top, middle, bottom rows
# draw: whatever has been drawn
# remaining: set of remaining cards
PineappleGame1State = namedtuple('PineappleGame1State', ['rows', 'draw', 'remaining'])
CARD_VALUES = '23456789TJQKA'
def card_value(card):
return CARD_VALUES.index(card[0]) + 2
def compute_hand(cards):
# Sort cards descending
cards.sort(lambda x, y: card_value(y) - card_value(x))
if len(cards) > 3:
# TODO: Check flushes, straights
pass
mults = []
cur_streak = 1
for i in range(len(cards) - 1):
if cards[i][0] == cards[i+1][0]:
cur_streak += 1
else:
mults += [(cur_streak, card_value(cards[i]))]
cur_streak = 1
mults += [(cur_streak, card_value(cards[i]))]
mults.sort(lambda x, y: -cmp(x, y))
# Check [NUM] of a kind hands
if mults[0][0] == 4:
hand_name = "4"
elif mults[0][0] == 3:
if len(mults) > 1 and mults[1][0] == 2:
hand_name = "3+2"
else:
hand_name = "3"
elif mults[0][0] == 2:
if mults[1][0] == 2:
hand_name = "2+2"
else:
hand_name = "2"
else:
hand_name = "1"
return tuple([hand_name] + [x[1] for x in mults])
class PineappleGame1(object):
'''
A game of Pineapple with only one player and no opponent cards shown.
'''
def __init__(self):
self.cards = [a + b for a, b in itertools.product(CARD_VALUES, 'CDHS')]
assert len(self.cards) == 52
self.init_remaining = set(self.cards)
def is_end(self, state):
return len(self.rows[0]) == 3 and len(self.rows[1]) == 5 and len(self.rows[2]) == 5
# Only call when is_end is true
def utility(self, state):
# TODO: compute royalties
raise NotImplementedError
def actions(self, state):
open_spaces = [len(x) for x in rows]
raise NotImplementedError
game = PineappleGame1()
|
Add basic N of a kind evaluation function for final hand.from collections import namedtuple
import itertools
# rows: list of lists for top, middle, bottom rows
# draw: whatever has been drawn
# remaining: set of remaining cards
PineappleGame1State = namedtuple('PineappleGame1State', ['rows', 'draw', 'remaining'])
CARD_VALUES = '23456789TJQKA'
def card_value(card):
return CARD_VALUES.index(card[0]) + 2
def compute_hand(cards):
# Sort cards descending
cards.sort(lambda x, y: card_value(y) - card_value(x))
if len(cards) > 3:
# TODO: Check flushes, straights
pass
mults = []
cur_streak = 1
for i in range(len(cards) - 1):
if cards[i][0] == cards[i+1][0]:
cur_streak += 1
else:
mults += [(cur_streak, card_value(cards[i]))]
cur_streak = 1
mults += [(cur_streak, card_value(cards[i]))]
mults.sort(lambda x, y: -cmp(x, y))
# Check [NUM] of a kind hands
if mults[0][0] == 4:
hand_name = "4"
elif mults[0][0] == 3:
if len(mults) > 1 and mults[1][0] == 2:
hand_name = "3+2"
else:
hand_name = "3"
elif mults[0][0] == 2:
if mults[1][0] == 2:
hand_name = "2+2"
else:
hand_name = "2"
else:
hand_name = "1"
return tuple([hand_name] + [x[1] for x in mults])
class PineappleGame1(object):
'''
A game of Pineapple with only one player and no opponent cards shown.
'''
def __init__(self):
self.cards = [a + b for a, b in itertools.product(CARD_VALUES, 'CDHS')]
assert len(self.cards) == 52
self.init_remaining = set(self.cards)
def is_end(self, state):
return len(self.rows[0]) == 3 and len(self.rows[1]) == 5 and len(self.rows[2]) == 5
# Only call when is_end is true
def utility(self, state):
# TODO: compute royalties
raise NotImplementedError
def actions(self, state):
open_spaces = [len(x) for x in rows]
raise NotImplementedError
game = PineappleGame1()
|
<commit_before><commit_msg>Add basic N of a kind evaluation function for final hand.<commit_after>from collections import namedtuple
import itertools
# rows: list of lists for top, middle, bottom rows
# draw: whatever has been drawn
# remaining: set of remaining cards
PineappleGame1State = namedtuple('PineappleGame1State', ['rows', 'draw', 'remaining'])
CARD_VALUES = '23456789TJQKA'
def card_value(card):
return CARD_VALUES.index(card[0]) + 2
def compute_hand(cards):
# Sort cards descending
cards.sort(lambda x, y: card_value(y) - card_value(x))
if len(cards) > 3:
# TODO: Check flushes, straights
pass
mults = []
cur_streak = 1
for i in range(len(cards) - 1):
if cards[i][0] == cards[i+1][0]:
cur_streak += 1
else:
mults += [(cur_streak, card_value(cards[i]))]
cur_streak = 1
mults += [(cur_streak, card_value(cards[i]))]
mults.sort(lambda x, y: -cmp(x, y))
# Check [NUM] of a kind hands
if mults[0][0] == 4:
hand_name = "4"
elif mults[0][0] == 3:
if len(mults) > 1 and mults[1][0] == 2:
hand_name = "3+2"
else:
hand_name = "3"
elif mults[0][0] == 2:
if mults[1][0] == 2:
hand_name = "2+2"
else:
hand_name = "2"
else:
hand_name = "1"
return tuple([hand_name] + [x[1] for x in mults])
class PineappleGame1(object):
'''
A game of Pineapple with only one player and no opponent cards shown.
'''
def __init__(self):
self.cards = [a + b for a, b in itertools.product(CARD_VALUES, 'CDHS')]
assert len(self.cards) == 52
self.init_remaining = set(self.cards)
def is_end(self, state):
return len(self.rows[0]) == 3 and len(self.rows[1]) == 5 and len(self.rows[2]) == 5
# Only call when is_end is true
def utility(self, state):
# TODO: compute royalties
raise NotImplementedError
def actions(self, state):
open_spaces = [len(x) for x in rows]
raise NotImplementedError
game = PineappleGame1()
|
|
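Aside on the game.py record above: compute_hand counts rank multiplicities with a hand-rolled streak loop and Python 2 sort comparators (cmp and list.sort(lambda ...)). An equivalent Python 3 sketch of just that grouping step, illustrative only and not part of the committed file:

from collections import Counter

CARD_VALUES = '23456789TJQKA'

def rank_counts(cards):
    # cards are strings like 'AS' or 'TD'; count how often each rank occurs.
    counts = Counter(card[0] for card in cards)
    # Sort by (multiplicity, rank value) descending, mirroring the `mults` list above.
    return sorted(((n, CARD_VALUES.index(r) + 2) for r, n in counts.items()), reverse=True)

print(rank_counts(['AS', 'AD', 'KC', 'KD', '2H']))  # [(2, 14), (2, 13), (1, 2)] -> a "2+2" hand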
bde734dc751cbfd59b40c1c2f0d60229795fae4a
|
tests/app/main/test_request_header.py
|
tests/app/main/test_request_header.py
|
import pytest
from tests.conftest import set_config_values
@pytest.mark.parametrize('check_proxy_header,header_value,expected_code', [
(True, 'key_1', 200),
(True, 'wrong_key', 403),
(False, 'wrong_key', 200),
(False, 'key_1', 200),
])
def test_route_correct_secret_key(app_, check_proxy_header, header_value, expected_code):
with set_config_values(app_, {
'ROUTE_SECRET_KEY_1': 'key_1',
'ROUTE_SECRET_KEY_2': '',
'CHECK_PROXY_HEADER': check_proxy_header,
}):
client = app_.test_client()
response = client.get(
path='/_status?elb=True',
headers=[
('X-Custom-forwarder', header_value),
]
)
assert response.status_code == expected_code
|
import pytest
from tests.conftest import set_config_values
@pytest.mark.parametrize('check_proxy_header,header_value,expected_code', [
(True, 'key_1', 200),
(True, 'wrong_key', 403),
(False, 'wrong_key', 200),
(False, 'key_1', 200),
])
def test_route_correct_secret_key(app_, check_proxy_header, header_value, expected_code):
with set_config_values(app_, {
'ROUTE_SECRET_KEY_1': 'key_1',
'ROUTE_SECRET_KEY_2': '',
'CHECK_PROXY_HEADER': check_proxy_header,
}):
with app_.test_client() as client:
response = client.get(
path='/_status?elb=True',
headers=[
('X-Custom-forwarder', header_value),
]
)
assert response.status_code == expected_code
|
Use test_client() as context manager
|
Use test_client() as context manager
|
Python
|
mit
|
gov-cjwaszczuk/notifications-admin,gov-cjwaszczuk/notifications-admin,gov-cjwaszczuk/notifications-admin,alphagov/notifications-admin,gov-cjwaszczuk/notifications-admin,alphagov/notifications-admin,alphagov/notifications-admin,alphagov/notifications-admin
|
import pytest
from tests.conftest import set_config_values
@pytest.mark.parametrize('check_proxy_header,header_value,expected_code', [
(True, 'key_1', 200),
(True, 'wrong_key', 403),
(False, 'wrong_key', 200),
(False, 'key_1', 200),
])
def test_route_correct_secret_key(app_, check_proxy_header, header_value, expected_code):
with set_config_values(app_, {
'ROUTE_SECRET_KEY_1': 'key_1',
'ROUTE_SECRET_KEY_2': '',
'CHECK_PROXY_HEADER': check_proxy_header,
}):
client = app_.test_client()
response = client.get(
path='/_status?elb=True',
headers=[
('X-Custom-forwarder', header_value),
]
)
assert response.status_code == expected_code
Use test_client() as context manager
|
import pytest
from tests.conftest import set_config_values
@pytest.mark.parametrize('check_proxy_header,header_value,expected_code', [
(True, 'key_1', 200),
(True, 'wrong_key', 403),
(False, 'wrong_key', 200),
(False, 'key_1', 200),
])
def test_route_correct_secret_key(app_, check_proxy_header, header_value, expected_code):
with set_config_values(app_, {
'ROUTE_SECRET_KEY_1': 'key_1',
'ROUTE_SECRET_KEY_2': '',
'CHECK_PROXY_HEADER': check_proxy_header,
}):
with app_.test_client() as client:
response = client.get(
path='/_status?elb=True',
headers=[
('X-Custom-forwarder', header_value),
]
)
assert response.status_code == expected_code
|
<commit_before>import pytest
from tests.conftest import set_config_values
@pytest.mark.parametrize('check_proxy_header,header_value,expected_code', [
(True, 'key_1', 200),
(True, 'wrong_key', 403),
(False, 'wrong_key', 200),
(False, 'key_1', 200),
])
def test_route_correct_secret_key(app_, check_proxy_header, header_value, expected_code):
with set_config_values(app_, {
'ROUTE_SECRET_KEY_1': 'key_1',
'ROUTE_SECRET_KEY_2': '',
'CHECK_PROXY_HEADER': check_proxy_header,
}):
client = app_.test_client()
response = client.get(
path='/_status?elb=True',
headers=[
('X-Custom-forwarder', header_value),
]
)
assert response.status_code == expected_code
<commit_msg>Use test_client() as context manager<commit_after>
|
import pytest
from tests.conftest import set_config_values
@pytest.mark.parametrize('check_proxy_header,header_value,expected_code', [
(True, 'key_1', 200),
(True, 'wrong_key', 403),
(False, 'wrong_key', 200),
(False, 'key_1', 200),
])
def test_route_correct_secret_key(app_, check_proxy_header, header_value, expected_code):
with set_config_values(app_, {
'ROUTE_SECRET_KEY_1': 'key_1',
'ROUTE_SECRET_KEY_2': '',
'CHECK_PROXY_HEADER': check_proxy_header,
}):
with app_.test_client() as client:
response = client.get(
path='/_status?elb=True',
headers=[
('X-Custom-forwarder', header_value),
]
)
assert response.status_code == expected_code
|
import pytest
from tests.conftest import set_config_values
@pytest.mark.parametrize('check_proxy_header,header_value,expected_code', [
(True, 'key_1', 200),
(True, 'wrong_key', 403),
(False, 'wrong_key', 200),
(False, 'key_1', 200),
])
def test_route_correct_secret_key(app_, check_proxy_header, header_value, expected_code):
with set_config_values(app_, {
'ROUTE_SECRET_KEY_1': 'key_1',
'ROUTE_SECRET_KEY_2': '',
'CHECK_PROXY_HEADER': check_proxy_header,
}):
client = app_.test_client()
response = client.get(
path='/_status?elb=True',
headers=[
('X-Custom-forwarder', header_value),
]
)
assert response.status_code == expected_code
Use test_client() as context managerimport pytest
from tests.conftest import set_config_values
@pytest.mark.parametrize('check_proxy_header,header_value,expected_code', [
(True, 'key_1', 200),
(True, 'wrong_key', 403),
(False, 'wrong_key', 200),
(False, 'key_1', 200),
])
def test_route_correct_secret_key(app_, check_proxy_header, header_value, expected_code):
with set_config_values(app_, {
'ROUTE_SECRET_KEY_1': 'key_1',
'ROUTE_SECRET_KEY_2': '',
'CHECK_PROXY_HEADER': check_proxy_header,
}):
with app_.test_client() as client:
response = client.get(
path='/_status?elb=True',
headers=[
('X-Custom-forwarder', header_value),
]
)
assert response.status_code == expected_code
|
<commit_before>import pytest
from tests.conftest import set_config_values
@pytest.mark.parametrize('check_proxy_header,header_value,expected_code', [
(True, 'key_1', 200),
(True, 'wrong_key', 403),
(False, 'wrong_key', 200),
(False, 'key_1', 200),
])
def test_route_correct_secret_key(app_, check_proxy_header, header_value, expected_code):
with set_config_values(app_, {
'ROUTE_SECRET_KEY_1': 'key_1',
'ROUTE_SECRET_KEY_2': '',
'CHECK_PROXY_HEADER': check_proxy_header,
}):
client = app_.test_client()
response = client.get(
path='/_status?elb=True',
headers=[
('X-Custom-forwarder', header_value),
]
)
assert response.status_code == expected_code
<commit_msg>Use test_client() as context manager<commit_after>import pytest
from tests.conftest import set_config_values
@pytest.mark.parametrize('check_proxy_header,header_value,expected_code', [
(True, 'key_1', 200),
(True, 'wrong_key', 403),
(False, 'wrong_key', 200),
(False, 'key_1', 200),
])
def test_route_correct_secret_key(app_, check_proxy_header, header_value, expected_code):
with set_config_values(app_, {
'ROUTE_SECRET_KEY_1': 'key_1',
'ROUTE_SECRET_KEY_2': '',
'CHECK_PROXY_HEADER': check_proxy_header,
}):
with app_.test_client() as client:
response = client.get(
path='/_status?elb=True',
headers=[
('X-Custom-forwarder', header_value),
]
)
assert response.status_code == expected_code
|
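Aside on the test_request_header.py record above: the change swaps a bare test_client() call for the context-manager form. In Flask the with-block variant also keeps the request context available until the block exits (useful when a test inspects flask.request or the session afterwards) and guarantees cleanup. A small self-contained illustration with a throwaway app, not Notify code:

from flask import Flask

app = Flask(__name__)

@app.route('/_status')
def status():
    return 'ok'

# The context-manager form cleans up after itself when the block exits.
with app.test_client() as client:
    response = client.get('/_status', headers=[('X-Custom-forwarder', 'key_1')])
    assert response.status_code == 200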
9c3d685d02d2ffe509209288b3d4164f0dfa35fc
|
statistics/with-mercy.py
|
statistics/with-mercy.py
|
from collections import namedtuple
import math
Box = namedtuple('Box', ['count', 'cost'])
MAX_COUNT = 50
PROBABILITY = 1.0 / 60
TABLE = [
Box(2, 2400),
Box(5, 6000),
Box(11, 12000),
Box(24, 24000),
Box(50, 48000)
]
PRIOR = [math.pow(1.0 - PROBABILITY, count) for count,__ in TABLE]
pick = [0, 0, 0, 0, 0]
solutions = []
def solve(idx, cost, count, prior):
if count >= MAX_COUNT:
solutions.append((cost, convert_pick(pick)))
return
if idx >= len(TABLE):
return
pick[idx] += 1
box =TABLE[idx]
solve(idx, cost + (prior*box.cost), count + box.count, prior*PRIOR[idx])
pick[idx] -= 1
solve(idx+1, cost, count, prior)
def convert_pick(pick):
ret = []
for i, n in enumerate(pick):
ret.extend([TABLE[i].count for _ in range(n)])
return ret
solve(0, 0, 0, 1.0)
answers = sorted(solutions, key=lambda t: t[0])
for cost, pick in answers[:5]:
print "%.2f, %s" % (cost, pick)
|
Add example file for witch-mercy
|
Add example file for witch-mercy
|
Python
|
mit
|
yeonghoey/yeonghoey,yeonghoey/yeonghoey,yeonghoey/yeonghoey,yeonghoey/notes,yeonghoey/yeonghoey
|
Add example file for witch-mercy
|
from collections import namedtuple
import math
Box = namedtuple('Box', ['count', 'cost'])
MAX_COUNT = 50
PROBABILITY = 1.0 / 60
TABLE = [
Box(2, 2400),
Box(5, 6000),
Box(11, 12000),
Box(24, 24000),
Box(50, 48000)
]
PRIOR = [math.pow(1.0 - PROBABILITY, count) for count,__ in TABLE]
pick = [0, 0, 0, 0, 0]
solutions = []
def solve(idx, cost, count, prior):
if count >= MAX_COUNT:
solutions.append((cost, convert_pick(pick)))
return
if idx >= len(TABLE):
return
pick[idx] += 1
box =TABLE[idx]
solve(idx, cost + (prior*box.cost), count + box.count, prior*PRIOR[idx])
pick[idx] -= 1
solve(idx+1, cost, count, prior)
def convert_pick(pick):
ret = []
for i, n in enumerate(pick):
ret.extend([TABLE[i].count for _ in range(n)])
return ret
solve(0, 0, 0, 1.0)
answers = sorted(solutions, key=lambda t: t[0])
for cost, pick in answers[:5]:
print "%.2f, %s" % (cost, pick)
|
<commit_before><commit_msg>Add example file for witch-mercy<commit_after>
|
from collections import namedtuple
import math
Box = namedtuple('Box', ['count', 'cost'])
MAX_COUNT = 50
PROBABILITY = 1.0 / 60
TABLE = [
Box(2, 2400),
Box(5, 6000),
Box(11, 12000),
Box(24, 24000),
Box(50, 48000)
]
PRIOR = [math.pow(1.0 - PROBABILITY, count) for count,__ in TABLE]
pick = [0, 0, 0, 0, 0]
solutions = []
def solve(idx, cost, count, prior):
if count >= MAX_COUNT:
solutions.append((cost, convert_pick(pick)))
return
if idx >= len(TABLE):
return
pick[idx] += 1
box =TABLE[idx]
solve(idx, cost + (prior*box.cost), count + box.count, prior*PRIOR[idx])
pick[idx] -= 1
solve(idx+1, cost, count, prior)
def convert_pick(pick):
ret = []
for i, n in enumerate(pick):
ret.extend([TABLE[i].count for _ in range(n)])
return ret
solve(0, 0, 0, 1.0)
answers = sorted(solutions, key=lambda t: t[0])
for cost, pick in answers[:5]:
print "%.2f, %s" % (cost, pick)
|
Add example file for witch-mercyfrom collections import namedtuple
import math
Box = namedtuple('Box', ['count', 'cost'])
MAX_COUNT = 50
PROBABILITY = 1.0 / 60
TABLE = [
Box(2, 2400),
Box(5, 6000),
Box(11, 12000),
Box(24, 24000),
Box(50, 48000)
]
PRIOR = [math.pow(1.0 - PROBABILITY, count) for count,__ in TABLE]
pick = [0, 0, 0, 0, 0]
solutions = []
def solve(idx, cost, count, prior):
if count >= MAX_COUNT:
solutions.append((cost, convert_pick(pick)))
return
if idx >= len(TABLE):
return
pick[idx] += 1
box =TABLE[idx]
solve(idx, cost + (prior*box.cost), count + box.count, prior*PRIOR[idx])
pick[idx] -= 1
solve(idx+1, cost, count, prior)
def convert_pick(pick):
ret = []
for i, n in enumerate(pick):
ret.extend([TABLE[i].count for _ in range(n)])
return ret
solve(0, 0, 0, 1.0)
answers = sorted(solutions, key=lambda t: t[0])
for cost, pick in answers[:5]:
print "%.2f, %s" % (cost, pick)
|
<commit_before><commit_msg>Add example file for witch-mercy<commit_after>from collections import namedtuple
import math
Box = namedtuple('Box', ['count', 'cost'])
MAX_COUNT = 50
PROBABILITY = 1.0 / 60
TABLE = [
Box(2, 2400),
Box(5, 6000),
Box(11, 12000),
Box(24, 24000),
Box(50, 48000)
]
PRIOR = [math.pow(1.0 - PROBABILITY, count) for count,__ in TABLE]
pick = [0, 0, 0, 0, 0]
solutions = []
def solve(idx, cost, count, prior):
if count >= MAX_COUNT:
solutions.append((cost, convert_pick(pick)))
return
if idx >= len(TABLE):
return
pick[idx] += 1
box =TABLE[idx]
solve(idx, cost + (prior*box.cost), count + box.count, prior*PRIOR[idx])
pick[idx] -= 1
solve(idx+1, cost, count, prior)
def convert_pick(pick):
ret = []
for i, n in enumerate(pick):
ret.extend([TABLE[i].count for _ in range(n)])
return ret
solve(0, 0, 0, 1.0)
answers = sorted(solutions, key=lambda t: t[0])
for cost, pick in answers[:5]:
print "%.2f, %s" % (cost, pick)
|
|
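Aside on the with-mercy.py record above: as I read solve(), `prior` is the probability that none of the boxes opened so far contained the 1-in-60 drop, so each purchase contributes prior * cost and `cost` accumulates the expected spend of a buying order. The same accumulation for one fixed order, written out directly (illustrative only):

import math

PROBABILITY = 1.0 / 60
order = [(2, 2400), (5, 6000)]  # (count, cost) pairs, as in TABLE

expected = 0.0
p_no_drop_yet = 1.0
for count, cost in order:
    expected += p_no_drop_yet * cost                      # paid only if still searching
    p_no_drop_yet *= math.pow(1.0 - PROBABILITY, count)   # chance this box also misses
print("expected cost: %.2f" % expected)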
7572092883a3ec4dd66c209e9b47d28a5f93cba7
|
gpioCleanup.py
|
gpioCleanup.py
|
import RPi.GPIO as GPIO
GPIO.setup(16, GPIO.IN)
GPIO.setup(20, GPIO.IN)
GPIO.setup(23, GPIO.IN)
GPIO.setup(18, GPIO.IN)
GPIO.setup(17, GPIO.IN)
GPIO.setup(27, GPIO.IN)
GPIO.setup(5, GPIO.IN)
GPIO.cleanup()
|
Add gpio clean up tool
|
Add gpio clean up tool
|
Python
|
mit
|
azmiik/tweetBooth
|
Add gpio clean up tool
|
import RPi.GPIO as GPIO
GPIO.setup(16, GPIO.IN)
GPIO.setup(20, GPIO.IN)
GPIO.setup(23, GPIO.IN)
GPIO.setup(18, GPIO.IN)
GPIO.setup(17, GPIO.IN)
GPIO.setup(27, GPIO.IN)
GPIO.setup(5, GPIO.IN)
GPIO.cleanup()
|
<commit_before><commit_msg>Add gpio clean up tool<commit_after>
|
import RPi.GPIO as GPIO
GPIO.setup(16, GPIO.IN)
GPIO.setup(20, GPIO.IN)
GPIO.setup(23, GPIO.IN)
GPIO.setup(18, GPIO.IN)
GPIO.setup(17, GPIO.IN)
GPIO.setup(27, GPIO.IN)
GPIO.setup(5, GPIO.IN)
GPIO.cleanup()
|
Add gpio clean up toolimport RPi.GPIO as GPIO
GPIO.setup(16, GPIO.IN)
GPIO.setup(20, GPIO.IN)
GPIO.setup(23, GPIO.IN)
GPIO.setup(18, GPIO.IN)
GPIO.setup(17, GPIO.IN)
GPIO.setup(27, GPIO.IN)
GPIO.setup(5, GPIO.IN)
GPIO.cleanup()
|
<commit_before><commit_msg>Add gpio clean up tool<commit_after>import RPi.GPIO as GPIO
GPIO.setup(16, GPIO.IN)
GPIO.setup(20, GPIO.IN)
GPIO.setup(23, GPIO.IN)
GPIO.setup(18, GPIO.IN)
GPIO.setup(17, GPIO.IN)
GPIO.setup(27, GPIO.IN)
GPIO.setup(5, GPIO.IN)
GPIO.cleanup()
|
|
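Aside on the gpioCleanup.py record above: RPi.GPIO normally refuses GPIO.setup() calls until a pin-numbering mode has been chosen, and the committed script omits that step. A version of the same cleanup idea with the mode set explicitly (BCM numbering is an assumption here):

import RPi.GPIO as GPIO

GPIO.setmode(GPIO.BCM)            # pick a numbering scheme before any setup() call
for pin in (16, 20, 23, 18, 17, 27, 5):
    GPIO.setup(pin, GPIO.IN)      # reconfigure each pin as a plain input
GPIO.cleanup()                    # then release every channel this script touched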
0cfe6707cf02bab74741433dbe7a91b8c5c57f38
|
cinder/tests/unit/test_fixtures.py
|
cinder/tests/unit/test_fixtures.py
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures as fx
from oslo_log import log as logging
import testtools
from cinder.tests import fixtures
class TestLogging(testtools.TestCase):
def test_default_logging(self):
stdlog = self.useFixture(fixtures.StandardLogging())
root = logging.getLogger()
# there should be a null handler as well at DEBUG
self.assertEqual(2, len(root.handlers), root.handlers)
log = logging.getLogger(__name__)
log.info("at info")
log.debug("at debug")
self.assertIn("at info", stdlog.logger.output)
self.assertNotIn("at debug", stdlog.logger.output)
# broken debug messages should still explode, even though we
# aren't logging them in the regular handler
self.assertRaises(TypeError, log.debug, "this is broken %s %s", "foo")
# and, ensure that one of the terrible log messages isn't
# output at info
warn_log = logging.getLogger('migrate.versioning.api')
warn_log.info("warn_log at info, should be skipped")
warn_log.error("warn_log at error")
self.assertIn("warn_log at error", stdlog.logger.output)
self.assertNotIn("warn_log at info", stdlog.logger.output)
def test_debug_logging(self):
self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '1'))
stdlog = self.useFixture(fixtures.StandardLogging())
root = logging.getLogger()
# there should no longer be a null handler
self.assertEqual(1, len(root.handlers), root.handlers)
log = logging.getLogger(__name__)
log.info("at info")
log.debug("at debug")
self.assertIn("at info", stdlog.logger.output)
self.assertIn("at debug", stdlog.logger.output)
|
Copy unit tests for StandardLogging fixture from Nova
|
Copy unit tests for StandardLogging fixture from Nova
This comes from commit f96ec4411ce89606cf52211061003c14306dcfa1
in Nova by Sean Dague <sean@dague.net>.
The StandardLogging fixture was already merged into Cinder,
this adds the unit tests that were missed when copying over
the fixture.
Change-Id: I2fbe25ec71138e4b96ff175af72a2a56c1c8f52a
Related-Bug: #1551325
|
Python
|
apache-2.0
|
Nexenta/cinder,bswartz/cinder,NetApp/cinder,mahak/cinder,Nexenta/cinder,phenoxim/cinder,Datera/cinder,openstack/cinder,cloudbase/cinder,Hybrid-Cloud/cinder,phenoxim/cinder,NetApp/cinder,Datera/cinder,cloudbase/cinder,j-griffith/cinder,openstack/cinder,mahak/cinder,ge0rgi/cinder,Hybrid-Cloud/cinder,bswartz/cinder,j-griffith/cinder,eharney/cinder,eharney/cinder
|
Copy unit tests for StandardLogging fixture from Nova
This comes from commit f96ec4411ce89606cf52211061003c14306dcfa1
in Nova by Sean Dague <sean@dague.net>.
The StandardLogging fixture was already merged into Cinder,
this adds the unit tests that were missed when copying over
the fixture.
Change-Id: I2fbe25ec71138e4b96ff175af72a2a56c1c8f52a
Related-Bug: #1551325
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures as fx
from oslo_log import log as logging
import testtools
from cinder.tests import fixtures
class TestLogging(testtools.TestCase):
def test_default_logging(self):
stdlog = self.useFixture(fixtures.StandardLogging())
root = logging.getLogger()
# there should be a null handler as well at DEBUG
self.assertEqual(2, len(root.handlers), root.handlers)
log = logging.getLogger(__name__)
log.info("at info")
log.debug("at debug")
self.assertIn("at info", stdlog.logger.output)
self.assertNotIn("at debug", stdlog.logger.output)
# broken debug messages should still explode, even though we
# aren't logging them in the regular handler
self.assertRaises(TypeError, log.debug, "this is broken %s %s", "foo")
# and, ensure that one of the terrible log messages isn't
# output at info
warn_log = logging.getLogger('migrate.versioning.api')
warn_log.info("warn_log at info, should be skipped")
warn_log.error("warn_log at error")
self.assertIn("warn_log at error", stdlog.logger.output)
self.assertNotIn("warn_log at info", stdlog.logger.output)
def test_debug_logging(self):
self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '1'))
stdlog = self.useFixture(fixtures.StandardLogging())
root = logging.getLogger()
# there should no longer be a null handler
self.assertEqual(1, len(root.handlers), root.handlers)
log = logging.getLogger(__name__)
log.info("at info")
log.debug("at debug")
self.assertIn("at info", stdlog.logger.output)
self.assertIn("at debug", stdlog.logger.output)
|
<commit_before><commit_msg>Copy unit tests for StandardLogging fixture from Nova
This comes from commit f96ec4411ce89606cf52211061003c14306dcfa1
in Nova by Sean Dague <sean@dague.net>.
The StandardLogging fixture was already merged into Cinder,
this adds the unit tests that were missed when copying over
the fixture.
Change-Id: I2fbe25ec71138e4b96ff175af72a2a56c1c8f52a
Related-Bug: #1551325<commit_after>
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures as fx
from oslo_log import log as logging
import testtools
from cinder.tests import fixtures
class TestLogging(testtools.TestCase):
def test_default_logging(self):
stdlog = self.useFixture(fixtures.StandardLogging())
root = logging.getLogger()
# there should be a null handler as well at DEBUG
self.assertEqual(2, len(root.handlers), root.handlers)
log = logging.getLogger(__name__)
log.info("at info")
log.debug("at debug")
self.assertIn("at info", stdlog.logger.output)
self.assertNotIn("at debug", stdlog.logger.output)
# broken debug messages should still explode, even though we
# aren't logging them in the regular handler
self.assertRaises(TypeError, log.debug, "this is broken %s %s", "foo")
# and, ensure that one of the terrible log messages isn't
# output at info
warn_log = logging.getLogger('migrate.versioning.api')
warn_log.info("warn_log at info, should be skipped")
warn_log.error("warn_log at error")
self.assertIn("warn_log at error", stdlog.logger.output)
self.assertNotIn("warn_log at info", stdlog.logger.output)
def test_debug_logging(self):
self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '1'))
stdlog = self.useFixture(fixtures.StandardLogging())
root = logging.getLogger()
# there should no longer be a null handler
self.assertEqual(1, len(root.handlers), root.handlers)
log = logging.getLogger(__name__)
log.info("at info")
log.debug("at debug")
self.assertIn("at info", stdlog.logger.output)
self.assertIn("at debug", stdlog.logger.output)
|
Copy unit tests for StandardLogging fixture from Nova
This comes from commit f96ec4411ce89606cf52211061003c14306dcfa1
in Nova by Sean Dague <sean@dague.net>.
The StandardLogging fixture was already merged into Cinder,
this adds the unit tests that were missed when copying over
the fixture.
Change-Id: I2fbe25ec71138e4b96ff175af72a2a56c1c8f52a
Related-Bug: #1551325# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures as fx
from oslo_log import log as logging
import testtools
from cinder.tests import fixtures
class TestLogging(testtools.TestCase):
def test_default_logging(self):
stdlog = self.useFixture(fixtures.StandardLogging())
root = logging.getLogger()
# there should be a null handler as well at DEBUG
self.assertEqual(2, len(root.handlers), root.handlers)
log = logging.getLogger(__name__)
log.info("at info")
log.debug("at debug")
self.assertIn("at info", stdlog.logger.output)
self.assertNotIn("at debug", stdlog.logger.output)
# broken debug messages should still explode, even though we
# aren't logging them in the regular handler
self.assertRaises(TypeError, log.debug, "this is broken %s %s", "foo")
# and, ensure that one of the terrible log messages isn't
# output at info
warn_log = logging.getLogger('migrate.versioning.api')
warn_log.info("warn_log at info, should be skipped")
warn_log.error("warn_log at error")
self.assertIn("warn_log at error", stdlog.logger.output)
self.assertNotIn("warn_log at info", stdlog.logger.output)
def test_debug_logging(self):
self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '1'))
stdlog = self.useFixture(fixtures.StandardLogging())
root = logging.getLogger()
# there should no longer be a null handler
self.assertEqual(1, len(root.handlers), root.handlers)
log = logging.getLogger(__name__)
log.info("at info")
log.debug("at debug")
self.assertIn("at info", stdlog.logger.output)
self.assertIn("at debug", stdlog.logger.output)
|
<commit_before><commit_msg>Copy unit tests for StandardLogging fixture from Nova
This comes from commit f96ec4411ce89606cf52211061003c14306dcfa1
in Nova by Sean Dague <sean@dague.net>.
The StandardLogging fixture was already merged into Cinder,
this adds the unit tests that were missed when copying over
the fixture.
Change-Id: I2fbe25ec71138e4b96ff175af72a2a56c1c8f52a
Related-Bug: #1551325<commit_after># Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures as fx
from oslo_log import log as logging
import testtools
from cinder.tests import fixtures
class TestLogging(testtools.TestCase):
def test_default_logging(self):
stdlog = self.useFixture(fixtures.StandardLogging())
root = logging.getLogger()
# there should be a null handler as well at DEBUG
self.assertEqual(2, len(root.handlers), root.handlers)
log = logging.getLogger(__name__)
log.info("at info")
log.debug("at debug")
self.assertIn("at info", stdlog.logger.output)
self.assertNotIn("at debug", stdlog.logger.output)
# broken debug messages should still explode, even though we
# aren't logging them in the regular handler
self.assertRaises(TypeError, log.debug, "this is broken %s %s", "foo")
# and, ensure that one of the terrible log messages isn't
# output at info
warn_log = logging.getLogger('migrate.versioning.api')
warn_log.info("warn_log at info, should be skipped")
warn_log.error("warn_log at error")
self.assertIn("warn_log at error", stdlog.logger.output)
self.assertNotIn("warn_log at info", stdlog.logger.output)
def test_debug_logging(self):
self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '1'))
stdlog = self.useFixture(fixtures.StandardLogging())
root = logging.getLogger()
# there should no longer be a null handler
self.assertEqual(1, len(root.handlers), root.handlers)
log = logging.getLogger(__name__)
log.info("at info")
log.debug("at debug")
self.assertIn("at info", stdlog.logger.output)
self.assertIn("at debug", stdlog.logger.output)
|
|
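Aside on the test_fixtures.py record above: the debug test flips OS_DEBUG through fixtures.EnvironmentVariable and relies on testtools' useFixture() for automatic cleanup. A minimal standalone illustration of that fixture pattern, nothing Cinder-specific:

import os

import fixtures
import testtools


class EnvFlagTest(testtools.TestCase):
    def test_flag_visible_inside_test(self):
        # useFixture() activates the fixture and registers its cleanup, so the
        # variable is restored to its previous value when the test finishes.
        self.useFixture(fixtures.EnvironmentVariable('OS_DEBUG', '1'))
        self.assertEqual('1', os.environ.get('OS_DEBUG'))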
a522ca9af1fd7333cbd7c925596a3c66a7233b90
|
euler027.py
|
euler027.py
|
#!/usr/bin/python
"""
I don't have found a clever solution, this is a brute force analysis
"""
from math import sqrt, ceil
prime_list = [0] * 20000
def isPrime(x):
if x < 0:
return 0
if x % 2 == 0:
return 0
if prime_list[x]:
return 1
for i in range(3, ceil(sqrt(x)), 2):
if x % i == 0:
return 0
prime_list[x] = 1
return 1
max_c, max_a, max_b = 0, 0, 0
for a in range(-999, 1000):
for b in range(-999, 1000):
c = 0
test = c ** 2 + a * c + b
while isPrime(test):
c += 1
test = c ** 2 + a * c + b
if c - 1 > max_c:
max_c, max_b, max_a = c - 1, b, a
print(max_a, max_b, max_c, max_a * max_b)
|
Add solution for problem 27
|
Add solution for problem 27
|
Python
|
mit
|
cifvts/PyEuler
|
Add solution for problem 27
|
#!/usr/bin/python
"""
I don't have found a clever solution, this is a brute force analysis
"""
from math import sqrt, ceil
prime_list = [0] * 20000
def isPrime(x):
if x < 0:
return 0
if x % 2 == 0:
return 0
if prime_list[x]:
return 1
for i in range(3, ceil(sqrt(x)), 2):
if x % i == 0:
return 0
prime_list[x] = 1
return 1
max_c, max_a, max_b = 0, 0, 0
for a in range(-999, 1000):
for b in range(-999, 1000):
c = 0
test = c ** 2 + a * c + b
while isPrime(test):
c += 1
test = c ** 2 + a * c + b
if c - 1 > max_c:
max_c, max_b, max_a = c - 1, b, a
print(max_a, max_b, max_c, max_a * max_b)
|
<commit_before><commit_msg>Add solution for problem 27<commit_after>
|
#!/usr/bin/python
"""
I don't have found a clever solution, this is a brute force analysis
"""
from math import sqrt, ceil
prime_list = [0] * 20000
def isPrime(x):
if x < 0:
return 0
if x % 2 == 0:
return 0
if prime_list[x]:
return 1
for i in range(3, ceil(sqrt(x)), 2):
if x % i == 0:
return 0
prime_list[x] = 1
return 1
max_c, max_a, max_b = 0, 0, 0
for a in range(-999, 1000):
for b in range(-999, 1000):
c = 0
test = c ** 2 + a * c + b
while isPrime(test):
c += 1
test = c ** 2 + a * c + b
if c - 1 > max_c:
max_c, max_b, max_a = c - 1, b, a
print(max_a, max_b, max_c, max_a * max_b)
|
Add solution for problem 27#!/usr/bin/python
"""
I don't have found a clever solution, this is a brute force analysis
"""
from math import sqrt, ceil
prime_list = [0] * 20000
def isPrime(x):
if x < 0:
return 0
if x % 2 == 0:
return 0
if prime_list[x]:
return 1
for i in range(3, ceil(sqrt(x)), 2):
if x % i == 0:
return 0
prime_list[x] = 1
return 1
max_c, max_a, max_b = 0, 0, 0
for a in range(-999, 1000):
for b in range(-999, 1000):
c = 0
test = c ** 2 + a * c + b
while isPrime(test):
c += 1
test = c ** 2 + a * c + b
if c - 1 > max_c:
max_c, max_b, max_a = c - 1, b, a
print(max_a, max_b, max_c, max_a * max_b)
|
<commit_before><commit_msg>Add solution for problem 27<commit_after>#!/usr/bin/python
"""
I don't have found a clever solution, this is a brute force analysis
"""
from math import sqrt, ceil
prime_list = [0] * 20000
def isPrime(x):
if x < 0:
return 0
if x % 2 == 0:
return 0
if prime_list[x]:
return 1
for i in range(3, ceil(sqrt(x)), 2):
if x % i == 0:
return 0
prime_list[x] = 1
return 1
max_c, max_a, max_b = 0, 0, 0
for a in range(-999, 1000):
for b in range(-999, 1000):
c = 0
test = c ** 2 + a * c + b
while isPrime(test):
c += 1
test = c ** 2 + a * c + b
if c - 1 > max_c:
max_c, max_b, max_a = c - 1, b, a
print(max_a, max_b, max_c, max_a * max_b)
|
|
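Aside on the euler027.py record above: the memoised isPrime() leans on a fixed-size cache and skips some small-number edge cases (it reports 1 as prime and 2 as non-prime). A tighter standalone check in the same spirit, shown only for comparison with the committed brute force:

from math import isqrt

def is_prime(x):
    # Handle x < 2 and x == 2 explicitly before trial division by odd numbers.
    if x < 2:
        return False
    if x % 2 == 0:
        return x == 2
    return all(x % d for d in range(3, isqrt(x) + 1, 2))

# For n = 0 the polynomial n*n + a*n + b is just b, so b itself must be prime
# for any (a, b) pair to start a chain at all.
print(is_prime(41), is_prime(1), is_prime(2))  # True False True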
0a0982c460f786c4f02cd99877eefc90b7b6b51b
|
backend/course/same-replace.py
|
backend/course/same-replace.py
|
import json
import getpass
import logging.config
from . import mysnu
from django.conf import settings
logging.config.dictConfig(settings.LOGGING)
def crawl():
userid = input('mySNU userid: ')
password = getpass.getpass('mySNU password: ')
session = mysnu.login(userid, password)
if session is None: # Fail to login
return None
with session as s:
same_url = 'https://shine.snu.ac.kr/uni/uni/cour/curr/findSameSubstSbjtInqStd.action'
replace_url = 'https://shine.snu.ac.kr/uni/uni/cour/curr/findSameSubstSbjtInqStdList.action'
params = {'cscLocale': 'ko_KR', 'strPgmCd': 'S030205'}
headers = {'Content-Type': 'application/extJs+sua; charset=UTF-8'}
payload = {
"SNU": {
"strSameSubstGrpFg": "",
"rowStatus": "insert",
"strDetaBussCd": "A0071",
"strSchyy": "2016",
"strShtmFg": "U000200002",
"strDetaShtmFg": "U000300002",
"strShtmDetaShtmFg": "U000200002U000300002",
"strBdegrSystemFg": "U000100001",
"strSbjtCd": "",
"strSbjtNm": ""
}
}
same_res = s.post(same_url, params=params, headers=headers, data=json.dumps(payload))
same_courses = json.loads(same_res.text)['GRD_COUR102']
same = [{'code': c['sbjtCd'], 'group': c['sameSubstGrpNo']} for c in same_courses]
replace_res = s.post(replace_url, params=params, headers=headers, data=json.dumps(payload))
replace_courses = json.loads(replace_res.text)['GRD_COUR102']
replace = [{'code_from': c['sbjtCd'], 'code_to': c['substSbjtCd']} for c in replace_courses]
return {'same': same, 'replace': replace}
|
Implement same&replace courses info crawler
|
Implement same&replace courses info crawler
|
Python
|
mit
|
Jhuni0123/graduate-adventure,dnsdhrj/graduate-adventure,MKRoughDiamond/graduate-adventure,skystar-p/graduate-adventure,skystar-p/graduate-adventure,skystar-p/graduate-adventure,LastOne817/graduate-adventure,skystar-p/graduate-adventure,dnsdhrj/graduate-adventure,Jhuni0123/graduate-adventure,LastOne817/graduate-adventure,LastOne817/graduate-adventure,MKRoughDiamond/graduate-adventure,Jhuni0123/graduate-adventure,Jhuni0123/graduate-adventure
|
Implement same&replace courses info crawler
|
import json
import getpass
import logging.config
from . import mysnu
from django.conf import settings
logging.config.dictConfig(settings.LOGGING)
def crawl():
userid = input('mySNU userid: ')
password = getpass.getpass('mySNU password: ')
session = mysnu.login(userid, password)
if session is None: # Fail to login
return None
with session as s:
same_url = 'https://shine.snu.ac.kr/uni/uni/cour/curr/findSameSubstSbjtInqStd.action'
replace_url = 'https://shine.snu.ac.kr/uni/uni/cour/curr/findSameSubstSbjtInqStdList.action'
params = {'cscLocale': 'ko_KR', 'strPgmCd': 'S030205'}
headers = {'Content-Type': 'application/extJs+sua; charset=UTF-8'}
payload = {
"SNU": {
"strSameSubstGrpFg": "",
"rowStatus": "insert",
"strDetaBussCd": "A0071",
"strSchyy": "2016",
"strShtmFg": "U000200002",
"strDetaShtmFg": "U000300002",
"strShtmDetaShtmFg": "U000200002U000300002",
"strBdegrSystemFg": "U000100001",
"strSbjtCd": "",
"strSbjtNm": ""
}
}
same_res = s.post(same_url, params=params, headers=headers, data=json.dumps(payload))
same_courses = json.loads(same_res.text)['GRD_COUR102']
same = [{'code': c['sbjtCd'], 'group': c['sameSubstGrpNo']} for c in same_courses]
replace_res = s.post(replace_url, params=params, headers=headers, data=json.dumps(payload))
replace_courses = json.loads(replace_res.text)['GRD_COUR102']
replace = [{'code_from': c['sbjtCd'], 'code_to': c['substSbjtCd']} for c in replace_courses]
return {'same': same, 'replace': replace}
|
<commit_before><commit_msg>Implement same&replace courses info crawler<commit_after>
|
import json
import getpass
import logging.config
from . import mysnu
from django.conf import settings
logging.config.dictConfig(settings.LOGGING)
def crawl():
userid = input('mySNU userid: ')
password = getpass.getpass('mySNU password: ')
session = mysnu.login(userid, password)
if session is None: # Fail to login
return None
with session as s:
same_url = 'https://shine.snu.ac.kr/uni/uni/cour/curr/findSameSubstSbjtInqStd.action'
replace_url = 'https://shine.snu.ac.kr/uni/uni/cour/curr/findSameSubstSbjtInqStdList.action'
params = {'cscLocale': 'ko_KR', 'strPgmCd': 'S030205'}
headers = {'Content-Type': 'application/extJs+sua; charset=UTF-8'}
payload = {
"SNU": {
"strSameSubstGrpFg": "",
"rowStatus": "insert",
"strDetaBussCd": "A0071",
"strSchyy": "2016",
"strShtmFg": "U000200002",
"strDetaShtmFg": "U000300002",
"strShtmDetaShtmFg": "U000200002U000300002",
"strBdegrSystemFg": "U000100001",
"strSbjtCd": "",
"strSbjtNm": ""
}
}
same_res = s.post(same_url, params=params, headers=headers, data=json.dumps(payload))
same_courses = json.loads(same_res.text)['GRD_COUR102']
same = [{'code': c['sbjtCd'], 'group': c['sameSubstGrpNo']} for c in same_courses]
replace_res = s.post(replace_url, params=params, headers=headers, data=json.dumps(payload))
replace_courses = json.loads(replace_res.text)['GRD_COUR102']
replace = [{'code_from': c['sbjtCd'], 'code_to': c['substSbjtCd']} for c in replace_courses]
return {'same': same, 'replace': replace}
|
Implement same&replace courses info crawlerimport json
import getpass
import logging.config
from . import mysnu
from django.conf import settings
logging.config.dictConfig(settings.LOGGING)
def crawl():
userid = input('mySNU userid: ')
password = getpass.getpass('mySNU password: ')
session = mysnu.login(userid, password)
if session is None: # Fail to login
return None
with session as s:
same_url = 'https://shine.snu.ac.kr/uni/uni/cour/curr/findSameSubstSbjtInqStd.action'
replace_url = 'https://shine.snu.ac.kr/uni/uni/cour/curr/findSameSubstSbjtInqStdList.action'
params = {'cscLocale': 'ko_KR', 'strPgmCd': 'S030205'}
headers = {'Content-Type': 'application/extJs+sua; charset=UTF-8'}
payload = {
"SNU": {
"strSameSubstGrpFg": "",
"rowStatus": "insert",
"strDetaBussCd": "A0071",
"strSchyy": "2016",
"strShtmFg": "U000200002",
"strDetaShtmFg": "U000300002",
"strShtmDetaShtmFg": "U000200002U000300002",
"strBdegrSystemFg": "U000100001",
"strSbjtCd": "",
"strSbjtNm": ""
}
}
same_res = s.post(same_url, params=params, headers=headers, data=json.dumps(payload))
same_courses = json.loads(same_res.text)['GRD_COUR102']
same = [{'code': c['sbjtCd'], 'group': c['sameSubstGrpNo']} for c in same_courses]
replace_res = s.post(replace_url, params=params, headers=headers, data=json.dumps(payload))
replace_courses = json.loads(replace_res.text)['GRD_COUR102']
replace = [{'code_from': c['sbjtCd'], 'code_to': c['substSbjtCd']} for c in replace_courses]
return {'same': same, 'replace': replace}
|
<commit_before><commit_msg>Implement same&replace courses info crawler<commit_after>import json
import getpass
import logging.config
from . import mysnu
from django.conf import settings
logging.config.dictConfig(settings.LOGGING)
def crawl():
userid = input('mySNU userid: ')
password = getpass.getpass('mySNU password: ')
session = mysnu.login(userid, password)
if session is None: # Failed to log in
return None
with session as s:
same_url = 'https://shine.snu.ac.kr/uni/uni/cour/curr/findSameSubstSbjtInqStd.action'
replace_url = 'https://shine.snu.ac.kr/uni/uni/cour/curr/findSameSubstSbjtInqStdList.action'
params = {'cscLocale': 'ko_KR', 'strPgmCd': 'S030205'}
headers = {'Content-Type': 'application/extJs+sua; charset=UTF-8'}
payload = {
"SNU": {
"strSameSubstGrpFg": "",
"rowStatus": "insert",
"strDetaBussCd": "A0071",
"strSchyy": "2016",
"strShtmFg": "U000200002",
"strDetaShtmFg": "U000300002",
"strShtmDetaShtmFg": "U000200002U000300002",
"strBdegrSystemFg": "U000100001",
"strSbjtCd": "",
"strSbjtNm": ""
}
}
same_res = s.post(same_url, params=params, headers=headers, data=json.dumps(payload))
same_courses = json.loads(same_res.text)['GRD_COUR102']
same = [{'code': c['sbjtCd'], 'group': c['sameSubstGrpNo']} for c in same_courses]
replace_res = s.post(replace_url, params=params, headers=headers, data=json.dumps(payload))
replace_courses = json.loads(replace_res.text)['GRD_COUR102']
replace = [{'code_from': c['sbjtCd'], 'code_to': c['substSbjtCd']} for c in replace_courses]
return {'same': same, 'replace': replace}
|
|
035e107af64549c4ad39084e36e6bd2263ee3e02
|
tools/perf/measurements/record_per_area.py
|
tools/perf/measurements/record_per_area.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
from metrics import smoothness
from telemetry.core import util
from telemetry.page import page_measurement
class RecordPerArea(page_measurement.PageMeasurement):
def __init__(self):
super(RecordPerArea, self).__init__('', True)
def AddCommandLineOptions(self, parser):
parser.add_option('--start-wait-time', dest='start_wait_time',
default=2,
help='Wait time before the benchmark is started ' +
'(must be long enough to load all content)')
def CustomizeBrowserOptions(self, options):
smoothness.SmoothnessMetrics.CustomizeBrowserOptions(options)
options.AppendExtraBrowserArgs([
'--enable-impl-side-painting',
'--force-compositing-mode',
'--enable-threaded-compositing',
'--enable-gpu-benchmarking'
])
def MeasurePage(self, page, tab, results):
# Wait until the page has loaded and come to a somewhat steady state.
# Needs to be adjusted for every device (~2 seconds for workstation).
time.sleep(float(self.options.start_wait_time))
# Enqueue benchmark
tab.ExecuteJavaScript("""
window.benchmark_results = {};
window.benchmark_results.done = false;
chrome.gpuBenchmarking.runMicroBenchmark(
"picture_record_benchmark",
function(value) {
window.benchmark_results.done = true;
window.benchmark_results.results = value;
});
""")
def _IsDone():
return tab.EvaluateJavaScript(
'window.benchmark_results.done', timeout=120)
util.WaitFor(_IsDone, timeout=120)
all_data = tab.EvaluateJavaScript('window.benchmark_results.results')
for data in all_data:
results.Add('time_for_area_%07d' % (data['area']), 'ms', data['time_ms'])
|
Add record per area measurement.
|
telemetry: Add record per area measurement.
This patch adds a record per area measurement which hooks into
picture record microbenchmark.
R=nduca@chromium.org
NOTRY=True
Review URL: https://codereview.chromium.org/27051005
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@228801 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
dushu1203/chromium.src,fujunwei/chromium-crosswalk,Just-D/chromium-1,anirudhSK/chromium,ChromiumWebApps/chromium,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,jaruba/chromium.src,ChromiumWebApps/chromium,anirudhSK/chromium,Chilledheart/chromium,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,patrickm/chromium.src,ondra-novak/chromium.src,dednal/chromium.src,patrickm/chromium.src,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,markYoungH/chromium.src,patrickm/chromium.src,mohamed--abdel-maksoud/chromium.src,fujunwei/chromium-crosswalk,Just-D/chromium-1,Chilledheart/chromium,anirudhSK/chromium,patrickm/chromium.src,Just-D/chromium-1,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,Chilledheart/chromium,ChromiumWebApps/chromium,chuan9/chromium-crosswalk,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,bright-sparks/chromium-spacewalk,jaruba/chromium.src,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,markYoungH/chromium.src,M4sse/chromium.src,ondra-novak/chromium.src,Fireblend/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,M4sse/chromium.src,anirudhSK/chromium,Jonekee/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,anirudhSK/chromium,ondra-novak/chromium.src,Just-D/chromium-1,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,markYoungH/chromium.src,littlstar/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,Jonekee/chromium.src,fujunwei/chromium-crosswalk,littlstar/chromium.src,hgl888/chromium-crosswalk,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,anirudhSK/chromium,krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,littlstar/chromium.src,littlstar/chromium.src,ltilve/chromium,PeterWangIntel/chromium-crosswalk,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,anirudhSK/chromium,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,M4sse/chromium.src,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,TheTypoMaster/chromium-crosswalk,patrickm/chromium.src,fujunwei/chromium-crosswalk,bright-sparks/chromium-spacewalk,ltilve/chromium,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,axinging/chromium-crosswalk,jaruba/chromium.src,ltilve/chromium,Just-D/chromium-1,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,patrickm/chromium.src,Jonekee/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,Jonekee/chromium.src,anirudhSK/chromium,ChromiumWebApps/chromium,Chilledheart/chromium,M4sse/chromium.src,ltilve/chromium,hgl888/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,anirudhSK/chromium,crosswalk-project/chromium-crosswalk-efl,chuan9/chromium-crosswalk,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,krieger-od/nwjs_chromium.src,Chilledheart/chromium,markYoungH/chromium.src,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,ltilve/chromium,bright-sparks/chromium-spacewalk,dednal/chromium.src,crosswalk-project/chromium-crosswalk-efl,dushu1203/chromium.src,jaruba/chromium.src,dushu1203/chromium.src,axinging/chromium-crosswalk,ondra-novak/chromium.src,littlstar/c
hromium.src,ondra-novak/chromium.src,axinging/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,fujunwei/chromium-crosswalk,M4sse/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,axinging/chromium-crosswalk,dushu1203/chromium.src,TheTypoMaster/chromium-crosswalk,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,patrickm/chromium.src,markYoungH/chromium.src,ltilve/chromium,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,jaruba/chromium.src,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,crosswalk-project/chromium-crosswalk-efl,dednal/chromium.src,jaruba/chromium.src,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,Just-D/chromium-1,Chilledheart/chromium,hgl888/chromium-crosswalk,ltilve/chromium,anirudhSK/chromium,Pluto-tv/chromium-crosswalk,fujunwei/chromium-crosswalk,dushu1203/chromium.src,dednal/chromium.src,ChromiumWebApps/chromium,ChromiumWebApps/chromium,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,dednal/chromium.src,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,chuan9/chromium-crosswalk,Just-D/chromium-1,dednal/chromium.src,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,krieger-od/nwjs_chromium.src,Jonekee/chromium.src,dushu1203/chromium.src,ChromiumWebApps/chromium,jaruba/chromium.src,M4sse/chromium.src,dushu1203/chromium.src,hgl888/chromium-crosswalk-efl,ltilve/chromium,anirudhSK/chromium,ChromiumWebApps/chromium,patrickm/chromium.src,M4sse/chromium.src,patrickm/chromium.src,Fireblend/chromium-crosswalk,dednal/chromium.src,M4sse/chromium.src,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,Chilledheart/chromium,markYoungH/chromium.src,anirudhSK/chromium,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,M4sse/chromium.src,jaruba/chromium.src,ondra-novak/chromium.src,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,Just-D/chromium-1,Jonekee/chromium.src,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,PeterWangIntel/chromium-crosswalk,ChromiumWebApps/chromium,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,dednal/chromium.src,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,ltilve/chromium,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,dednal/chromium.src
|
telemetry: Add record per area measurement.
This patch adds a record per area measurement which hooks into
picture record microbenchmark.
R=nduca@chromium.org
NOTRY=True
Review URL: https://codereview.chromium.org/27051005
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@228801 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
from metrics import smoothness
from telemetry.core import util
from telemetry.page import page_measurement
class RecordPerArea(page_measurement.PageMeasurement):
def __init__(self):
super(RecordPerArea, self).__init__('', True)
def AddCommandLineOptions(self, parser):
parser.add_option('--start-wait-time', dest='start_wait_time',
default=2,
help='Wait time before the benchmark is started ' +
'(must be long enough to load all content)')
def CustomizeBrowserOptions(self, options):
smoothness.SmoothnessMetrics.CustomizeBrowserOptions(options)
options.AppendExtraBrowserArgs([
'--enable-impl-side-painting',
'--force-compositing-mode',
'--enable-threaded-compositing',
'--enable-gpu-benchmarking'
])
def MeasurePage(self, page, tab, results):
# Wait until the page has loaded and come to a somewhat steady state.
# Needs to be adjusted for every device (~2 seconds for workstation).
time.sleep(float(self.options.start_wait_time))
# Enqueue benchmark
tab.ExecuteJavaScript("""
window.benchmark_results = {};
window.benchmark_results.done = false;
chrome.gpuBenchmarking.runMicroBenchmark(
"picture_record_benchmark",
function(value) {
window.benchmark_results.done = true;
window.benchmark_results.results = value;
});
""")
def _IsDone():
return tab.EvaluateJavaScript(
'window.benchmark_results.done', timeout=120)
util.WaitFor(_IsDone, timeout=120)
all_data = tab.EvaluateJavaScript('window.benchmark_results.results')
for data in all_data:
results.Add('time_for_area_%07d' % (data['area']), 'ms', data['time_ms'])
|
<commit_before><commit_msg>telemetry: Add record per area measurement.
This patch adds a record per area measurement which hooks into
picture record microbenchmark.
R=nduca@chromium.org
NOTRY=True
Review URL: https://codereview.chromium.org/27051005
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@228801 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
from metrics import smoothness
from telemetry.core import util
from telemetry.page import page_measurement
class RecordPerArea(page_measurement.PageMeasurement):
def __init__(self):
super(RecordPerArea, self).__init__('', True)
def AddCommandLineOptions(self, parser):
parser.add_option('--start-wait-time', dest='start_wait_time',
default=2,
help='Wait time before the benchmark is started ' +
'(must be long enough to load all content)')
def CustomizeBrowserOptions(self, options):
smoothness.SmoothnessMetrics.CustomizeBrowserOptions(options)
options.AppendExtraBrowserArgs([
'--enable-impl-side-painting',
'--force-compositing-mode',
'--enable-threaded-compositing',
'--enable-gpu-benchmarking'
])
def MeasurePage(self, page, tab, results):
# Wait until the page has loaded and come to a somewhat steady state.
# Needs to be adjusted for every device (~2 seconds for workstation).
time.sleep(float(self.options.start_wait_time))
# Enqueue benchmark
tab.ExecuteJavaScript("""
window.benchmark_results = {};
window.benchmark_results.done = false;
chrome.gpuBenchmarking.runMicroBenchmark(
"picture_record_benchmark",
function(value) {
window.benchmark_results.done = true;
window.benchmark_results.results = value;
});
""")
def _IsDone():
return tab.EvaluateJavaScript(
'window.benchmark_results.done', timeout=120)
util.WaitFor(_IsDone, timeout=120)
all_data = tab.EvaluateJavaScript('window.benchmark_results.results')
for data in all_data:
results.Add('time_for_area_%07d' % (data['area']), 'ms', data['time_ms'])
|
telemetry: Add record per area measurement.
This patch adds a record per area measurement which hooks into
picture record microbenchmark.
R=nduca@chromium.org
NOTRY=True
Review URL: https://codereview.chromium.org/27051005
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@228801 0039d316-1c4b-4281-b951-d872f2087c98# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
from metrics import smoothness
from telemetry.core import util
from telemetry.page import page_measurement
class RecordPerArea(page_measurement.PageMeasurement):
def __init__(self):
super(RecordPerArea, self).__init__('', True)
def AddCommandLineOptions(self, parser):
parser.add_option('--start-wait-time', dest='start_wait_time',
default=2,
help='Wait time before the benchmark is started ' +
'(must be long enough to load all content)')
def CustomizeBrowserOptions(self, options):
smoothness.SmoothnessMetrics.CustomizeBrowserOptions(options)
options.AppendExtraBrowserArgs([
'--enable-impl-side-painting',
'--force-compositing-mode',
'--enable-threaded-compositing',
'--enable-gpu-benchmarking'
])
def MeasurePage(self, page, tab, results):
# Wait until the page has loaded and come to a somewhat steady state.
# Needs to be adjusted for every device (~2 seconds for workstation).
time.sleep(float(self.options.start_wait_time))
# Enqueue benchmark
tab.ExecuteJavaScript("""
window.benchmark_results = {};
window.benchmark_results.done = false;
chrome.gpuBenchmarking.runMicroBenchmark(
"picture_record_benchmark",
function(value) {
window.benchmark_results.done = true;
window.benchmark_results.results = value;
});
""")
def _IsDone():
return tab.EvaluateJavaScript(
'window.benchmark_results.done', timeout=120)
util.WaitFor(_IsDone, timeout=120)
all_data = tab.EvaluateJavaScript('window.benchmark_results.results')
for data in all_data:
results.Add('time_for_area_%07d' % (data['area']), 'ms', data['time_ms'])
|
<commit_before><commit_msg>telemetry: Add record per area measurement.
This patch adds a record per area measurement which hooks into
picture record microbenchmark.
R=nduca@chromium.org
NOTRY=True
Review URL: https://codereview.chromium.org/27051005
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@228801 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
from metrics import smoothness
from telemetry.core import util
from telemetry.page import page_measurement
class RecordPerArea(page_measurement.PageMeasurement):
def __init__(self):
super(RecordPerArea, self).__init__('', True)
def AddCommandLineOptions(self, parser):
parser.add_option('--start-wait-time', dest='start_wait_time',
default=2,
help='Wait time before the benchmark is started ' +
'(must be long enough to load all content)')
def CustomizeBrowserOptions(self, options):
smoothness.SmoothnessMetrics.CustomizeBrowserOptions(options)
options.AppendExtraBrowserArgs([
'--enable-impl-side-painting',
'--force-compositing-mode',
'--enable-threaded-compositing',
'--enable-gpu-benchmarking'
])
def MeasurePage(self, page, tab, results):
# Wait until the page has loaded and come to a somewhat steady state.
# Needs to be adjusted for every device (~2 seconds for workstation).
time.sleep(float(self.options.start_wait_time))
# Enqueue benchmark
tab.ExecuteJavaScript("""
window.benchmark_results = {};
window.benchmark_results.done = false;
chrome.gpuBenchmarking.runMicroBenchmark(
"picture_record_benchmark",
function(value) {
window.benchmark_results.done = true;
window.benchmark_results.results = value;
});
""")
def _IsDone():
return tab.EvaluateJavaScript(
'window.benchmark_results.done', timeout=120)
util.WaitFor(_IsDone, timeout=120)
all_data = tab.EvaluateJavaScript('window.benchmark_results.results')
for data in all_data:
results.Add('time_for_area_%07d' % (data['area']), 'ms', data['time_ms'])
|
|
2a1adeaa61e61531f8f69a459b098b4ecf147941
|
tornado/test/__init__.py
|
tornado/test/__init__.py
|
import asyncio
import sys
# Use the selector event loop on windows. Do this in tornado/test/__init__.py
# instead of runtests.py so it happens no matter how the test is run (such as
# through editor integrations).
if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) # type: ignore
|
Use selector event loop on windows.
|
test: Use selector event loop on windows.
This gets most of the tests working again on windows with py38.
|
Python
|
apache-2.0
|
bdarnell/tornado,dongpinglai/my_tornado,tornadoweb/tornado,bdarnell/tornado,bdarnell/tornado,lilydjwg/tornado,tornadoweb/tornado,dongpinglai/my_tornado,allenl203/tornado,bdarnell/tornado,mivade/tornado,allenl203/tornado,allenl203/tornado,mivade/tornado,allenl203/tornado,mivade/tornado,tornadoweb/tornado,lilydjwg/tornado,lilydjwg/tornado,mivade/tornado,bdarnell/tornado,mivade/tornado,lilydjwg/tornado,allenl203/tornado,dongpinglai/my_tornado,dongpinglai/my_tornado,tornadoweb/tornado,dongpinglai/my_tornado,dongpinglai/my_tornado
|
test: Use selector event loop on windows.
This gets most of the tests working again on windows with py38.
|
import asyncio
import sys
# Use the selector event loop on windows. Do this in tornado/test/__init__.py
# instead of runtests.py so it happens no matter how the test is run (such as
# through editor integrations).
if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) # type: ignore
|
<commit_before><commit_msg>test: Use selector event loop on windows.
This gets most of the tests working again on windows with py38.<commit_after>
|
import asyncio
import sys
# Use the selector event loop on windows. Do this in tornado/test/__init__.py
# instead of runtests.py so it happens no matter how the test is run (such as
# through editor integrations).
if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) # type: ignore
|
test: Use selector event loop on windows.
This gets most of the tests working again on windows with py38.import asyncio
import sys
# Use the selector event loop on windows. Do this in tornado/test/__init__.py
# instead of runtests.py so it happens no matter how the test is run (such as
# through editor integrations).
if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) # type: ignore
|
<commit_before><commit_msg>test: Use selector event loop on windows.
This gets most of the tests working again on windows with py38.<commit_after>import asyncio
import sys
# Use the selector event loop on windows. Do this in tornado/test/__init__.py
# instead of runtests.py so it happens no matter how the test is run (such as
# through editor integrations).
if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) # type: ignore
|
|
00ec3ae8f6d51d393b08e3645d639619106aec67
|
migrations/versions/0366_letter_rates_2022.py
|
migrations/versions/0366_letter_rates_2022.py
|
"""
Revision ID: 0366_letter_rates_2022
Revises: 0365_add_nhs_branding
Create Date: 2022-03-01 14:00:00
"""
import itertools
import uuid
from datetime import datetime
from alembic import op
from sqlalchemy.sql import text
from app.models import LetterRate
revision = '0366_letter_rates_2022'
down_revision = '0365_add_nhs_branding'
CHANGEOVER_DATE = datetime(2022, 3, 1, 0, 0)
def get_new_rate(sheet_count, post_class):
base_prices = {
'second': 36,
'first': 58,
'europe': 88,
'rest-of-world': 88,
}
multiplier = 5 if post_class in ('first', 'second') else 8
return (base_prices[post_class] + (multiplier * sheet_count)) / 100.0
def upgrade():
conn = op.get_bind()
conn.execute(text("UPDATE letter_rates SET end_date = :start WHERE end_date IS NULL"), start=CHANGEOVER_DATE)
op.bulk_insert(LetterRate.__table__, [
{
'id': uuid.uuid4(),
'start_date': CHANGEOVER_DATE,
'end_date': None,
'sheet_count': sheet_count,
'rate': get_new_rate(sheet_count, post_class),
'crown': crown,
'post_class': post_class,
}
for sheet_count, crown, post_class in itertools.product(
range(1, 6),
[True, False],
['first', 'second', 'europe', 'rest-of-world']
)
])
def downgrade():
# Make sure you've thought about billing implications etc before downgrading!
conn = op.get_bind()
conn.execute(text("DELETE FROM letter_rates WHERE start_date = :start"), start=CHANGEOVER_DATE)
conn.execute(text("UPDATE letter_rates SET end_date = NULL WHERE end_date = :start"), start=CHANGEOVER_DATE)
|
Add new letter rates for March 1, 2022.
|
Add new letter rates for March 1, 2022.
- second class postage will go up by 2 pence, plus VAT
- international postage will go up by 7 pence, plus VAT
- first class postage will go down by 6 pence, plus VAT
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add new letter rates for March 1, 2022.
- second class postage will go up by 2 pence, plus VAT
- international postage will go up by 7 pence, plus VAT
- first class postage will go down by 6 pence, plus VAT
|
"""
Revision ID: 0366_letter_rates_2022
Revises: 0365_add_nhs_branding
Create Date: 2022-03-01 14:00:00
"""
import itertools
import uuid
from datetime import datetime
from alembic import op
from sqlalchemy.sql import text
from app.models import LetterRate
revision = '0366_letter_rates_2022'
down_revision = '0365_add_nhs_branding'
CHANGEOVER_DATE = datetime(2022, 3, 1, 0, 0)
def get_new_rate(sheet_count, post_class):
base_prices = {
'second': 36,
'first': 58,
'europe': 88,
'rest-of-world': 88,
}
multiplier = 5 if post_class in ('first', 'second') else 8
return (base_prices[post_class] + (multiplier * sheet_count)) / 100.0
def upgrade():
conn = op.get_bind()
conn.execute(text("UPDATE letter_rates SET end_date = :start WHERE end_date IS NULL"), start=CHANGEOVER_DATE)
op.bulk_insert(LetterRate.__table__, [
{
'id': uuid.uuid4(),
'start_date': CHANGEOVER_DATE,
'end_date': None,
'sheet_count': sheet_count,
'rate': get_new_rate(sheet_count, post_class),
'crown': crown,
'post_class': post_class,
}
for sheet_count, crown, post_class in itertools.product(
range(1, 6),
[True, False],
['first', 'second', 'europe', 'rest-of-world']
)
])
def downgrade():
# Make sure you've thought about billing implications etc before downgrading!
conn = op.get_bind()
conn.execute(text("DELETE FROM letter_rates WHERE start_date = :start"), start=CHANGEOVER_DATE)
conn.execute(text("UPDATE letter_rates SET end_date = NULL WHERE end_date = :start"), start=CHANGEOVER_DATE)
|
<commit_before><commit_msg>Add new letter rates for March 1, 2022.
- second class postage will go up by 2 pence, plus VAT
- international postage will go up by 7 pence, plus VAT
- first class postage will go down by 6 pence, plus VAT<commit_after>
|
"""
Revision ID: 0366_letter_rates_2022
Revises: 0365_add_nhs_branding
Create Date: 2022-03-01 14:00:00
"""
import itertools
import uuid
from datetime import datetime
from alembic import op
from sqlalchemy.sql import text
from app.models import LetterRate
revision = '0366_letter_rates_2022'
down_revision = '0365_add_nhs_branding'
CHANGEOVER_DATE = datetime(2022, 3, 1, 0, 0)
def get_new_rate(sheet_count, post_class):
base_prices = {
'second': 36,
'first': 58,
'europe': 88,
'rest-of-world': 88,
}
multiplier = 5 if post_class in ('first', 'second') else 8
return (base_prices[post_class] + (multiplier * sheet_count)) / 100.0
def upgrade():
conn = op.get_bind()
conn.execute(text("UPDATE letter_rates SET end_date = :start WHERE end_date IS NULL"), start=CHANGEOVER_DATE)
op.bulk_insert(LetterRate.__table__, [
{
'id': uuid.uuid4(),
'start_date': CHANGEOVER_DATE,
'end_date': None,
'sheet_count': sheet_count,
'rate': get_new_rate(sheet_count, post_class),
'crown': crown,
'post_class': post_class,
}
for sheet_count, crown, post_class in itertools.product(
range(1, 6),
[True, False],
['first', 'second', 'europe', 'rest-of-world']
)
])
def downgrade():
# Make sure you've thought about billing implications etc before downgrading!
conn = op.get_bind()
conn.execute(text("DELETE FROM letter_rates WHERE start_date = :start"), start=CHANGEOVER_DATE)
conn.execute(text("UPDATE letter_rates SET end_date = NULL WHERE end_date = :start"), start=CHANGEOVER_DATE)
|
Add new letter rates for March 1, 2022.
- second class postage will go up by 2 pence, plus VAT
- international postage will go up by 7 pence, plus VAT
- first class postage will go down by 6 pence, plus VAT"""
Revision ID: 0366_letter_rates_2022
Revises: 0365_add_nhs_branding
Create Date: 2022-03-01 14:00:00
"""
import itertools
import uuid
from datetime import datetime
from alembic import op
from sqlalchemy.sql import text
from app.models import LetterRate
revision = '0366_letter_rates_2022'
down_revision = '0365_add_nhs_branding'
CHANGEOVER_DATE = datetime(2022, 3, 1, 0, 0)
def get_new_rate(sheet_count, post_class):
base_prices = {
'second': 36,
'first': 58,
'europe': 88,
'rest-of-world': 88,
}
multiplier = 5 if post_class in ('first', 'second') else 8
return (base_prices[post_class] + (multiplier * sheet_count)) / 100.0
def upgrade():
conn = op.get_bind()
conn.execute(text("UPDATE letter_rates SET end_date = :start WHERE end_date IS NULL"), start=CHANGEOVER_DATE)
op.bulk_insert(LetterRate.__table__, [
{
'id': uuid.uuid4(),
'start_date': CHANGEOVER_DATE,
'end_date': None,
'sheet_count': sheet_count,
'rate': get_new_rate(sheet_count, post_class),
'crown': crown,
'post_class': post_class,
}
for sheet_count, crown, post_class in itertools.product(
range(1, 6),
[True, False],
['first', 'second', 'europe', 'rest-of-world']
)
])
def downgrade():
# Make sure you've thought about billing implications etc before downgrading!
conn = op.get_bind()
conn.execute(text("DELETE FROM letter_rates WHERE start_date = :start"), start=CHANGEOVER_DATE)
conn.execute(text("UPDATE letter_rates SET end_date = NULL WHERE end_date = :start"), start=CHANGEOVER_DATE)
|
<commit_before><commit_msg>Add new letter rates for March 1, 2022.
- second class postage will go up by 2 pence, plus VAT
- international postage will go up by 7 pence, plus VAT
- first class postage will go down by 6 pence, plus VAT<commit_after>"""
Revision ID: 0366_letter_rates_2022
Revises: 0365_add_nhs_branding
Create Date: 2022-03-01 14:00:00
"""
import itertools
import uuid
from datetime import datetime
from alembic import op
from sqlalchemy.sql import text
from app.models import LetterRate
revision = '0366_letter_rates_2022'
down_revision = '0365_add_nhs_branding'
CHANGEOVER_DATE = datetime(2022, 3, 1, 0, 0)
def get_new_rate(sheet_count, post_class):
base_prices = {
'second': 36,
'first': 58,
'europe': 88,
'rest-of-world': 88,
}
multiplier = 5 if post_class in ('first', 'second') else 8
return (base_prices[post_class] + (multiplier * sheet_count)) / 100.0
def upgrade():
conn = op.get_bind()
conn.execute(text("UPDATE letter_rates SET end_date = :start WHERE end_date IS NULL"), start=CHANGEOVER_DATE)
op.bulk_insert(LetterRate.__table__, [
{
'id': uuid.uuid4(),
'start_date': CHANGEOVER_DATE,
'end_date': None,
'sheet_count': sheet_count,
'rate': get_new_rate(sheet_count, post_class),
'crown': crown,
'post_class': post_class,
}
for sheet_count, crown, post_class in itertools.product(
range(1, 6),
[True, False],
['first', 'second', 'europe', 'rest-of-world']
)
])
def downgrade():
# Make sure you've thought about billing implications etc before downgrading!
conn = op.get_bind()
conn.execute(text("DELETE FROM letter_rates WHERE start_date = :start"), start=CHANGEOVER_DATE)
conn.execute(text("UPDATE letter_rates SET end_date = NULL WHERE end_date = :start"), start=CHANGEOVER_DATE)
|
|
a7339ba4c825e893e043ac9aefac55e3f0c939aa
|
students/exceptions.py
|
students/exceptions.py
|
import json
class ClientError(Exception):
def __init__(self, code):
super(ClientError, self).__init__(code)
self.code = code
def send_to(self, channel):
channel.send({
"text": json.dumps({
"error": self.code,
}),
})
|
Add ClientError exception to handle sending back web socket errors to the client.
|
Add ClientError exception to handle sending back web socket errors to the client.
|
Python
|
mit
|
muhummadPatel/raspied,muhummadPatel/raspied,muhummadPatel/raspied
|
Add ClientError exception to handle sending back web socket errors to the client.
|
import json
class ClientError(Exception):
def __init__(self, code):
super(ClientError, self).__init__(code)
self.code = code
def send_to(self, channel):
channel.send({
"text": json.dumps({
"error": self.code,
}),
})
|
<commit_before><commit_msg>Add ClientError exception to handle sending back web socket errors to the client.<commit_after>
|
import json
class ClientError(Exception):
def __init__(self, code):
super(ClientError, self).__init__(code)
self.code = code
def send_to(self, channel):
channel.send({
"text": json.dumps({
"error": self.code,
}),
})
|
Add ClientError exception to handle sending back web socket errors to the client.import json
class ClientError(Exception):
def __init__(self, code):
super(ClientError, self).__init__(code)
self.code = code
def send_to(self, channel):
channel.send({
"text": json.dumps({
"error": self.code,
}),
})
|
<commit_before><commit_msg>Add ClientError exception to handle sending back web socket errors to the client.<commit_after>import json
class ClientError(Exception):
def __init__(self, code):
super(ClientError, self).__init__(code)
self.code = code
def send_to(self, channel):
channel.send({
"text": json.dumps({
"error": self.code,
}),
})
|
|
700b19e4fe55ef57935b70d90883c0d0451c163a
|
locations/spiders/xpo_logistics.py
|
locations/spiders/xpo_logistics.py
|
# -*- coding: utf-8 -*-
import scrapy
import re
import ast
from locations.items import GeojsonPointItem
class XPOLogisticsSpider(scrapy.Spider):
name = "xpo_logistics"
allowed_domains = ["www.xpo.com"]
start_urls = (
'https://www.xpo.com/global-locations/',
)
def parse(self, response):
script = response.xpath('//script[contains(.,"globalLocationsArray")]').extract_first()
data = re.search(r'globalLocationsArray = (.*);', script).groups()[0]
data = ast.literal_eval(data)
for store in data:
yield GeojsonPointItem(
lat=float(store['latitude']),
lon=float(store['longitude'].replace(',','')),
phone=store['telephone'],
ref=store['office_name'],
addr_full=store['street'],
city=store['city'],
state=store['state'],
postcode=store['postal_code'],
country=store['country'],
name=store['office_name']
)
|
Add spider for XPO Logistics
|
Add spider for XPO Logistics
|
Python
|
mit
|
iandees/all-the-places,iandees/all-the-places,iandees/all-the-places
|
Add spider for XPO Logistics
|
# -*- coding: utf-8 -*-
import scrapy
import re
import ast
from locations.items import GeojsonPointItem
class XPOLogisticsSpider(scrapy.Spider):
name = "xpo_logistics"
allowed_domains = ["www.xpo.com"]
start_urls = (
'https://www.xpo.com/global-locations/',
)
def parse(self, response):
script = response.xpath('//script[contains(.,"globalLocationsArray")]').extract_first()
data = re.search(r'globalLocationsArray = (.*);', script).groups()[0]
data = ast.literal_eval(data)
for store in data:
yield GeojsonPointItem(
lat=float(store['latitude']),
lon=float(store['longitude'].replace(',','')),
phone=store['telephone'],
ref=store['office_name'],
addr_full=store['street'],
city=store['city'],
state=store['state'],
postcode=store['postal_code'],
country=store['country'],
name=store['office_name']
)
|
<commit_before><commit_msg>Add spider for XPO Logistics<commit_after>
|
# -*- coding: utf-8 -*-
import scrapy
import re
import ast
from locations.items import GeojsonPointItem
class XPOLogisticsSpider(scrapy.Spider):
name = "xpo_logistics"
allowed_domains = ["www.xpo.com"]
start_urls = (
'https://www.xpo.com/global-locations/',
)
def parse(self, response):
script = response.xpath('//script[contains(.,"globalLocationsArray")]').extract_first()
data = re.search(r'globalLocationsArray = (.*);', script).groups()[0]
data = ast.literal_eval(data)
for store in data:
yield GeojsonPointItem(
lat=float(store['latitude']),
lon=float(store['longitude'].replace(',','')),
phone=store['telephone'],
ref=store['office_name'],
addr_full=store['street'],
city=store['city'],
state=store['state'],
postcode=store['postal_code'],
country=store['country'],
name=store['office_name']
)
|
Add spider for XPO Logistics# -*- coding: utf-8 -*-
import scrapy
import re
import ast
from locations.items import GeojsonPointItem
class XPOLogisticsSpider(scrapy.Spider):
name = "xpo_logistics"
allowed_domains = ["www.xpo.com"]
start_urls = (
'https://www.xpo.com/global-locations/',
)
def parse(self, response):
script = response.xpath('//script[contains(.,"globalLocationsArray")]').extract_first()
data = re.search(r'globalLocationsArray = (.*);', script).groups()[0]
data = ast.literal_eval(data)
for store in data:
yield GeojsonPointItem(
lat=float(store['latitude']),
lon=float(store['longitude'].replace(',','')),
phone=store['telephone'],
ref=store['office_name'],
addr_full=store['street'],
city=store['city'],
state=store['state'],
postcode=store['postal_code'],
country=store['country'],
name=store['office_name']
)
|
<commit_before><commit_msg>Add spider for XPO Logistics<commit_after># -*- coding: utf-8 -*-
import scrapy
import re
import ast
from locations.items import GeojsonPointItem
class XPOLogisticsSpider(scrapy.Spider):
name = "xpo_logistics"
allowed_domains = ["www.xpo.com"]
start_urls = (
'https://www.xpo.com/global-locations/',
)
def parse(self, response):
script = response.xpath('//script[contains(.,"globalLocationsArray")]').extract_first()
data = re.search(r'globalLocationsArray = (.*);', script).groups()[0]
data = ast.literal_eval(data)
for store in data:
yield GeojsonPointItem(
lat=float(store['latitude']),
lon=float(store['longitude'].replace(',','')),
phone=store['telephone'],
ref=store['office_name'],
addr_full=store['street'],
city=store['city'],
state=store['state'],
postcode=store['postal_code'],
country=store['country'],
name=store['office_name']
)
|
|
d5f4a57d3be9f27d80ca037aa7ce6d4576852cfb
|
py/find-k-pairs-with-smallest-sums.py
|
py/find-k-pairs-with-smallest-sums.py
|
import heapq
class Solution(object):
def kSmallestPairs(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[List[int]]
"""
l1, l2 = len(nums1), len(nums2)
if l1 * l2 <= k:
return sorted([[n1, n2] for n1 in nums1 for n2 in nums2], key=lambda x:x[0] + x[1])
swapped = False
heap = []
for i in xrange(min(l1, k)):
heap.append([nums1[i] + nums2[0], [i, 0]])
ans = []
heapq.heapify(heap)
while len(ans) < k:
s, idx = heapq.heappop(heap)
idx1, idx2 = idx
ans.append([nums1[idx1], nums2[idx2]])
if idx2 + 1 < l2:
idx2 += 1
s = nums1[idx1] + nums2[idx2]
heapq.heappush(heap, [s, [idx1, idx2]])
return ans
|
Add py solution for 373. Find K Pairs with Smallest Sums
|
Add py solution for 373. Find K Pairs with Smallest Sums
373. Find K Pairs with Smallest Sums: https://leetcode.com/problems/find-k-pairs-with-smallest-sums/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 373. Find K Pairs with Smallest Sums
373. Find K Pairs with Smallest Sums: https://leetcode.com/problems/find-k-pairs-with-smallest-sums/
|
import heapq
class Solution(object):
def kSmallestPairs(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[List[int]]
"""
l1, l2 = len(nums1), len(nums2)
if l1 * l2 <= k:
return sorted([[n1, n2] for n1 in nums1 for n2 in nums2], key=lambda x:x[0] + x[1])
swapped = False
heap = []
for i in xrange(min(l1, k)):
heap.append([nums1[i] + nums2[0], [i, 0]])
ans = []
heapq.heapify(heap)
while len(ans) < k:
s, idx = heapq.heappop(heap)
idx1, idx2 = idx
ans.append([nums1[idx1], nums2[idx2]])
if idx2 + 1 < l2:
idx2 += 1
s = nums1[idx1] + nums2[idx2]
heapq.heappush(heap, [s, [idx1, idx2]])
return ans
|
<commit_before><commit_msg>Add py solution for 373. Find K Pairs with Smallest Sums
373. Find K Pairs with Smallest Sums: https://leetcode.com/problems/find-k-pairs-with-smallest-sums/<commit_after>
|
import heapq
class Solution(object):
def kSmallestPairs(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[List[int]]
"""
l1, l2 = len(nums1), len(nums2)
if l1 * l2 <= k:
return sorted([[n1, n2] for n1 in nums1 for n2 in nums2], key=lambda x:x[0] + x[1])
swapped = False
heap = []
for i in xrange(min(l1, k)):
heap.append([nums1[i] + nums2[0], [i, 0]])
ans = []
heapq.heapify(heap)
while len(ans) < k:
s, idx = heapq.heappop(heap)
idx1, idx2 = idx
ans.append([nums1[idx1], nums2[idx2]])
if idx2 + 1 < l2:
idx2 += 1
s = nums1[idx1] + nums2[idx2]
heapq.heappush(heap, [s, [idx1, idx2]])
return ans
|
Add py solution for 373. Find K Pairs with Smallest Sums
373. Find K Pairs with Smallest Sums: https://leetcode.com/problems/find-k-pairs-with-smallest-sums/import heapq
class Solution(object):
def kSmallestPairs(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[List[int]]
"""
l1, l2 = len(nums1), len(nums2)
if l1 * l2 <= k:
return sorted([[n1, n2] for n1 in nums1 for n2 in nums2], key=lambda x:x[0] + x[1])
swapped = False
heap = []
for i in xrange(min(l1, k)):
heap.append([nums1[i] + nums2[0], [i, 0]])
ans = []
heapq.heapify(heap)
while len(ans) < k:
s, idx = heapq.heappop(heap)
idx1, idx2 = idx
ans.append([nums1[idx1], nums2[idx2]])
if idx2 + 1 < l2:
idx2 += 1
s = nums1[idx1] + nums2[idx2]
heapq.heappush(heap, [s, [idx1, idx2]])
return ans
|
<commit_before><commit_msg>Add py solution for 373. Find K Pairs with Smallest Sums
373. Find K Pairs with Smallest Sums: https://leetcode.com/problems/find-k-pairs-with-smallest-sums/<commit_after>import heapq
class Solution(object):
def kSmallestPairs(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[List[int]]
"""
l1, l2 = len(nums1), len(nums2)
if l1 * l2 <= k:
return sorted([[n1, n2] for n1 in nums1 for n2 in nums2], key=lambda x:x[0] + x[1])
swapped = False
heap = []
for i in xrange(min(l1, k)):
heap.append([nums1[i] + nums2[0], [i, 0]])
ans = []
heapq.heapify(heap)
while len(ans) < k:
s, idx = heapq.heappop(heap)
idx1, idx2 = idx
ans.append([nums1[idx1], nums2[idx2]])
if idx2 + 1 < l2:
idx2 += 1
s = nums1[idx1] + nums2[idx2]
heapq.heappush(heap, [s, [idx1, idx2]])
return ans
|
|
fab1c6e8b935b5a4e81146e34c833ce66e05db0d
|
jupyterhub/generate_jupyter_secrets.py
|
jupyterhub/generate_jupyter_secrets.py
|
#!/usr/bin/env python
import binascii
import os
def random_hex(nb):
return binascii.hexlify(os.urandom(nb)).decode('ascii')
with open('jupyterhub.env', 'w') as f:
f.write('JPY_COOKIE_SECRET=%s\n' % random_hex(1024))
f.write('CONFIGPROXY_AUTH_TOKEN=%s\n' % random_hex(64))
|
Add script to generate jupyterhub secret vars.
|
Add script to generate jupyterhub secret vars.
|
Python
|
mit
|
Unidata/Unidata-Dockerfiles,Unidata/Unidata-Dockerfiles,julienchastang/Unidata-Dockerfiles,julienchastang/Unidata-Dockerfiles,Unidata/Unidata-Dockerfiles,julienchastang/Unidata-Dockerfiles
|
Add script to generate jupyterhub secret vars.
|
#!/usr/bin/env python
import binascii
import os
def random_hex(nb):
return binascii.hexlify(os.urandom(nb)).decode('ascii')
with open('jupyterhub.env', 'w') as f:
f.write('JPY_COOKIE_SECRET=%s\n' % random_hex(1024))
f.write('CONFIGPROXY_AUTH_TOKEN=%s\n' % random_hex(64))
|
<commit_before><commit_msg>Add script to generate jupyterhub secret vars.<commit_after>
|
#!/usr/bin/env python
import binascii
import os
def random_hex(nb):
return binascii.hexlify(os.urandom(nb)).decode('ascii')
with open('jupyterhub.env', 'w') as f:
f.write('JPY_COOKIE_SECRET=%s\n' % random_hex(1024))
f.write('CONFIGPROXY_AUTH_TOKEN=%s\n' % random_hex(64))
|
Add script to generate jupyterhub secret vars.#!/usr/bin/env python
import binascii
import os
def random_hex(nb):
return binascii.hexlify(os.urandom(nb)).decode('ascii')
with open('jupyterhub.env', 'w') as f:
f.write('JPY_COOKIE_SECRET=%s\n' % random_hex(1024))
f.write('CONFIGPROXY_AUTH_TOKEN=%s\n' % random_hex(64))
|
<commit_before><commit_msg>Add script to generate jupyterhub secret vars.<commit_after>#!/usr/bin/env python
import binascii
import os
def random_hex(nb):
return binascii.hexlify(os.urandom(nb)).decode('ascii')
with open('jupyterhub.env', 'w') as f:
f.write('JPY_COOKIE_SECRET=%s\n' % random_hex(1024))
f.write('CONFIGPROXY_AUTH_TOKEN=%s\n' % random_hex(64))
|
|
4e891bae265599768ce28788bbd44978674202e5
|
cptm/manifestoproject2cpt_input.py
|
cptm/manifestoproject2cpt_input.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pandas as pd
import logging
import argparse
import os
import glob
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
from cptm.utils.frog import get_frogclient, pos_and_lemmas
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logger.setLevel(logging.DEBUG)
logging.getLogger('inputgeneration').setLevel(logging.DEBUG)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dir_in', help='directory containing the data '
'(manifesto project csv files)')
parser.add_argument('dir_out', help='the name of the dir where the '
'CPT corpus should be saved.')
args = parser.parse_args()
dir_in = args.dir_in
dir_out = args.dir_out
frogclient = get_frogclient()
if not os.path.exists(dir_out):
os.makedirs(dir_out)
p = Perspective('', pos_topic_words(), pos_opinion_words())
data_files = glob.glob('{}/*.csv'.format(dir_in))
for i, data_file in enumerate(data_files):
if i % 5 == 0:
logger.info('Processing text {} of {}'.format(i + 1,
len(data_files)))
df = pd.read_csv(data_file, encoding='utf-8')
text = ' '.join([line for line in df['content']])
try:
for pos, lemma in pos_and_lemmas(text, frogclient):
if pos in word_types():
p.add(pos, remove_trailing_digits(lemma))
except Exception, e:
logger.warn(str(e))
del frogclient
frogclient = get_frogclient()
logger.info('parsing pseudo sentences instead')
for text in df['content']:
for pos, lemma in pos_and_lemmas(text, frogclient):
if pos in word_types():
p.add(pos, remove_trailing_digits(lemma))
file_name = os.path.basename(data_file).replace('.csv', '.txt')
p.write2file(dir_out, file_name)
|
Add script to generate cptm corpus for manifesto data
|
Add script to generate cptm corpus for manifesto data
The script uses frog to parse text.
|
Python
|
apache-2.0
|
NLeSC/cptm,NLeSC/cptm
|
Add script to generate cptm corpus for manifesto data
The script uses frog to parse text.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pandas as pd
import logging
import argparse
import os
import glob
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
from cptm.utils.frog import get_frogclient, pos_and_lemmas
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logger.setLevel(logging.DEBUG)
logging.getLogger('inputgeneration').setLevel(logging.DEBUG)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dir_in', help='directory containing the data '
'(manifesto project csv files)')
parser.add_argument('dir_out', help='the name of the dir where the '
'CPT corpus should be saved.')
args = parser.parse_args()
dir_in = args.dir_in
dir_out = args.dir_out
frogclient = get_frogclient()
if not os.path.exists(dir_out):
os.makedirs(dir_out)
p = Perspective('', pos_topic_words(), pos_opinion_words())
data_files = glob.glob('{}/*.csv'.format(dir_in))
for i, data_file in enumerate(data_files):
if i % 5 == 0:
logger.info('Processing text {} of {}'.format(i + 1,
len(data_files)))
df = pd.read_csv(data_file, encoding='utf-8')
text = ' '.join([line for line in df['content']])
try:
for pos, lemma in pos_and_lemmas(text, frogclient):
if pos in word_types():
p.add(pos, remove_trailing_digits(lemma))
except Exception, e:
logger.warn(str(e))
del frogclient
frogclient = get_frogclient()
logger.info('parsing pseudo sentences instead')
for text in df['content']:
for pos, lemma in pos_and_lemmas(text, frogclient):
if pos in word_types():
p.add(pos, remove_trailing_digits(lemma))
file_name = os.path.basename(data_file).replace('.csv', '.txt')
p.write2file(dir_out, file_name)
|
<commit_before><commit_msg>Add script to generate cptm corpus for manifesto data
The script uses frog to parse text.<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pandas as pd
import logging
import argparse
import os
import glob
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
from cptm.utils.frog import get_frogclient, pos_and_lemmas
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logger.setLevel(logging.DEBUG)
logging.getLogger('inputgeneration').setLevel(logging.DEBUG)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dir_in', help='directory containing the data '
'(manifesto project csv files)')
parser.add_argument('dir_out', help='the name of the dir where the '
'CPT corpus should be saved.')
args = parser.parse_args()
dir_in = args.dir_in
dir_out = args.dir_out
frogclient = get_frogclient()
if not os.path.exists(dir_out):
os.makedirs(dir_out)
p = Perspective('', pos_topic_words(), pos_opinion_words())
data_files = glob.glob('{}/*.csv'.format(dir_in))
for i, data_file in enumerate(data_files):
if i % 5 == 0:
logger.info('Processing text {} of {}'.format(i + 1,
len(data_files)))
df = pd.read_csv(data_file, encoding='utf-8')
text = ' '.join([line for line in df['content']])
try:
for pos, lemma in pos_and_lemmas(text, frogclient):
if pos in word_types():
p.add(pos, remove_trailing_digits(lemma))
except Exception, e:
logger.warn(str(e))
del frogclient
frogclient = get_frogclient()
logger.info('parsing pseudo sentences instead')
for text in df['content']:
for pos, lemma in pos_and_lemmas(text, frogclient):
if pos in word_types():
p.add(pos, remove_trailing_digits(lemma))
file_name = os.path.basename(data_file).replace('.csv', '.txt')
p.write2file(dir_out, file_name)
|
Add script to generate cptm corpus for manifesto data
The script uses frog to parse text.#!/usr/bin/python
# -*- coding: utf-8 -*-
import pandas as pd
import logging
import argparse
import os
import glob
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
from cptm.utils.frog import get_frogclient, pos_and_lemmas
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logger.setLevel(logging.DEBUG)
logging.getLogger('inputgeneration').setLevel(logging.DEBUG)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dir_in', help='directory containing the data '
'(manifesto project csv files)')
parser.add_argument('dir_out', help='the name of the dir where the '
'CPT corpus should be saved.')
args = parser.parse_args()
dir_in = args.dir_in
dir_out = args.dir_out
frogclient = get_frogclient()
if not os.path.exists(dir_out):
os.makedirs(dir_out)
p = Perspective('', pos_topic_words(), pos_opinion_words())
data_files = glob.glob('{}/*.csv'.format(dir_in))
for i, data_file in enumerate(data_files):
if i % 5 == 0:
logger.info('Processing text {} of {}'.format(i + 1,
len(data_files)))
df = pd.read_csv(data_file, encoding='utf-8')
text = ' '.join([line for line in df['content']])
try:
for pos, lemma in pos_and_lemmas(text, frogclient):
if pos in word_types():
p.add(pos, remove_trailing_digits(lemma))
except Exception, e:
logger.warn(str(e))
del frogclient
frogclient = get_frogclient()
logger.info('parsing pseudo sentences instead')
for text in df['content']:
for pos, lemma in pos_and_lemmas(text, frogclient):
if pos in word_types():
p.add(pos, remove_trailing_digits(lemma))
file_name = os.path.basename(data_file).replace('.csv', '.txt')
p.write2file(dir_out, file_name)
|
<commit_before><commit_msg>Add script to generate cptm corpus for manifesto data
The script uses frog to parse text.<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
import pandas as pd
import logging
import argparse
import os
import glob
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
from cptm.utils.frog import get_frogclient, pos_and_lemmas
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logger.setLevel(logging.DEBUG)
logging.getLogger('inputgeneration').setLevel(logging.DEBUG)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dir_in', help='directory containing the data '
'(manifesto project csv files)')
parser.add_argument('dir_out', help='the name of the dir where the '
'CPT corpus should be saved.')
args = parser.parse_args()
dir_in = args.dir_in
dir_out = args.dir_out
frogclient = get_frogclient()
if not os.path.exists(dir_out):
os.makedirs(dir_out)
p = Perspective('', pos_topic_words(), pos_opinion_words())
data_files = glob.glob('{}/*.csv'.format(dir_in))
for i, data_file in enumerate(data_files):
if i % 5 == 0:
logger.info('Processing text {} of {}'.format(i + 1,
len(data_files)))
df = pd.read_csv(data_file, encoding='utf-8')
text = ' '.join([line for line in df['content']])
try:
for pos, lemma in pos_and_lemmas(text, frogclient):
if pos in word_types():
p.add(pos, remove_trailing_digits(lemma))
except Exception, e:
logger.warn(str(e))
del frogclient
frogclient = get_frogclient()
logger.info('parsing pseudo sentences instead')
for text in df['content']:
for pos, lemma in pos_and_lemmas(text, frogclient):
if pos in word_types():
p.add(pos, remove_trailing_digits(lemma))
file_name = os.path.basename(data_file).replace('.csv', '.txt')
p.write2file(dir_out, file_name)
|
|
bb3255cba6452d4f646e84fcdd986b1aeb16d20d
|
test/_mysqldb_test.py
|
test/_mysqldb_test.py
|
'''
$ mysql
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 211
Server version: 5.6.15 Homebrew
Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> create database luigi;
Query OK, 1 row affected (0.00 sec)
'''
import mysql.connector
from luigi.contrib.mysqldb import MySqlTarget
import unittest
host = 'localhost'
port = 3306
database = 'luigi_test'
username = None
password = None
table_updates = 'table_updates'
def _create_test_database():
con = mysql.connector.connect(user=username,
password=password,
host=host,
port=port,
autocommit=True)
con.cursor().execute('CREATE DATABASE IF NOT EXISTS %s' % database)
_create_test_database()
target = MySqlTarget(host, database, username, password, '', 'update_id')
class MySqlTargetTest(unittest.TestCase):
def test_touch_and_exists(self):
drop()
self.assertFalse(target.exists(),
'Target should not exist before touching it')
target.touch()
self.assertTrue(target.exists(),
'Target should exist after touching it')
def drop():
con = target.connect(autocommit=True)
con.cursor().execute('DROP TABLE IF EXISTS %s' % table_updates)
|
Add basic testing for the mysql target
|
Add basic testing for the mysql target
|
Python
|
apache-2.0
|
linearregression/luigi,h3biomed/luigi,meyerson/luigi,neilisaac/luigi,samepage-labs/luigi,ivannotes/luigi,JackDanger/luigi,fw1121/luigi,ivannotes/luigi,PeteW/luigi,percyfal/luigi,ZhenxingWu/luigi,jamesmcm/luigi,mbruggmann/luigi,altaf-ali/luigi,joeshaw/luigi,samuell/luigi,Dawny33/luigi,hadesbox/luigi,realgo/luigi,dstandish/luigi,edx/luigi,moritzschaefer/luigi,wakamori/luigi,qpxu007/luigi,ZhenxingWu/luigi,pkexcellent/luigi,moritzschaefer/luigi,graingert/luigi,joeshaw/luigi,linearregression/luigi,slvnperron/luigi,foursquare/luigi,soxofaan/luigi,ContextLogic/luigi,huiyi1990/luigi,JackDanger/luigi,fw1121/luigi,kevhill/luigi,ViaSat/luigi,huiyi1990/luigi,spotify/luigi,dlstadther/luigi,kevhill/luigi,anyman/luigi,gpoulin/luigi,wakamori/luigi,humanlongevity/luigi,springcoil/luigi,laserson/luigi,foursquare/luigi,moritzschaefer/luigi,adaitche/luigi,stephenpascoe/luigi,hellais/luigi,dkroy/luigi,ivannotes/luigi,samepage-labs/luigi,ZhenxingWu/luigi,glenndmello/luigi,riga/luigi,fabriziodemaria/luigi,oldpa/luigi,Magnetic/luigi,ThQ/luigi,humanlongevity/luigi,mbruggmann/luigi,dhruvg/luigi,neilisaac/luigi,jw0201/luigi,Magnetic/luigi,samuell/luigi,meyerson/luigi,alkemics/luigi,springcoil/luigi,walkers-mv/luigi,percyfal/luigi,Wattpad/luigi,wakamori/luigi,rizzatti/luigi,fw1121/luigi,sahitya-pavurala/luigi,kalaidin/luigi,anyman/luigi,qpxu007/luigi,stephenpascoe/luigi,casey-green/luigi,Tarrasch/luigi,bmaggard/luigi,upworthy/luigi,drincruz/luigi,soxofaan/luigi,lungetech/luigi,hadesbox/luigi,Tarrasch/luigi,gpoulin/luigi,belevtsoff/luigi,torypages/luigi,penelopy/luigi,alkemics/luigi,bowlofstew/luigi,DomainGroupOSS/luigi,linsomniac/luigi,pkexcellent/luigi,lungetech/luigi,gpoulin/luigi,ChrisBeaumont/luigi,Magnetic/luigi,cpcloud/luigi,springcoil/luigi,dhruvg/luigi,h3biomed/luigi,Tarrasch/luigi,ViaSat/luigi,fabriziodemaria/luigi,hadesbox/luigi,edx/luigi,drincruz/luigi,SeedScientific/luigi,mbruggmann/luigi,republic-analytics/luigi,ivannotes/luigi,percyfal/luigi,glenndmello/luigi,dhruvg/luigi,dylanjbarth/luigi,adaitche/luigi,linsomniac/luigi,ThQ/luigi,ViaSat/luigi,walkers-mv/luigi,penelopy/luigi,h3biomed/luigi,springcoil/luigi,joeshaw/luigi,ViaSat/luigi,anyman/luigi,qpxu007/luigi,moritzschaefer/luigi,vine/luigi,vine/luigi,DomainGroupOSS/luigi,SeedScientific/luigi,laserson/luigi,fabriziodemaria/luigi,leafjungle/luigi,glenndmello/luigi,dlstadther/luigi,dkroy/luigi,aeron15/luigi,thejens/luigi,humanlongevity/luigi,stroykova/luigi,belevtsoff/luigi,laserson/luigi,SeedScientific/luigi,PeteW/luigi,javrasya/luigi,samuell/luigi,realgo/luigi,sahitya-pavurala/luigi,oldpa/luigi,ContextLogic/luigi,PeteW/luigi,mfcabrera/luigi,percyfal/luigi,moandcompany/luigi,dkroy/luigi,pkexcellent/luigi,samuell/luigi,spotify/luigi,jw0201/luigi,JackDanger/luigi,belevtsoff/luigi,adaitche/luigi,Yoone/luigi,laserson/luigi,ehdr/luigi,huiyi1990/luigi,rayrrr/luigi,stroykova/luigi,hellais/luigi,soxofaan/luigi,thejens/luigi,kevhill/luigi,ChrisBeaumont/luigi,foursquare/luigi,torypages/luigi,neilisaac/luigi,17zuoye/luigi,harveyxia/luigi,ChrisBeaumont/luigi,sahitya-pavurala/luigi,slvnperron/luigi,theoryno3/luigi,mfcabrera/luigi,lungetech/luigi,DomainGroupOSS/luigi,17zuoye/luigi,sahitya-pavurala/luigi,lichia/luigi,javrasya/luigi,bmaggard/luigi,penelopy/luigi,PeteW/luigi,aeron15/luigi,dstandish/luigi,graingert/luigi,foursquare/luigi,kalaidin/luigi,belevtsoff/luigi,SkyTruth/luigi,harveyxia/luigi,rayrrr/luigi,vine/luigi,republic-analytics/luigi,lichia/luigi,qpxu007/luigi,tuulos/luigi,lichia/luigi,bmaggard/luigi,upworthy/luigi,glenndmello/luigi,bowlofstew/luigi,graingert
/luigi,altaf-ali/luigi,SkyTruth/luigi,moandcompany/luigi,dstandish/luigi,javrasya/luigi,thejens/luigi,ContextLogic/luigi,leafjungle/luigi,rizzatti/luigi,Yoone/luigi,moandcompany/luigi,linsomniac/luigi,kalaidin/luigi,SkyTruth/luigi,DomainGroupOSS/luigi,rizzatti/luigi,spotify/luigi,Houzz/luigi,soxofaan/luigi,ContextLogic/luigi,stephenpascoe/luigi,17zuoye/luigi,theoryno3/luigi,neilisaac/luigi,riga/luigi,ThQ/luigi,hellais/luigi,meyerson/luigi,stroykova/luigi,leafjungle/luigi,casey-green/luigi,rayrrr/luigi,adaitche/luigi,republic-analytics/luigi,spotify/luigi,Magnetic/luigi,oldpa/luigi,h3biomed/luigi,mfcabrera/luigi,stephenpascoe/luigi,walkers-mv/luigi,Dawny33/luigi,alkemics/luigi,graingert/luigi,jw0201/luigi,fabriziodemaria/luigi,Tarrasch/luigi,javrasya/luigi,harveyxia/luigi,bowlofstew/luigi,upworthy/luigi,torypages/luigi,moandcompany/luigi,altaf-ali/luigi,oldpa/luigi,realgo/luigi,huiyi1990/luigi,upworthy/luigi,aeron15/luigi,ehdr/luigi,lungetech/luigi,slvnperron/luigi,drincruz/luigi,Dawny33/luigi,Houzz/luigi,theoryno3/luigi,dylanjbarth/luigi,LamCiuLoeng/luigi,Dawny33/luigi,ehdr/luigi,jamesmcm/luigi,kevhill/luigi,linsomniac/luigi,LamCiuLoeng/luigi,theoryno3/luigi,joeshaw/luigi,aeron15/luigi,Wattpad/luigi,casey-green/luigi,dkroy/luigi,walkers-mv/luigi,edx/luigi,linearregression/luigi,samepage-labs/luigi,hellais/luigi,republic-analytics/luigi,drincruz/luigi,torypages/luigi,gpoulin/luigi,bowlofstew/luigi,jamesmcm/luigi,fw1121/luigi,kalaidin/luigi,dhruvg/luigi,LamCiuLoeng/luigi,Houzz/luigi,ZhenxingWu/luigi,meyerson/luigi,thejens/luigi,Houzz/luigi,Yoone/luigi,pkexcellent/luigi,rizzatti/luigi,tuulos/luigi,jamesmcm/luigi,riga/luigi,cpcloud/luigi,wakamori/luigi,SeedScientific/luigi,anyman/luigi,harveyxia/luigi,alkemics/luigi,vine/luigi,Yoone/luigi,stroykova/luigi,bmaggard/luigi,Wattpad/luigi,leafjungle/luigi,casey-green/luigi,mbruggmann/luigi,altaf-ali/luigi,riga/luigi,dlstadther/luigi,hadesbox/luigi,penelopy/luigi,17zuoye/luigi,dstandish/luigi,samepage-labs/luigi,tuulos/luigi,realgo/luigi,dylanjbarth/luigi,ehdr/luigi,LamCiuLoeng/luigi,dlstadther/luigi,edx/luigi,ChrisBeaumont/luigi,slvnperron/luigi,rayrrr/luigi,tuulos/luigi,lichia/luigi,humanlongevity/luigi,SkyTruth/luigi,dylanjbarth/luigi,JackDanger/luigi,mfcabrera/luigi,ThQ/luigi,linearregression/luigi,jw0201/luigi
|
Add basic testing for the mysql target
|
'''
$ mysql
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 211
Server version: 5.6.15 Homebrew
Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> create database luigi;
Query OK, 1 row affected (0.00 sec)
'''
import mysql.connector
from luigi.contrib.mysqldb import MySqlTarget
import unittest
host = 'localhost'
port = 3306
database = 'luigi_test'
username = None
password = None
table_updates = 'table_updates'
def _create_test_database():
con = mysql.connector.connect(user=username,
password=password,
host=host,
port=port,
autocommit=True)
con.cursor().execute('CREATE DATABASE IF NOT EXISTS %s' % database)
_create_test_database()
target = MySqlTarget(host, database, username, password, '', 'update_id')
class MySqlTargetTest(unittest.TestCase):
def test_touch_and_exists(self):
drop()
self.assertFalse(target.exists(),
'Target should not exist before touching it')
target.touch()
self.assertTrue(target.exists(),
'Target should exist after touching it')
def drop():
con = target.connect(autocommit=True)
con.cursor().execute('DROP TABLE IF EXISTS %s' % table_updates)
|
<commit_before><commit_msg>Add basic testing for the mysql target<commit_after>
|
'''
$ mysql
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 211
Server version: 5.6.15 Homebrew
Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> create database luigi;
Query OK, 1 row affected (0.00 sec)
'''
import mysql.connector
from luigi.contrib.mysqldb import MySqlTarget
import unittest
host = 'localhost'
port = 3306
database = 'luigi_test'
username = None
password = None
table_updates = 'table_updates'
def _create_test_database():
con = mysql.connector.connect(user=username,
password=password,
host=host,
port=port,
autocommit=True)
con.cursor().execute('CREATE DATABASE IF NOT EXISTS %s' % database)
_create_test_database()
target = MySqlTarget(host, database, username, password, '', 'update_id')
class MySqlTargetTest(unittest.TestCase):
def test_touch_and_exists(self):
drop()
self.assertFalse(target.exists(),
'Target should not exist before touching it')
target.touch()
self.assertTrue(target.exists(),
'Target should exist after touching it')
def drop():
con = target.connect(autocommit=True)
con.cursor().execute('DROP TABLE IF EXISTS %s' % table_updates)
|
Add basic testing for the mysql target'''
$ mysql
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 211
Server version: 5.6.15 Homebrew
Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> create database luigi;
Query OK, 1 row affected (0.00 sec)
'''
import mysql.connector
from luigi.contrib.mysqldb import MySqlTarget
import unittest
host = 'localhost'
port = 3306
database = 'luigi_test'
username = None
password = None
table_updates = 'table_updates'
def _create_test_database():
con = mysql.connector.connect(user=username,
password=password,
host=host,
port=port,
autocommit=True)
con.cursor().execute('CREATE DATABASE IF NOT EXISTS %s' % database)
_create_test_database()
target = MySqlTarget(host, database, username, password, '', 'update_id')
class MySqlTargetTest(unittest.TestCase):
def test_touch_and_exists(self):
drop()
self.assertFalse(target.exists(),
'Target should not exist before touching it')
target.touch()
self.assertTrue(target.exists(),
'Target should exist after touching it')
def drop():
con = target.connect(autocommit=True)
con.cursor().execute('DROP TABLE IF EXISTS %s' % table_updates)
|
<commit_before><commit_msg>Add basic testing for the mysql target<commit_after>'''
$ mysql
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 211
Server version: 5.6.15 Homebrew
Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> create database luigi;
Query OK, 1 row affected (0.00 sec)
'''
import mysql.connector
from luigi.contrib.mysqldb import MySqlTarget
import unittest
host = 'localhost'
port = 3306
database = 'luigi_test'
username = None
password = None
table_updates = 'table_updates'
def _create_test_database():
con = mysql.connector.connect(user=username,
password=password,
host=host,
port=port,
autocommit=True)
con.cursor().execute('CREATE DATABASE IF NOT EXISTS %s' % database)
_create_test_database()
target = MySqlTarget(host, database, username, password, '', 'update_id')
class MySqlTargetTest(unittest.TestCase):
def test_touch_and_exists(self):
drop()
self.assertFalse(target.exists(),
'Target should not exist before touching it')
target.touch()
self.assertTrue(target.exists(),
'Target should exist after touching it')
def drop():
con = target.connect(autocommit=True)
con.cursor().execute('DROP TABLE IF EXISTS %s' % table_updates)
|
|
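Editor's note: the test in the record above only exercises touch() and exists() directly. As a purely illustrative, hypothetical sketch (not part of the commit), a luigi task would normally use the same MySqlTarget as its output marker and call touch() at the end of run(); the task name, table and update_id below are invented for the example, while the constructor's positional arguments mirror the ones used in the test.

import luigi
from luigi.contrib.mysqldb import MySqlTarget

class LoadReport(luigi.Task):
    # hypothetical task parameter, not from the original commit
    date = luigi.DateParameter()

    def output(self):
        # same positional signature as in the test above:
        # host, database, user, password, table, update_id
        return MySqlTarget('localhost', 'luigi_test', None, None,
                           'report', 'load_report_%s' % self.date)

    def run(self):
        # ... load the data here ...
        self.output().touch()  # records the update_id in the marker table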
fa9e0fe868d3abd9fde108b599131dd7196e5c45
|
open511_ui/static/i18n/generate_js.py
|
open511_ui/static/i18n/generate_js.py
|
# Requires that 'po2json' be installed and in $PATH
# http://search.cpan.org/~getty/Locale-Simple-0.011/bin/po2json
from glob import glob
import os
import re
import subprocess
JS_TEMPLATE = """window.O5 = window.O5 || {};
O5.i18n = new Jed({
locale_data: {
messages: %s
}
});
O5._t = function(s) { return O5.i18n.gettext(s); };
"""
I18N_DIR = os.path.dirname(os.path.realpath(__file__))
for po_filename in glob(I18N_DIR + "/*.po"):
json = subprocess.check_output(["po2json", po_filename])
with open(re.sub(r'\.po$', '.js', po_filename), 'w') as f:
f.write(JS_TEMPLATE % json)
|
Add script to generate JS translation files from PO
|
Add script to generate JS translation files from PO
|
Python
|
agpl-3.0
|
Open511/roadcast,Open511/roadcast,Open511/roadcast
|
Add script to generate JS translation files from PO
|
# Requires that 'po2json' be installed and in $PATH
# http://search.cpan.org/~getty/Locale-Simple-0.011/bin/po2json
from glob import glob
import os
import re
import subprocess
JS_TEMPLATE = """window.O5 = window.O5 || {};
O5.i18n = new Jed({
locale_data: {
messages: %s
}
});
O5._t = function(s) { return O5.i18n.gettext(s); };
"""
I18N_DIR = os.path.dirname(os.path.realpath(__file__))
for po_filename in glob(I18N_DIR + "/*.po"):
json = subprocess.check_output(["po2json", po_filename])
with open(re.sub(r'\.po$', '.js', po_filename), 'w') as f:
f.write(JS_TEMPLATE % json)
|
<commit_before><commit_msg>Add script to generate JS translation files from PO<commit_after>
|
# Requires that 'po2json' be installed and in $PATH
# http://search.cpan.org/~getty/Locale-Simple-0.011/bin/po2json
from glob import glob
import os
import re
import subprocess
JS_TEMPLATE = """window.O5 = window.O5 || {};
O5.i18n = new Jed({
locale_data: {
messages: %s
}
});
O5._t = function(s) { return O5.i18n.gettext(s); };
"""
I18N_DIR = os.path.dirname(os.path.realpath(__file__))
for po_filename in glob(I18N_DIR + "/*.po"):
json = subprocess.check_output(["po2json", po_filename])
with open(re.sub(r'\.po$', '.js', po_filename), 'w') as f:
f.write(JS_TEMPLATE % json)
|
Add script to generate JS translation files from PO# Requires that 'po2json' be installed and in $PATH
# http://search.cpan.org/~getty/Locale-Simple-0.011/bin/po2json
from glob import glob
import os
import re
import subprocess
JS_TEMPLATE = """window.O5 = window.O5 || {};
O5.i18n = new Jed({
locale_data: {
messages: %s
}
});
O5._t = function(s) { return O5.i18n.gettext(s); };
"""
I18N_DIR = os.path.dirname(os.path.realpath(__file__))
for po_filename in glob(I18N_DIR + "/*.po"):
json = subprocess.check_output(["po2json", po_filename])
with open(re.sub(r'\.po$', '.js', po_filename), 'w') as f:
f.write(JS_TEMPLATE % json)
|
<commit_before><commit_msg>Add script to generate JS translation files from PO<commit_after># Requires that 'po2json' be installed and in $PATH
# http://search.cpan.org/~getty/Locale-Simple-0.011/bin/po2json
from glob import glob
import os
import re
import subprocess
JS_TEMPLATE = """window.O5 = window.O5 || {};
O5.i18n = new Jed({
locale_data: {
messages: %s
}
});
O5._t = function(s) { return O5.i18n.gettext(s); };
"""
I18N_DIR = os.path.dirname(os.path.realpath(__file__))
for po_filename in glob(I18N_DIR + "/*.po"):
json = subprocess.check_output(["po2json", po_filename])
with open(re.sub(r'\.po$', '.js', po_filename), 'w') as f:
f.write(JS_TEMPLATE % json)
|
|
2506af6a57f2c7c7e01eb4cd5e53cd200d78f54f
|
tests/gallery_test.py
|
tests/gallery_test.py
|
from __future__ import with_statement
from ass2m.ass2m import Ass2m
from ass2m.server import Server
from unittest import TestCase
from webtest import TestApp
from tempfile import mkdtemp
from PIL import Image
from StringIO import StringIO
import os
import shutil
class GalleryTest(TestCase):
def setUp(self):
self.root = mkdtemp(prefix='ass2m_test_root')
self.ass2m = Ass2m(self.root)
self.ass2m.create(self.root)
server = Server(self.root)
self.app = TestApp(server.process)
os.mkdir(os.path.join(self.root, 'images'))
os.mkdir(os.path.join(self.root, 'images', 'nothing'))
with open(os.path.join(self.root, 'DESCRIPTION'), 'w') as f:
f.write('This is my awesome gallery!')
with open(os.path.join(self.root, 'images', 'notanimage.txt'), 'w') as f:
f.write('HELLO')
with open(os.path.join(self.root, 'images', 'image1.jpg'), 'wb') as f:
img = Image.new('RGB', (1337, 1337), (255, 0, 0))
img.save(f, 'jpeg')
with open(os.path.join(self.root, 'images', 'image2.jpg'), 'wb') as f:
img = Image.new('RGB', (1337, 1337), (0, 255, 0))
img.save(f, 'jpeg')
def tearDown(self):
if self.root:
shutil.rmtree(self.root)
def test_listAndDefaultView(self):
res = self.app.get('/images/')
assert '<h1>Index of /images</h1>' in res.body
assert '<img' not in res.body
assert 'nothing' in res.body
res1 = self.app.get('/images/?view=gallery')
f = self.ass2m.storage.get_file('/images')
f.view = 'gallery'
f.save()
res2 = self.app.get('/images/')
for res in (res1, res2):
assert '<h1>Gallery of /images</h1>' in res.body
assert '<img' in res.body
assert 'nothing' in res.body
def test_getThumbnail(self):
res = self.app.get('/images/image1.jpg?view=thumbnail')
img = Image.open(StringIO(res.body))
img.verify()
img = Image.open(StringIO(res.body))
assert img.size[0] < 1000
assert img.size[1] < 1000
res = self.app.get('/images/image1.jpg?view=thumbnail&thumb_size=42')
img = Image.open(StringIO(res.body))
assert img.size[0] == 42
assert img.size[1] == 42
|
Test for the gallery plugin
|
Test for the gallery plugin
|
Python
|
agpl-3.0
|
laurentb/assnet,laurentb/assnet
|
Test for the gallery plugin
|
from __future__ import with_statement
from ass2m.ass2m import Ass2m
from ass2m.server import Server
from unittest import TestCase
from webtest import TestApp
from tempfile import mkdtemp
from PIL import Image
from StringIO import StringIO
import os
import shutil
class GalleryTest(TestCase):
def setUp(self):
self.root = mkdtemp(prefix='ass2m_test_root')
self.ass2m = Ass2m(self.root)
self.ass2m.create(self.root)
server = Server(self.root)
self.app = TestApp(server.process)
os.mkdir(os.path.join(self.root, 'images'))
os.mkdir(os.path.join(self.root, 'images', 'nothing'))
with open(os.path.join(self.root, 'DESCRIPTION'), 'w') as f:
f.write('This is my awesome gallery!')
with open(os.path.join(self.root, 'images', 'notanimage.txt'), 'w') as f:
f.write('HELLO')
with open(os.path.join(self.root, 'images', 'image1.jpg'), 'wb') as f:
img = Image.new('RGB', (1337, 1337), (255, 0, 0))
img.save(f, 'jpeg')
with open(os.path.join(self.root, 'images', 'image2.jpg'), 'wb') as f:
img = Image.new('RGB', (1337, 1337), (0, 255, 0))
img.save(f, 'jpeg')
def tearDown(self):
if self.root:
shutil.rmtree(self.root)
def test_listAndDefaultView(self):
res = self.app.get('/images/')
assert '<h1>Index of /images</h1>' in res.body
assert '<img' not in res.body
assert 'nothing' in res.body
res1 = self.app.get('/images/?view=gallery')
f = self.ass2m.storage.get_file('/images')
f.view = 'gallery'
f.save()
res2 = self.app.get('/images/')
for res in (res1, res2):
assert '<h1>Gallery of /images</h1>' in res.body
assert '<img' in res.body
assert 'nothing' in res.body
def test_getThumbnail(self):
res = self.app.get('/images/image1.jpg?view=thumbnail')
img = Image.open(StringIO(res.body))
img.verify()
img = Image.open(StringIO(res.body))
assert img.size[0] < 1000
assert img.size[1] < 1000
res = self.app.get('/images/image1.jpg?view=thumbnail&thumb_size=42')
img = Image.open(StringIO(res.body))
assert img.size[0] == 42
assert img.size[1] == 42
|
<commit_before><commit_msg>Test for the gallery plugin<commit_after>
|
from __future__ import with_statement
from ass2m.ass2m import Ass2m
from ass2m.server import Server
from unittest import TestCase
from webtest import TestApp
from tempfile import mkdtemp
from PIL import Image
from StringIO import StringIO
import os
import shutil
class GalleryTest(TestCase):
def setUp(self):
self.root = mkdtemp(prefix='ass2m_test_root')
self.ass2m = Ass2m(self.root)
self.ass2m.create(self.root)
server = Server(self.root)
self.app = TestApp(server.process)
os.mkdir(os.path.join(self.root, 'images'))
os.mkdir(os.path.join(self.root, 'images', 'nothing'))
with open(os.path.join(self.root, 'DESCRIPTION'), 'w') as f:
f.write('This is my awesome gallery!')
with open(os.path.join(self.root, 'images', 'notanimage.txt'), 'w') as f:
f.write('HELLO')
with open(os.path.join(self.root, 'images', 'image1.jpg'), 'wb') as f:
img = Image.new('RGB', (1337, 1337), (255, 0, 0))
img.save(f, 'jpeg')
with open(os.path.join(self.root, 'images', 'image2.jpg'), 'wb') as f:
img = Image.new('RGB', (1337, 1337), (0, 255, 0))
img.save(f, 'jpeg')
def tearDown(self):
if self.root:
shutil.rmtree(self.root)
def test_listAndDefaultView(self):
res = self.app.get('/images/')
assert '<h1>Index of /images</h1>' in res.body
assert '<img' not in res.body
assert 'nothing' in res.body
res1 = self.app.get('/images/?view=gallery')
f = self.ass2m.storage.get_file('/images')
f.view = 'gallery'
f.save()
res2 = self.app.get('/images/')
for res in (res1, res2):
assert '<h1>Gallery of /images</h1>' in res.body
assert '<img' in res.body
assert 'nothing' in res.body
def test_getThumbnail(self):
res = self.app.get('/images/image1.jpg?view=thumbnail')
img = Image.open(StringIO(res.body))
img.verify()
img = Image.open(StringIO(res.body))
assert img.size[0] < 1000
assert img.size[1] < 1000
res = self.app.get('/images/image1.jpg?view=thumbnail&thumb_size=42')
img = Image.open(StringIO(res.body))
assert img.size[0] == 42
assert img.size[1] == 42
|
Test for the gallery pluginfrom __future__ import with_statement
from ass2m.ass2m import Ass2m
from ass2m.server import Server
from unittest import TestCase
from webtest import TestApp
from tempfile import mkdtemp
from PIL import Image
from StringIO import StringIO
import os
import shutil
class GalleryTest(TestCase):
def setUp(self):
self.root = mkdtemp(prefix='ass2m_test_root')
self.ass2m = Ass2m(self.root)
self.ass2m.create(self.root)
server = Server(self.root)
self.app = TestApp(server.process)
os.mkdir(os.path.join(self.root, 'images'))
os.mkdir(os.path.join(self.root, 'images', 'nothing'))
with open(os.path.join(self.root, 'DESCRIPTION'), 'w') as f:
f.write('This is my awesome gallery!')
with open(os.path.join(self.root, 'images', 'notanimage.txt'), 'w') as f:
f.write('HELLO')
with open(os.path.join(self.root, 'images', 'image1.jpg'), 'wb') as f:
img = Image.new('RGB', (1337, 1337), (255, 0, 0))
img.save(f, 'jpeg')
with open(os.path.join(self.root, 'images', 'image2.jpg'), 'wb') as f:
img = Image.new('RGB', (1337, 1337), (0, 255, 0))
img.save(f, 'jpeg')
def tearDown(self):
if self.root:
shutil.rmtree(self.root)
def test_listAndDefaultView(self):
res = self.app.get('/images/')
assert '<h1>Index of /images</h1>' in res.body
assert '<img' not in res.body
assert 'nothing' in res.body
res1 = self.app.get('/images/?view=gallery')
f = self.ass2m.storage.get_file('/images')
f.view = 'gallery'
f.save()
res2 = self.app.get('/images/')
for res in (res1, res2):
assert '<h1>Gallery of /images</h1>' in res.body
assert '<img' in res.body
assert 'nothing' in res.body
def test_getThumbnail(self):
res = self.app.get('/images/image1.jpg?view=thumbnail')
img = Image.open(StringIO(res.body))
img.verify()
img = Image.open(StringIO(res.body))
assert img.size[0] < 1000
assert img.size[1] < 1000
res = self.app.get('/images/image1.jpg?view=thumbnail&thumb_size=42')
img = Image.open(StringIO(res.body))
assert img.size[0] == 42
assert img.size[1] == 42
|
<commit_before><commit_msg>Test for the gallery plugin<commit_after>from __future__ import with_statement
from ass2m.ass2m import Ass2m
from ass2m.server import Server
from unittest import TestCase
from webtest import TestApp
from tempfile import mkdtemp
from PIL import Image
from StringIO import StringIO
import os
import shutil
class GalleryTest(TestCase):
def setUp(self):
self.root = mkdtemp(prefix='ass2m_test_root')
self.ass2m = Ass2m(self.root)
self.ass2m.create(self.root)
server = Server(self.root)
self.app = TestApp(server.process)
os.mkdir(os.path.join(self.root, 'images'))
os.mkdir(os.path.join(self.root, 'images', 'nothing'))
with open(os.path.join(self.root, 'DESCRIPTION'), 'w') as f:
f.write('This is my awesome gallery!')
with open(os.path.join(self.root, 'images', 'notanimage.txt'), 'w') as f:
f.write('HELLO')
with open(os.path.join(self.root, 'images', 'image1.jpg'), 'wb') as f:
img = Image.new('RGB', (1337, 1337), (255, 0, 0))
img.save(f, 'jpeg')
with open(os.path.join(self.root, 'images', 'image2.jpg'), 'wb') as f:
img = Image.new('RGB', (1337, 1337), (0, 255, 0))
img.save(f, 'jpeg')
def tearDown(self):
if self.root:
shutil.rmtree(self.root)
def test_listAndDefaultView(self):
res = self.app.get('/images/')
assert '<h1>Index of /images</h1>' in res.body
assert '<img' not in res.body
assert 'nothing' in res.body
res1 = self.app.get('/images/?view=gallery')
f = self.ass2m.storage.get_file('/images')
f.view = 'gallery'
f.save()
res2 = self.app.get('/images/')
for res in (res1, res2):
assert '<h1>Gallery of /images</h1>' in res.body
assert '<img' in res.body
assert 'nothing' in res.body
def test_getThumbnail(self):
res = self.app.get('/images/image1.jpg?view=thumbnail')
img = Image.open(StringIO(res.body))
img.verify()
img = Image.open(StringIO(res.body))
assert img.size[0] < 1000
assert img.size[1] < 1000
res = self.app.get('/images/image1.jpg?view=thumbnail&thumb_size=42')
img = Image.open(StringIO(res.body))
assert img.size[0] == 42
assert img.size[1] == 42
|
|
fe74d6c09b575f243e7750d8bb5d30cbd82bd485
|
enrique/problem.py
|
enrique/problem.py
|
from abc import ABCMeta, abstractmethod
class Problem(object):
__metaclass__ = ABCMeta
@abstractmethod
def init(self, *args, **kwargs):
"""Initialize the problem"""
raise NotImplementedError
@abstractmethod
def fitness_score(self, state):
"""Calculate the fitness score of the given ``state``
:returns: fitness score
"""
raise NotImplementedError
@abstractmethod
def mutation(self, state):
"""Mutates the given ``state`` to a new state
:returns: new state
"""
raise NotImplementedError
|
Add Problem abstract base class
|
Add Problem abstract base class
|
Python
|
mit
|
mesos-magellan/enrique
|
Add Problem abstract base class
|
from abc import ABCMeta, abstractmethod
class Problem(object):
__metaclass__ = ABCMeta
@abstractmethod
def init(self, *args, **kwargs):
"""Initialize the problem"""
raise NotImplementedError
@abstractmethod
def fitness_score(self, state):
"""Calculate the fitness score of the given ``state``
:returns: fitness score
"""
raise NotImplementedError
@abstractmethod
def mutation(self, state):
"""Mutates the given ``state`` to a new state
:returns: new state
"""
raise NotImplementedError
|
<commit_before><commit_msg>Add Problem abstract base class<commit_after>
|
from abc import ABCMeta, abstractmethod
class Problem(object):
__metaclass__ = ABCMeta
@abstractmethod
def init(self, *args, **kwargs):
"""Initialize the problem"""
raise NotImplementedError
@abstractmethod
def fitness_score(self, state):
"""Calculate the fitness score of the given ``state``
:returns: fitness score
"""
raise NotImplementedError
@abstractmethod
def mutation(self, state):
"""Mutates the given ``state`` to a new state
:returns: new state
"""
raise NotImplementedError
|
Add Problem abstract base classfrom abc import ABCMeta, abstractmethod
class Problem(object):
__metaclass__ = ABCMeta
@abstractmethod
def init(self, *args, **kwargs):
"""Initialize the problem"""
raise NotImplementedError
@abstractmethod
def fitness_score(self, state):
"""Calculate the fitness score of the given ``state``
:returns: fitness score
"""
raise NotImplementedError
@abstractmethod
def mutation(self, state):
"""Mutates the given ``state`` to a new state
:returns: new state
"""
raise NotImplementedError
|
<commit_before><commit_msg>Add Problem abstract base class<commit_after>from abc import ABCMeta, abstractmethod
class Problem(object):
__metaclass__ = ABCMeta
@abstractmethod
def init(self, *args, **kwargs):
"""Initialize the problem"""
raise NotImplementedError
@abstractmethod
def fitness_score(self, state):
"""Calculate the fitness score of the given ``state``
:returns: fitness score
"""
raise NotImplementedError
@abstractmethod
def mutation(self, state):
"""Mutates the given ``state`` to a new state
:returns: new state
"""
raise NotImplementedError
|
|
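Editor's note: as a hedged illustration of how the abstract interface in the record above is meant to be filled in, here is a hypothetical toy subclass (a "one-max" bit-string problem). The import path and all names are assumptions made for the example and are not part of the original commit.

import random

from enrique.problem import Problem  # assumed import path, matching the file location


class OneMaxProblem(Problem):
    """Toy problem: states are bit lists, fitness is the number of ones."""

    def init(self, length=32):
        # build and return a random initial state of the requested length
        self.length = length
        return [random.randint(0, 1) for _ in range(length)]

    def fitness_score(self, state):
        # fitness is simply the count of 1-bits in the state
        return sum(state)

    def mutation(self, state):
        # flip one randomly chosen bit and return the new state
        new_state = list(state)
        i = random.randrange(len(new_state))
        new_state[i] ^= 1
        return new_state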
479f1792aabc9220a489445979b48781a8cf7ff9
|
tests/pytests/unit/states/test_influxdb_continuous_query.py
|
tests/pytests/unit/states/test_influxdb_continuous_query.py
|
import pytest
import salt.modules.influxdbmod as influx_mod
import salt.states.influxdb_continuous_query as influx
from tests.support.mock import create_autospec, patch
@pytest.fixture
def configure_loader_modules():
return {influx: {"__salt__": {}, "__opts__": {"test": False}}}
@pytest.mark.xfail
@pytest.mark.parametrize(
"expected_kwargs",
(
{},
{"something": "extra"},
{"something": "extra", "even": "more"},
{"something": "extra", "still": "more and more and more", "and": "more"},
{
"something": "extra",
"what": "in tarnation",
"do": "you want",
"to": "add here?",
},
),
)
def test_when_present_is_called_it_should_pass_client_args_to_create_module(
expected_kwargs,
):
influx_module = create_autospec(influx_mod)
influx_module.continuous_query_exists.return_value = False
with patch.dict(
influx.__salt__,
{
"influxdb.continuous_query_exists": influx_module.continuous_query_exists,
"influxdb.create_continuous_query": influx_module.create_continuous_query,
},
):
influx.present(
name="foo",
database="fnord",
query="fnord",
resample_time="whatever",
coverage_period="fnord",
**expected_kwargs
)
actual_kwargs = influx_module.create_continuous_query.mock_calls[0].kwargs
assert actual_kwargs == expected_kwargs
|
Add tests for influxdb create_continuous_query
|
Add tests for influxdb create_continuous_query
Currently marked as xfail, since we'll pull the existing changes into
here.
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add tests for influxdb create_continuous_query
Currently marked as xfail, since we'll pull the existing changes into
here.
|
import pytest
import salt.modules.influxdbmod as influx_mod
import salt.states.influxdb_continuous_query as influx
from tests.support.mock import create_autospec, patch
@pytest.fixture
def configure_loader_modules():
return {influx: {"__salt__": {}, "__opts__": {"test": False}}}
@pytest.mark.xfail
@pytest.mark.parametrize(
"expected_kwargs",
(
{},
{"something": "extra"},
{"something": "extra", "even": "more"},
{"something": "extra", "still": "more and more and more", "and": "more"},
{
"something": "extra",
"what": "in tarnation",
"do": "you want",
"to": "add here?",
},
),
)
def test_when_present_is_called_it_should_pass_client_args_to_create_module(
expected_kwargs,
):
influx_module = create_autospec(influx_mod)
influx_module.continuous_query_exists.return_value = False
with patch.dict(
influx.__salt__,
{
"influxdb.continuous_query_exists": influx_module.continuous_query_exists,
"influxdb.create_continuous_query": influx_module.create_continuous_query,
},
):
influx.present(
name="foo",
database="fnord",
query="fnord",
resample_time="whatever",
coverage_period="fnord",
**expected_kwargs
)
actual_kwargs = influx_module.create_continuous_query.mock_calls[0].kwargs
assert actual_kwargs == expected_kwargs
|
<commit_before><commit_msg>Add tests for influxdb create_continuous_query
Currently marked as xfail, since we'll pull the existing changes into
here.<commit_after>
|
import pytest
import salt.modules.influxdbmod as influx_mod
import salt.states.influxdb_continuous_query as influx
from tests.support.mock import create_autospec, patch
@pytest.fixture
def configure_loader_modules():
return {influx: {"__salt__": {}, "__opts__": {"test": False}}}
@pytest.mark.xfail
@pytest.mark.parametrize(
"expected_kwargs",
(
{},
{"something": "extra"},
{"something": "extra", "even": "more"},
{"something": "extra", "still": "more and more and more", "and": "more"},
{
"something": "extra",
"what": "in tarnation",
"do": "you want",
"to": "add here?",
},
),
)
def test_when_present_is_called_it_should_pass_client_args_to_create_module(
expected_kwargs,
):
influx_module = create_autospec(influx_mod)
influx_module.continuous_query_exists.return_value = False
with patch.dict(
influx.__salt__,
{
"influxdb.continuous_query_exists": influx_module.continuous_query_exists,
"influxdb.create_continuous_query": influx_module.create_continuous_query,
},
):
influx.present(
name="foo",
database="fnord",
query="fnord",
resample_time="whatever",
coverage_period="fnord",
**expected_kwargs
)
actual_kwargs = influx_module.create_continuous_query.mock_calls[0].kwargs
assert actual_kwargs == expected_kwargs
|
Add tests for influxdb create_continuous_query
Currently marked as xfail, since we'll pull the existing changes into
here.import pytest
import salt.modules.influxdbmod as influx_mod
import salt.states.influxdb_continuous_query as influx
from tests.support.mock import create_autospec, patch
@pytest.fixture
def configure_loader_modules():
return {influx: {"__salt__": {}, "__opts__": {"test": False}}}
@pytest.mark.xfail
@pytest.mark.parametrize(
"expected_kwargs",
(
{},
{"something": "extra"},
{"something": "extra", "even": "more"},
{"something": "extra", "still": "more and more and more", "and": "more"},
{
"something": "extra",
"what": "in tarnation",
"do": "you want",
"to": "add here?",
},
),
)
def test_when_present_is_called_it_should_pass_client_args_to_create_module(
expected_kwargs,
):
influx_module = create_autospec(influx_mod)
influx_module.continuous_query_exists.return_value = False
with patch.dict(
influx.__salt__,
{
"influxdb.continuous_query_exists": influx_module.continuous_query_exists,
"influxdb.create_continuous_query": influx_module.create_continuous_query,
},
):
influx.present(
name="foo",
database="fnord",
query="fnord",
resample_time="whatever",
coverage_period="fnord",
**expected_kwargs
)
actual_kwargs = influx_module.create_continuous_query.mock_calls[0].kwargs
assert actual_kwargs == expected_kwargs
|
<commit_before><commit_msg>Add tests for influxdb create_continuous_query
Currently marked as xfail, since we'll pull the existing changes into
here.<commit_after>import pytest
import salt.modules.influxdbmod as influx_mod
import salt.states.influxdb_continuous_query as influx
from tests.support.mock import create_autospec, patch
@pytest.fixture
def configure_loader_modules():
return {influx: {"__salt__": {}, "__opts__": {"test": False}}}
@pytest.mark.xfail
@pytest.mark.parametrize(
"expected_kwargs",
(
{},
{"something": "extra"},
{"something": "extra", "even": "more"},
{"something": "extra", "still": "more and more and more", "and": "more"},
{
"something": "extra",
"what": "in tarnation",
"do": "you want",
"to": "add here?",
},
),
)
def test_when_present_is_called_it_should_pass_client_args_to_create_module(
expected_kwargs,
):
influx_module = create_autospec(influx_mod)
influx_module.continuous_query_exists.return_value = False
with patch.dict(
influx.__salt__,
{
"influxdb.continuous_query_exists": influx_module.continuous_query_exists,
"influxdb.create_continuous_query": influx_module.create_continuous_query,
},
):
influx.present(
name="foo",
database="fnord",
query="fnord",
resample_time="whatever",
coverage_period="fnord",
**expected_kwargs
)
actual_kwargs = influx_module.create_continuous_query.mock_calls[0].kwargs
assert actual_kwargs == expected_kwargs
|
|
513e6567b15adf2354f0b05f486d66ee0cbe2c94
|
tests/basics/seq-unpack.py
|
tests/basics/seq-unpack.py
|
# Basics
a, b = 1, 2
print(a, b)
a, b = (1, 2)
print(a, b)
(a, b) = 1, 2
print(a, b)
(a, b) = (1, 2)
print(a, b)
# Tuples/lists are optimized
a, b = [1, 2]
print(a, b)
[a, b] = 100, 200
print(a, b)
try:
a, b, c = (1, 2)
except ValueError:
print("ValueError")
try:
a, b, c = [1, 2, 3, 4]
except ValueError:
print("ValueError")
# Generic iterable object
a, b, c = range(3)
print(a, b, c)
try:
a, b, c = range(2)
except ValueError:
print("ValueError")
try:
a, b, c = range(4)
except ValueError:
print("ValueError")
|
Add testcase for sequence unpacking.
|
Add testcase for sequence unpacking.
|
Python
|
mit
|
omtinez/micropython,firstval/micropython,ganshun666/micropython,orionrobots/micropython,dinau/micropython,TDAbboud/micropython,cloudformdesign/micropython,heisewangluo/micropython,MrSurly/micropython-esp32,dinau/micropython,alex-robbins/micropython,Vogtinator/micropython,MrSurly/micropython,vriera/micropython,danicampora/micropython,cnoviello/micropython,jlillest/micropython,KISSMonX/micropython,emfcamp/micropython,skybird6672/micropython,EcmaXp/micropython,Vogtinator/micropython,Timmenem/micropython,xyb/micropython,ruffy91/micropython,SHA2017-badge/micropython-esp32,adamkh/micropython,rubencabrera/micropython,xhat/micropython,HenrikSolver/micropython,aitjcize/micropython,ernesto-g/micropython,methoxid/micropystat,micropython/micropython-esp32,selste/micropython,infinnovation/micropython,pramasoul/micropython,pfalcon/micropython,toolmacher/micropython,stonegithubs/micropython,ericsnowcurrently/micropython,lowRISC/micropython,adafruit/circuitpython,trezor/micropython,stonegithubs/micropython,xyb/micropython,hiway/micropython,hosaka/micropython,blazewicz/micropython,supergis/micropython,mgyenik/micropython,warner83/micropython,danicampora/micropython,redbear/micropython,misterdanb/micropython,turbinenreiter/micropython,dhylands/micropython,puuu/micropython,ericsnowcurrently/micropython,noahwilliamsson/micropython,drrk/micropython,aethaniel/micropython,xyb/micropython,emfcamp/micropython,toolmacher/micropython,rubencabrera/micropython,hosaka/micropython,bvernoux/micropython,Vogtinator/micropython,deshipu/micropython,vitiral/micropython,MrSurly/micropython,ruffy91/micropython,martinribelotta/micropython,slzatz/micropython,mpalomer/micropython,adafruit/micropython,pfalcon/micropython,matthewelse/micropython,SHA2017-badge/micropython-esp32,selste/micropython,selste/micropython,misterdanb/micropython,Peetz0r/micropython-esp32,swegener/micropython,jmarcelino/pycom-micropython,vriera/micropython,turbinenreiter/micropython,tuc-osg/micropython,supergis/micropython,SungEun-Steve-Kim/test-mp,xuxiaoxin/micropython,kostyll/micropython,KISSMonX/micropython,ryannathans/micropython,bvernoux/micropython,hiway/micropython,orionrobots/micropython,redbear/micropython,TDAbboud/micropython,ahotam/micropython,feilongfl/micropython,feilongfl/micropython,slzatz/micropython,noahchense/micropython,pozetroninc/micropython,EcmaXp/micropython,lbattraw/micropython,ceramos/micropython,ernesto-g/micropython,oopy/micropython,praemdonck/micropython,danicampora/micropython,AriZuu/micropython,lowRISC/micropython,warner83/micropython,swegener/micropython,kerneltask/micropython,cloudformdesign/micropython,aethaniel/micropython,vriera/micropython,pozetroninc/micropython,blazewicz/micropython,rubencabrera/micropython,heisewangluo/micropython,MrSurly/micropython-esp32,galenhz/micropython,pfalcon/micropython,henriknelson/micropython,deshipu/micropython,adafruit/micropython,martinribelotta/micropython,bvernoux/micropython,mpalomer/micropython,utopiaprince/micropython,suda/micropython,omtinez/micropython,MrSurly/micropython-esp32,feilongfl/micropython,mianos/micropython,dhylands/micropython,drrk/micropython,neilh10/micropython,adafruit/circuitpython,lowRISC/micropython,blmorris/micropython,paul-xxx/micropython,MrSurly/micropython,EcmaXp/micropython,blazewicz/micropython,cnoviello/micropython,xuxiaoxin/micropython,KISSMonX/micropython,adafruit/circuitpython,ceramos/micropython,Timmenem/micropython,SungEun-Steve-Kim/test-mp,puuu/micropython,misterdanb/micropython,selste/micropython,warner83/micropython,vitiral/micropython,adamkh/micropyth
on,puuu/micropython,torwag/micropython,warner83/micropython,praemdonck/micropython,blmorris/micropython,tuc-osg/micropython,praemdonck/micropython,mhoffma/micropython,kerneltask/micropython,Timmenem/micropython,AriZuu/micropython,ceramos/micropython,torwag/micropython,drrk/micropython,ericsnowcurrently/micropython,dxxb/micropython,dxxb/micropython,praemdonck/micropython,infinnovation/micropython,alex-march/micropython,tobbad/micropython,galenhz/micropython,tdautc19841202/micropython,galenhz/micropython,paul-xxx/micropython,orionrobots/micropython,blmorris/micropython,ahotam/micropython,skybird6672/micropython,tuc-osg/micropython,MrSurly/micropython-esp32,ernesto-g/micropython,skybird6672/micropython,EcmaXp/micropython,redbear/micropython,adafruit/circuitpython,dxxb/micropython,vriera/micropython,EcmaXp/micropython,deshipu/micropython,turbinenreiter/micropython,aethaniel/micropython,HenrikSolver/micropython,omtinez/micropython,AriZuu/micropython,swegener/micropython,stonegithubs/micropython,aitjcize/micropython,emfcamp/micropython,lbattraw/micropython,dxxb/micropython,alex-robbins/micropython,torwag/micropython,chrisdearman/micropython,tobbad/micropython,tdautc19841202/micropython,methoxid/micropystat,SungEun-Steve-Kim/test-mp,SHA2017-badge/micropython-esp32,tuc-osg/micropython,ahotam/micropython,kostyll/micropython,noahchense/micropython,mpalomer/micropython,adafruit/micropython,rubencabrera/micropython,mianos/micropython,tdautc19841202/micropython,methoxid/micropystat,jimkmc/micropython,kostyll/micropython,oopy/micropython,emfcamp/micropython,alex-march/micropython,lbattraw/micropython,matthewelse/micropython,firstval/micropython,chrisdearman/micropython,orionrobots/micropython,torwag/micropython,dinau/micropython,cloudformdesign/micropython,PappaPeppar/micropython,xyb/micropython,SHA2017-badge/micropython-esp32,tuc-osg/micropython,suda/micropython,aitjcize/micropython,noahwilliamsson/micropython,xhat/micropython,henriknelson/micropython,pramasoul/micropython,xhat/micropython,HenrikSolver/micropython,Timmenem/micropython,infinnovation/micropython,tdautc19841202/micropython,methoxid/micropystat,hosaka/micropython,utopiaprince/micropython,SungEun-Steve-Kim/test-mp,drrk/micropython,adamkh/micropython,AriZuu/micropython,dmazzella/micropython,drrk/micropython,ChuckM/micropython,lowRISC/micropython,mgyenik/micropython,redbear/micropython,matthewelse/micropython,trezor/micropython,skybird6672/micropython,Timmenem/micropython,suda/micropython,utopiaprince/micropython,aethaniel/micropython,puuu/micropython,alex-march/micropython,jimkmc/micropython,selste/micropython,supergis/micropython,utopiaprince/micropython,heisewangluo/micropython,ericsnowcurrently/micropython,blmorris/micropython,paul-xxx/micropython,supergis/micropython,feilongfl/micropython,lbattraw/micropython,matthewelse/micropython,cloudformdesign/micropython,martinribelotta/micropython,tralamazza/micropython,adafruit/circuitpython,pramasoul/micropython,paul-xxx/micropython,bvernoux/micropython,xuxiaoxin/micropython,dmazzella/micropython,utopiaprince/micropython,mianos/micropython,stonegithubs/micropython,ChuckM/micropython,noahwilliamsson/micropython,dhylands/micropython,firstval/micropython,PappaPeppar/micropython,turbinenreiter/micropython,xuxiaoxin/micropython,misterdanb/micropython,ryannathans/micropython,mianos/micropython,dinau/micropython,KISSMonX/micropython,neilh10/micropython,turbinenreiter/micropython,puuu/micropython,mianos/micropython,ernesto-g/micropython,tobbad/micropython,noahchense/micropython,mpalomer/micropython,warne
r83/micropython,pozetroninc/micropython,toolmacher/micropython,neilh10/micropython,trezor/micropython,firstval/micropython,mgyenik/micropython,jlillest/micropython,vitiral/micropython,danicampora/micropython,tralamazza/micropython,Vogtinator/micropython,emfcamp/micropython,adamkh/micropython,slzatz/micropython,micropython/micropython-esp32,lbattraw/micropython,omtinez/micropython,skybird6672/micropython,ernesto-g/micropython,firstval/micropython,blmorris/micropython,ganshun666/micropython,ChuckM/micropython,hiway/micropython,xyb/micropython,mhoffma/micropython,adafruit/micropython,cwyark/micropython,methoxid/micropystat,heisewangluo/micropython,dmazzella/micropython,supergis/micropython,oopy/micropython,ryannathans/micropython,KISSMonX/micropython,oopy/micropython,hosaka/micropython,Peetz0r/micropython-esp32,ruffy91/micropython,alex-march/micropython,ruffy91/micropython,PappaPeppar/micropython,ganshun666/micropython,blazewicz/micropython,aitjcize/micropython,MrSurly/micropython,blazewicz/micropython,HenrikSolver/micropython,martinribelotta/micropython,mgyenik/micropython,trezor/micropython,cnoviello/micropython,alex-march/micropython,omtinez/micropython,xhat/micropython,rubencabrera/micropython,AriZuu/micropython,mpalomer/micropython,bvernoux/micropython,swegener/micropython,neilh10/micropython,jimkmc/micropython,aethaniel/micropython,cloudformdesign/micropython,jimkmc/micropython,ChuckM/micropython,kostyll/micropython,dxxb/micropython,vitiral/micropython,martinribelotta/micropython,ceramos/micropython,PappaPeppar/micropython,pfalcon/micropython,slzatz/micropython,jmarcelino/pycom-micropython,Vogtinator/micropython,chrisdearman/micropython,xuxiaoxin/micropython,adamkh/micropython,pozetroninc/micropython,heisewangluo/micropython,SungEun-Steve-Kim/test-mp,vitiral/micropython,deshipu/micropython,misterdanb/micropython,mhoffma/micropython,praemdonck/micropython,jmarcelino/pycom-micropython,jimkmc/micropython,ganshun666/micropython,tralamazza/micropython,noahwilliamsson/micropython,kostyll/micropython,jmarcelino/pycom-micropython,swegener/micropython,danicampora/micropython,cnoviello/micropython,henriknelson/micropython,Peetz0r/micropython-esp32,jlillest/micropython,henriknelson/micropython,tobbad/micropython,hiway/micropython,ChuckM/micropython,alex-robbins/micropython,micropython/micropython-esp32,alex-robbins/micropython,cnoviello/micropython,pfalcon/micropython,feilongfl/micropython,MrSurly/micropython-esp32,mgyenik/micropython,kerneltask/micropython,cwyark/micropython,kerneltask/micropython,tobbad/micropython,matthewelse/micropython,dinau/micropython,deshipu/micropython,toolmacher/micropython,MrSurly/micropython,pramasoul/micropython,henriknelson/micropython,slzatz/micropython,tdautc19841202/micropython,noahwilliamsson/micropython,dhylands/micropython,kerneltask/micropython,paul-xxx/micropython,SHA2017-badge/micropython-esp32,TDAbboud/micropython,ruffy91/micropython,adafruit/micropython,mhoffma/micropython,adafruit/circuitpython,ceramos/micropython,cwyark/micropython,galenhz/micropython,orionrobots/micropython,mhoffma/micropython,ahotam/micropython,torwag/micropython,ganshun666/micropython,toolmacher/micropython,PappaPeppar/micropython,cwyark/micropython,stonegithubs/micropython,Peetz0r/micropython-esp32,suda/micropython,vriera/micropython,infinnovation/micropython,jlillest/micropython,chrisdearman/micropython,noahchense/micropython,trezor/micropython,ericsnowcurrently/micropython,infinnovation/micropython,lowRISC/micropython,xhat/micropython,ahotam/micropython,hosaka/micropython,jmarcelino
/pycom-micropython,pozetroninc/micropython,cwyark/micropython,noahchense/micropython,chrisdearman/micropython,hiway/micropython,matthewelse/micropython,micropython/micropython-esp32,dmazzella/micropython,dhylands/micropython,pramasoul/micropython,neilh10/micropython,HenrikSolver/micropython,redbear/micropython,TDAbboud/micropython,oopy/micropython,tralamazza/micropython,galenhz/micropython,TDAbboud/micropython,alex-robbins/micropython,Peetz0r/micropython-esp32,micropython/micropython-esp32,suda/micropython,ryannathans/micropython,jlillest/micropython,ryannathans/micropython
|
Add testcase for sequence unpacking.
|
# Basics
a, b = 1, 2
print(a, b)
a, b = (1, 2)
print(a, b)
(a, b) = 1, 2
print(a, b)
(a, b) = (1, 2)
print(a, b)
# Tuples/lists are optimized
a, b = [1, 2]
print(a, b)
[a, b] = 100, 200
print(a, b)
try:
a, b, c = (1, 2)
except ValueError:
print("ValueError")
try:
a, b, c = [1, 2, 3, 4]
except ValueError:
print("ValueError")
# Generic iterable object
a, b, c = range(3)
print(a, b, c)
try:
a, b, c = range(2)
except ValueError:
print("ValueError")
try:
a, b, c = range(4)
except ValueError:
print("ValueError")
|
<commit_before><commit_msg>Add testcase for sequence unpacking.<commit_after>
|
# Basics
a, b = 1, 2
print(a, b)
a, b = (1, 2)
print(a, b)
(a, b) = 1, 2
print(a, b)
(a, b) = (1, 2)
print(a, b)
# Tuples/lists are optimized
a, b = [1, 2]
print(a, b)
[a, b] = 100, 200
print(a, b)
try:
a, b, c = (1, 2)
except ValueError:
print("ValueError")
try:
a, b, c = [1, 2, 3, 4]
except ValueError:
print("ValueError")
# Generic iterable object
a, b, c = range(3)
print(a, b, c)
try:
a, b, c = range(2)
except ValueError:
print("ValueError")
try:
a, b, c = range(4)
except ValueError:
print("ValueError")
|
Add testcase for sequence unpacking.# Basics
a, b = 1, 2
print(a, b)
a, b = (1, 2)
print(a, b)
(a, b) = 1, 2
print(a, b)
(a, b) = (1, 2)
print(a, b)
# Tuples/lists are optimized
a, b = [1, 2]
print(a, b)
[a, b] = 100, 200
print(a, b)
try:
a, b, c = (1, 2)
except ValueError:
print("ValueError")
try:
a, b, c = [1, 2, 3, 4]
except ValueError:
print("ValueError")
# Generic iterable object
a, b, c = range(3)
print(a, b, c)
try:
a, b, c = range(2)
except ValueError:
print("ValueError")
try:
a, b, c = range(4)
except ValueError:
print("ValueError")
|
<commit_before><commit_msg>Add testcase for sequence unpacking.<commit_after># Basics
a, b = 1, 2
print(a, b)
a, b = (1, 2)
print(a, b)
(a, b) = 1, 2
print(a, b)
(a, b) = (1, 2)
print(a, b)
# Tuples/lists are optimized
a, b = [1, 2]
print(a, b)
[a, b] = 100, 200
print(a, b)
try:
a, b, c = (1, 2)
except ValueError:
print("ValueError")
try:
a, b, c = [1, 2, 3, 4]
except ValueError:
print("ValueError")
# Generic iterable object
a, b, c = range(3)
print(a, b, c)
try:
a, b, c = range(2)
except ValueError:
print("ValueError")
try:
a, b, c = range(4)
except ValueError:
print("ValueError")
|
|
cc4b7da371b5c188812ff9b2c4a5d1cd49178374
|
tests/test_serialization.py
|
tests/test_serialization.py
|
from datetime import datetime
from recurrence import Recurrence, Rule
import recurrence
def test_rule_serialization():
rule = Rule(
recurrence.WEEKLY
)
serialized = recurrence.serialize(rule)
assert 'RRULE:FREQ=WEEKLY' == serialized
assert recurrence.deserialize(serialized) == Recurrence(rrules=[rule])
def test_complex_rule_serialization():
rule = Rule(
recurrence.WEEKLY,
interval=17,
wkst=1,
count=7,
byday=[
recurrence.to_weekday('-1MO'),
recurrence.to_weekday('TU')
],
bymonth=[1, 3]
)
serialized = recurrence.serialize(rule)
assert 'RRULE:FREQ=WEEKLY;INTERVAL=17;WKST=TU;COUNT=7;BYDAY=-1MO,TU;BYMONTH=1,3' == serialized
assert recurrence.deserialize(serialized) == Recurrence(rrules=[rule])
def test_bug_in_count_and_until_rule_serialization():
# This tests a bug in the way we serialize rules with instance
# counts and an until date. We should really raise a
# ValidationError in validate if you specify both, but we
# currently don't. Once we start doing this, this test can be
# modified to check an exception is raised.
rule = Rule(
recurrence.WEEKLY,
count=7,
until=datetime(2014, 10, 31, 0, 0, 0)
)
serialized = recurrence.serialize(rule)
# Note that we've got no UNTIL value here
assert 'RRULE:FREQ=WEEKLY;COUNT=7' == serialized
|
Add some tests for serializing rules
|
Add some tests for serializing rules
|
Python
|
bsd-3-clause
|
django-recurrence/django-recurrence,FrankSalad/django-recurrence,linux2400/django-recurrence,linux2400/django-recurrence,Nikola-K/django-recurrence,django-recurrence/django-recurrence,FrankSalad/django-recurrence,Nikola-K/django-recurrence
|
Add some tests for serializing rules
|
from datetime import datetime
from recurrence import Recurrence, Rule
import recurrence
def test_rule_serialization():
rule = Rule(
recurrence.WEEKLY
)
serialized = recurrence.serialize(rule)
assert 'RRULE:FREQ=WEEKLY' == serialized
assert recurrence.deserialize(serialized) == Recurrence(rrules=[rule])
def test_complex_rule_serialization():
rule = Rule(
recurrence.WEEKLY,
interval=17,
wkst=1,
count=7,
byday=[
recurrence.to_weekday('-1MO'),
recurrence.to_weekday('TU')
],
bymonth=[1, 3]
)
serialized = recurrence.serialize(rule)
assert 'RRULE:FREQ=WEEKLY;INTERVAL=17;WKST=TU;COUNT=7;BYDAY=-1MO,TU;BYMONTH=1,3' == serialized
assert recurrence.deserialize(serialized) == Recurrence(rrules=[rule])
def test_bug_in_count_and_until_rule_serialization():
# This tests a bug in the way we serialize rules with instance
# counts and an until date. We should really raise a
# ValidationError in validate if you specify both, but we
# currently don't. Once we start doing this, this test can be
# modified to check an exception is raised.
rule = Rule(
recurrence.WEEKLY,
count=7,
until=datetime(2014, 10, 31, 0, 0, 0)
)
serialized = recurrence.serialize(rule)
# Note that we've got no UNTIL value here
assert 'RRULE:FREQ=WEEKLY;COUNT=7' == serialized
|
<commit_before><commit_msg>Add some tests for serializing rules<commit_after>
|
from datetime import datetime
from recurrence import Recurrence, Rule
import recurrence
def test_rule_serialization():
rule = Rule(
recurrence.WEEKLY
)
serialized = recurrence.serialize(rule)
assert 'RRULE:FREQ=WEEKLY' == serialized
assert recurrence.deserialize(serialized) == Recurrence(rrules=[rule])
def test_complex_rule_serialization():
rule = Rule(
recurrence.WEEKLY,
interval=17,
wkst=1,
count=7,
byday=[
recurrence.to_weekday('-1MO'),
recurrence.to_weekday('TU')
],
bymonth=[1, 3]
)
serialized = recurrence.serialize(rule)
assert 'RRULE:FREQ=WEEKLY;INTERVAL=17;WKST=TU;COUNT=7;BYDAY=-1MO,TU;BYMONTH=1,3' == serialized
assert recurrence.deserialize(serialized) == Recurrence(rrules=[rule])
def test_bug_in_count_and_until_rule_serialization():
# This tests a bug in the way we serialize rules with instance
# counts and an until date. We should really raise a
# ValidationError in validate if you specify both, but we
# currently don't. Once we start doing this, this test can be
# modified to check an exception is raised.
rule = Rule(
recurrence.WEEKLY,
count=7,
until=datetime(2014, 10, 31, 0, 0, 0)
)
serialized = recurrence.serialize(rule)
# Note that we've got no UNTIL value here
assert 'RRULE:FREQ=WEEKLY;COUNT=7' == serialized
|
Add some tests for serializing rulesfrom datetime import datetime
from recurrence import Recurrence, Rule
import recurrence
def test_rule_serialization():
rule = Rule(
recurrence.WEEKLY
)
serialized = recurrence.serialize(rule)
assert 'RRULE:FREQ=WEEKLY' == serialized
assert recurrence.deserialize(serialized) == Recurrence(rrules=[rule])
def test_complex_rule_serialization():
rule = Rule(
recurrence.WEEKLY,
interval=17,
wkst=1,
count=7,
byday=[
recurrence.to_weekday('-1MO'),
recurrence.to_weekday('TU')
],
bymonth=[1, 3]
)
serialized = recurrence.serialize(rule)
assert 'RRULE:FREQ=WEEKLY;INTERVAL=17;WKST=TU;COUNT=7;BYDAY=-1MO,TU;BYMONTH=1,3' == serialized
assert recurrence.deserialize(serialized) == Recurrence(rrules=[rule])
def test_bug_in_count_and_until_rule_serialization():
# This tests a bug in the way we serialize rules with instance
# counts and an until date. We should really raise a
# ValidationError in validate if you specify both, but we
# currently don't. Once we start doing this, this test can be
# modified to check an exception is raised.
rule = Rule(
recurrence.WEEKLY,
count=7,
until=datetime(2014, 10, 31, 0, 0, 0)
)
serialized = recurrence.serialize(rule)
# Note that we've got no UNTIL value here
assert 'RRULE:FREQ=WEEKLY;COUNT=7' == serialized
|
<commit_before><commit_msg>Add some tests for serializing rules<commit_after>from datetime import datetime
from recurrence import Recurrence, Rule
import recurrence
def test_rule_serialization():
rule = Rule(
recurrence.WEEKLY
)
serialized = recurrence.serialize(rule)
assert 'RRULE:FREQ=WEEKLY' == serialized
assert recurrence.deserialize(serialized) == Recurrence(rrules=[rule])
def test_complex_rule_serialization():
rule = Rule(
recurrence.WEEKLY,
interval=17,
wkst=1,
count=7,
byday=[
recurrence.to_weekday('-1MO'),
recurrence.to_weekday('TU')
],
bymonth=[1, 3]
)
serialized = recurrence.serialize(rule)
assert 'RRULE:FREQ=WEEKLY;INTERVAL=17;WKST=TU;COUNT=7;BYDAY=-1MO,TU;BYMONTH=1,3' == serialized
assert recurrence.deserialize(serialized) == Recurrence(rrules=[rule])
def test_bug_in_count_and_until_rule_serialization():
# This tests a bug in the way we serialize rules with instance
# counts and an until date. We should really raise a
# ValidationError in validate if you specify both, but we
# currently don't. Once we start doing this, this test can be
# modified to check an exception is raised.
rule = Rule(
recurrence.WEEKLY,
count=7,
until=datetime(2014, 10, 31, 0, 0, 0)
)
serialized = recurrence.serialize(rule)
# Note that we've got no UNTIL value here
assert 'RRULE:FREQ=WEEKLY;COUNT=7' == serialized
|
|
134f7fda0a7d48e22b48d02b3391142e3b4d59a1
|
tests/test_whoami_resource.py
|
tests/test_whoami_resource.py
|
import unittest
import json
from tests.base import Base
class TestWhoAmIResource(Base):
def test_returns_user_info(self):
self.client.post("/api/v1/auth/register",
data=self.user,
content_type='application/json')
payload = self.client.get("/api/v1/whoami", headers=self.set_headers())
reponse = json.loads(payload.data.decode())
self.assertEquals(reponse["id"], 1)
self.assertEquals(reponse["username"],"brian")
self.assertEquals(payload.status_code, 200)
def test_invalid_token_denied(self):
self.client.post("/api/v1/auth/register",
data=self.user,
content_type='application/json')
payload = self.client.get("/api/v1/whoami", headers=
dict({"Authorization": "tiainsansindad"}))
reponse = json.loads(payload.data.decode())
self.assertEquals(reponse["status"], "failed")
self.assertEquals(reponse["message"],
"Invalid token, please login again")
self.assertEquals(payload.status_code, 401)
|
Add tests for whoami resource
|
[CHORE] Add tests for whoami resource
|
Python
|
mit
|
brayoh/bucket-list-api
|
[CHORE] Add tests for whoami resource
|
import unittest
import json
from tests.base import Base
class TestWhoAmIResource(Base):
def test_returns_user_info(self):
self.client.post("/api/v1/auth/register",
data=self.user,
content_type='application/json')
payload = self.client.get("/api/v1/whoami", headers=self.set_headers())
reponse = json.loads(payload.data.decode())
self.assertEquals(reponse["id"], 1)
self.assertEquals(reponse["username"],"brian")
self.assertEquals(payload.status_code, 200)
def test_invalid_token_denied(self):
self.client.post("/api/v1/auth/register",
data=self.user,
content_type='application/json')
payload = self.client.get("/api/v1/whoami", headers=
dict({"Authorization": "tiainsansindad"}))
reponse = json.loads(payload.data.decode())
self.assertEquals(reponse["status"], "failed")
self.assertEquals(reponse["message"],
"Invalid token, please login again")
self.assertEquals(payload.status_code, 401)
|
<commit_before><commit_msg>[CHORE] Add tests for whoami resource<commit_after>
|
import unittest
import json
from tests.base import Base
class TestWhoAmIResource(Base):
def test_returns_user_info(self):
self.client.post("/api/v1/auth/register",
data=self.user,
content_type='application/json')
payload = self.client.get("/api/v1/whoami", headers=self.set_headers())
reponse = json.loads(payload.data.decode())
self.assertEquals(reponse["id"], 1)
self.assertEquals(reponse["username"],"brian")
self.assertEquals(payload.status_code, 200)
def test_invalid_token_denied(self):
self.client.post("/api/v1/auth/register",
data=self.user,
content_type='application/json')
payload = self.client.get("/api/v1/whoami", headers=
dict({"Authorization": "tiainsansindad"}))
reponse = json.loads(payload.data.decode())
self.assertEquals(reponse["status"], "failed")
self.assertEquals(reponse["message"],
"Invalid token, please login again")
self.assertEquals(payload.status_code, 401)
|
[CHORE] Add tests for whoami resourceimport unittest
import json
from tests.base import Base
class TestWhoAmIResource(Base):
def test_returns_user_info(self):
self.client.post("/api/v1/auth/register",
data=self.user,
content_type='application/json')
payload = self.client.get("/api/v1/whoami", headers=self.set_headers())
reponse = json.loads(payload.data.decode())
self.assertEquals(reponse["id"], 1)
self.assertEquals(reponse["username"],"brian")
self.assertEquals(payload.status_code, 200)
def test_invalid_token_denied(self):
self.client.post("/api/v1/auth/register",
data=self.user,
content_type='application/json')
payload = self.client.get("/api/v1/whoami", headers=
dict({"Authorization": "tiainsansindad"}))
reponse = json.loads(payload.data.decode())
self.assertEquals(reponse["status"], "failed")
self.assertEquals(reponse["message"],
"Invalid token, please login again")
self.assertEquals(payload.status_code, 401)
|
<commit_before><commit_msg>[CHORE] Add tests for whoami resource<commit_after>import unittest
import json
from tests.base import Base
class TestWhoAmIResource(Base):
def test_returns_user_info(self):
self.client.post("/api/v1/auth/register",
data=self.user,
content_type='application/json')
payload = self.client.get("/api/v1/whoami", headers=self.set_headers())
reponse = json.loads(payload.data.decode())
self.assertEquals(reponse["id"], 1)
self.assertEquals(reponse["username"],"brian")
self.assertEquals(payload.status_code, 200)
def test_invalid_token_denied(self):
self.client.post("/api/v1/auth/register",
data=self.user,
content_type='application/json')
payload = self.client.get("/api/v1/whoami", headers=
dict({"Authorization": "tiainsansindad"}))
reponse = json.loads(payload.data.decode())
self.assertEquals(reponse["status"], "failed")
self.assertEquals(reponse["message"],
"Invalid token, please login again")
self.assertEquals(payload.status_code, 401)
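Editor's note on the tests above: unittest's assertEquals is only a deprecated alias of assertEqual, so a modern spelling of the final assertions would look like the sketch below; variable names (including the reponse spelling) are kept exactly as in the recorded commit, and this sketch is not part of the commit itself.
# Same assertions with the non-deprecated method name (illustrative sketch only):
self.assertEqual(reponse["status"], "failed")
self.assertEqual(reponse["message"], "Invalid token, please login again")
self.assertEqual(payload.status_code, 401)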
|
|
683dd2300ad9a40875b86118c8f9c4a8c2b11b91
|
scripts/search-for-similar-strings.py
|
scripts/search-for-similar-strings.py
|
import json
import os
import click
from difflib import SequenceMatcher
ROOT_PATH = os.path.dirname(os.path.dirname(__file__))
DEFAULT_LOCALE_PATH = os.path.join(ROOT_PATH, "app/locales/taiga/locale-en.json")
def keywords(key, value):
if key is not None and not isinstance(value, dict):
return [(".".join(key), value)]
if key is not None and isinstance(value, dict):
kws = []
for item_key in value.keys():
kws += keywords(key+[item_key], value[item_key])
return kws
if key is None and isinstance(value, dict):
kws = []
for item_key in value.keys():
kws += keywords([item_key], value[item_key])
return kws
@click.command()
@click.option('--threshold', default=1.0, help='Minimum similarity to show')
@click.option('--min-length', default=10, help='Minimum size of the string to show')
@click.option('--omit-identical', default=False, is_flag=True, help='Omit identical strings')
def verify_similarity(threshold, min_length, omit_identical):
locales = json.load(open(DEFAULT_LOCALE_PATH))
all_keywords = keywords(None, locales)
already_shown_keys = set()
for key1, value1 in all_keywords:
for key2, value2 in all_keywords:
if key1 == key2:
continue
if len(value1) < min_length and len(value2) < min_length:
continue
similarity = SequenceMatcher(None, value1, value2).ratio()
if omit_identical and similarity == 1.0:
continue
if similarity >= threshold:
if (key1, key2) not in already_shown_keys:
already_shown_keys.add((key1, key2))
already_shown_keys.add((key2, key1))
click.echo(
"The keys {} and {} has a similarity of {}\n - {}\n - {}".format(
key1,
key2,
similarity,
value1,
value2
)
)
if __name__ == "__main__":
verify_similarity()
|
Add script to detect similarities in translation strings
|
Add script to detect similarities in translation strings
|
Python
|
agpl-3.0
|
taigaio/taiga-front,taigaio/taiga-front,taigaio/taiga-front
|
Add script to detect similarities in translation strings
|
import json
import os
import click
from difflib import SequenceMatcher
ROOT_PATH = os.path.dirname(os.path.dirname(__file__))
DEFAULT_LOCALE_PATH = os.path.join(ROOT_PATH, "app/locales/taiga/locale-en.json")
def keywords(key, value):
if key is not None and not isinstance(value, dict):
return [(".".join(key), value)]
if key is not None and isinstance(value, dict):
kws = []
for item_key in value.keys():
kws += keywords(key+[item_key], value[item_key])
return kws
if key is None and isinstance(value, dict):
kws = []
for item_key in value.keys():
kws += keywords([item_key], value[item_key])
return kws
@click.command()
@click.option('--threshold', default=1.0, help='Minimum similarity to show')
@click.option('--min-length', default=10, help='Minimum size of the string to show')
@click.option('--omit-identical', default=False, is_flag=True, help='Omit identical strings')
def verify_similarity(threshold, min_length, omit_identical):
locales = json.load(open(DEFAULT_LOCALE_PATH))
all_keywords = keywords(None, locales)
already_shown_keys = set()
for key1, value1 in all_keywords:
for key2, value2 in all_keywords:
if key1 == key2:
continue
if len(value1) < min_length and len(value2) < min_length:
continue
similarity = SequenceMatcher(None, value1, value2).ratio()
if omit_identical and similarity == 1.0:
continue
if similarity >= threshold:
if (key1, key2) not in already_shown_keys:
already_shown_keys.add((key1, key2))
already_shown_keys.add((key2, key1))
click.echo(
"The keys {} and {} has a similarity of {}\n - {}\n - {}".format(
key1,
key2,
similarity,
value1,
value2
)
)
if __name__ == "__main__":
verify_similarity()
|
<commit_before><commit_msg>Add script to detect similarities in translation strings<commit_after>
|
import json
import os
import click
from difflib import SequenceMatcher
ROOT_PATH = os.path.dirname(os.path.dirname(__file__))
DEFAULT_LOCALE_PATH = os.path.join(ROOT_PATH, "app/locales/taiga/locale-en.json")
def keywords(key, value):
if key is not None and not isinstance(value, dict):
return [(".".join(key), value)]
if key is not None and isinstance(value, dict):
kws = []
for item_key in value.keys():
kws += keywords(key+[item_key], value[item_key])
return kws
if key is None and isinstance(value, dict):
kws = []
for item_key in value.keys():
kws += keywords([item_key], value[item_key])
return kws
@click.command()
@click.option('--threshold', default=1.0, help='Minimum similarity to show')
@click.option('--min-length', default=10, help='Minimum size of the string to show')
@click.option('--omit-identical', default=False, is_flag=True, help='Omit identical strings')
def verify_similarity(threshold, min_length, omit_identical):
locales = json.load(open(DEFAULT_LOCALE_PATH))
all_keywords = keywords(None, locales)
already_shown_keys = set()
for key1, value1 in all_keywords:
for key2, value2 in all_keywords:
if key1 == key2:
continue
if len(value1) < min_length and len(value2) < min_length:
continue
similarity = SequenceMatcher(None, value1, value2).ratio()
if omit_identical and similarity == 1.0:
continue
if similarity >= threshold:
if (key1, key2) not in already_shown_keys:
already_shown_keys.add((key1, key2))
already_shown_keys.add((key2, key1))
click.echo(
"The keys {} and {} has a similarity of {}\n - {}\n - {}".format(
key1,
key2,
similarity,
value1,
value2
)
)
if __name__ == "__main__":
verify_similarity()
|
Add script to detect similarities in translation stringsimport json
import os
import click
from difflib import SequenceMatcher
ROOT_PATH = os.path.dirname(os.path.dirname(__file__))
DEFAULT_LOCALE_PATH = os.path.join(ROOT_PATH, "app/locales/taiga/locale-en.json")
def keywords(key, value):
if key is not None and not isinstance(value, dict):
return [(".".join(key), value)]
if key is not None and isinstance(value, dict):
kws = []
for item_key in value.keys():
kws += keywords(key+[item_key], value[item_key])
return kws
if key is None and isinstance(value, dict):
kws = []
for item_key in value.keys():
kws += keywords([item_key], value[item_key])
return kws
@click.command()
@click.option('--threshold', default=1.0, help='Minimum similarity to show')
@click.option('--min-length', default=10, help='Minimum size of the string to show')
@click.option('--omit-identical', default=False, is_flag=True, help='Omit identical strings')
def verify_similarity(threshold, min_length, omit_identical):
locales = json.load(open(DEFAULT_LOCALE_PATH))
all_keywords = keywords(None, locales)
already_shown_keys = set()
for key1, value1 in all_keywords:
for key2, value2 in all_keywords:
if key1 == key2:
continue
if len(value1) < min_length and len(value2) < min_length:
continue
similarity = SequenceMatcher(None, value1, value2).ratio()
if omit_identical and similarity == 1.0:
continue
if similarity >= threshold:
if (key1, key2) not in already_shown_keys:
already_shown_keys.add((key1, key2))
already_shown_keys.add((key2, key1))
click.echo(
"The keys {} and {} has a similarity of {}\n - {}\n - {}".format(
key1,
key2,
similarity,
value1,
value2
)
)
if __name__ == "__main__":
verify_similarity()
|
<commit_before><commit_msg>Add script to detect similarities in translation strings<commit_after>import json
import os
import click
from difflib import SequenceMatcher
ROOT_PATH = os.path.dirname(os.path.dirname(__file__))
DEFAULT_LOCALE_PATH = os.path.join(ROOT_PATH, "app/locales/taiga/locale-en.json")
def keywords(key, value):
if key is not None and not isinstance(value, dict):
return [(".".join(key), value)]
if key is not None and isinstance(value, dict):
kws = []
for item_key in value.keys():
kws += keywords(key+[item_key], value[item_key])
return kws
if key is None and isinstance(value, dict):
kws = []
for item_key in value.keys():
kws += keywords([item_key], value[item_key])
return kws
@click.command()
@click.option('--threshold', default=1.0, help='Minimum similarity to show')
@click.option('--min-length', default=10, help='Minimum size of the string to show')
@click.option('--omit-identical', default=False, is_flag=True, help='Omit identical strings')
def verify_similarity(threshold, min_length, omit_identical):
locales = json.load(open(DEFAULT_LOCALE_PATH))
all_keywords = keywords(None, locales)
already_shown_keys = set()
for key1, value1 in all_keywords:
for key2, value2 in all_keywords:
if key1 == key2:
continue
if len(value1) < min_length and len(value2) < min_length:
continue
similarity = SequenceMatcher(None, value1, value2).ratio()
if omit_identical and similarity == 1.0:
continue
if similarity >= threshold:
if (key1, key2) not in already_shown_keys:
already_shown_keys.add((key1, key2))
already_shown_keys.add((key2, key1))
click.echo(
"The keys {} and {} has a similarity of {}\n - {}\n - {}".format(
key1,
key2,
similarity,
value1,
value2
)
)
if __name__ == "__main__":
verify_similarity()
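For context on what the --threshold option above compares: SequenceMatcher.ratio() returns 2*M/T, where M is the number of matching characters and T is the combined length of both strings, so identical strings score 1.0. A small illustrative snippet (the example strings are invented, not taken from the Taiga locale file):
from difflib import SequenceMatcher
# ratio() == 2*M/T: identical strings give 1.0, unrelated strings approach 0.0.
print(SequenceMatcher(None, "Delete project", "Delete projects").ratio())  # ~0.97
print(SequenceMatcher(None, "Delete project", "Create project").ratio())   # ~0.79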
|
|
1fc16b52736ba2f794c7111366a66f4eba8ced9e
|
examples/randomuser-sqlite.py
|
examples/randomuser-sqlite.py
|
#!/usr/bin/env python3
# coding: utf-8
import json # https://docs.python.org/3/library/json.html
import requests # https://github.com/kennethreitz/requests
import records # https://github.com/kennethreitz/records
# randomuser.me generates random 'user' data (name, email, addr, phone number, etc)
r = requests.get('http://api.randomuser.me/0.6/?nat=us&results=10')
j = r.json()['results']
# Valid SQLite URL forms are:
# sqlite:///:memory: (or, sqlite://)
# sqlite:///relative/path/to/file.db
# sqlite:////absolute/path/to/file.db
# records will create this db on disk if 'users.db' doesn't exist already
db = records.Database('sqlite:///users.db')
db.query('DROP TABLE IF EXISTS persons')
db.query('CREATE TABLE persons (key int PRIMARY KEY, fname text, lname text, email text)')
for rec in j:
user = rec['user']
name = user['name']
key = user['registered']
fname = name['first']
lname = name['last']
email = user['email']
db.query('INSERT INTO persons (key, fname, lname, email) VALUES(:key, :fname, :lname, :email)',
key=key, fname=fname, lname=lname, email=email)
rows = db.query('SELECT * FROM persons')
print(rows.export('csv'))
|
Add example usage with sqlite
|
Add example usage with sqlite
|
Python
|
isc
|
kennethreitz/records
|
Add example usage with sqlite
|
#!/usr/bin/env python3
# coding: utf-8
import json # https://docs.python.org/3/library/json.html
import requests # https://github.com/kennethreitz/requests
import records # https://github.com/kennethreitz/records
# randomuser.me generates random 'user' data (name, email, addr, phone number, etc)
r = requests.get('http://api.randomuser.me/0.6/?nat=us&results=10')
j = r.json()['results']
# Valid SQLite URL forms are:
# sqlite:///:memory: (or, sqlite://)
# sqlite:///relative/path/to/file.db
# sqlite:////absolute/path/to/file.db
# records will create this db on disk if 'users.db' doesn't exist already
db = records.Database('sqlite:///users.db')
db.query('DROP TABLE IF EXISTS persons')
db.query('CREATE TABLE persons (key int PRIMARY KEY, fname text, lname text, email text)')
for rec in j:
user = rec['user']
name = user['name']
key = user['registered']
fname = name['first']
lname = name['last']
email = user['email']
db.query('INSERT INTO persons (key, fname, lname, email) VALUES(:key, :fname, :lname, :email)',
key=key, fname=fname, lname=lname, email=email)
rows = db.query('SELECT * FROM persons')
print(rows.export('csv'))
|
<commit_before><commit_msg>Add example usage with sqlite<commit_after>
|
#!/usr/bin/env python3
# coding: utf-8
import json # https://docs.python.org/3/library/json.html
import requests # https://github.com/kennethreitz/requests
import records # https://github.com/kennethreitz/records
# randomuser.me generates random 'user' data (name, email, addr, phone number, etc)
r = requests.get('http://api.randomuser.me/0.6/?nat=us&results=10')
j = r.json()['results']
# Valid SQLite URL forms are:
# sqlite:///:memory: (or, sqlite://)
# sqlite:///relative/path/to/file.db
# sqlite:////absolute/path/to/file.db
# records will create this db on disk if 'users.db' doesn't exist already
db = records.Database('sqlite:///users.db')
db.query('DROP TABLE IF EXISTS persons')
db.query('CREATE TABLE persons (key int PRIMARY KEY, fname text, lname text, email text)')
for rec in j:
user = rec['user']
name = user['name']
key = user['registered']
fname = name['first']
lname = name['last']
email = user['email']
db.query('INSERT INTO persons (key, fname, lname, email) VALUES(:key, :fname, :lname, :email)',
key=key, fname=fname, lname=lname, email=email)
rows = db.query('SELECT * FROM persons')
print(rows.export('csv'))
|
Add example usage with sqlite#!/usr/bin/env python3
# coding: utf-8
import json # https://docs.python.org/3/library/json.html
import requests # https://github.com/kennethreitz/requests
import records # https://github.com/kennethreitz/records
# randomuser.me generates random 'user' data (name, email, addr, phone number, etc)
r = requests.get('http://api.randomuser.me/0.6/?nat=us&results=10')
j = r.json()['results']
# Valid SQLite URL forms are:
# sqlite:///:memory: (or, sqlite://)
# sqlite:///relative/path/to/file.db
# sqlite:////absolute/path/to/file.db
# records will create this db on disk if 'users.db' doesn't exist already
db = records.Database('sqlite:///users.db')
db.query('DROP TABLE IF EXISTS persons')
db.query('CREATE TABLE persons (key int PRIMARY KEY, fname text, lname text, email text)')
for rec in j:
user = rec['user']
name = user['name']
key = user['registered']
fname = name['first']
lname = name['last']
email = user['email']
db.query('INSERT INTO persons (key, fname, lname, email) VALUES(:key, :fname, :lname, :email)',
key=key, fname=fname, lname=lname, email=email)
rows = db.query('SELECT * FROM persons')
print(rows.export('csv'))
|
<commit_before><commit_msg>Add example usage with sqlite<commit_after>#!/usr/bin/env python3
# coding: utf-8
import json # https://docs.python.org/3/library/json.html
import requests # https://github.com/kennethreitz/requests
import records # https://github.com/kennethreitz/records
# randomuser.me generates random 'user' data (name, email, addr, phone number, etc)
r = requests.get('http://api.randomuser.me/0.6/?nat=us&results=10')
j = r.json()['results']
# Valid SQLite URL forms are:
# sqlite:///:memory: (or, sqlite://)
# sqlite:///relative/path/to/file.db
# sqlite:////absolute/path/to/file.db
# records will create this db on disk if 'users.db' doesn't exist already
db = records.Database('sqlite:///users.db')
db.query('DROP TABLE IF EXISTS persons')
db.query('CREATE TABLE persons (key int PRIMARY KEY, fname text, lname text, email text)')
for rec in j:
user = rec['user']
name = user['name']
key = user['registered']
fname = name['first']
lname = name['last']
email = user['email']
db.query('INSERT INTO persons (key, fname, lname, email) VALUES(:key, :fname, :lname, :email)',
key=key, fname=fname, lname=lname, email=email)
rows = db.query('SELECT * FROM persons')
print(rows.export('csv'))
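As a small follow-on sketch (not part of the commit), the users.db file created above can be re-opened later and the same rows exported in another format supported by records:
import records
# Re-open the database the example created and read the rows back.
db = records.Database('sqlite:///users.db')
rows = db.query('SELECT fname, lname, email FROM persons ORDER BY lname')
print(rows.export('json'))  # same data as the CSV export, just serialized as JSON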
|
|
b1e385b50e13ed53501ae9945f29ebe0557540f8
|
kolibri/core/logger/migrations/0010_min_length_validation.py
|
kolibri/core/logger/migrations/0010_min_length_validation.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-11-04 16:08
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
("logger", "0009_null_channel_id_unconstrained_mastery_level"),
]
operations = [
migrations.AlterField(
model_name="attemptlog",
name="item",
field=models.CharField(
max_length=200,
validators=[django.core.validators.MinLengthValidator(1)],
),
),
migrations.AlterField(
model_name="contentsessionlog",
name="kind",
field=models.CharField(
max_length=200,
validators=[django.core.validators.MinLengthValidator(1)],
),
),
migrations.AlterField(
model_name="contentsummarylog",
name="kind",
field=models.CharField(
max_length=200,
validators=[django.core.validators.MinLengthValidator(1)],
),
),
migrations.AlterField(
model_name="examattemptlog",
name="item",
field=models.CharField(
max_length=200,
validators=[django.core.validators.MinLengthValidator(1)],
),
),
]
|
Add migration for min-length validation change
|
Add migration for min-length validation change
|
Python
|
mit
|
learningequality/kolibri,learningequality/kolibri,learningequality/kolibri,learningequality/kolibri
|
Add migration for min-length validation change
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-11-04 16:08
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
("logger", "0009_null_channel_id_unconstrained_mastery_level"),
]
operations = [
migrations.AlterField(
model_name="attemptlog",
name="item",
field=models.CharField(
max_length=200,
validators=[django.core.validators.MinLengthValidator(1)],
),
),
migrations.AlterField(
model_name="contentsessionlog",
name="kind",
field=models.CharField(
max_length=200,
validators=[django.core.validators.MinLengthValidator(1)],
),
),
migrations.AlterField(
model_name="contentsummarylog",
name="kind",
field=models.CharField(
max_length=200,
validators=[django.core.validators.MinLengthValidator(1)],
),
),
migrations.AlterField(
model_name="examattemptlog",
name="item",
field=models.CharField(
max_length=200,
validators=[django.core.validators.MinLengthValidator(1)],
),
),
]
|
<commit_before><commit_msg>Add migration for min-length validation change<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-11-04 16:08
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
("logger", "0009_null_channel_id_unconstrained_mastery_level"),
]
operations = [
migrations.AlterField(
model_name="attemptlog",
name="item",
field=models.CharField(
max_length=200,
validators=[django.core.validators.MinLengthValidator(1)],
),
),
migrations.AlterField(
model_name="contentsessionlog",
name="kind",
field=models.CharField(
max_length=200,
validators=[django.core.validators.MinLengthValidator(1)],
),
),
migrations.AlterField(
model_name="contentsummarylog",
name="kind",
field=models.CharField(
max_length=200,
validators=[django.core.validators.MinLengthValidator(1)],
),
),
migrations.AlterField(
model_name="examattemptlog",
name="item",
field=models.CharField(
max_length=200,
validators=[django.core.validators.MinLengthValidator(1)],
),
),
]
|
Add migration for min-length validation change# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-11-04 16:08
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
("logger", "0009_null_channel_id_unconstrained_mastery_level"),
]
operations = [
migrations.AlterField(
model_name="attemptlog",
name="item",
field=models.CharField(
max_length=200,
validators=[django.core.validators.MinLengthValidator(1)],
),
),
migrations.AlterField(
model_name="contentsessionlog",
name="kind",
field=models.CharField(
max_length=200,
validators=[django.core.validators.MinLengthValidator(1)],
),
),
migrations.AlterField(
model_name="contentsummarylog",
name="kind",
field=models.CharField(
max_length=200,
validators=[django.core.validators.MinLengthValidator(1)],
),
),
migrations.AlterField(
model_name="examattemptlog",
name="item",
field=models.CharField(
max_length=200,
validators=[django.core.validators.MinLengthValidator(1)],
),
),
]
|
<commit_before><commit_msg>Add migration for min-length validation change<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-11-04 16:08
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
("logger", "0009_null_channel_id_unconstrained_mastery_level"),
]
operations = [
migrations.AlterField(
model_name="attemptlog",
name="item",
field=models.CharField(
max_length=200,
validators=[django.core.validators.MinLengthValidator(1)],
),
),
migrations.AlterField(
model_name="contentsessionlog",
name="kind",
field=models.CharField(
max_length=200,
validators=[django.core.validators.MinLengthValidator(1)],
),
),
migrations.AlterField(
model_name="contentsummarylog",
name="kind",
field=models.CharField(
max_length=200,
validators=[django.core.validators.MinLengthValidator(1)],
),
),
migrations.AlterField(
model_name="examattemptlog",
name="item",
field=models.CharField(
max_length=200,
validators=[django.core.validators.MinLengthValidator(1)],
),
),
]
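For readers less familiar with Django: an AlterField operation like the ones above is what makemigrations emits when validators are added to a model field; the corresponding declaration looks roughly like this (a sketch, not the actual Kolibri model code):
from django.core.validators import MinLengthValidator
from django.db import models
class AttemptLog(models.Model):
    # MinLengthValidator(1) rejects empty strings during full_clean()/serializer
    # validation; the database column itself remains a plain varchar(200).
    item = models.CharField(max_length=200, validators=[MinLengthValidator(1)])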
|
|
29dabe00aa21858983804d01afd3458f8192f70e
|
main.py
|
main.py
|
import csv
import sys
import itertools
def count_iter(iterable):
    # count lines in iterable
return sum(1 for _ in iterable)
def setup_iter(reader):
# Skip first row of the file
readeriter = iter(reader)
next(readeriter)
return readeriter
def list_get(li, pos, default):
try:
return li[pos]
except IndexError:
return default
if __name__ == '__main__':
try:
name_of_file = sys.argv[1]
except IndexError:
print 'Usage: python {} filename.csv [start_percent [end_percent]]'.format(sys.argv[0])
sys.exit(1)
# maps numbers to frequency those numbers show up
# so its a histogram
num_to_freq = {
1: 0,
2: 0,
3: 0,
4: 0,
5: 0,
6: 0,
7: 0,
8: 0,
9: 0,
}
start_percent = float(list_get(sys.argv, 2, 0.0))
end_percent = float(list_get(sys.argv, 3, 1.0))
with open(name_of_file, 'r') as csvfile:
reader = csv.reader(csvfile)
itr1, itr2 = itertools.tee(setup_iter(reader))
lines = count_iter(itr1)
start = start_percent * lines
end = end_percent * lines
for index, row in enumerate(itr2):
if index < start:
continue
if index >= end:
break
for i in row[1].split(','):
try:
toint = int(i)
except ValueError:
continue
num_to_freq[toint] += 1
for key, val in num_to_freq.items():
print "{0}: {1}".format(key, val)
|
Add the whole program nice job
|
Add the whole program nice job
|
Python
|
mit
|
brockuniera/CSV-Column-Two-Histogram
|
Add the whole program nice job
|
import csv
import sys
import itertools
def count_iter(iterable):
    # count lines in iterable
return sum(1 for _ in iterable)
def setup_iter(reader):
# Skip first row of the file
readeriter = iter(reader)
next(readeriter)
return readeriter
def list_get(li, pos, default):
try:
return li[pos]
except IndexError:
return default
if __name__ == '__main__':
try:
name_of_file = sys.argv[1]
except IndexError:
print 'Usage: python {} filename.csv [start_percent [end_percent]]'.format(sys.argv[0])
sys.exit(1)
# maps numbers to frequency those numbers show up
# so its a histogram
num_to_freq = {
1: 0,
2: 0,
3: 0,
4: 0,
5: 0,
6: 0,
7: 0,
8: 0,
9: 0,
}
start_percent = float(list_get(sys.argv, 2, 0.0))
end_percent = float(list_get(sys.argv, 3, 1.0))
with open(name_of_file, 'r') as csvfile:
reader = csv.reader(csvfile)
itr1, itr2 = itertools.tee(setup_iter(reader))
lines = count_iter(itr1)
start = start_percent * lines
end = end_percent * lines
for index, row in enumerate(itr2):
if index < start:
continue
if index >= end:
break
for i in row[1].split(','):
try:
toint = int(i)
except ValueError:
continue
num_to_freq[toint] += 1
for key, val in num_to_freq.items():
print "{0}: {1}".format(key, val)
|
<commit_before><commit_msg>Add the whole program nice job<commit_after>
|
import csv
import sys
import itertools
def count_iter(iterable):
    # count lines in iterable
return sum(1 for _ in iterable)
def setup_iter(reader):
# Skip first row of the file
readeriter = iter(reader)
next(readeriter)
return readeriter
def list_get(li, pos, default):
try:
return li[pos]
except IndexError:
return default
if __name__ == '__main__':
try:
name_of_file = sys.argv[1]
except IndexError:
print 'Usage: python {} filename.csv [start_percent [end_percent]]'.format(sys.argv[0])
sys.exit(1)
# maps numbers to frequency those numbers show up
# so its a histogram
num_to_freq = {
1: 0,
2: 0,
3: 0,
4: 0,
5: 0,
6: 0,
7: 0,
8: 0,
9: 0,
}
start_percent = float(list_get(sys.argv, 2, 0.0))
end_percent = float(list_get(sys.argv, 3, 1.0))
with open(name_of_file, 'r') as csvfile:
reader = csv.reader(csvfile)
itr1, itr2 = itertools.tee(setup_iter(reader))
lines = count_iter(itr1)
start = start_percent * lines
end = end_percent * lines
for index, row in enumerate(itr2):
if index < start:
continue
if index >= end:
break
for i in row[1].split(','):
try:
toint = int(i)
except ValueError:
continue
num_to_freq[toint] += 1
for key, val in num_to_freq.items():
print "{0}: {1}".format(key, val)
|
Add the whole program nice jobimport csv
import sys
import itertools
def count_iter(iterable):
    # count lines in iterable
return sum(1 for _ in iterable)
def setup_iter(reader):
# Skip first row of the file
readeriter = iter(reader)
next(readeriter)
return readeriter
def list_get(li, pos, default):
try:
return li[pos]
except IndexError:
return default
if __name__ == '__main__':
try:
name_of_file = sys.argv[1]
except IndexError:
print 'Usage: python {} filename.csv [start_percent [end_percent]]'.format(sys.argv[0])
sys.exit(1)
# maps numbers to frequency those numbers show up
# so its a histogram
num_to_freq = {
1: 0,
2: 0,
3: 0,
4: 0,
5: 0,
6: 0,
7: 0,
8: 0,
9: 0,
}
start_percent = float(list_get(sys.argv, 2, 0.0))
end_percent = float(list_get(sys.argv, 3, 1.0))
with open(name_of_file, 'r') as csvfile:
reader = csv.reader(csvfile)
itr1, itr2 = itertools.tee(setup_iter(reader))
lines = count_iter(itr1)
start = start_percent * lines
end = end_percent * lines
for index, row in enumerate(itr2):
if index < start:
continue
if index >= end:
break
for i in row[1].split(','):
try:
toint = int(i)
except ValueError:
continue
num_to_freq[toint] += 1
for key, val in num_to_freq.items():
print "{0}: {1}".format(key, val)
|
<commit_before><commit_msg>Add the whole program nice job<commit_after>import csv
import sys
import itertools
def count_iter(iterable):
    # count lines in iterable
return sum(1 for _ in iterable)
def setup_iter(reader):
# Skip first row of the file
readeriter = iter(reader)
next(readeriter)
return readeriter
def list_get(li, pos, default):
try:
return li[pos]
except IndexError:
return default
if __name__ == '__main__':
try:
name_of_file = sys.argv[1]
except IndexError:
print 'Usage: python {} filename.csv [start_percent [end_percent]]'.format(sys.argv[0])
sys.exit(1)
# maps numbers to frequency those numbers show up
# so its a histogram
num_to_freq = {
1: 0,
2: 0,
3: 0,
4: 0,
5: 0,
6: 0,
7: 0,
8: 0,
9: 0,
}
start_percent = float(list_get(sys.argv, 2, 0.0))
end_percent = float(list_get(sys.argv, 3, 1.0))
with open(name_of_file, 'r') as csvfile:
reader = csv.reader(csvfile)
itr1, itr2 = itertools.tee(setup_iter(reader))
lines = count_iter(itr1)
start = start_percent * lines
end = end_percent * lines
for index, row in enumerate(itr2):
if index < start:
continue
if index >= end:
break
for i in row[1].split(','):
try:
toint = int(i)
except ValueError:
continue
num_to_freq[toint] += 1
for key, val in num_to_freq.items():
print "{0}: {1}".format(key, val)
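A worked example of the percentage slicing above (file name and rows are invented):
# sample.csv (the header row is skipped by setup_iter):
#   id,digits
#   a,"1,2,3"
#   b,"7,7,9"
#   c,"2,4"
#   d,"1,1,1"
# Running `python main.py sample.csv 0.25 0.75` over these 4 data rows gives
#   start = 0.25 * 4 = 1.0 -> row index 0 ("a") is skipped (index < start)
#   end   = 0.75 * 4 = 3.0 -> the loop breaks at row index 3 ("d")
# so only rows "b" and "c" are counted and the histogram prints
# 2: 1, 4: 1, 7: 2, 9: 1, with every other digit at 0.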
|
|
71cb3d71443dc1b28a4aa62dcd0b880e2b5f5cac
|
nettests/core/keyword_filtering.py
|
nettests/core/keyword_filtering.py
|
# -*- encoding: utf-8 -*-
#
# :authors: Arturo Filastò
# :licence: see LICENSE
from ooni.templates import httpt
class KeywordFiltering(httpt.HTTPTest):
"""
    This test involves performing HTTP requests containing keywords to be
    tested for censorship.
"""
name = "Keyword Filtering"
author = "Arturo Filastò"
version = 0.1
optParameters = [['backend', 'b', None, 'URL of the backend system to use for testing']]
inputFile = ['file', 'f', None, 'List of keywords to use for censorship testing']
def processInputs(self):
if 'backend' in self.localOptions:
self.url = self.localOptions['backend']
else:
raise Exception("No backend specified")
def test_get(self):
"""
Perform a HTTP GET request to the backend containing the keyword to be
tested inside of the request body.
"""
return self.doRequest(self.url, method="GET", body=self.input)
def test_post(self):
"""
Perform a HTTP POST request to the backend containing the keyword to be
tested inside of the request body.
"""
return self.doRequest(self.url, method="POST", body=self.input)
|
Add keyword filtering test file
|
Add keyword filtering test file
|
Python
|
bsd-2-clause
|
juga0/ooni-probe,kdmurray91/ooni-probe,juga0/ooni-probe,Karthikeyan-kkk/ooni-probe,kdmurray91/ooni-probe,kdmurray91/ooni-probe,0xPoly/ooni-probe,Karthikeyan-kkk/ooni-probe,lordappsec/ooni-probe,juga0/ooni-probe,juga0/ooni-probe,Karthikeyan-kkk/ooni-probe,0xPoly/ooni-probe,Karthikeyan-kkk/ooni-probe,lordappsec/ooni-probe,0xPoly/ooni-probe,lordappsec/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,lordappsec/ooni-probe
|
Add keyword filtering test file
|
# -*- encoding: utf-8 -*-
#
# :authors: Arturo Filastò
# :licence: see LICENSE
from ooni.templates import httpt
class KeywordFiltering(httpt.HTTPTest):
"""
    This test involves performing HTTP requests containing keywords to be
    tested for censorship.
"""
name = "Keyword Filtering"
author = "Arturo Filastò"
version = 0.1
optParameters = [['backend', 'b', None, 'URL of the backend system to use for testing']]
inputFile = ['file', 'f', None, 'List of keywords to use for censorship testing']
def processInputs(self):
if 'backend' in self.localOptions:
self.url = self.localOptions['backend']
else:
raise Exception("No backend specified")
def test_get(self):
"""
Perform a HTTP GET request to the backend containing the keyword to be
tested inside of the request body.
"""
return self.doRequest(self.url, method="GET", body=self.input)
def test_post(self):
"""
Perform a HTTP POST request to the backend containing the keyword to be
tested inside of the request body.
"""
return self.doRequest(self.url, method="POST", body=self.input)
|
<commit_before><commit_msg>Add keyword filtering test file<commit_after>
|
# -*- encoding: utf-8 -*-
#
# :authors: Arturo Filastò
# :licence: see LICENSE
from ooni.templates import httpt
class KeywordFiltering(httpt.HTTPTest):
"""
    This test involves performing HTTP requests containing keywords to be
    tested for censorship.
"""
name = "Keyword Filtering"
author = "Arturo Filastò"
version = 0.1
optParameters = [['backend', 'b', None, 'URL of the backend system to use for testing']]
inputFile = ['file', 'f', None, 'List of keywords to use for censorship testing']
def processInputs(self):
if 'backend' in self.localOptions:
self.url = self.localOptions['backend']
else:
raise Exception("No backend specified")
def test_get(self):
"""
Perform a HTTP GET request to the backend containing the keyword to be
tested inside of the request body.
"""
return self.doRequest(self.url, method="GET", body=self.input)
def test_post(self):
"""
Perform a HTTP POST request to the backend containing the keyword to be
tested inside of the request body.
"""
return self.doRequest(self.url, method="POST", body=self.input)
|
Add keyword filtering test file# -*- encoding: utf-8 -*-
#
# :authors: Arturo Filastò
# :licence: see LICENSE
from ooni.templates import httpt
class KeywordFiltering(httpt.HTTPTest):
"""
    This test involves performing HTTP requests containing keywords to be
    tested for censorship.
"""
name = "Keyword Filtering"
author = "Arturo Filastò"
version = 0.1
optParameters = [['backend', 'b', None, 'URL of the backend system to use for testing']]
inputFile = ['file', 'f', None, 'List of keywords to use for censorship testing']
def processInputs(self):
if 'backend' in self.localOptions:
self.url = self.localOptions['backend']
else:
raise Exception("No backend specified")
def test_get(self):
"""
Perform a HTTP GET request to the backend containing the keyword to be
tested inside of the request body.
"""
return self.doRequest(self.url, method="GET", body=self.input)
def test_post(self):
"""
Perform a HTTP POST request to the backend containing the keyword to be
tested inside of the request body.
"""
return self.doRequest(self.url, method="POST", body=self.input)
|
<commit_before><commit_msg>Add keyword filtering test file<commit_after># -*- encoding: utf-8 -*-
#
# :authors: Arturo Filastò
# :licence: see LICENSE
from ooni.templates import httpt
class KeywordFiltering(httpt.HTTPTest):
"""
    This test involves performing HTTP requests containing keywords to be
    tested for censorship.
"""
name = "Keyword Filtering"
author = "Arturo Filastò"
version = 0.1
optParameters = [['backend', 'b', None, 'URL of the backend system to use for testing']]
inputFile = ['file', 'f', None, 'List of keywords to use for censorship testing']
def processInputs(self):
if 'backend' in self.localOptions:
self.url = self.localOptions['backend']
else:
raise Exception("No backend specified")
def test_get(self):
"""
Perform a HTTP GET request to the backend containing the keyword to be
tested inside of the request body.
"""
return self.doRequest(self.url, method="GET", body=self.input)
def test_post(self):
"""
Perform a HTTP POST request to the backend containing the keyword to be
tested inside of the request body.
"""
return self.doRequest(self.url, method="POST", body=self.input)
|
|
7113374b0eab84769fe452ad124d8e082bb11923
|
py/median-of-two-sorted-arrays.py
|
py/median-of-two-sorted-arrays.py
|
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
if len(nums1) > len(nums2):
nums1, nums2 = nums2, nums1
l1, l2 = len(nums1), len(nums2)
m = min(nums1[:1] + nums2[:1])
M = max(nums1[-1:] + nums2[-1:])
# 0123456789
# .4.5.6.
# .1.2.3.
L, U = 0, l1 * 2 + 1
while L + 1 < U:
mid1 = (L + U) / 2
mid2 = l1 + l2 - mid1
L1 = m if mid1 == 0 else nums1[(mid1 - 1) / 2]
R1 = M if mid1 == 2 * l1 else nums1[mid1 / 2]
L2 = m if mid2 == 0 else nums2[(mid2 - 1) / 2]
R2 = M if mid2 == 2 * l2 else nums2[mid2 / 2]
if L1 > R2:
U = mid1
else:
L = mid1
C1, C2 = L, l1 + l2 - L
L1 = m if C1 == 0 else nums1[(C1 - 1) / 2]
R1 = M if C1 == 2 * l1 else nums1[C1 / 2]
L2 = m if C2 == 0 else nums2[(C2 - 1) / 2]
R2 = M if C2 == 2 * l2 else nums2[C2 / 2]
return (max(L1, L2) + min(R1, R2)) / 2.
|
Add py solution for 4. Median of Two Sorted Arrays
|
Add py solution for 4. Median of Two Sorted Arrays
4. Median of Two Sorted Arrays: https://leetcode.com/problems/median-of-two-sorted-arrays/
Reference: https://discuss.leetcode.com/topic/16797/very-concise-o-log-min-m-n-iterative-solution-with-detailed-explanation
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 4. Median of Two Sorted Arrays
4. Median of Two Sorted Arrays: https://leetcode.com/problems/median-of-two-sorted-arrays/
Reference: https://discuss.leetcode.com/topic/16797/very-concise-o-log-min-m-n-iterative-solution-with-detailed-explanation
|
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
if len(nums1) > len(nums2):
nums1, nums2 = nums2, nums1
l1, l2 = len(nums1), len(nums2)
m = min(nums1[:1] + nums2[:1])
M = max(nums1[-1:] + nums2[-1:])
# 0123456789
# .4.5.6.
# .1.2.3.
L, U = 0, l1 * 2 + 1
while L + 1 < U:
mid1 = (L + U) / 2
mid2 = l1 + l2 - mid1
L1 = m if mid1 == 0 else nums1[(mid1 - 1) / 2]
R1 = M if mid1 == 2 * l1 else nums1[mid1 / 2]
L2 = m if mid2 == 0 else nums2[(mid2 - 1) / 2]
R2 = M if mid2 == 2 * l2 else nums2[mid2 / 2]
if L1 > R2:
U = mid1
else:
L = mid1
C1, C2 = L, l1 + l2 - L
L1 = m if C1 == 0 else nums1[(C1 - 1) / 2]
R1 = M if C1 == 2 * l1 else nums1[C1 / 2]
L2 = m if C2 == 0 else nums2[(C2 - 1) / 2]
R2 = M if C2 == 2 * l2 else nums2[C2 / 2]
return (max(L1, L2) + min(R1, R2)) / 2.
|
<commit_before><commit_msg>Add py solution for 4. Median of Two Sorted Arrays
4. Median of Two Sorted Arrays: https://leetcode.com/problems/median-of-two-sorted-arrays/
Reference: https://discuss.leetcode.com/topic/16797/very-concise-o-log-min-m-n-iterative-solution-with-detailed-explanation<commit_after>
|
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
if len(nums1) > len(nums2):
nums1, nums2 = nums2, nums1
l1, l2 = len(nums1), len(nums2)
m = min(nums1[:1] + nums2[:1])
M = max(nums1[-1:] + nums2[-1:])
# 0123456789
# .4.5.6.
# .1.2.3.
L, U = 0, l1 * 2 + 1
while L + 1 < U:
mid1 = (L + U) / 2
mid2 = l1 + l2 - mid1
L1 = m if mid1 == 0 else nums1[(mid1 - 1) / 2]
R1 = M if mid1 == 2 * l1 else nums1[mid1 / 2]
L2 = m if mid2 == 0 else nums2[(mid2 - 1) / 2]
R2 = M if mid2 == 2 * l2 else nums2[mid2 / 2]
if L1 > R2:
U = mid1
else:
L = mid1
C1, C2 = L, l1 + l2 - L
L1 = m if C1 == 0 else nums1[(C1 - 1) / 2]
R1 = M if C1 == 2 * l1 else nums1[C1 / 2]
L2 = m if C2 == 0 else nums2[(C2 - 1) / 2]
R2 = M if C2 == 2 * l2 else nums2[C2 / 2]
return (max(L1, L2) + min(R1, R2)) / 2.
|
Add py solution for 4. Median of Two Sorted Arrays
4. Median of Two Sorted Arrays: https://leetcode.com/problems/median-of-two-sorted-arrays/
Reference: https://discuss.leetcode.com/topic/16797/very-concise-o-log-min-m-n-iterative-solution-with-detailed-explanationclass Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
if len(nums1) > len(nums2):
nums1, nums2 = nums2, nums1
l1, l2 = len(nums1), len(nums2)
m = min(nums1[:1] + nums2[:1])
M = max(nums1[-1:] + nums2[-1:])
# 0123456789
# .4.5.6.
# .1.2.3.
L, U = 0, l1 * 2 + 1
while L + 1 < U:
mid1 = (L + U) / 2
mid2 = l1 + l2 - mid1
L1 = m if mid1 == 0 else nums1[(mid1 - 1) / 2]
R1 = M if mid1 == 2 * l1 else nums1[mid1 / 2]
L2 = m if mid2 == 0 else nums2[(mid2 - 1) / 2]
R2 = M if mid2 == 2 * l2 else nums2[mid2 / 2]
if L1 > R2:
U = mid1
else:
L = mid1
C1, C2 = L, l1 + l2 - L
L1 = m if C1 == 0 else nums1[(C1 - 1) / 2]
R1 = M if C1 == 2 * l1 else nums1[C1 / 2]
L2 = m if C2 == 0 else nums2[(C2 - 1) / 2]
R2 = M if C2 == 2 * l2 else nums2[C2 / 2]
return (max(L1, L2) + min(R1, R2)) / 2.
|
<commit_before><commit_msg>Add py solution for 4. Median of Two Sorted Arrays
4. Median of Two Sorted Arrays: https://leetcode.com/problems/median-of-two-sorted-arrays/
Reference: https://discuss.leetcode.com/topic/16797/very-concise-o-log-min-m-n-iterative-solution-with-detailed-explanation<commit_after>class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
if len(nums1) > len(nums2):
nums1, nums2 = nums2, nums1
l1, l2 = len(nums1), len(nums2)
m = min(nums1[:1] + nums2[:1])
M = max(nums1[-1:] + nums2[-1:])
# 0123456789
# .4.5.6.
# .1.2.3.
L, U = 0, l1 * 2 + 1
while L + 1 < U:
mid1 = (L + U) / 2
mid2 = l1 + l2 - mid1
L1 = m if mid1 == 0 else nums1[(mid1 - 1) / 2]
R1 = M if mid1 == 2 * l1 else nums1[mid1 / 2]
L2 = m if mid2 == 0 else nums2[(mid2 - 1) / 2]
R2 = M if mid2 == 2 * l2 else nums2[mid2 / 2]
if L1 > R2:
U = mid1
else:
L = mid1
C1, C2 = L, l1 + l2 - L
L1 = m if C1 == 0 else nums1[(C1 - 1) / 2]
R1 = M if C1 == 2 * l1 else nums1[C1 / 2]
L2 = m if C2 == 0 else nums2[(C2 - 1) / 2]
R2 = M if C2 == 2 * l2 else nums2[C2 / 2]
return (max(L1, L2) + min(R1, R2)) / 2.
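A quick sanity check of the solution above (illustrative only; it relies on the Python 2 integer-division semantics the original code uses):
# Expected medians for the two classic examples: 2.0 and 2.5.
sol = Solution()
print(sol.findMedianSortedArrays([1, 3], [2]))     # 2.0
print(sol.findMedianSortedArrays([1, 2], [3, 4]))  # 2.5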
|
|
29cfeae39f819c34f5a1beb033dffe537da9b9eb
|
karma-histogram.py
|
karma-histogram.py
|
import praw
import praw.helpers
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser(description='Plot karma distributions of comments in a post')
parser.add_argument('--link', dest='link', required=True, help='Link of the post')
parser.add_argument('--expand_more_comments', dest='expand_more_comments', action='store_true', default=False, help='Expand "more comments" threads before plotting')
args = parser.parse_args()
def plot_karma(karma):
# the histogram of the data
n, bins, patches = plt.hist(karma, 100, facecolor='green', alpha=0.75)
plt.xlabel('Karma')
plt.ylabel('Post count')
plt.gca().set_yscale('log')
plt.grid(True)
plt.show()
def get_every_comments_karma(r, submission_link, expand_more_comments):
submission = praw.objects.Submission.from_url(r, submission_link, comment_limit=None)
if expand_more_comments:
submission.replace_more_comments(limit=None, threshold=0)
return [(comment.score if 'score' in dir(comment) else 0) for comment in praw.helpers.flatten_tree(submission.comments)]
r = praw.Reddit(user_agent='karma distribution a sub by /u/godlikesme')
karma = get_every_comments_karma(r, args.link, args.expand_more_comments)
plot_karma(karma)
|
Add script for plotting karma distribution in a post
|
Add script for plotting karma distribution in a post
|
Python
|
mit
|
eleweek/redditbots
|
Add script for plotting karma distribution in a post
|
import praw
import praw.helpers
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser(description='Plot karma distributions of comments in a post')
parser.add_argument('--link', dest='link', required=True, help='Link of the post')
parser.add_argument('--expand_more_comments', dest='expand_more_comments', action='store_true', default=False, help='Expand "more comments" threads before plotting')
args = parser.parse_args()
def plot_karma(karma):
# the histogram of the data
n, bins, patches = plt.hist(karma, 100, facecolor='green', alpha=0.75)
plt.xlabel('Karma')
plt.ylabel('Post count')
plt.gca().set_yscale('log')
plt.grid(True)
plt.show()
def get_every_comments_karma(r, submission_link, expand_more_comments):
submission = praw.objects.Submission.from_url(r, submission_link, comment_limit=None)
if expand_more_comments:
submission.replace_more_comments(limit=None, threshold=0)
return [(comment.score if 'score' in dir(comment) else 0) for comment in praw.helpers.flatten_tree(submission.comments)]
r = praw.Reddit(user_agent='karma distribution a sub by /u/godlikesme')
karma = get_every_comments_karma(r, args.link, args.expand_more_comments)
plot_karma(karma)
|
<commit_before><commit_msg>Add script for plotting karma distribution in a post<commit_after>
|
import praw
import praw.helpers
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser(description='Plot karma distributions of comments in a post')
parser.add_argument('--link', dest='link', required=True, help='Link of the post')
parser.add_argument('--expand_more_comments', dest='expand_more_comments', action='store_true', default=False, help='Expand "more comments" threads before plotting')
args = parser.parse_args()
def plot_karma(karma):
# the histogram of the data
n, bins, patches = plt.hist(karma, 100, facecolor='green', alpha=0.75)
plt.xlabel('Karma')
plt.ylabel('Post count')
plt.gca().set_yscale('log')
plt.grid(True)
plt.show()
def get_every_comments_karma(r, submission_link, expand_more_comments):
submission = praw.objects.Submission.from_url(r, submission_link, comment_limit=None)
if expand_more_comments:
submission.replace_more_comments(limit=None, threshold=0)
return [(comment.score if 'score' in dir(comment) else 0) for comment in praw.helpers.flatten_tree(submission.comments)]
r = praw.Reddit(user_agent='karma distribution a sub by /u/godlikesme')
karma = get_every_comments_karma(r, args.link, args.expand_more_comments)
plot_karma(karma)
|
Add script for plotting karma distribution in a postimport praw
import praw.helpers
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser(description='Plot karma distributions of comments in a post')
parser.add_argument('--link', dest='link', required=True, help='Link of the post')
parser.add_argument('--expand_more_comments', dest='expand_more_comments', action='store_true', default=False, help='Expand "more comments" threads before plotting')
args = parser.parse_args()
def plot_karma(karma):
# the histogram of the data
n, bins, patches = plt.hist(karma, 100, facecolor='green', alpha=0.75)
plt.xlabel('Karma')
plt.ylabel('Post count')
plt.gca().set_yscale('log')
plt.grid(True)
plt.show()
def get_every_comments_karma(r, submission_link, expand_more_comments):
submission = praw.objects.Submission.from_url(r, submission_link, comment_limit=None)
if expand_more_comments:
submission.replace_more_comments(limit=None, threshold=0)
return [(comment.score if 'score' in dir(comment) else 0) for comment in praw.helpers.flatten_tree(submission.comments)]
r = praw.Reddit(user_agent='karma distribution a sub by /u/godlikesme')
karma = get_every_comments_karma(r, args.link, args.expand_more_comments)
plot_karma(karma)
|
<commit_before><commit_msg>Add script for plotting karma distribution in a post<commit_after>import praw
import praw.helpers
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser(description='Plot karma distributions of comments in a post')
parser.add_argument('--link', dest='link', required=True, help='Link of the post')
parser.add_argument('--expand_more_comments', dest='expand_more_comments', action='store_true', default=False, help='Expand "more comments" threads before plotting')
args = parser.parse_args()
def plot_karma(karma):
# the histogram of the data
n, bins, patches = plt.hist(karma, 100, facecolor='green', alpha=0.75)
plt.xlabel('Karma')
plt.ylabel('Post count')
plt.gca().set_yscale('log')
plt.grid(True)
plt.show()
def get_every_comments_karma(r, submission_link, expand_more_comments):
submission = praw.objects.Submission.from_url(r, submission_link, comment_limit=None)
if expand_more_comments:
submission.replace_more_comments(limit=None, threshold=0)
return [(comment.score if 'score' in dir(comment) else 0) for comment in praw.helpers.flatten_tree(submission.comments)]
r = praw.Reddit(user_agent='karma distribution a sub by /u/godlikesme')
karma = get_every_comments_karma(r, args.link, args.expand_more_comments)
plot_karma(karma)
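A typical invocation of the script above, using the two argparse flags it defines (the URL is a placeholder, not a real submission):
# Plot the comment-karma histogram for one submission, expanding the
# "more comments" stubs first so the whole thread is counted:
#   python karma-histogram.py --link https://www.reddit.com/r/Python/comments/abc123/example/ --expand_more_comments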
|
|
9928ee0bced08c57cf2125eac6538b8bc6ade9ff
|
paystackapi/tests/test_tcontrol.py
|
paystackapi/tests/test_tcontrol.py
|
import httpretty
from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.tcontrol import TransferControl
class TestTransfer(BaseTestCase):
@httpretty.activate
def test_check_balance(self):
"""Method defined to test check_balance."""
httpretty.register_uri(
httpretty.GET,
self.endpoint_url("/balance"),
content_type='text/json',
body='{"status": true, "message": "Balances retrieved"}',
status=201,
)
response = TransferControl.check_balance()
self.assertTrue(response['status'])
|
Add test for transfer control check balance
|
Add test for transfer control check balance
|
Python
|
mit
|
andela-sjames/paystack-python
|
Add test for transfer control check balance
|
import httpretty
from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.tcontrol import TransferControl
class TestTransfer(BaseTestCase):
@httpretty.activate
def test_check_balance(self):
"""Method defined to test check_balance."""
httpretty.register_uri(
httpretty.GET,
self.endpoint_url("/balance"),
content_type='text/json',
body='{"status": true, "message": "Balances retrieved"}',
status=201,
)
response = TransferControl.check_balance()
self.assertTrue(response['status'])
|
<commit_before><commit_msg>Add test for transfer control check balance<commit_after>
|
import httpretty
from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.tcontrol import TransferControl
class TestTransfer(BaseTestCase):
@httpretty.activate
def test_check_balance(self):
"""Method defined to test check_balance."""
httpretty.register_uri(
httpretty.GET,
self.endpoint_url("/balance"),
content_type='text/json',
body='{"status": true, "message": "Balances retrieved"}',
status=201,
)
response = TransferControl.check_balance()
self.assertTrue(response['status'])
|
Add test for transfer control check balanceimport httpretty
from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.tcontrol import TransferControl
class TestTransfer(BaseTestCase):
@httpretty.activate
def test_check_balance(self):
"""Method defined to test check_balance."""
httpretty.register_uri(
httpretty.GET,
self.endpoint_url("/balance"),
content_type='text/json',
body='{"status": true, "message": "Balances retrieved"}',
status=201,
)
response = TransferControl.check_balance()
self.assertTrue(response['status'])
|
<commit_before><commit_msg>Add test for transfer control check balance<commit_after>import httpretty
from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.tcontrol import TransferControl
class TestTransfer(BaseTestCase):
@httpretty.activate
def test_check_balance(self):
"""Method defined to test check_balance."""
httpretty.register_uri(
httpretty.GET,
self.endpoint_url("/balance"),
content_type='text/json',
body='{"status": true, "message": "Balances retrieved"}',
status=201,
)
response = TransferControl.check_balance()
self.assertTrue(response['status'])
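A natural extension of the test above (a sketch, not part of the commit): the mocked body also carries a message field, so the same response object could be asserted against it as well:
# Inside test_check_balance, after the status assertion:
self.assertEqual(response['message'], 'Balances retrieved')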
|
|
31b70041a9fc7da87774bffd59a9f93917200f18
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
with open('README.rst') as readme:
long_description = ''.join(readme).strip()
setup(
name='rsocks',
version='0.3.3',
author='Jiangge Zhang',
author_email='tonyseek@gmail.com',
description='A SOCKS reverse proxy server.',
long_description=long_description,
url='https://github.com/tonyseek/rsocks',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: PyPy',
],
packages=find_packages(),
platforms=['Any'],
install_requires=[
'PySocks>=1.5,<1.6',
'eventlet>=0.31,<0.34',
'click>=3.3,<3.4',
'toml.py>=0.1,<0.2',
'six',
],
entry_points={
'console_scripts': [
'rsocks=rsocks.cli:main',
],
},
)
|
from setuptools import setup, find_packages
with open('README.rst') as readme:
long_description = ''.join(readme).strip()
setup(
name='rsocks',
version='0.3.3',
author='Jiangge Zhang',
author_email='tonyseek@gmail.com',
description='A SOCKS reverse proxy server.',
long_description=long_description,
url='https://github.com/tonyseek/rsocks',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: Implementation :: PyPy',
],
packages=find_packages(),
platforms=['Any'],
install_requires=[
'PySocks>=1.7.1,<1.8',
'eventlet>=0.31,<0.34',
'click>=3.3,<3.4',
'toml.py>=0.1,<0.2',
'six',
],
entry_points={
'console_scripts': [
'rsocks=rsocks.cli:main',
],
},
)
|
Upgrade PySocks to fix Python 3.10 compatibility
|
Upgrade PySocks to fix Python 3.10 compatibility
|
Python
|
mit
|
tonyseek/rsocks,tonyseek/rsocks
|
from setuptools import setup, find_packages
with open('README.rst') as readme:
long_description = ''.join(readme).strip()
setup(
name='rsocks',
version='0.3.3',
author='Jiangge Zhang',
author_email='tonyseek@gmail.com',
description='A SOCKS reverse proxy server.',
long_description=long_description,
url='https://github.com/tonyseek/rsocks',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: PyPy',
],
packages=find_packages(),
platforms=['Any'],
install_requires=[
'PySocks>=1.5,<1.6',
'eventlet>=0.31,<0.34',
'click>=3.3,<3.4',
'toml.py>=0.1,<0.2',
'six',
],
entry_points={
'console_scripts': [
'rsocks=rsocks.cli:main',
],
},
)
Upgrade PySocks to fix Python 3.10 compatibility
|
from setuptools import setup, find_packages
with open('README.rst') as readme:
long_description = ''.join(readme).strip()
setup(
name='rsocks',
version='0.3.3',
author='Jiangge Zhang',
author_email='tonyseek@gmail.com',
description='A SOCKS reverse proxy server.',
long_description=long_description,
url='https://github.com/tonyseek/rsocks',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: Implementation :: PyPy',
],
packages=find_packages(),
platforms=['Any'],
install_requires=[
'PySocks>=1.7.1,<1.8',
'eventlet>=0.31,<0.34',
'click>=3.3,<3.4',
'toml.py>=0.1,<0.2',
'six',
],
entry_points={
'console_scripts': [
'rsocks=rsocks.cli:main',
],
},
)
|
<commit_before>from setuptools import setup, find_packages
with open('README.rst') as readme:
long_description = ''.join(readme).strip()
setup(
name='rsocks',
version='0.3.3',
author='Jiangge Zhang',
author_email='tonyseek@gmail.com',
description='A SOCKS reverse proxy server.',
long_description=long_description,
url='https://github.com/tonyseek/rsocks',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: PyPy',
],
packages=find_packages(),
platforms=['Any'],
install_requires=[
'PySocks>=1.5,<1.6',
'eventlet>=0.31,<0.34',
'click>=3.3,<3.4',
'toml.py>=0.1,<0.2',
'six',
],
entry_points={
'console_scripts': [
'rsocks=rsocks.cli:main',
],
},
)
<commit_msg>Upgrade PySocks to fix Python 3.10 compatibility<commit_after>
|
from setuptools import setup, find_packages
with open('README.rst') as readme:
long_description = ''.join(readme).strip()
setup(
name='rsocks',
version='0.3.3',
author='Jiangge Zhang',
author_email='tonyseek@gmail.com',
description='A SOCKS reverse proxy server.',
long_description=long_description,
url='https://github.com/tonyseek/rsocks',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: Implementation :: PyPy',
],
packages=find_packages(),
platforms=['Any'],
install_requires=[
'PySocks>=1.7.1,<1.8',
'eventlet>=0.31,<0.34',
'click>=3.3,<3.4',
'toml.py>=0.1,<0.2',
'six',
],
entry_points={
'console_scripts': [
'rsocks=rsocks.cli:main',
],
},
)
|
from setuptools import setup, find_packages
with open('README.rst') as readme:
long_description = ''.join(readme).strip()
setup(
name='rsocks',
version='0.3.3',
author='Jiangge Zhang',
author_email='tonyseek@gmail.com',
description='A SOCKS reverse proxy server.',
long_description=long_description,
url='https://github.com/tonyseek/rsocks',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: PyPy',
],
packages=find_packages(),
platforms=['Any'],
install_requires=[
'PySocks>=1.5,<1.6',
'eventlet>=0.31,<0.34',
'click>=3.3,<3.4',
'toml.py>=0.1,<0.2',
'six',
],
entry_points={
'console_scripts': [
'rsocks=rsocks.cli:main',
],
},
)
Upgrade PySocks to fix Python 3.10 compatibilityfrom setuptools import setup, find_packages
with open('README.rst') as readme:
long_description = ''.join(readme).strip()
setup(
name='rsocks',
version='0.3.3',
author='Jiangge Zhang',
author_email='tonyseek@gmail.com',
description='A SOCKS reverse proxy server.',
long_description=long_description,
url='https://github.com/tonyseek/rsocks',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: Implementation :: PyPy',
],
packages=find_packages(),
platforms=['Any'],
install_requires=[
'PySocks>=1.7.1,<1.8',
'eventlet>=0.31,<0.34',
'click>=3.3,<3.4',
'toml.py>=0.1,<0.2',
'six',
],
entry_points={
'console_scripts': [
'rsocks=rsocks.cli:main',
],
},
)
|
<commit_before>from setuptools import setup, find_packages
with open('README.rst') as readme:
long_description = ''.join(readme).strip()
setup(
name='rsocks',
version='0.3.3',
author='Jiangge Zhang',
author_email='tonyseek@gmail.com',
description='A SOCKS reverse proxy server.',
long_description=long_description,
url='https://github.com/tonyseek/rsocks',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: PyPy',
],
packages=find_packages(),
platforms=['Any'],
install_requires=[
'PySocks>=1.5,<1.6',
'eventlet>=0.31,<0.34',
'click>=3.3,<3.4',
'toml.py>=0.1,<0.2',
'six',
],
entry_points={
'console_scripts': [
'rsocks=rsocks.cli:main',
],
},
)
<commit_msg>Upgrade PySocks to fix Python 3.10 compatibility<commit_after>from setuptools import setup, find_packages
with open('README.rst') as readme:
long_description = ''.join(readme).strip()
setup(
name='rsocks',
version='0.3.3',
author='Jiangge Zhang',
author_email='tonyseek@gmail.com',
description='A SOCKS reverse proxy server.',
long_description=long_description,
url='https://github.com/tonyseek/rsocks',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: Implementation :: PyPy',
],
packages=find_packages(),
platforms=['Any'],
install_requires=[
'PySocks>=1.7.1,<1.8',
'eventlet>=0.31,<0.34',
'click>=3.3,<3.4',
'toml.py>=0.1,<0.2',
'six',
],
entry_points={
'console_scripts': [
'rsocks=rsocks.cli:main',
],
},
)
|
fc708cd68ec189f15b84a49329076fa0be922ea8
|
python/S02/projets/QCM2.py
|
python/S02/projets/QCM2.py
|
"""
QCM Version n°2
"""
import random
def afficher_question(question):
""" Affiche les informations de la question : question, réponses et points
que l'on peut gagner.
"""
print("### QUESTION ###")
print(question['question'])
for index, reponse in enumerate(question['réponses']):
print(f'{index+1} - {reponse[0]}')
print(f"Cette question rapporte {question['score']} points.\n")
def valider_reponse(reponses):
""" Demande à l'utilisateur d'entrer une réponse.
True : La réponse est correcte
False : La réponse n'est pas correcte
-1 : Fin du programme
"""
while True:
choix = input("Quel est votre choix? (N° de la réponse) ")
if choix == 'stop':
return -1
try:
choix = int(choix)
if 0 < choix <= len(reponses):
return reponses[choix-1][1]
else:
print("Cette réponse n'existe pas.")
except ValueError:
print("Vous devez entrer un nombre.")
if __name__ == '__main__':
questions = [
{
'question': "Quelle est la couleur du cheval blanc d'Henri IV?",
'réponses': [
("Blanche", True),
("Noire", False),
("Rose", False),
("Marron", False)
],
'score': 1
},
{
'question': "Quel est le plus haut mont de France?",
'réponses': [
("Canigou", False),
("Pic Saint Loup", False),
("Montagne Noire", False),
("Mont Blanc", True)
],
'score': 10
},
]
score = 0
while True:
question = random.choice(questions)
afficher_question(question)
reponse = valider_reponse(question['réponses'])
if reponse == -1:
break
elif reponse == 1:
score += question['score']
print("VRAI")
else:
print("FAUX")
print(f"\n### SCORE ###\nPoints : {score}\n")
print("Le programme est maintenant terminé. Merci d'avoir participé.")
|
Add the second version of QCM
|
Add the second version of QCM
|
Python
|
mit
|
DocWinter/BiB-Workshops,DocWinter/BiB-Workshops,DocWinter/BiB-Workshops
|
Add the second version of QCM
|
"""
QCM Version n°2
"""
import random
def afficher_question(question):
""" Affiche les informations de la question : question, réponses et points
que l'on peut gagner.
"""
print("### QUESTION ###")
print(question['question'])
for index, reponse in enumerate(question['réponses']):
print(f'{index+1} - {reponse[0]}')
print(f"Cette question rapporte {question['score']} points.\n")
def valider_reponse(reponses):
""" Demande à l'utilisateur d'entrer une réponse.
True : La réponse est correcte
False : La réponse n'est pas correcte
-1 : Fin du programme
"""
while True:
choix = input("Quel est votre choix? (N° de la réponse) ")
if choix == 'stop':
return -1
try:
choix = int(choix)
if 0 < choix <= len(reponses):
return reponses[choix-1][1]
else:
print("Cette réponse n'existe pas.")
except ValueError:
print("Vous devez entrer un nombre.")
if __name__ == '__main__':
questions = [
{
'question': "Quelle est la couleur du cheval blanc d'Henri IV?",
'réponses': [
("Blanche", True),
("Noire", False),
("Rose", False),
("Marron", False)
],
'score': 1
},
{
'question': "Quel est le plus haut mont de France?",
'réponses': [
("Canigou", False),
("Pic Saint Loup", False),
("Montagne Noire", False),
("Mont Blanc", True)
],
'score': 10
},
]
score = 0
while True:
question = random.choice(questions)
afficher_question(question)
reponse = valider_reponse(question['réponses'])
if reponse == -1:
break
elif reponse == 1:
score += question['score']
print("VRAI")
else:
print("FAUX")
print(f"\n### SCORE ###\nPoints : {score}\n")
print("Le programme est maintenant terminé. Merci d'avoir participé.")
|
<commit_before><commit_msg>Add the second version of QCM<commit_after>
|
"""
QCM Version n°2
"""
import random
def afficher_question(question):
""" Affiche les informations de la question : question, réponses et points
que l'on peut gagner.
"""
print("### QUESTION ###")
print(question['question'])
for index, reponse in enumerate(question['réponses']):
print(f'{index+1} - {reponse[0]}')
print(f"Cette question rapporte {question['score']} points.\n")
def valider_reponse(reponses):
""" Demande à l'utilisateur d'entrer une réponse.
True : La réponse est correcte
False : La réponse n'est pas correcte
-1 : Fin du programme
"""
while True:
choix = input("Quel est votre choix? (N° de la réponse) ")
if choix == 'stop':
return -1
try:
choix = int(choix)
if 0 < choix <= len(reponses):
return reponses[choix-1][1]
else:
print("Cette réponse n'existe pas.")
except ValueError:
print("Vous devez entrer un nombre.")
if __name__ == '__main__':
questions = [
{
'question': "Quelle est la couleur du cheval blanc d'Henri IV?",
'réponses': [
("Blanche", True),
("Noire", False),
("Rose", False),
("Marron", False)
],
'score': 1
},
{
'question': "Quel est le plus haut mont de France?",
'réponses': [
("Canigou", False),
("Pic Saint Loup", False),
("Montagne Noire", False),
("Mont Blanc", True)
],
'score': 10
},
]
score = 0
while True:
question = random.choice(questions)
afficher_question(question)
reponse = valider_reponse(question['réponses'])
if reponse == -1:
break
elif reponse == 1:
score += question['score']
print("VRAI")
else:
print("FAUX")
print(f"\n### SCORE ###\nPoints : {score}\n")
print("Le programme est maintenant terminé. Merci d'avoir participé.")
|
Add the second version of QCM"""
QCM Version n°2
"""
import random
def afficher_question(question):
""" Affiche les informations de la question : question, réponses et points
que l'on peut gagner.
"""
print("### QUESTION ###")
print(question['question'])
for index, reponse in enumerate(question['réponses']):
print(f'{index+1} - {reponse[0]}')
print(f"Cette question rapporte {question['score']} points.\n")
def valider_reponse(reponses):
""" Demande à l'utilisateur d'entrer une réponse.
True : La réponse est correcte
False : La réponse n'est pas correcte
-1 : Fin du programme
"""
while True:
choix = input("Quel est votre choix? (N° de la réponse) ")
if choix == 'stop':
return -1
try:
choix = int(choix)
if 0 < choix <= len(reponses):
return reponses[choix-1][1]
else:
print("Cette réponse n'existe pas.")
except ValueError:
print("Vous devez entrer un nombre.")
if __name__ == '__main__':
questions = [
{
'question': "Quelle est la couleur du cheval blanc d'Henri IV?",
'réponses': [
("Blanche", True),
("Noire", False),
("Rose", False),
("Marron", False)
],
'score': 1
},
{
'question': "Quel est le plus haut mont de France?",
'réponses': [
("Canigou", False),
("Pic Saint Loup", False),
("Montagne Noire", False),
("Mont Blanc", True)
],
'score': 10
},
]
score = 0
while True:
question = random.choice(questions)
afficher_question(question)
reponse = valider_reponse(question['réponses'])
if reponse == -1:
break
elif reponse == 1:
score += question['score']
print("VRAI")
else:
print("FAUX")
print(f"\n### SCORE ###\nPoints : {score}\n")
print("Le programme est maintenant terminé. Merci d'avoir participé.")
|
<commit_before><commit_msg>Add the second version of QCM<commit_after>"""
QCM Version n°2
"""
import random
def afficher_question(question):
""" Affiche les informations de la question : question, réponses et points
que l'on peut gagner.
"""
print("### QUESTION ###")
print(question['question'])
for index, reponse in enumerate(question['réponses']):
print(f'{index+1} - {reponse[0]}')
print(f"Cette question rapporte {question['score']} points.\n")
def valider_reponse(reponses):
""" Demande à l'utilisateur d'entrer une réponse.
True : La réponse est correcte
False : La réponse n'est pas correcte
-1 : Fin du programme
"""
while True:
choix = input("Quel est votre choix? (N° de la réponse) ")
if choix == 'stop':
return -1
try:
choix = int(choix)
if 0 < choix <= len(reponses):
return reponses[choix-1][1]
else:
print("Cette réponse n'existe pas.")
except ValueError:
print("Vous devez entrer un nombre.")
if __name__ == '__main__':
questions = [
{
'question': "Quelle est la couleur du cheval blanc d'Henri IV?",
'réponses': [
("Blanche", True),
("Noire", False),
("Rose", False),
("Marron", False)
],
'score': 1
},
{
'question': "Quel est le plus haut mont de France?",
'réponses': [
("Canigou", False),
("Pic Saint Loup", False),
("Montagne Noire", False),
("Mont Blanc", True)
],
'score': 10
},
]
score = 0
while True:
question = random.choice(questions)
afficher_question(question)
reponse = valider_reponse(question['réponses'])
if reponse == -1:
break
elif reponse == 1:
score += question['score']
print("VRAI")
else:
print("FAUX")
print(f"\n### SCORE ###\nPoints : {score}\n")
print("Le programme est maintenant terminé. Merci d'avoir participé.")
|
|
db80f597fe9e9b78eab31a2a2e3881ae5f238d32
|
logging_example.py
|
logging_example.py
|
import logging
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)-15s] [%(levelname)s] %(message)s')
from collector import GraphiteCollector
server = GraphiteCollector("shamir.wu", prefix="myPrefix", delay=20)
@server.metric()
def myMetric():
return 1234
if __name__ == '__main__':
server.feed()
|
Add an example with logging
|
Add an example with logging
|
Python
|
mit
|
C4ptainCrunch/graphite_feeder.py
|
Add an example with logging
|
import logging
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)-15s] [%(levelname)s] %(message)s')
from collector import GraphiteCollector
server = GraphiteCollector("shamir.wu", prefix="myPrefix", delay=20)
@server.metric()
def myMetric():
return 1234
if __name__ == '__main__':
server.feed()
|
<commit_before><commit_msg>Add an example with logging<commit_after>
|
import logging
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)-15s] [%(levelname)s] %(message)s')
from collector import GraphiteCollector
server = GraphiteCollector("shamir.wu", prefix="myPrefix", delay=20)
@server.metric()
def myMetric():
return 1234
if __name__ == '__main__':
server.feed()
|
Add an example with loggingimport logging
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)-15s] [%(levelname)s] %(message)s')
from collector import GraphiteCollector
server = GraphiteCollector("shamir.wu", prefix="myPrefix", delay=20)
@server.metric()
def myMetric():
return 1234
if __name__ == '__main__':
server.feed()
|
<commit_before><commit_msg>Add an example with logging<commit_after>import logging
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)-15s] [%(levelname)s] %(message)s')
from collector import GraphiteCollector
server = GraphiteCollector("shamir.wu", prefix="myPrefix", delay=20)
@server.metric()
def myMetric():
return 1234
if __name__ == '__main__':
server.feed()
|
|
53b7e6e41867298b1546093aaf6bdf1e3163d9ca
|
skimage/measure/tests/test_fit.py
|
skimage/measure/tests/test_fit.py
|
import numpy as np
from numpy.testing import assert_equal, assert_raises, assert_almost_equal
from skimage.measure import LineModel, CircleModel, EllipseModel
def test_line_model_invalid_input():
assert_raises(ValueError, LineModel().estimate, np.empty((5, 3)))
def test_line_model_predict():
model = LineModel()
model._params = (10, 1)
x = np.arange(-10, 10)
y = model.predict_y(x)
assert_almost_equal(x, model.predict_x(y))
def test_line_model_is_degenerate():
assert_equal(LineModel().is_degenerate(np.empty((1, 2))), True)
def test_line_model_estimate():
# generate original data without noise
model0 = LineModel()
model0._params = (10, 1)
x0 = np.arange(-100, 100)
y0 = model0.predict_y(x0)
data0 = np.column_stack([x0, y0])
# add gaussian noise to data
np.random.seed(1234)
data = data0 + np.random.normal(size=data0.shape)
# estimate parameters of noisy data
model_est = LineModel()
model_est.estimate(data)
# test whether estimated parameters almost equals original parameters
assert_almost_equal(model0._params, model_est._params, 1)
if __name__ == "__main__":
np.testing.run_module_suite()
|
Add test cases for line model
|
Add test cases for line model
|
Python
|
bsd-3-clause
|
keflavich/scikit-image,SamHames/scikit-image,SamHames/scikit-image,rjeli/scikit-image,Britefury/scikit-image,emon10005/scikit-image,ClinicalGraphics/scikit-image,ClinicalGraphics/scikit-image,juliusbierk/scikit-image,warmspringwinds/scikit-image,chintak/scikit-image,paalge/scikit-image,GaZ3ll3/scikit-image,ofgulban/scikit-image,ajaybhat/scikit-image,keflavich/scikit-image,youprofit/scikit-image,WarrenWeckesser/scikits-image,SamHames/scikit-image,vighneshbirodkar/scikit-image,WarrenWeckesser/scikits-image,chintak/scikit-image,almarklein/scikit-image,ofgulban/scikit-image,chintak/scikit-image,warmspringwinds/scikit-image,blink1073/scikit-image,emon10005/scikit-image,Hiyorimi/scikit-image,juliusbierk/scikit-image,dpshelio/scikit-image,ofgulban/scikit-image,Hiyorimi/scikit-image,jwiggins/scikit-image,michaelpacer/scikit-image,Midafi/scikit-image,bsipocz/scikit-image,newville/scikit-image,pratapvardhan/scikit-image,dpshelio/scikit-image,pratapvardhan/scikit-image,paalge/scikit-image,Midafi/scikit-image,ajaybhat/scikit-image,youprofit/scikit-image,GaZ3ll3/scikit-image,jwiggins/scikit-image,vighneshbirodkar/scikit-image,almarklein/scikit-image,newville/scikit-image,SamHames/scikit-image,bennlich/scikit-image,robintw/scikit-image,chriscrosscutler/scikit-image,oew1v07/scikit-image,almarklein/scikit-image,bsipocz/scikit-image,michaelpacer/scikit-image,oew1v07/scikit-image,michaelaye/scikit-image,rjeli/scikit-image,almarklein/scikit-image,chriscrosscutler/scikit-image,robintw/scikit-image,paalge/scikit-image,rjeli/scikit-image,blink1073/scikit-image,michaelaye/scikit-image,chintak/scikit-image,bennlich/scikit-image,vighneshbirodkar/scikit-image,Britefury/scikit-image
|
Add test cases for line model
|
import numpy as np
from numpy.testing import assert_equal, assert_raises, assert_almost_equal
from skimage.measure import LineModel, CircleModel, EllipseModel
def test_line_model_invalid_input():
assert_raises(ValueError, LineModel().estimate, np.empty((5, 3)))
def test_line_model_predict():
model = LineModel()
model._params = (10, 1)
x = np.arange(-10, 10)
y = model.predict_y(x)
assert_almost_equal(x, model.predict_x(y))
def test_line_model_is_degenerate():
assert_equal(LineModel().is_degenerate(np.empty((1, 2))), True)
def test_line_model_estimate():
# generate original data without noise
model0 = LineModel()
model0._params = (10, 1)
x0 = np.arange(-100, 100)
y0 = model0.predict_y(x0)
data0 = np.column_stack([x0, y0])
# add gaussian noise to data
np.random.seed(1234)
data = data0 + np.random.normal(size=data0.shape)
# estimate parameters of noisy data
model_est = LineModel()
model_est.estimate(data)
# test whether estimated parameters almost equals original parameters
assert_almost_equal(model0._params, model_est._params, 1)
if __name__ == "__main__":
np.testing.run_module_suite()
|
<commit_before><commit_msg>Add test cases for line model<commit_after>
|
import numpy as np
from numpy.testing import assert_equal, assert_raises, assert_almost_equal
from skimage.measure import LineModel, CircleModel, EllipseModel
def test_line_model_invalid_input():
assert_raises(ValueError, LineModel().estimate, np.empty((5, 3)))
def test_line_model_predict():
model = LineModel()
model._params = (10, 1)
x = np.arange(-10, 10)
y = model.predict_y(x)
assert_almost_equal(x, model.predict_x(y))
def test_line_model_is_degenerate():
assert_equal(LineModel().is_degenerate(np.empty((1, 2))), True)
def test_line_model_estimate():
# generate original data without noise
model0 = LineModel()
model0._params = (10, 1)
x0 = np.arange(-100, 100)
y0 = model0.predict_y(x0)
data0 = np.column_stack([x0, y0])
# add gaussian noise to data
np.random.seed(1234)
data = data0 + np.random.normal(size=data0.shape)
# estimate parameters of noisy data
model_est = LineModel()
model_est.estimate(data)
# test whether estimated parameters almost equals original parameters
assert_almost_equal(model0._params, model_est._params, 1)
if __name__ == "__main__":
np.testing.run_module_suite()
|
Add test cases for line modelimport numpy as np
from numpy.testing import assert_equal, assert_raises, assert_almost_equal
from skimage.measure import LineModel, CircleModel, EllipseModel
def test_line_model_invalid_input():
assert_raises(ValueError, LineModel().estimate, np.empty((5, 3)))
def test_line_model_predict():
model = LineModel()
model._params = (10, 1)
x = np.arange(-10, 10)
y = model.predict_y(x)
assert_almost_equal(x, model.predict_x(y))
def test_line_model_is_degenerate():
assert_equal(LineModel().is_degenerate(np.empty((1, 2))), True)
def test_line_model_estimate():
# generate original data without noise
model0 = LineModel()
model0._params = (10, 1)
x0 = np.arange(-100, 100)
y0 = model0.predict_y(x0)
data0 = np.column_stack([x0, y0])
# add gaussian noise to data
np.random.seed(1234)
data = data0 + np.random.normal(size=data0.shape)
# estimate parameters of noisy data
model_est = LineModel()
model_est.estimate(data)
# test whether estimated parameters almost equals original parameters
assert_almost_equal(model0._params, model_est._params, 1)
if __name__ == "__main__":
np.testing.run_module_suite()
|
<commit_before><commit_msg>Add test cases for line model<commit_after>import numpy as np
from numpy.testing import assert_equal, assert_raises, assert_almost_equal
from skimage.measure import LineModel, CircleModel, EllipseModel
def test_line_model_invalid_input():
assert_raises(ValueError, LineModel().estimate, np.empty((5, 3)))
def test_line_model_predict():
model = LineModel()
model._params = (10, 1)
x = np.arange(-10, 10)
y = model.predict_y(x)
assert_almost_equal(x, model.predict_x(y))
def test_line_model_is_degenerate():
assert_equal(LineModel().is_degenerate(np.empty((1, 2))), True)
def test_line_model_estimate():
# generate original data without noise
model0 = LineModel()
model0._params = (10, 1)
x0 = np.arange(-100, 100)
y0 = model0.predict_y(x0)
data0 = np.column_stack([x0, y0])
# add gaussian noise to data
np.random.seed(1234)
data = data0 + np.random.normal(size=data0.shape)
# estimate parameters of noisy data
model_est = LineModel()
model_est.estimate(data)
# test whether estimated parameters almost equals original parameters
assert_almost_equal(model0._params, model_est._params, 1)
if __name__ == "__main__":
np.testing.run_module_suite()
|
|
e19dc2b980366baf9051cc2596244063c3bb3091
|
scripts/make-base-localizations.py
|
scripts/make-base-localizations.py
|
#!/usr/bin/env python
"""
make-base-localizations.py
Created by Alkis Evlogimenos on 2009-03-28.
"""
from glob import iglob
from itertools import chain
import logging
import os.path
import re
import sys
def FindEPGPRootDir():
if os.path.isfile('epgp.toc'):
return '.'
elif os.path.isfile('../epgp.toc'):
return '..'
else:
raise Exception, 'EPGP root not found!'
_LOCALIZED_STRING_RE = re.compile(r'L\[.*\]')
def main():
strings = []
base_dir = FindEPGPRootDir()
logging.info('Extracting localization strings from files')
for file in chain(iglob(os.path.join(base_dir, '*.lua')),
iglob(os.path.join(base_dir, '*.xml'))):
text = open(file).read()
localized_strings = _LOCALIZED_STRING_RE.findall(text)
strings.extend(localized_strings)
logging.info('Uniquifying strings')
strings = list(set(strings))
logging.info('Sorting strings')
strings.sort()
filename = os.path.join(base_dir, 'localization', 'localization.enUS.lua')
logging.info('Writing %s' % filename)
file = open(filename, 'w')
file.writelines([
'local L = LibStub("AceLocale-3.0"):NewLocale("EPGP", "enUS", true)',
'\n',
'if not L then return end',
'\n',
'\n',
])
for string in strings:
file.write(string)
file.write(' = true\n')
file.close()
if __name__ == "__main__":
logging.basicConfig(format=("%(asctime)s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
logging.getLogger().setLevel(logging.INFO)
sys.exit(main())
|
Add script that scans all lua files, extracts localizations and writes a base enUS localization file.
|
Add script that scans all lua files, extracts localizations and writes a base enUS localization file.
|
Python
|
bsd-3-clause
|
sheldon/epgp,sheldon/epgp,protomech/epgp-dkp-reloaded,protomech/epgp-dkp-reloaded,ceason/epgp-tfatf,hayword/tfatf_epgp,hayword/tfatf_epgp,ceason/epgp-tfatf
|
Add script that scans all lua files, extracts localizations and writes a base enUS localization file.
|
#!/usr/bin/env python
"""
make-base-localizations.py
Created by Alkis Evlogimenos on 2009-03-28.
"""
from glob import iglob
from itertools import chain
import logging
import os.path
import re
import sys
def FindEPGPRootDir():
if os.path.isfile('epgp.toc'):
return '.'
elif os.path.isfile('../epgp.toc'):
return '..'
else:
raise Exception, 'EPGP root not found!'
_LOCALIZED_STRING_RE = re.compile(r'L\[.*\]')
def main():
strings = []
base_dir = FindEPGPRootDir()
logging.info('Extracting localization strings from files')
for file in chain(iglob(os.path.join(base_dir, '*.lua')),
iglob(os.path.join(base_dir, '*.xml'))):
text = open(file).read()
localized_strings = _LOCALIZED_STRING_RE.findall(text)
strings.extend(localized_strings)
logging.info('Uniquifying strings')
strings = list(set(strings))
logging.info('Sorting strings')
strings.sort()
filename = os.path.join(base_dir, 'localization', 'localization.enUS.lua')
logging.info('Writing %s' % filename)
file = open(filename, 'w')
file.writelines([
'local L = LibStub("AceLocale-3.0"):NewLocale("EPGP", "enUS", true)',
'\n',
'if not L then return end',
'\n',
'\n',
])
for string in strings:
file.write(string)
file.write(' = true\n')
file.close()
if __name__ == "__main__":
logging.basicConfig(format=("%(asctime)s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
logging.getLogger().setLevel(logging.INFO)
sys.exit(main())
|
<commit_before><commit_msg>Add script that scans all lua files, extracts localizations and writes a base enUS localization file.<commit_after>
|
#!/usr/bin/env python
"""
make-base-localizations.py
Created by Alkis Evlogimenos on 2009-03-28.
"""
from glob import iglob
from itertools import chain
import logging
import os.path
import re
import sys
def FindEPGPRootDir():
if os.path.isfile('epgp.toc'):
return '.'
elif os.path.isfile('../epgp.toc'):
return '..'
else:
raise Exception, 'EPGP root not found!'
_LOCALIZED_STRING_RE = re.compile(r'L\[.*\]')
def main():
strings = []
base_dir = FindEPGPRootDir()
logging.info('Extracting localization strings from files')
for file in chain(iglob(os.path.join(base_dir, '*.lua')),
iglob(os.path.join(base_dir, '*.xml'))):
text = open(file).read()
localized_strings = _LOCALIZED_STRING_RE.findall(text)
strings.extend(localized_strings)
logging.info('Uniquifying strings')
strings = list(set(strings))
logging.info('Sorting strings')
strings.sort()
filename = os.path.join(base_dir, 'localization', 'localization.enUS.lua')
logging.info('Writing %s' % filename)
file = open(filename, 'w')
file.writelines([
'local L = LibStub("AceLocale-3.0"):NewLocale("EPGP", "enUS", true)',
'\n',
'if not L then return end',
'\n',
'\n',
])
for string in strings:
file.write(string)
file.write(' = true\n')
file.close()
if __name__ == "__main__":
logging.basicConfig(format=("%(asctime)s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
logging.getLogger().setLevel(logging.INFO)
sys.exit(main())
|
Add script that scans all lua files, extracts localizations and writes a base enUS localization file.#!/usr/bin/env python
"""
make-base-localizations.py
Created by Alkis Evlogimenos on 2009-03-28.
"""
from glob import iglob
from itertools import chain
import logging
import os.path
import re
import sys
def FindEPGPRootDir():
if os.path.isfile('epgp.toc'):
return '.'
elif os.path.isfile('../epgp.toc'):
return '..'
else:
raise Exception, 'EPGP root not found!'
_LOCALIZED_STRING_RE = re.compile(r'L\[.*\]')
def main():
strings = []
base_dir = FindEPGPRootDir()
logging.info('Extracting localization strings from files')
for file in chain(iglob(os.path.join(base_dir, '*.lua')),
iglob(os.path.join(base_dir, '*.xml'))):
text = open(file).read()
localized_strings = _LOCALIZED_STRING_RE.findall(text)
strings.extend(localized_strings)
logging.info('Uniquifying strings')
strings = list(set(strings))
logging.info('Sorting strings')
strings.sort()
filename = os.path.join(base_dir, 'localization', 'localization.enUS.lua')
logging.info('Writing %s' % filename)
file = open(filename, 'w')
file.writelines([
'local L = LibStub("AceLocale-3.0"):NewLocale("EPGP", "enUS", true)',
'\n',
'if not L then return end',
'\n',
'\n',
])
for string in strings:
file.write(string)
file.write(' = true\n')
file.close()
if __name__ == "__main__":
logging.basicConfig(format=("%(asctime)s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
logging.getLogger().setLevel(logging.INFO)
sys.exit(main())
|
<commit_before><commit_msg>Add script that scans all lua files, extracts localizations and writes a base enUS localization file.<commit_after>#!/usr/bin/env python
"""
make-base-localizations.py
Created by Alkis Evlogimenos on 2009-03-28.
"""
from glob import iglob
from itertools import chain
import logging
import os.path
import re
import sys
def FindEPGPRootDir():
if os.path.isfile('epgp.toc'):
return '.'
elif os.path.isfile('../epgp.toc'):
return '..'
else:
raise Exception, 'EPGP root not found!'
_LOCALIZED_STRING_RE = re.compile(r'L\[.*\]')
def main():
strings = []
base_dir = FindEPGPRootDir()
logging.info('Extracting localization strings from files')
for file in chain(iglob(os.path.join(base_dir, '*.lua')),
iglob(os.path.join(base_dir, '*.xml'))):
text = open(file).read()
localized_strings = _LOCALIZED_STRING_RE.findall(text)
strings.extend(localized_strings)
logging.info('Uniquifying strings')
strings = list(set(strings))
logging.info('Sorting strings')
strings.sort()
filename = os.path.join(base_dir, 'localization', 'localization.enUS.lua')
logging.info('Writing %s' % filename)
file = open(filename, 'w')
file.writelines([
'local L = LibStub("AceLocale-3.0"):NewLocale("EPGP", "enUS", true)',
'\n',
'if not L then return end',
'\n',
'\n',
])
for string in strings:
file.write(string)
file.write(' = true\n')
file.close()
if __name__ == "__main__":
logging.basicConfig(format=("%(asctime)s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
logging.getLogger().setLevel(logging.INFO)
sys.exit(main())
|
|
25f424a8d8328b7e869b06a9bfa4a891580a8960
|
lib/history_widget.py
|
lib/history_widget.py
|
from PyQt4.QtGui import *
from i18n import _
class HistoryWidget(QTreeWidget):
def __init__(self, parent=None):
QTreeWidget.__init__(self, parent)
self.setColumnCount(2)
self.setHeaderLabels([_("Amount"), _("To / From"), _("When")])
self.setIndentation(0)
def empty(self):
self.clear()
def append(self, address, amount, date):
item = QTreeWidgetItem([amount, address, date])
self.insertTopLevelItem(0, item)
|
from PyQt4.QtGui import *
from i18n import _
class HistoryWidget(QTreeWidget):
def __init__(self, parent=None):
QTreeWidget.__init__(self, parent)
self.setColumnCount(2)
self.setHeaderLabels([_("Amount"), _("To / From"), _("When")])
self.setIndentation(0)
def empty(self):
self.clear()
def append(self, address, amount, date):
if address is None:
address = "Unknown"
if amount is None:
amount = "Unknown"
if date is None:
date = "Unknown"
item = QTreeWidgetItem([amount, address, date])
self.insertTopLevelItem(0, item)
|
Fix for slush's problem, perhaps
|
Fix for slush's problem, perhaps
|
Python
|
mit
|
dabura667/electrum,spesmilo/electrum,dabura667/electrum,kyuupichan/electrum,cryptapus/electrum,wakiyamap/electrum-mona,fireduck64/electrum,digitalbitbox/electrum,fujicoin/electrum-fjc,neocogent/electrum,argentumproject/electrum-arg,dabura667/electrum,pooler/electrum-ltc,fyookball/electrum,fireduck64/electrum,FairCoinTeam/electrum-fair,pknight007/electrum-vtc,procrasti/electrum,neocogent/electrum,cryptapus/electrum-myr,argentumproject/electrum-arg,kyuupichan/electrum,fireduck64/electrum,spesmilo/electrum,vertcoin/electrum-vtc,asfin/electrum,argentumproject/electrum-arg,fyookball/electrum,cryptapus/electrum-uno,cryptapus/electrum-myr,FairCoinTeam/electrum-fair,dabura667/electrum,protonn/Electrum-Cash,pooler/electrum-ltc,wakiyamap/electrum-mona,fireduck64/electrum,digitalbitbox/electrum,imrehg/electrum,wakiyamap/electrum-mona,FairCoinTeam/electrum-fair,aasiutin/electrum,romanz/electrum,pknight007/electrum-vtc,pooler/electrum-ltc,kyuupichan/electrum,pknight007/electrum-vtc,aasiutin/electrum,pooler/electrum-ltc,protonn/Electrum-Cash,neocogent/electrum,imrehg/electrum,molecular/electrum,procrasti/electrum,aasiutin/electrum,cryptapus/electrum,vertcoin/electrum-vtc,dashpay/electrum-dash,protonn/Electrum-Cash,fyookball/electrum,digitalbitbox/electrum,cryptapus/electrum-uno,fujicoin/electrum-fjc,dashpay/electrum-dash,vertcoin/electrum-vtc,aasiutin/electrum,dashpay/electrum-dash,argentumproject/electrum-arg,lbryio/lbryum,wakiyamap/electrum-mona,imrehg/electrum,fujicoin/electrum-fjc,molecular/electrum,procrasti/electrum,romanz/electrum,pknight007/electrum-vtc,protonn/Electrum-Cash,cryptapus/electrum-myr,molecular/electrum,imrehg/electrum,cryptapus/electrum-uno,romanz/electrum,procrasti/electrum,vialectrum/vialectrum,asfin/electrum,vialectrum/vialectrum,spesmilo/electrum,cryptapus/electrum,asfin/electrum,vertcoin/electrum-vtc,cryptapus/electrum-myr,digitalbitbox/electrum,vialectrum/vialectrum,FairCoinTeam/electrum-fair,dashpay/electrum-dash,cryptapus/electrum-uno,molecular/electrum,lbryio/lbryum,spesmilo/electrum
|
from PyQt4.QtGui import *
from i18n import _
class HistoryWidget(QTreeWidget):
def __init__(self, parent=None):
QTreeWidget.__init__(self, parent)
self.setColumnCount(2)
self.setHeaderLabels([_("Amount"), _("To / From"), _("When")])
self.setIndentation(0)
def empty(self):
self.clear()
def append(self, address, amount, date):
item = QTreeWidgetItem([amount, address, date])
self.insertTopLevelItem(0, item)
Fix for slush's problem, perhaps
|
from PyQt4.QtGui import *
from i18n import _
class HistoryWidget(QTreeWidget):
def __init__(self, parent=None):
QTreeWidget.__init__(self, parent)
self.setColumnCount(2)
self.setHeaderLabels([_("Amount"), _("To / From"), _("When")])
self.setIndentation(0)
def empty(self):
self.clear()
def append(self, address, amount, date):
if address is None:
address = "Unknown"
if amount is None:
amount = "Unknown"
if date is None:
date = "Unknown"
item = QTreeWidgetItem([amount, address, date])
self.insertTopLevelItem(0, item)
|
<commit_before>from PyQt4.QtGui import *
from i18n import _
class HistoryWidget(QTreeWidget):
def __init__(self, parent=None):
QTreeWidget.__init__(self, parent)
self.setColumnCount(2)
self.setHeaderLabels([_("Amount"), _("To / From"), _("When")])
self.setIndentation(0)
def empty(self):
self.clear()
def append(self, address, amount, date):
item = QTreeWidgetItem([amount, address, date])
self.insertTopLevelItem(0, item)
<commit_msg>Fix for slush's problem, perhaps<commit_after>
|
from PyQt4.QtGui import *
from i18n import _
class HistoryWidget(QTreeWidget):
def __init__(self, parent=None):
QTreeWidget.__init__(self, parent)
self.setColumnCount(2)
self.setHeaderLabels([_("Amount"), _("To / From"), _("When")])
self.setIndentation(0)
def empty(self):
self.clear()
def append(self, address, amount, date):
if address is None:
address = "Unknown"
if amount is None:
amount = "Unknown"
if date is None:
date = "Unknown"
item = QTreeWidgetItem([amount, address, date])
self.insertTopLevelItem(0, item)
|
from PyQt4.QtGui import *
from i18n import _
class HistoryWidget(QTreeWidget):
def __init__(self, parent=None):
QTreeWidget.__init__(self, parent)
self.setColumnCount(2)
self.setHeaderLabels([_("Amount"), _("To / From"), _("When")])
self.setIndentation(0)
def empty(self):
self.clear()
def append(self, address, amount, date):
item = QTreeWidgetItem([amount, address, date])
self.insertTopLevelItem(0, item)
Fix for slush's problem, perhapsfrom PyQt4.QtGui import *
from i18n import _
class HistoryWidget(QTreeWidget):
def __init__(self, parent=None):
QTreeWidget.__init__(self, parent)
self.setColumnCount(2)
self.setHeaderLabels([_("Amount"), _("To / From"), _("When")])
self.setIndentation(0)
def empty(self):
self.clear()
def append(self, address, amount, date):
if address is None:
address = "Unknown"
if amount is None:
amount = "Unknown"
if date is None:
date = "Unknown"
item = QTreeWidgetItem([amount, address, date])
self.insertTopLevelItem(0, item)
|
<commit_before>from PyQt4.QtGui import *
from i18n import _
class HistoryWidget(QTreeWidget):
def __init__(self, parent=None):
QTreeWidget.__init__(self, parent)
self.setColumnCount(2)
self.setHeaderLabels([_("Amount"), _("To / From"), _("When")])
self.setIndentation(0)
def empty(self):
self.clear()
def append(self, address, amount, date):
item = QTreeWidgetItem([amount, address, date])
self.insertTopLevelItem(0, item)
<commit_msg>Fix for slush's problem, perhaps<commit_after>from PyQt4.QtGui import *
from i18n import _
class HistoryWidget(QTreeWidget):
def __init__(self, parent=None):
QTreeWidget.__init__(self, parent)
self.setColumnCount(2)
self.setHeaderLabels([_("Amount"), _("To / From"), _("When")])
self.setIndentation(0)
def empty(self):
self.clear()
def append(self, address, amount, date):
if address is None:
address = "Unknown"
if amount is None:
amount = "Unknown"
if date is None:
date = "Unknown"
item = QTreeWidgetItem([amount, address, date])
self.insertTopLevelItem(0, item)
|
654b1dd1d9ab3d86be7ab3ca1842157c5e42b66d
|
tests/automated/test_model_Building.py
|
tests/automated/test_model_Building.py
|
from django.test import TestCase
from complaints.models import Building
class BuildingTestCase(TestCase):
""" Tests for the Building model.
"""
def setUp(self):
self.b1_name = 'The University of Toronto',
self.b1_civic_address = '27 King\'s College Circle',
self.b1_city = 'Toronto',
self.b1_province = 'Ontario',
self.b1_latitude = 43.6611024,
self.b1_longitude = -79.39592909999999
self.b2_name = '#32 27 King\'s College Circle',
Building.objects.create(
name=self.b1_name,
civic_address=self.b1_civic_address,
city=self.b1_city,
province=self.b1_province,
latitude=self.b1_latitude,
longitude=self.b1_longitude,
)
Building.objects.create(
name=self.b2_name,
civic_address=self.b1_civic_address,
city=self.b1_city,
province=self.b1_province,
latitude=self.b1_latitude,
longitude=self.b1_longitude,
)
def test_existence(self):
"""
Test if building exists in the database.
"""
building = Building.objects.get(id=1)
self.assertTrue(building)
self.assertEqual(self.b1_name, building.name)
self.assertEqual(self.b1_civic_address, building.civic_address)
self.assertEqual(self.b1_city, building.city)
self.assertEqual(self.b1_province, building.province)
self.assertEqual(self.b1_latitude, building.latitude)
self.assertEqual(self.b1_longitude, building.longitude)
def test_valid_slug_simple(self):
"""
Test if slug fits pattern.
"""
building = Building.objects.get(id=1)
expected = 'the-university-of-toronto'
self.assertEqual(expected, building.slug)
def test_valid_slug_complex(self):
"""
Test if slug of name with symbols fits pattern.
"""
building = Building.objects.get(id=2)
expected = '32-27-kings-college-circle'
self.assertEqual(expected, building.slug)
def test_absolute_url_simple(self):
"""
Test if URL of building fits pattern.
"""
building = Building.objects.get(id=1)
expected = '/building/1/the-university-of-toronto/'
self.assertEqual(expected, building.get_absolute_url())
def test_absolute_url_complex(self):
"""
Test if URL of building that is not first and has more complicated name
fits pattern.
"""
building = Building.objects.get(id=2)
expected = '/building/2/32-27-kings-college-circle/'
self.assertEqual(expected, building.get_absolute_url())
|
Add tests for Building model
|
Add tests for Building model
|
Python
|
mit
|
CSC301H-Fall2013/healthyhome,CSC301H-Fall2013/healthyhome
|
Add tests for Building model
|
from django.test import TestCase
from complaints.models import Building
class BuildingTestCase(TestCase):
""" Tests for the Building model.
"""
def setUp(self):
self.b1_name = 'The University of Toronto',
self.b1_civic_address = '27 King\'s College Circle',
self.b1_city = 'Toronto',
self.b1_province = 'Ontario',
self.b1_latitude = 43.6611024,
self.b1_longitude = -79.39592909999999
self.b2_name = '#32 27 King\'s College Circle',
Building.objects.create(
name=self.b1_name,
civic_address=self.b1_civic_address,
city=self.b1_city,
province=self.b1_province,
latitude=self.b1_latitude,
longitude=self.b1_longitude,
)
Building.objects.create(
name=self.b2_name,
civic_address=self.b1_civic_address,
city=self.b1_city,
province=self.b1_province,
latitude=self.b1_latitude,
longitude=self.b1_longitude,
)
def test_existence(self):
"""
Test if building exists in the database.
"""
building = Building.objects.get(id=1)
self.assertTrue(building)
self.assertEqual(self.b1_name, building.name)
self.assertEqual(self.b1_civic_address, building.civic_address)
self.assertEqual(self.b1_city, building.city)
self.assertEqual(self.b1_province, building.province)
self.assertEqual(self.b1_latitude, building.latitude)
self.assertEqual(self.b1_longitude, building.longitude)
def test_valid_slug_simple(self):
"""
Test if slug fits pattern.
"""
building = Building.objects.get(id=1)
expected = 'the-university-of-toronto'
self.assertEqual(expected, building.slug)
def test_valid_slug_complex(self):
"""
Test if slug of name with symbols fits pattern.
"""
building = Building.objects.get(id=2)
expected = '32-27-kings-college-circle'
self.assertEqual(expected, building.slug)
def test_absolute_url_simple(self):
"""
Test if URL of building fits pattern.
"""
building = Building.objects.get(id=1)
expected = '/building/1/the-university-of-toronto/'
self.assertEqual(expected, building.get_absolute_url())
def test_absolute_url_complex(self):
"""
Test if URL of building that is not first and has more complicated name
fits pattern.
"""
building = Building.objects.get(id=2)
expected = '/building/2/32-27-kings-college-circle/'
self.assertEqual(expected, building.get_absolute_url())
|
<commit_before><commit_msg>Add tests for Building model<commit_after>
|
from django.test import TestCase
from complaints.models import Building
class BuildingTestCase(TestCase):
""" Tests for the Building model.
"""
def setUp(self):
self.b1_name = 'The University of Toronto',
self.b1_civic_address = '27 King\'s College Circle',
self.b1_city = 'Toronto',
self.b1_province = 'Ontario',
self.b1_latitude = 43.6611024,
self.b1_longitude = -79.39592909999999
self.b2_name = '#32 27 King\'s College Circle',
Building.objects.create(
name=self.b1_name,
civic_address=self.b1_civic_address,
city=self.b1_city,
province=self.b1_province,
latitude=self.b1_latitude,
longitude=self.b1_longitude,
)
Building.objects.create(
name=self.b2_name,
civic_address=self.b1_civic_address,
city=self.b1_city,
province=self.b1_province,
latitude=self.b1_latitude,
longitude=self.b1_longitude,
)
def test_existence(self):
"""
Test if building exists in the database.
"""
building = Building.objects.get(id=1)
self.assertTrue(building)
self.assertEqual(self.b1_name, building.name)
self.assertEqual(self.b1_civic_address, building.civic_address)
self.assertEqual(self.b1_city, building.city)
self.assertEqual(self.b1_province, building.province)
self.assertEqual(self.b1_latitude, building.latitude)
self.assertEqual(self.b1_longitude, building.longitude)
def test_valid_slug_simple(self):
"""
Test if slug fits pattern.
"""
building = Building.objects.get(id=1)
expected = 'the-university-of-toronto'
self.assertEqual(expected, building.slug)
def test_valid_slug_complex(self):
"""
Test if slug of name with symbols fits pattern.
"""
building = Building.objects.get(id=2)
expected = '32-27-kings-college-circle'
self.assertEqual(expected, building.slug)
def test_absolute_url_simple(self):
"""
Test if URL of building fits pattern.
"""
building = Building.objects.get(id=1)
expected = '/building/1/the-university-of-toronto/'
self.assertEqual(expected, building.get_absolute_url())
def test_absolute_url_complex(self):
"""
Test if URL of building that is not first and has more complicated name
fits pattern.
"""
building = Building.objects.get(id=2)
expected = '/building/2/32-27-kings-college-circle/'
self.assertEqual(expected, building.get_absolute_url())
|
Add tests for Building modelfrom django.test import TestCase
from complaints.models import Building
class BuildingTestCase(TestCase):
""" Tests for the Building model.
"""
def setUp(self):
self.b1_name = 'The University of Toronto',
self.b1_civic_address = '27 King\'s College Circle',
self.b1_city = 'Toronto',
self.b1_province = 'Ontario',
self.b1_latitude = 43.6611024,
self.b1_longitude = -79.39592909999999
self.b2_name = '#32 27 King\'s College Circle',
Building.objects.create(
name=self.b1_name,
civic_address=self.b1_civic_address,
city=self.b1_city,
province=self.b1_province,
latitude=self.b1_latitude,
longitude=self.b1_longitude,
)
Building.objects.create(
name=self.b2_name,
civic_address=self.b1_civic_address,
city=self.b1_city,
province=self.b1_province,
latitude=self.b1_latitude,
longitude=self.b1_longitude,
)
def test_existence(self):
"""
Test if building exists in the database.
"""
building = Building.objects.get(id=1)
self.assertTrue(building)
self.assertEqual(self.b1_name, building.name)
self.assertEqual(self.b1_civic_address, building.civic_address)
self.assertEqual(self.b1_city, building.city)
self.assertEqual(self.b1_province, building.province)
self.assertEqual(self.b1_latitude, building.latitude)
self.assertEqual(self.b1_longitude, building.longitude)
def test_valid_slug_simple(self):
"""
Test if slug fits pattern.
"""
building = Building.objects.get(id=1)
expected = 'the-university-of-toronto'
self.assertEqual(expected, building.slug)
def test_valid_slug_complex(self):
"""
Test if slug of name with symbols fits pattern.
"""
building = Building.objects.get(id=2)
expected = '32-27-kings-college-circle'
self.assertEqual(expected, building.slug)
def test_absolute_url_simple(self):
"""
Test if URL of building fits pattern.
"""
building = Building.objects.get(id=1)
expected = '/building/1/the-university-of-toronto/'
self.assertEqual(expected, building.get_absolute_url())
def test_absolute_url_complex(self):
"""
Test if URL of building that is not first and has more complicated name
fits pattern.
"""
building = Building.objects.get(id=2)
expected = '/building/2/32-27-kings-college-circle/'
self.assertEqual(expected, building.get_absolute_url())
|
<commit_before><commit_msg>Add tests for Building model<commit_after>from django.test import TestCase
from complaints.models import Building
class BuildingTestCase(TestCase):
""" Tests for the Building model.
"""
def setUp(self):
self.b1_name = 'The University of Toronto',
self.b1_civic_address = '27 King\'s College Circle',
self.b1_city = 'Toronto',
self.b1_province = 'Ontario',
self.b1_latitude = 43.6611024,
self.b1_longitude = -79.39592909999999
self.b2_name = '#32 27 King\'s College Circle',
Building.objects.create(
name=self.b1_name,
civic_address=self.b1_civic_address,
city=self.b1_city,
province=self.b1_province,
latitude=self.b1_latitude,
longitude=self.b1_longitude,
)
Building.objects.create(
name=self.b2_name,
civic_address=self.b1_civic_address,
city=self.b1_city,
province=self.b1_province,
latitude=self.b1_latitude,
longitude=self.b1_longitude,
)
def test_existence(self):
"""
Test if building exists in the database.
"""
building = Building.objects.get(id=1)
self.assertTrue(building)
self.assertEqual(self.b1_name, building.name)
self.assertEqual(self.b1_civic_address, building.civic_address)
self.assertEqual(self.b1_city, building.city)
self.assertEqual(self.b1_province, building.province)
self.assertEqual(self.b1_latitude, building.latitude)
self.assertEqual(self.b1_longitude, building.longitude)
def test_valid_slug_simple(self):
"""
Test if slug fits pattern.
"""
building = Building.objects.get(id=1)
expected = 'the-university-of-toronto'
self.assertEqual(expected, building.slug)
def test_valid_slug_complex(self):
"""
Test if slug of name with symbols fits pattern.
"""
building = Building.objects.get(id=2)
expected = '32-27-kings-college-circle'
self.assertEqual(expected, building.slug)
def test_absolute_url_simple(self):
"""
Test if URL of building fits pattern.
"""
building = Building.objects.get(id=1)
expected = '/building/1/the-university-of-toronto/'
self.assertEqual(expected, building.get_absolute_url())
def test_absolute_url_complex(self):
"""
Test if URL of building that is not first and has more complicated name
fits pattern.
"""
building = Building.objects.get(id=2)
expected = '/building/2/32-27-kings-college-circle/'
self.assertEqual(expected, building.get_absolute_url())
|
|
6fc1ba22e93711e6edb5d4b71516cfc8a91e3333
|
tests/models/spells/test_dot_schema.py
|
tests/models/spells/test_dot_schema.py
|
import unittest
from tests.delete_test_db import delete_test_db # module that deletes the DB :)
import database.main
from tests.create_test_db import engine, session, Base
database.main.engine = engine
database.main.session = session
database.main.Base = Base
import models.main
from models.spells.spell_dots import DotSchema
from buffs import DoT
from damage import Damage
class DotSchemaTests(unittest.TestCase):
def setUp(self):
"""
Test that the DotSchema attributes are as expected
Test the convert_to_dot_object function
"""
self.entry = 1
self.name = 'Melting'
self.damage = Damage(magic_dmg=2)
self.duration = 2
def test_schema_attributes(self):
loaded_schema: DotSchema = session.query(DotSchema).get(self.entry)
self.assertIsNotNone(loaded_schema)
self.assertTrue(loaded_schema.entry, int)
self.assertTrue(loaded_schema.name, str)
self.assertTrue(loaded_schema.damage_per_tick, int)
self.assertTrue(loaded_schema.damage_school, str)
self.assertTrue(loaded_schema.duration, int)
self.assertTrue(loaded_schema.comment, str)
def tearDownModule():
delete_test_db()
if __name__ == '__main__':
unittest.main()
|
Test for the DotSchema class
|
Test for the DotSchema class
|
Python
|
mit
|
Enether/python_wow
|
Test for the DotSchema class
|
import unittest
from tests.delete_test_db import delete_test_db # module that deletes the DB :)
import database.main
from tests.create_test_db import engine, session, Base
database.main.engine = engine
database.main.session = session
database.main.Base = Base
import models.main
from models.spells.spell_dots import DotSchema
from buffs import DoT
from damage import Damage
class DotSchemaTests(unittest.TestCase):
def setUp(self):
"""
Test that the DotSchema attributes are as expected
Test the convert_to_dot_object function
"""
self.entry = 1
self.name = 'Melting'
self.damage = Damage(magic_dmg=2)
self.duration = 2
def test_schema_attributes(self):
loaded_schema: DotSchema = session.query(DotSchema).get(self.entry)
self.assertIsNotNone(loaded_schema)
self.assertTrue(loaded_schema.entry, int)
self.assertTrue(loaded_schema.name, str)
self.assertTrue(loaded_schema.damage_per_tick, int)
self.assertTrue(loaded_schema.damage_school, str)
self.assertTrue(loaded_schema.duration, int)
self.assertTrue(loaded_schema.comment, str)
def tearDownModule():
delete_test_db()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test for the DotSchema class<commit_after>
|
import unittest
from tests.delete_test_db import delete_test_db # module that deletes the DB :)
import database.main
from tests.create_test_db import engine, session, Base
database.main.engine = engine
database.main.session = session
database.main.Base = Base
import models.main
from models.spells.spell_dots import DotSchema
from buffs import DoT
from damage import Damage
class DotSchemaTests(unittest.TestCase):
def setUp(self):
"""
Test that the DotSchema attributes are as expected
Test the convert_to_dot_object function
"""
self.entry = 1
self.name = 'Melting'
self.damage = Damage(magic_dmg=2)
self.duration = 2
def test_schema_attributes(self):
loaded_schema: DotSchema = session.query(DotSchema).get(self.entry)
self.assertIsNotNone(loaded_schema)
self.assertTrue(loaded_schema.entry, int)
self.assertTrue(loaded_schema.name, str)
self.assertTrue(loaded_schema.damage_per_tick, int)
self.assertTrue(loaded_schema.damage_school, str)
self.assertTrue(loaded_schema.duration, int)
self.assertTrue(loaded_schema.comment, str)
def tearDownModule():
delete_test_db()
if __name__ == '__main__':
unittest.main()
|
Test for the DotSchema classimport unittest
from tests.delete_test_db import delete_test_db # module that deletes the DB :)
import database.main
from tests.create_test_db import engine, session, Base
database.main.engine = engine
database.main.session = session
database.main.Base = Base
import models.main
from models.spells.spell_dots import DotSchema
from buffs import DoT
from damage import Damage
class DotSchemaTests(unittest.TestCase):
def setUp(self):
"""
Test that the DotSchema attributes are as expected
Test the convert_to_dot_object function
"""
self.entry = 1
self.name = 'Melting'
self.damage = Damage(magic_dmg=2)
self.duration = 2
def test_schema_attributes(self):
loaded_schema: DotSchema = session.query(DotSchema).get(self.entry)
self.assertIsNotNone(loaded_schema)
self.assertTrue(loaded_schema.entry, int)
self.assertTrue(loaded_schema.name, str)
self.assertTrue(loaded_schema.damage_per_tick, int)
self.assertTrue(loaded_schema.damage_school, str)
self.assertTrue(loaded_schema.duration, int)
self.assertTrue(loaded_schema.comment, str)
def tearDownModule():
delete_test_db()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test for the DotSchema class<commit_after>import unittest
from tests.delete_test_db import delete_test_db # module that deletes the DB :)
import database.main
from tests.create_test_db import engine, session, Base
database.main.engine = engine
database.main.session = session
database.main.Base = Base
import models.main
from models.spells.spell_dots import DotSchema
from buffs import DoT
from damage import Damage
class DotSchemaTests(unittest.TestCase):
def setUp(self):
"""
Test that the DotSchema attributes are as expected
Test the convert_to_dot_object function
"""
self.entry = 1
self.name = 'Melting'
self.damage = Damage(magic_dmg=2)
self.duration = 2
def test_schema_attributes(self):
loaded_schema: DotSchema = session.query(DotSchema).get(self.entry)
self.assertIsNotNone(loaded_schema)
self.assertTrue(loaded_schema.entry, int)
self.assertTrue(loaded_schema.name, str)
self.assertTrue(loaded_schema.damage_per_tick, int)
self.assertTrue(loaded_schema.damage_school, str)
self.assertTrue(loaded_schema.duration, int)
self.assertTrue(loaded_schema.comment, str)
def tearDownModule():
delete_test_db()
if __name__ == '__main__':
unittest.main()
|
|
89c5d88c0fc624d0135d513694f5fdb3ed220b08
|
adaptive/sample_rpc.py
|
adaptive/sample_rpc.py
|
# Python-to-Python RPC using pickle and HTTP
import os, urllib2, pickle
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
# Server side
class ServiceRequestHandler(BaseHTTPRequestHandler):
services = {} # name -> instance
def do_GET(self):
res = 404
error = "Not found"
body = None
components = self.path.split("/")
while components and not components[0].strip():
components = components[1:]
while components:
name = components[0].strip()
args = "/".join(components[1:])
try:
instance = ServiceRequestHandler.services[name]
except KeyError:
error = "Not found: %r" % name
break
try:
command, args, kwargs = pickle.loads(args.decode("base64"))
except Exception:
res = 400
error = "Bad Request: Failed to parse operation"
break
try:
method = getattr(instance, command)
except AttributeError:
error = "Not found: %s :: %r" % (name, command)
break
try:
output = True, method(*args, **kwargs)
except Exception as exc:
output = False, exc
res = 200
body = pickle.dumps(output)
error = None
break
if error:
self.send_error(res, error)
return
self.send_response(res)
self.end_headers()
if body:
self.wfile.write(body)
def add_instance(name, instance):
ServiceRequestHandler.services[name] = instance
def serve_forever():
port = int(os.environ.get("SERVER_PORT", 8080))
server = HTTPServer(("0.0.0.0", port), ServiceRequestHandler)
server.serve_forever()
# Client side
class Proxy(object):
def __init__(self, client, name):
self.client = client
self.name = name
def __call__(self, *args, **kwargs):
url = self.client.url + "/" + pickle.dumps((self.name, args, kwargs)).encode("base64").replace("\n", "")
u = urllib2.urlopen(url)
okay, res = pickle.loads(u.read())
if okay:
return res
raise res
class Client(object):
def __init__(self, url):
self.url = url
def __getattr__(self, attr):
return Proxy(self, attr)
# Sample
if __name__ == '__main__':
class Greeting(object):
def __init__(self):
self.counter = 0
def greet(self):
self.counter += 1
return '''{"id":%s,"content":"Hello, World!"}''' % self.counter
add_instance("Greeting", Greeting())
serve_forever()
|
Add sample RPC client and server framework
|
Add sample RPC client and server framework
|
Python
|
apache-2.0
|
datawire/adaptive
|
Add sample RPC client and server framework
|
# Python-to-Python RPC using pickle and HTTP
import os, urllib2, pickle
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
# Server side
class ServiceRequestHandler(BaseHTTPRequestHandler):
services = {} # name -> instance
def do_GET(self):
res = 404
error = "Not found"
body = None
components = self.path.split("/")
while components and not components[0].strip():
components = components[1:]
while components:
name = components[0].strip()
args = "/".join(components[1:])
try:
instance = ServiceRequestHandler.services[name]
except KeyError:
error = "Not found: %r" % name
break
try:
command, args, kwargs = pickle.loads(args.decode("base64"))
except Exception:
res = 400
error = "Bad Request: Failed to parse operation"
break
try:
method = getattr(instance, command)
except AttributeError:
error = "Not found: %s :: %r" % (name, command)
break
try:
output = True, method(*args, **kwargs)
except Exception as exc:
output = False, exc
res = 200
body = pickle.dumps(output)
error = None
break
if error:
self.send_error(res, error)
return
self.send_response(res)
self.end_headers()
if body:
self.wfile.write(body)
def add_instance(name, instance):
ServiceRequestHandler.services[name] = instance
def serve_forever():
port = int(os.environ.get("SERVER_PORT", 8080))
server = HTTPServer(("0.0.0.0", port), ServiceRequestHandler)
server.serve_forever()
# Client side
class Proxy(object):
def __init__(self, client, name):
self.client = client
self.name = name
def __call__(self, *args, **kwargs):
url = self.client.url + "/" + pickle.dumps((self.name, args, kwargs)).encode("base64").replace("\n", "")
u = urllib2.urlopen(url)
okay, res = pickle.loads(u.read())
if okay:
return res
raise res
class Client(object):
def __init__(self, url):
self.url = url
def __getattr__(self, attr):
return Proxy(self, attr)
# Sample
if __name__ == '__main__':
class Greeting(object):
def __init__(self):
self.counter = 0
def greet(self):
self.counter += 1
return '''{"id":%s,"content":"Hello, World!"}''' % self.counter
add_instance("Greeting", Greeting())
serve_forever()
|
<commit_before><commit_msg>Add sample RPC client and server framework<commit_after>
|
# Python-to-Python RPC using pickle and HTTP
import os, urllib2, pickle
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
# Server side
class ServiceRequestHandler(BaseHTTPRequestHandler):
services = {} # name -> instance
def do_GET(self):
res = 404
error = "Not found"
body = None
components = self.path.split("/")
while components and not components[0].strip():
components = components[1:]
while components:
name = components[0].strip()
args = "/".join(components[1:])
try:
instance = ServiceRequestHandler.services[name]
except KeyError:
error = "Not found: %r" % name
break
try:
command, args, kwargs = pickle.loads(args.decode("base64"))
except Exception:
res = 400
error = "Bad Request: Failed to parse operation"
break
try:
method = getattr(instance, command)
except AttributeError:
error = "Not found: %s :: %r" % (name, command)
break
try:
output = True, method(*args, **kwargs)
except Exception as exc:
output = False, exc
res = 200
body = pickle.dumps(output)
error = None
break
if error:
self.send_error(res, error)
return
self.send_response(res)
self.end_headers()
if body:
self.wfile.write(body)
def add_instance(name, instance):
ServiceRequestHandler.services[name] = instance
def serve_forever():
port = int(os.environ.get("SERVER_PORT", 8080))
server = HTTPServer(("0.0.0.0", port), ServiceRequestHandler)
server.serve_forever()
# Client side
class Proxy(object):
def __init__(self, client, name):
self.client = client
self.name = name
def __call__(self, *args, **kwargs):
url = self.client.url + "/" + pickle.dumps((self.name, args, kwargs)).encode("base64").replace("\n", "")
u = urllib2.urlopen(url)
okay, res = pickle.loads(u.read())
if okay:
return res
raise res
class Client(object):
def __init__(self, url):
self.url = url
def __getattr__(self, attr):
return Proxy(self, attr)
# Sample
if __name__ == '__main__':
class Greeting(object):
def __init__(self):
self.counter = 0
def greet(self):
self.counter += 1
return '''{"id":%s,"content":"Hello, World!"}''' % self.counter
add_instance("Greeting", Greeting())
serve_forever()
|
Add sample RPC client and server framework# Python-to-Python RPC using pickle and HTTP
import os, urllib2, pickle
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
# Server side
class ServiceRequestHandler(BaseHTTPRequestHandler):
services = {} # name -> instance
def do_GET(self):
res = 404
error = "Not found"
body = None
components = self.path.split("/")
while components and not components[0].strip():
components = components[1:]
while components:
name = components[0].strip()
args = "/".join(components[1:])
try:
instance = ServiceRequestHandler.services[name]
except KeyError:
error = "Not found: %r" % name
break
try:
command, args, kwargs = pickle.loads(args.decode("base64"))
except Exception:
res = 400
error = "Bad Request: Failed to parse operation"
break
try:
method = getattr(instance, command)
except AttributeError:
error = "Not found: %s :: %r" % (name, command)
break
try:
output = True, method(*args, **kwargs)
except Exception as exc:
output = False, exc
res = 200
body = pickle.dumps(output)
error = None
break
if error:
self.send_error(res, error)
return
self.send_response(res)
self.end_headers()
if body:
self.wfile.write(body)
def add_instance(name, instance):
ServiceRequestHandler.services[name] = instance
def serve_forever():
port = int(os.environ.get("SERVER_PORT", 8080))
server = HTTPServer(("0.0.0.0", port), ServiceRequestHandler)
server.serve_forever()
# Client side
class Proxy(object):
def __init__(self, client, name):
self.client = client
self.name = name
def __call__(self, *args, **kwargs):
url = self.client.url + "/" + pickle.dumps((self.name, args, kwargs)).encode("base64").replace("\n", "")
u = urllib2.urlopen(url)
okay, res = pickle.loads(u.read())
if okay:
return res
raise res
class Client(object):
def __init__(self, url):
self.url = url
def __getattr__(self, attr):
return Proxy(self, attr)
# Sample
if __name__ == '__main__':
class Greeting(object):
def __init__(self):
self.counter = 0
def greet(self):
self.counter += 1
return '''{"id":%s,"content":"Hello, World!"}''' % self.counter
add_instance("Greeting", Greeting())
serve_forever()
|
<commit_before><commit_msg>Add sample RPC client and server framework<commit_after># Python-to-Python RPC using pickle and HTTP
import os, urllib2, pickle
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
# Server side
class ServiceRequestHandler(BaseHTTPRequestHandler):
services = {} # name -> instance
def do_GET(self):
res = 404
error = "Not found"
body = None
components = self.path.split("/")
while components and not components[0].strip():
components = components[1:]
while components:
name = components[0].strip()
args = "/".join(components[1:])
try:
instance = ServiceRequestHandler.services[name]
except KeyError:
error = "Not found: %r" % name
break
try:
command, args, kwargs = pickle.loads(args.decode("base64"))
except Exception:
res = 400
error = "Bad Request: Failed to parse operation"
break
try:
method = getattr(instance, command)
except AttributeError:
error = "Not found: %s :: %r" % (name, command)
break
try:
output = True, method(*args, **kwargs)
except Exception as exc:
output = False, exc
res = 200
body = pickle.dumps(output)
error = None
break
if error:
self.send_error(res, error)
return
self.send_response(res)
self.end_headers()
if body:
self.wfile.write(body)
def add_instance(name, instance):
ServiceRequestHandler.services[name] = instance
def serve_forever():
port = int(os.environ.get("SERVER_PORT", 8080))
server = HTTPServer(("0.0.0.0", port), ServiceRequestHandler)
server.serve_forever()
# Client side
class Proxy(object):
def __init__(self, client, name):
self.client = client
self.name = name
def __call__(self, *args, **kwargs):
url = self.client.url + "/" + pickle.dumps((self.name, args, kwargs)).encode("base64").replace("\n", "")
u = urllib2.urlopen(url)
okay, res = pickle.loads(u.read())
if okay:
return res
raise res
class Client(object):
def __init__(self, url):
self.url = url
def __getattr__(self, attr):
return Proxy(self, attr)
# Sample
if __name__ == '__main__':
class Greeting(object):
def __init__(self):
self.counter = 0
def greet(self):
self.counter += 1
return '''{"id":%s,"content":"Hello, World!"}''' % self.counter
add_instance("Greeting", Greeting())
serve_forever()
|
|
5ae76b2edf6b58dd093b329ff9b329d31701eb8b
|
tests/test_descriptors.py
|
tests/test_descriptors.py
|
from __future__ import absolute_import
from pytest import fixture, raises
from openvpn_status.descriptors import (
LabelProperty, name_descriptors, iter_descriptors)
@fixture
def foo_class():
@name_descriptors
class Foo(object):
foo = LabelProperty('Foo')
bar = LabelProperty('Bar', default=lambda: 0, input_type=int)
baz = property(lambda self: self.bar)
biu = ()
return Foo
def test_label_and_its_name(foo_class):
foo = foo_class()
with raises(AttributeError):
foo.foo
assert foo.bar is 0
assert foo.baz is 0
foo.foo = u'1'
foo.bar = u'2'
assert foo.foo == u'1'
assert foo.bar == 2
assert foo.baz == 2
def test_iter_descriptors(foo_class):
assert dict(iter_descriptors(foo_class)) == {
'foo': foo_class.foo,
'bar': foo_class.bar,
'baz': foo_class.baz,
}
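# A minimal sketch of descriptors that would satisfy this test (an assumption
# for illustration only; the real openvpn_status.descriptors implementation
# may differ):
#
# class LabelProperty(object):
#     def __init__(self, label, default=None, input_type=None):
#         self.label, self.default, self.input_type = label, default, input_type
#         self.name = None  # filled in by name_descriptors
#     def __get__(self, obj, objtype=None):
#         if obj is None:
#             return self
#         try:
#             return obj.__dict__[self.name]
#         except KeyError:
#             if self.default is None:
#                 raise AttributeError(self.name)
#             return self.default()
#     def __set__(self, obj, value):
#         obj.__dict__[self.name] = self.input_type(value) if self.input_type else value
#
# def name_descriptors(cls):  # tell each LabelProperty its attribute name
#     for name, attr in vars(cls).items():
#         if isinstance(attr, LabelProperty):
#             attr.name = name
#     return cls
#
# def iter_descriptors(cls):  # yields LabelProperty and plain property alike
#     return ((n, a) for n, a in vars(cls).items()
#             if isinstance(a, (LabelProperty, property)))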
|
Test the descriptors and their helpers
|
Test the descriptors and their helpers
|
Python
|
mit
|
tonyseek/openvpn-status
|
Test the descriptors and their helpers
|
from __future__ import absolute_import
from pytest import fixture, raises
from openvpn_status.descriptors import (
LabelProperty, name_descriptors, iter_descriptors)
@fixture
def foo_class():
@name_descriptors
class Foo(object):
foo = LabelProperty('Foo')
bar = LabelProperty('Bar', default=lambda: 0, input_type=int)
baz = property(lambda self: self.bar)
biu = ()
return Foo
def test_label_and_its_name(foo_class):
foo = foo_class()
with raises(AttributeError):
foo.foo
assert foo.bar is 0
assert foo.baz is 0
foo.foo = u'1'
foo.bar = u'2'
assert foo.foo == u'1'
assert foo.bar == 2
assert foo.baz == 2
def test_iter_descriptors(foo_class):
assert dict(iter_descriptors(foo_class)) == {
'foo': foo_class.foo,
'bar': foo_class.bar,
'baz': foo_class.baz,
}
|
<commit_before><commit_msg>Test the descriptors and their helpers<commit_after>
|
from __future__ import absolute_import
from pytest import fixture, raises
from openvpn_status.descriptors import (
LabelProperty, name_descriptors, iter_descriptors)
@fixture
def foo_class():
@name_descriptors
class Foo(object):
foo = LabelProperty('Foo')
bar = LabelProperty('Bar', default=lambda: 0, input_type=int)
baz = property(lambda self: self.bar)
biu = ()
return Foo
def test_label_and_its_name(foo_class):
foo = foo_class()
with raises(AttributeError):
foo.foo
assert foo.bar is 0
assert foo.baz is 0
foo.foo = u'1'
foo.bar = u'2'
assert foo.foo == u'1'
assert foo.bar == 2
assert foo.baz == 2
def test_iter_descriptors(foo_class):
assert dict(iter_descriptors(foo_class)) == {
'foo': foo_class.foo,
'bar': foo_class.bar,
'baz': foo_class.baz,
}
|
Test the descriptors and their helpersfrom __future__ import absolute_import
from pytest import fixture, raises
from openvpn_status.descriptors import (
LabelProperty, name_descriptors, iter_descriptors)
@fixture
def foo_class():
@name_descriptors
class Foo(object):
foo = LabelProperty('Foo')
bar = LabelProperty('Bar', default=lambda: 0, input_type=int)
baz = property(lambda self: self.bar)
biu = ()
return Foo
def test_label_and_its_name(foo_class):
foo = foo_class()
with raises(AttributeError):
foo.foo
assert foo.bar is 0
assert foo.baz is 0
foo.foo = u'1'
foo.bar = u'2'
assert foo.foo == u'1'
assert foo.bar == 2
assert foo.baz == 2
def test_iter_descriptors(foo_class):
assert dict(iter_descriptors(foo_class)) == {
'foo': foo_class.foo,
'bar': foo_class.bar,
'baz': foo_class.baz,
}
|
<commit_before><commit_msg>Test the descriptors and their helpers<commit_after>from __future__ import absolute_import
from pytest import fixture, raises
from openvpn_status.descriptors import (
LabelProperty, name_descriptors, iter_descriptors)
@fixture
def foo_class():
@name_descriptors
class Foo(object):
foo = LabelProperty('Foo')
bar = LabelProperty('Bar', default=lambda: 0, input_type=int)
baz = property(lambda self: self.bar)
biu = ()
return Foo
def test_label_and_its_name(foo_class):
foo = foo_class()
with raises(AttributeError):
foo.foo
assert foo.bar is 0
assert foo.baz is 0
foo.foo = u'1'
foo.bar = u'2'
assert foo.foo == u'1'
assert foo.bar == 2
assert foo.baz == 2
def test_iter_descriptors(foo_class):
assert dict(iter_descriptors(foo_class)) == {
'foo': foo_class.foo,
'bar': foo_class.bar,
'baz': foo_class.baz,
}
|
|
87bcfa82d9c3fc001f46af66b333f422f30068bf
|
apsuite/emit_exchange/emit_exchange.py
|
apsuite/emit_exchange/emit_exchange.py
|
import numpy as _np
import pyaccel as _pa
import pymodels as _pm
class EmittanceExchangeSimul:
def __init__(self, accelerator='bo'):
ACC_LIST = ['bo', ]
self._model = None
if accelerator not in ACC_LIST:
raise NotImplementedError(
'Simulation not implemented for the passed accelerator')
self._model = _pm.bo.create_accelerator(energy=3e9)
@property
def model(self):
return self._model
def calc_emit_exchange_quality(self, emit1_0, emit2_0, emit1):
"""."""
r = 1 - (emit1 - emit2_0)/(emit1_0 - emit2_0)
return r
def C_to_KsL(self, C):
fam_data = _pm.bo.get_family_data(self.model)
qs_idx = fam_data['QS']['index']
ed_tang, *_ = _pa.optics.calc_edwards_teng(accelerator=self.model)
beta1 = ed_tang.beta1[qs_idx[0]]
beta2 = ed_tang.beta2[qs_idx[0]]
KsL = -2 * _np.pi * C / _np.sqrt(beta1 * beta2)
return KsL[0]
def KsL_to_C(self, KsL):
fam_data = _pm.bo.get_family_data(self.model)
qs_idx = fam_data['QS']['index']
ed_tang, *_ = _pa.optics.calc_edwards_teng(accelerator=self.model)
beta1 = ed_tang.beta1[qs_idx[0][0]]
beta2 = ed_tang.beta2[qs_idx[0][0]]
C = _np.abs(KsL * _np.sqrt(beta1 * beta2)/(2 * _np.pi))
return C
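# Hypothetical usage sketch (the 1% coupling value is illustrative and not part
# of the original commit). Because the conversion helpers read the lattice
# functions from self.model, a coupling round trip could look like:
#
# simul = EmittanceExchangeSimul(accelerator='bo')
# ksl = simul.C_to_KsL(0.01)  # integrated skew-quad strength for C = 1%
# assert abs(simul.KsL_to_C(ksl) - 0.01) < 1e-12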
|
Create a class to simulate the emittance exchange dynamical process.
|
Create a class to simulate the emittance exchange dynamical process.
|
Python
|
mit
|
lnls-fac/apsuite
|
Create a class to simulate the emittance exchange dynamical process.
|
import numpy as _np
import pyaccel as _pa
import pymodels as _pm
class EmittanceExchangeSimul:
def __init__(self, accelerator='bo'):
ACC_LIST = ['bo', ]
self._model = None
if accelerator not in ACC_LIST:
raise NotImplementedError(
'Simulation not implemented for the passed accelerator')
self._model = _pm.bo.create_accelerator(energy=3e9)
@property
def model(self):
return self._model
def calc_emit_exchange_quality(self, emit1_0, emit2_0, emit1):
"""."""
r = 1 - (emit1 - emit2_0)/(emit1_0 - emit2_0)
return r
def C_to_KsL(self, C):
fam_data = _pm.bo.get_family_data(self.model)
qs_idx = fam_data['QS']['index']
ed_tang, *_ = _pa.optics.calc_edwards_teng(accelerator=self.model)
beta1 = ed_tang.beta1[qs_idx[0]]
beta2 = ed_tang.beta2[qs_idx[0]]
KsL = -2 * _np.pi * C / _np.sqrt(beta1 * beta2)
return KsL[0]
def KsL_to_C(self, KsL):
fam_data = _pm.bo.get_family_data(self.model)
qs_idx = fam_data['QS']['index']
ed_tang, *_ = _pa.optics.calc_edwards_teng(accelerator=self.model)
beta1 = ed_tang.beta1[qs_idx[0][0]]
beta2 = ed_tang.beta2[qs_idx[0][0]]
C = _np.abs(KsL * _np.sqrt(beta1 * beta2)/(2 * _np.pi))
return C
|
<commit_before><commit_msg>Create a class to simulate the emittance exchange dynamical process.<commit_after>
|
import numpy as _np
import pyaccel as _pa
import pymodels as _pm
class EmittanceExchangeSimul:
def __init__(self, accelerator='bo'):
ACC_LIST = ['bo', ]
self._model = None
if accelerator not in ACC_LIST:
raise NotImplementedError(
'Simulation not implemented for the passed accelerator')
self._model = _pm.bo.create_accelerator(energy=3e9)
@property
def model(self):
return self._model
def calc_emit_exchange_quality(self, emit1_0, emit2_0, emit1):
"""."""
r = 1 - (emit1 - emit2_0)/(emit1_0 - emit2_0)
return r
def C_to_KsL(self, C):
fam_data = _pm.bo.get_family_data(self.model)
qs_idx = fam_data['QS']['index']
ed_tang, *_ = _pa.optics.calc_edwards_teng(accelerator=self.model)
beta1 = ed_tang.beta1[qs_idx[0]]
beta2 = ed_tang.beta2[qs_idx[0]]
KsL = -2 * _np.pi * C / _np.sqrt(beta1 * beta2)
return KsL[0]
def KsL_to_C(self, KsL):
fam_data = _pm.bo.get_family_data(self.model)
qs_idx = fam_data['QS']['index']
ed_tang, *_ = _pa.optics.calc_edwards_teng(accelerator=self.model)
beta1 = ed_tang.beta1[qs_idx[0][0]]
beta2 = ed_tang.beta2[qs_idx[0][0]]
C = _np.abs(KsL * _np.sqrt(beta1 * beta2)/(2 * _np.pi))
return C
|
Create a class to simulate the emittance exchange dynamical process.import numpy as _np
import pyaccel as _pa
import pymodels as _pm
class EmittanceExchangeSimul:
def __init__(self, accelerator='bo'):
ACC_LIST = ['bo', ]
self._model = None
if accelerator not in ACC_LIST:
raise NotImplementedError(
'Simulation not implemented for the passed accelerator')
self._model = _pm.bo.create_accelerator(energy=3e9)
@property
def model(self):
return self._model
def calc_emit_exchange_quality(self, emit1_0, emit2_0, emit1):
"""."""
r = 1 - (emit1 - emit2_0)/(emit1_0 - emit2_0)
return r
def C_to_KsL(self, C):
fam_data = _pm.bo.get_family_data(self.model)
qs_idx = fam_data['QS']['index']
ed_tang, *_ = _pa.optics.calc_edwards_teng(accelerator=self.model)
beta1 = ed_tang.beta1[qs_idx[0]]
beta2 = ed_tang.beta2[qs_idx[0]]
KsL = -2 * _np.pi * C / _np.sqrt(beta1 * beta2)
return KsL[0]
def KsL_to_C(self, KsL):
fam_data = _pm.bo.get_family_data(self.model)
qs_idx = fam_data['QS']['index']
ed_tang, *_ = _pa.optics.calc_edwards_teng(accelerator=self.model)
beta1 = ed_tang.beta1[qs_idx[0][0]]
beta2 = ed_tang.beta2[qs_idx[0][0]]
C = _np.abs(KsL * _np.sqrt(beta1 * beta2)/(2 * _np.pi))
return C
|
<commit_before><commit_msg>Create a class to simulate the emittance exchange dynamical process.<commit_after>import numpy as _np
import pyaccel as _pa
import pymodels as _pm
class EmittanceExchangeSimul:
def __init__(self, accelerator='bo'):
ACC_LIST = ['bo', ]
self._model = None
if accelerator not in ACC_LIST:
raise NotImplementedError(
'Simulation not implemented for the passed accelerator')
self._model = _pm.bo.create_accelerator(energy=3e9)
@property
def model(self):
return self._model
def calc_emit_exchange_quality(self, emit1_0, emit2_0, emit1):
"""."""
r = 1 - (emit1 - emit2_0)/(emit1_0 - emit2_0)
return r
def C_to_KsL(self, C):
fam_data = _pm.bo.get_family_data(self.model)
qs_idx = fam_data['QS']['index']
ed_tang, *_ = _pa.optics.calc_edwards_teng(accelerator=self.model)
beta1 = ed_tang.beta1[qs_idx[0]]
beta2 = ed_tang.beta2[qs_idx[0]]
KsL = -2 * _np.pi * C / _np.sqrt(beta1 * beta2)
return KsL[0]
def KsL_to_C(self, KsL):
fam_data = _pm.bo.get_family_data(self.model)
qs_idx = fam_data['QS']['index']
ed_tang, *_ = _pa.optics.calc_edwards_teng(accelerator=self.model)
beta1 = ed_tang.beta1[qs_idx[0][0]]
beta2 = ed_tang.beta2[qs_idx[0][0]]
C = _np.abs(KsL * _np.sqrt(beta1 * beta2)/(2 * _np.pi))
return C
|
|
9cd81da23b1c3d5ca5a9d3f805fee9ceb675ee7e
|
graystruct/__init__.py
|
graystruct/__init__.py
|
import json
import logging
import os
import socket
import zlib
from graypy.handler import SYSLOG_LEVELS, GELFHandler as BaseGELFHandler
from graypy.rabbitmq import GELFRabbitHandler as BaseGELFRabbitHandler
from structlog._frames import _find_first_app_frame_and_name
from structlog.stdlib import _NAME_TO_LEVEL
STANDARD_GELF_KEYS = (
'version',
'host',
'short_message',
'full_message',
'timestamp',
'level',
'line',
'file',
)
def _get_gelf_compatible_key(key):
if key in STANDARD_GELF_KEYS or key.startswith('_'):
return key
return '_{}'.format(key)
def add_app_context(logger, method_name, event_dict):
f, name = _find_first_app_frame_and_name(['logging', __name__])
event_dict['file'] = f.f_code.co_filename
event_dict['line'] = f.f_lineno
event_dict['function'] = f.f_code.co_name
return event_dict
class GelfJsonEncoder(object):
def __init__(self, fqdn=True, localname=None,
gelf_keys=STANDARD_GELF_KEYS):
if fqdn:
host = socket.getfqdn()
elif localname is not None:
host = localname
else:
host = socket.gethostname()
self.host = host
self.gelf_keys = frozenset(gelf_keys)
def _translate_non_gelf_keys(self, event_dict):
return {
_get_gelf_compatible_key(key): value
for key, value in event_dict.items()
}
def __call__(self, logger, method_name, event_dict):
levelno = _NAME_TO_LEVEL[method_name]
gelf_dict = {
'version': '1.1',
'host': self.host,
'level': SYSLOG_LEVELS.get(levelno, levelno),
}
if 'message' in event_dict:
message = event_dict['short_message'] = event_dict.pop('message')
else:
message = ''
if 'exception' in event_dict:
exc = event_dict.pop('exception')
event_dict['full_message'] = '\n'.join([message, exc])
gelf_dict['_pid'] = os.getpid()
gelf_dict['_logger'] = logger.name
gelf_dict['_level_name'] = logging.getLevelName(levelno)
gelf_dict.update(self._translate_non_gelf_keys(event_dict))
return json.dumps(gelf_dict)
class _CompressHandler(object):
def makePickle(self, record):
return zlib.compress(record.msg.encode('utf-8'))
class GELFHandler(_CompressHandler, BaseGELFHandler):
pass
class GELFRabbitHandler(_CompressHandler, BaseGELFRabbitHandler):
pass
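# A hypothetical wiring sketch (host, port, processor chain and the
# rename_event_key helper are assumptions for illustration, not part of this
# module). GelfJsonEncoder is meant to be the last structlog processor, so the
# stdlib handler receives a ready-made GELF JSON string and only compresses it:
#
# import logging
# import structlog
#
# def rename_event_key(logger, method_name, event_dict):
#     event_dict['message'] = event_dict.pop('event', '')
#     return event_dict
#
# root = logging.getLogger()
# root.setLevel(logging.INFO)
# root.addHandler(GELFHandler('graylog.example.com', 12201))
#
# structlog.configure(
#     processors=[add_app_context, rename_event_key, GelfJsonEncoder()],
#     logger_factory=structlog.stdlib.LoggerFactory(),
# )
# structlog.get_logger('myapp').info('user signed in', user_id=42)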
|
Add initial attempt at graystruct
|
Add initial attempt at graystruct
|
Python
|
bsd-3-clause
|
enthought/graystruct
|
Add initial attempt at graystruct
|
import json
import logging
import os
import socket
import zlib
from graypy.handler import SYSLOG_LEVELS, GELFHandler as BaseGELFHandler
from graypy.rabbitmq import GELFRabbitHandler as BaseGELFRabbitHandler
from structlog._frames import _find_first_app_frame_and_name
from structlog.stdlib import _NAME_TO_LEVEL
STANDARD_GELF_KEYS = (
'version',
'host',
'short_message',
'full_message',
'timestamp',
'level',
'line',
'file',
)
def _get_gelf_compatible_key(key):
if key in STANDARD_GELF_KEYS or key.startswith('_'):
return key
return '_{}'.format(key)
def add_app_context(logger, method_name, event_dict):
f, name = _find_first_app_frame_and_name(['logging', __name__])
event_dict['file'] = f.f_code.co_filename
event_dict['line'] = f.f_lineno
event_dict['function'] = f.f_code.co_name
return event_dict
class GelfJsonEncoder(object):
def __init__(self, fqdn=True, localname=None,
gelf_keys=STANDARD_GELF_KEYS):
if fqdn:
host = socket.getfqdn()
elif localname is not None:
host = localname
else:
host = socket.gethostname()
self.host = host
self.gelf_keys = frozenset(gelf_keys)
def _translate_non_gelf_keys(self, event_dict):
return {
_get_gelf_compatible_key(key): value
for key, value in event_dict.items()
}
def __call__(self, logger, method_name, event_dict):
levelno = _NAME_TO_LEVEL[method_name]
gelf_dict = {
'version': '1.1',
'host': self.host,
'level': SYSLOG_LEVELS.get(levelno, levelno),
}
if 'message' in event_dict:
message = event_dict['short_message'] = event_dict.pop('message')
else:
message = ''
if 'exception' in event_dict:
exc = event_dict.pop('exception')
event_dict['full_message'] = '\n'.join([message, exc])
gelf_dict['_pid'] = os.getpid()
gelf_dict['_logger'] = logger.name
gelf_dict['_level_name'] = logging.getLevelName(levelno)
gelf_dict.update(self._translate_non_gelf_keys(event_dict))
return json.dumps(gelf_dict)
class _CompressHandler(object):
def makePickle(self, record):
return zlib.compress(record.msg.encode('utf-8'))
class GELFHandler(_CompressHandler, BaseGELFHandler):
pass
class GELFRabbitHandler(_CompressHandler, BaseGELFRabbitHandler):
pass
|
<commit_before><commit_msg>Add initial attempt at graystruct<commit_after>
|
import json
import logging
import os
import socket
import zlib
from graypy.handler import SYSLOG_LEVELS, GELFHandler as BaseGELFHandler
from graypy.rabbitmq import GELFRabbitHandler as BaseGELFRabbitHandler
from structlog._frames import _find_first_app_frame_and_name
from structlog.stdlib import _NAME_TO_LEVEL
STANDARD_GELF_KEYS = (
'version',
'host',
'short_message',
'full_message',
'timestamp',
'level',
'line',
'file',
)
def _get_gelf_compatible_key(key):
if key in STANDARD_GELF_KEYS or key.startswith('_'):
return key
return '_{}'.format(key)
def add_app_context(logger, method_name, event_dict):
f, name = _find_first_app_frame_and_name(['logging', __name__])
event_dict['file'] = f.f_code.co_filename
event_dict['line'] = f.f_lineno
event_dict['function'] = f.f_code.co_name
return event_dict
class GelfJsonEncoder(object):
def __init__(self, fqdn=True, localname=None,
gelf_keys=STANDARD_GELF_KEYS):
if fqdn:
host = socket.getfqdn()
elif localname is not None:
host = localname
else:
host = socket.gethostname()
self.host = host
self.gelf_keys = frozenset(gelf_keys)
def _translate_non_gelf_keys(self, event_dict):
return {
_get_gelf_compatible_key(key): value
for key, value in event_dict.items()
}
def __call__(self, logger, method_name, event_dict):
levelno = _NAME_TO_LEVEL[method_name]
gelf_dict = {
'version': '1.1',
'host': self.host,
'level': SYSLOG_LEVELS.get(levelno, levelno),
}
if 'message' in event_dict:
message = event_dict['short_message'] = event_dict.pop('message')
else:
message = ''
if 'exception' in event_dict:
exc = event_dict.pop('exception')
event_dict['full_message'] = '\n'.join([message, exc])
gelf_dict['_pid'] = os.getpid()
gelf_dict['_logger'] = logger.name
gelf_dict['_level_name'] = logging.getLevelName(levelno)
gelf_dict.update(self._translate_non_gelf_keys(event_dict))
return json.dumps(gelf_dict)
class _CompressHandler(object):
def makePickle(self, record):
return zlib.compress(record.msg.encode('utf-8'))
class GELFHandler(_CompressHandler, BaseGELFHandler):
pass
class GELFRabbitHandler(_CompressHandler, BaseGELFRabbitHandler):
pass
|
Add initial attempt at graystructimport json
import logging
import os
import socket
import zlib
from graypy.handler import SYSLOG_LEVELS, GELFHandler as BaseGELFHandler
from graypy.rabbitmq import GELFRabbitHandler as BaseGELFRabbitHandler
from structlog._frames import _find_first_app_frame_and_name
from structlog.stdlib import _NAME_TO_LEVEL
STANDARD_GELF_KEYS = (
'version',
'host',
'short_message',
'full_message',
'timestamp',
'level',
'line',
'file',
)
def _get_gelf_compatible_key(key):
if key in STANDARD_GELF_KEYS or key.startswith('_'):
return key
return '_{}'.format(key)
def add_app_context(logger, method_name, event_dict):
f, name = _find_first_app_frame_and_name(['logging', __name__])
event_dict['file'] = f.f_code.co_filename
event_dict['line'] = f.f_lineno
event_dict['function'] = f.f_code.co_name
return event_dict
class GelfJsonEncoder(object):
def __init__(self, fqdn=True, localname=None,
gelf_keys=STANDARD_GELF_KEYS):
if fqdn:
host = socket.getfqdn()
elif localname is not None:
host = localname
else:
host = socket.gethostname()
self.host = host
self.gelf_keys = frozenset(gelf_keys)
def _translate_non_gelf_keys(self, event_dict):
return {
_get_gelf_compatible_key(key): value
for key, value in event_dict.items()
}
def __call__(self, logger, method_name, event_dict):
levelno = _NAME_TO_LEVEL[method_name]
gelf_dict = {
'version': '1.1',
'host': self.host,
'level': SYSLOG_LEVELS.get(levelno, levelno),
}
if 'message' in event_dict:
message = event_dict['short_message'] = event_dict.pop('message')
else:
message = ''
if 'exception' in event_dict:
exc = event_dict.pop('exception')
event_dict['full_message'] = '\n'.join([message, exc])
gelf_dict['_pid'] = os.getpid()
gelf_dict['_logger'] = logger.name
gelf_dict['_level_name'] = logging.getLevelName(levelno)
gelf_dict.update(self._translate_non_gelf_keys(event_dict))
return json.dumps(gelf_dict)
class _CompressHandler(object):
def makePickle(self, record):
return zlib.compress(record.msg.encode('utf-8'))
class GELFHandler(_CompressHandler, BaseGELFHandler):
pass
class GELFRabbitHandler(_CompressHandler, BaseGELFRabbitHandler):
pass
|
<commit_before><commit_msg>Add initial attempt at graystruct<commit_after>import json
import logging
import os
import socket
import zlib
from graypy.handler import SYSLOG_LEVELS, GELFHandler as BaseGELFHandler
from graypy.rabbitmq import GELFRabbitHandler as BaseGELFRabbitHandler
from structlog._frames import _find_first_app_frame_and_name
from structlog.stdlib import _NAME_TO_LEVEL
STANDARD_GELF_KEYS = (
'version',
'host',
'short_message',
'full_message',
'timestamp',
'level',
'line',
'file',
)
def _get_gelf_compatible_key(key):
if key in STANDARD_GELF_KEYS or key.startswith('_'):
return key
return '_{}'.format(key)
def add_app_context(logger, method_name, event_dict):
f, name = _find_first_app_frame_and_name(['logging', __name__])
event_dict['file'] = f.f_code.co_filename
event_dict['line'] = f.f_lineno
event_dict['function'] = f.f_code.co_name
return event_dict
class GelfJsonEncoder(object):
def __init__(self, fqdn=True, localname=None,
gelf_keys=STANDARD_GELF_KEYS):
if fqdn:
host = socket.getfqdn()
elif localname is not None:
host = localname
else:
host = socket.gethostname()
self.host = host
self.gelf_keys = frozenset(gelf_keys)
def _translate_non_gelf_keys(self, event_dict):
return {
_get_gelf_compatible_key(key): value
for key, value in event_dict.items()
}
def __call__(self, logger, method_name, event_dict):
levelno = _NAME_TO_LEVEL[method_name]
gelf_dict = {
'version': '1.1',
'host': self.host,
'level': SYSLOG_LEVELS.get(levelno, levelno),
}
if 'message' in event_dict:
message = event_dict['short_message'] = event_dict.pop('message')
else:
message = ''
if 'exception' in event_dict:
exc = event_dict.pop('exception')
event_dict['full_message'] = '\n'.join([message, exc])
gelf_dict['_pid'] = os.getpid()
gelf_dict['_logger'] = logger.name
gelf_dict['_level_name'] = logging.getLevelName(levelno)
gelf_dict.update(self._translate_non_gelf_keys(event_dict))
return json.dumps(gelf_dict)
class _CompressHandler(object):
def makePickle(self, record):
return zlib.compress(record.msg.encode('utf-8'))
class GELFHandler(_CompressHandler, BaseGELFHandler):
pass
class GELFRabbitHandler(_CompressHandler, BaseGELFRabbitHandler):
pass
|
|
c1b3631efb41ccec5e3760a391b0c251946fffaa
|
Python/major_scale.py
|
Python/major_scale.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: hvn@familug.org
# Tested with Python3
# python major_scale.py C
# ['C', 'D', 'E', 'F', 'G', 'A', 'B', 'C']
import argparse
__doc__ = '''Script prints out major scale start from input note.'''
notes = ('A', 'Bb', 'B', 'C', 'Db', 'D', 'Eb', 'E', 'F', 'Gb', 'G', 'Ab')
noteno = len(notes)
WHOLE = 2
HALF = 1
def next_steps(note, steps=HALF):
idx = notes.index(note)
for step in range(steps):
idx = (idx + 1) % noteno
return notes[idx]
def major_scale(start):
out = []
out.append(start)
n = start
for step in [WHOLE, WHOLE, HALF, WHOLE, WHOLE, WHOLE, HALF]:
n = next_steps(n, step)
out.append(n)
return out
def main():
argp = argparse.ArgumentParser()
argp.add_argument('note', help='Note starts the major scale')
args = argp.parse_args()
print(major_scale(args.note))
if __name__ == "__main__":
main()
|
Add script to calculate Major scale
|
Add script to calculate Major scale
|
Python
|
bsd-2-clause
|
familug/FAMILUG,familug/FAMILUG,familug/FAMILUG,familug/FAMILUG,familug/FAMILUG,familug/FAMILUG,familug/FAMILUG,familug/FAMILUG
|
Add script to calculate Major scale
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: hvn@familug.org
# Tested with Python3
# python major_scale.py C
# ['C', 'D', 'E', 'F', 'G', 'A', 'B', 'C']
import argparse
__doc__ = '''Script prints out major scale start from input note.'''
notes = ('A', 'Bb', 'B', 'C', 'Db', 'D', 'Eb', 'E', 'F', 'Gb', 'G', 'Ab')
noteno = len(notes)
WHOLE = 2
HALF = 1
def next_steps(note, steps=HALF):
idx = notes.index(note)
for step in range(steps):
idx = (idx + 1) % noteno
return notes[idx]
def major_scale(start):
out = []
out.append(start)
n = start
for step in [WHOLE, WHOLE, HALF, WHOLE, WHOLE, WHOLE, HALF]:
n = next_steps(n, step)
out.append(n)
return out
def main():
argp = argparse.ArgumentParser()
argp.add_argument('note', help='Note starts the major scale')
args = argp.parse_args()
print(major_scale(args.note))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to calculate Major scale<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: hvn@familug.org
# Tested with Python3
# python major_scale.py C
# ['C', 'D', 'E', 'F', 'G', 'A', 'B', 'C']
import argparse
__doc__ = '''Script prints out major scale start from input note.'''
notes = ('A', 'Bb', 'B', 'C', 'Db', 'D', 'Eb', 'E', 'F', 'Gb', 'G', 'Ab')
noteno = len(notes)
WHOLE = 2
HALF = 1
def next_steps(note, steps=HALF):
idx = notes.index(note)
for step in range(steps):
idx = (idx + 1) % noteno
return notes[idx]
def major_scale(start):
out = []
out.append(start)
n = start
for step in [WHOLE, WHOLE, HALF, WHOLE, WHOLE, WHOLE, HALF]:
n = next_steps(n, step)
out.append(n)
return out
def main():
argp = argparse.ArgumentParser()
argp.add_argument('note', help='Note starts the major scale')
args = argp.parse_args()
print(major_scale(args.note))
if __name__ == "__main__":
main()
|
Add script to calculate Major scale#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: hvn@familug.org
# Tested with Python3
# python major_scale.py C
# ['C', 'D', 'E', 'F', 'G', 'A', 'B', 'C']
import argparse
__doc__ = '''Script prints out major scale start from input note.'''
notes = ('A', 'Bb', 'B', 'C', 'Db', 'D', 'Eb', 'E', 'F', 'Gb', 'G', 'Ab')
noteno = len(notes)
WHOLE = 2
HALF = 1
def next_steps(note, steps=HALF):
idx = notes.index(note)
for step in range(steps):
idx = (idx + 1) % noteno
return notes[idx]
def major_scale(start):
out = []
out.append(start)
n = start
for step in [WHOLE, WHOLE, HALF, WHOLE, WHOLE, WHOLE, HALF]:
n = next_steps(n, step)
out.append(n)
return out
def main():
argp = argparse.ArgumentParser()
argp.add_argument('note', help='Note starts the major scale')
args = argp.parse_args()
print(major_scale(args.note))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to calculate Major scale<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: hvn@familug.org
# Tested with Python3
# python major_scale.py C
# ['C', 'D', 'E', 'F', 'G', 'A', 'B', 'C']
import argparse
__doc__ = '''Script prints out major scale start from input note.'''
notes = ('A', 'Bb', 'B', 'C', 'Db', 'D', 'Eb', 'E', 'F', 'Gb', 'G', 'Ab')
noteno = len(notes)
WHOLE = 2
HALF = 1
def next_steps(note, steps=HALF):
idx = notes.index(note)
for step in range(steps):
idx = (idx + 1) % noteno
return notes[idx]
def major_scale(start):
out = []
out.append(start)
n = start
for step in [WHOLE, WHOLE, HALF, WHOLE, WHOLE, WHOLE, HALF]:
n = next_steps(n, step)
out.append(n)
return out
def main():
argp = argparse.ArgumentParser()
argp.add_argument('note', help='Note starts the major scale')
args = argp.parse_args()
print(major_scale(args.note))
if __name__ == "__main__":
main()
|
|
0f0139bf8ad8a149d0c545968978c35480335312
|
lib/pegasus/python/Pegasus/test/service/monitoring/__init__.py
|
lib/pegasus/python/Pegasus/test/service/monitoring/__init__.py
|
# Copyright 2007-2014 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Rajiv Mayani'
|
Add directory where test case will be populated
|
Add directory where test case will be populated
|
Python
|
apache-2.0
|
pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus
|
Add directory where test case will be populated
|
# Copyright 2007-2014 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Rajiv Mayani'
|
<commit_before><commit_msg>Add directory where test case will be populated<commit_after>
|
# Copyright 2007-2014 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Rajiv Mayani'
|
Add directory where test case will be populated# Copyright 2007-2014 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Rajiv Mayani'
|
<commit_before><commit_msg>Add directory where test case will be populated<commit_after># Copyright 2007-2014 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Rajiv Mayani'
|
|
4ac396057d88c058652cc54f2d62e69777238665
|
migrations/versions/578ce9f8d1_add_tags_and_social_profiles.py
|
migrations/versions/578ce9f8d1_add_tags_and_social_profiles.py
|
"""Add Tags and Social Profiles
Revision ID: 578ce9f8d1
Revises: 29ef29bfbe43
Create Date: 2017-12-07 19:34:45.949358
"""
# revision identifiers, used by Alembic.
revision = '578ce9f8d1'
down_revision = '29ef29bfbe43'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.types import Text
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('organization', sa.Column('social_profiles', postgresql.JSONB(astext_type=Text()), nullable=True))
op.add_column('organization', sa.Column('tags', postgresql.JSONB(astext_type=Text()), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('organization', 'tags')
op.drop_column('organization', 'social_profiles')
### end Alembic commands ###
|
Add migration for tags/social profiles
|
Add migration for tags/social profiles
Reading the documentation for [Flask-Migrate][1] seems to imply that
the way to do migrations for us is a two step process:
python app.py db migrate --message "Some message here"
then, to test the migration:
python app.py db upgrade
Make sure to commit the migration.
[1]: https://flask-migrate.readthedocs.io/en/latest/
|
Python
|
mit
|
codeforamerica/cfapi,codeforamerica/cfapi
|
Add migration for tags/social profiles
Reading the documentation for [Flask-Migrate][1] seems to imply that
the way to do migrations for us is a two step process:
python app.py db migrate --message "Some message here"
then, to test the migration:
python app.py db upgrade
Make sure to commit the migration.
[1]: https://flask-migrate.readthedocs.io/en/latest/
|
"""Add Tags and Social Profiles
Revision ID: 578ce9f8d1
Revises: 29ef29bfbe43
Create Date: 2017-12-07 19:34:45.949358
"""
# revision identifiers, used by Alembic.
revision = '578ce9f8d1'
down_revision = '29ef29bfbe43'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.types import Text
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('organization', sa.Column('social_profiles', postgresql.JSONB(astext_type=Text()), nullable=True))
op.add_column('organization', sa.Column('tags', postgresql.JSONB(astext_type=Text()), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('organization', 'tags')
op.drop_column('organization', 'social_profiles')
### end Alembic commands ###
|
<commit_before><commit_msg>Add migration for tags/social profiles
Reading the documentation for [Flask-Migrate][1] seems to imply that
the way to do migrations for us is a two step process:
python app.py db migrate --message "Some message here"
then, to test the migration:
python app.py db upgrade
Make sure to commit the migration.
[1]: https://flask-migrate.readthedocs.io/en/latest/<commit_after>
|
"""Add Tags and Social Profiles
Revision ID: 578ce9f8d1
Revises: 29ef29bfbe43
Create Date: 2017-12-07 19:34:45.949358
"""
# revision identifiers, used by Alembic.
revision = '578ce9f8d1'
down_revision = '29ef29bfbe43'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.types import Text
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('organization', sa.Column('social_profiles', postgresql.JSONB(astext_type=Text()), nullable=True))
op.add_column('organization', sa.Column('tags', postgresql.JSONB(astext_type=Text()), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('organization', 'tags')
op.drop_column('organization', 'social_profiles')
### end Alembic commands ###
|
Add migration for tags/social profiles
Reading the documentation for [Flask-Migrate][1] seems to imply that
the way to do migrations for us is a two step process:
python app.py db migrate --message "Some message here"
then, to test the migration:
python app.py db upgrade
Make sure to commit the migration.
[1]: https://flask-migrate.readthedocs.io/en/latest/"""Add Tags and Social Profiles
Revision ID: 578ce9f8d1
Revises: 29ef29bfbe43
Create Date: 2017-12-07 19:34:45.949358
"""
# revision identifiers, used by Alembic.
revision = '578ce9f8d1'
down_revision = '29ef29bfbe43'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.types import Text
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('organization', sa.Column('social_profiles', postgresql.JSONB(astext_type=Text()), nullable=True))
op.add_column('organization', sa.Column('tags', postgresql.JSONB(astext_type=Text()), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('organization', 'tags')
op.drop_column('organization', 'social_profiles')
### end Alembic commands ###
|
<commit_before><commit_msg>Add migration for tags/social profiles
Reading the documentation for [Flask-Migrate][1] seems to imply that
the way to do migrations for us is a two step process:
python app.py db migrate --message "Some message here"
then, to test the migration:
python app.py db upgrade
Make sure to commit the migration.
[1]: https://flask-migrate.readthedocs.io/en/latest/<commit_after>"""Add Tags and Social Profiles
Revision ID: 578ce9f8d1
Revises: 29ef29bfbe43
Create Date: 2017-12-07 19:34:45.949358
"""
# revision identifiers, used by Alembic.
revision = '578ce9f8d1'
down_revision = '29ef29bfbe43'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.types import Text
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('organization', sa.Column('social_profiles', postgresql.JSONB(astext_type=Text()), nullable=True))
op.add_column('organization', sa.Column('tags', postgresql.JSONB(astext_type=Text()), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('organization', 'tags')
op.drop_column('organization', 'social_profiles')
### end Alembic commands ###
|
|
948102f164890974dbf3f02d58a9275dcbbbd9aa
|
src/ggrc_risks/migrations/versions/20151112161029_62f26762d0a_add_missing_constraints.py
|
src/ggrc_risks/migrations/versions/20151112161029_62f26762d0a_add_missing_constraints.py
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
from alembic import op
from ggrc.migrations.utils import resolve_duplicates
from ggrc_risks.models import Threat
"""Add missing constraints
Revision ID: 62f26762d0a
Revises: 2837682ad516
Create Date: 2015-11-12 16:10:29.579969
"""
# revision identifiers, used by Alembic.
revision = '62f26762d0a'
down_revision = '2837682ad516'
def upgrade():
resolve_duplicates(Threat, 'slug')
op.create_unique_constraint('uq_threats', 'threats', ['slug'])
def downgrade():
op.drop_constraint('uq_threats', 'threats', 'unique')
|
Add unique constraint to threats slug
|
Add unique constraint to threats slug
|
Python
|
apache-2.0
|
kr41/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,VinnieJohns/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,jmakov/ggrc-core,andrei-karalionak/ggrc-core,jmakov/ggrc-core,NejcZupec/ggrc-core,j0gurt/ggrc-core,plamut/ggrc-core,prasannav7/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,andrei-karalionak/ggrc-core,prasannav7/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,jmakov/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core
|
Add unique constraint to threats slug
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
from alembic import op
from ggrc.migrations.utils import resolve_duplicates
from ggrc_risks.models import Threat
"""Add missing constraints
Revision ID: 62f26762d0a
Revises: 2837682ad516
Create Date: 2015-11-12 16:10:29.579969
"""
# revision identifiers, used by Alembic.
revision = '62f26762d0a'
down_revision = '2837682ad516'
def upgrade():
resolve_duplicates(Threat, 'slug')
op.create_unique_constraint('uq_threats', 'threats', ['slug'])
def downgrade():
op.drop_constraint('uq_threats', 'threats', 'unique')
|
<commit_before><commit_msg>Add unique constraint to threats slug<commit_after>
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
from alembic import op
from ggrc.migrations.utils import resolve_duplicates
from ggrc_risks.models import Threat
"""Add missing constraints
Revision ID: 62f26762d0a
Revises: 2837682ad516
Create Date: 2015-11-12 16:10:29.579969
"""
# revision identifiers, used by Alembic.
revision = '62f26762d0a'
down_revision = '2837682ad516'
def upgrade():
resolve_duplicates(Threat, 'slug')
op.create_unique_constraint('uq_threats', 'threats', ['slug'])
def downgrade():
op.drop_constraint('uq_threats', 'threats', 'unique')
|
Add unique constraint to threats slug# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
from alembic import op
from ggrc.migrations.utils import resolve_duplicates
from ggrc_risks.models import Threat
"""Add missing constraints
Revision ID: 62f26762d0a
Revises: 2837682ad516
Create Date: 2015-11-12 16:10:29.579969
"""
# revision identifiers, used by Alembic.
revision = '62f26762d0a'
down_revision = '2837682ad516'
def upgrade():
resolve_duplicates(Threat, 'slug')
op.create_unique_constraint('uq_threats', 'threats', ['slug'])
def downgrade():
op.drop_constraint('uq_threats', 'threats', 'unique')
|
<commit_before><commit_msg>Add unique constraint to threats slug<commit_after># Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
from alembic import op
from ggrc.migrations.utils import resolve_duplicates
from ggrc_risks.models import Threat
"""Add missing constraints
Revision ID: 62f26762d0a
Revises: 2837682ad516
Create Date: 2015-11-12 16:10:29.579969
"""
# revision identifiers, used by Alembic.
revision = '62f26762d0a'
down_revision = '2837682ad516'
def upgrade():
resolve_duplicates(Threat, 'slug')
op.create_unique_constraint('uq_threats', 'threats', ['slug'])
def downgrade():
op.drop_constraint('uq_threats', 'threats', 'unique')
|
|
cff9f068246e6b50de6123db12b801d27f466b6d
|
corehq/messaging/scheduling/scheduling_partitioned/migrations/0009_update_custom_recipient_ids.py
|
corehq/messaging/scheduling/scheduling_partitioned/migrations/0009_update_custom_recipient_ids.py
|
# Generated by Django 2.2.24 on 2021-11-19 14:36
from django.db import migrations
from corehq.messaging.scheduling.scheduling_partitioned.models import CaseTimedScheduleInstance
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
def update_custom_recipient_ids(*args, **kwargs):
for db in get_db_aliases_for_partitioned_query():
CaseTimedScheduleInstance.objects.using(db).filter(recipient_id="CASE_OWNER_LOCATION_PARENT").update(
recipient_id='MOBILE_WORKER_CASE_OWNER_LOCATION_PARENT'
)
class Migration(migrations.Migration):
dependencies = [
('scheduling_partitioned', '0008_track_attempts'),
]
operations = [migrations.RunPython(update_custom_recipient_ids)]
|
Add migration to update custom schedule instances' recipient ids
|
Add migration to update custom schedule instances' recipient ids
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add migration to update custom schedule instances' recipient ids
|
# Generated by Django 2.2.24 on 2021-11-19 14:36
from django.db import migrations
from corehq.messaging.scheduling.scheduling_partitioned.models import CaseTimedScheduleInstance
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
def update_custom_recipient_ids(*args, **kwargs):
for db in get_db_aliases_for_partitioned_query():
CaseTimedScheduleInstance.objects.using(db).filter(recipient_id="CASE_OWNER_LOCATION_PARENT").update(
recipient_id='MOBILE_WORKER_CASE_OWNER_LOCATION_PARENT'
)
class Migration(migrations.Migration):
dependencies = [
('scheduling_partitioned', '0008_track_attempts'),
]
operations = [migrations.RunPython(update_custom_recipient_ids)]
|
<commit_before><commit_msg>Add migration to update custom schedule instances' recipient ids<commit_after>
|
# Generated by Django 2.2.24 on 2021-11-19 14:36
from django.db import migrations
from corehq.messaging.scheduling.scheduling_partitioned.models import CaseTimedScheduleInstance
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
def update_custom_recipient_ids(*args, **kwargs):
for db in get_db_aliases_for_partitioned_query():
CaseTimedScheduleInstance.objects.using(db).filter(recipient_id="CASE_OWNER_LOCATION_PARENT").update(
recipient_id='MOBILE_WORKER_CASE_OWNER_LOCATION_PARENT'
)
class Migration(migrations.Migration):
dependencies = [
('scheduling_partitioned', '0008_track_attempts'),
]
operations = [migrations.RunPython(update_custom_recipient_ids)]
|
Add migration to update custom schedule instances' recipient ids# Generated by Django 2.2.24 on 2021-11-19 14:36
from django.db import migrations
from corehq.messaging.scheduling.scheduling_partitioned.models import CaseTimedScheduleInstance
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
def update_custom_recipient_ids(*args, **kwargs):
for db in get_db_aliases_for_partitioned_query():
CaseTimedScheduleInstance.objects.using(db).filter(recipient_id="CASE_OWNER_LOCATION_PARENT").update(
recipient_id='MOBILE_WORKER_CASE_OWNER_LOCATION_PARENT'
)
class Migration(migrations.Migration):
dependencies = [
('scheduling_partitioned', '0008_track_attempts'),
]
operations = [migrations.RunPython(update_custom_recipient_ids)]
|
<commit_before><commit_msg>Add migration to update custom schedule instances' recipient ids<commit_after># Generated by Django 2.2.24 on 2021-11-19 14:36
from django.db import migrations
from corehq.messaging.scheduling.scheduling_partitioned.models import CaseTimedScheduleInstance
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
def update_custom_recipient_ids(*args, **kwargs):
for db in get_db_aliases_for_partitioned_query():
CaseTimedScheduleInstance.objects.using(db).filter(recipient_id="CASE_OWNER_LOCATION_PARENT").update(
recipient_id='MOBILE_WORKER_CASE_OWNER_LOCATION_PARENT'
)
class Migration(migrations.Migration):
dependencies = [
('scheduling_partitioned', '0008_track_attempts'),
]
operations = [migrations.RunPython(update_custom_recipient_ids)]
|
|
85c34957a1f7fdb75fbdf9b72bb26169effe7f0d
|
billing/templatetags/jinja2_tags.py
|
billing/templatetags/jinja2_tags.py
|
from coffin.template import Library
from django.template.loader import render_to_string
from jinja2 import nodes
from jinja2.ext import Extension
register = Library()
class MerchantExtension(Extension):
tags = set(['render_integration'])
def parse(self, parser):
stream = parser.stream
lineno = stream.next().lineno
obj = parser.parse_expression()
call_node = self.call_method('render_integration', args=[obj])
return nodes.Output([call_node]).set_lineno(lineno)
@classmethod
def render_integration(self, obj):
form_str = render_to_string(obj.template, {'integration': obj})
return form_str
register.tag(MerchantExtension)
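# Hypothetical template-side usage (the context variable name is illustrative,
# not part of this commit). Once the library is loaded, a view can pass an
# integration object into the template context and render its payment form with
#
#     {% render_integration integration %}
#
# which routes through render_integration() and returns obj.template rendered
# with the integration in context.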
|
Revert "Revert "Added an extension to allow rendering of integration objects in a Jinja2 template.""
|
Revert "Revert "Added an extension to allow rendering of integration objects in a Jinja2 template.""
This reverts commit 94199d05795323c2747a7ce2e671d92f3c0be91f.
|
Python
|
bsd-3-clause
|
spookylukey/merchant,digideskio/merchant,spookylukey/merchant,agiliq/merchant,mjrulesamrat/merchant,biddyweb/merchant,agiliq/merchant,biddyweb/merchant,mjrulesamrat/merchant,digideskio/merchant
|
Revert "Revert "Added an extension to allow rendering of integration objects in a Jinja2 template.""
This reverts commit 94199d05795323c2747a7ce2e671d92f3c0be91f.
|
from coffin.template import Library
from django.template.loader import render_to_string
from jinja2 import nodes
from jinja2.ext import Extension
register = Library()
class MerchantExtension(Extension):
tags = set(['render_integration'])
def parse(self, parser):
stream = parser.stream
lineno = stream.next().lineno
obj = parser.parse_expression()
call_node = self.call_method('render_integration', args=[obj])
return nodes.Output([call_node]).set_lineno(lineno)
@classmethod
def render_integration(self, obj):
form_str = render_to_string(obj.template, {'integration': obj})
return form_str
register.tag(MerchantExtension)
|
<commit_before><commit_msg>Revert "Revert "Added an extension to allow rendering of integration objects in a Jinja2 template.""
This reverts commit 94199d05795323c2747a7ce2e671d92f3c0be91f.<commit_after>
|
from coffin.template import Library
from django.template.loader import render_to_string
from jinja2 import nodes
from jinja2.ext import Extension
register = Library()
class MerchantExtension(Extension):
tags = set(['render_integration'])
def parse(self, parser):
stream = parser.stream
lineno = stream.next().lineno
obj = parser.parse_expression()
call_node = self.call_method('render_integration', args=[obj])
return nodes.Output([call_node]).set_lineno(lineno)
@classmethod
def render_integration(self, obj):
form_str = render_to_string(obj.template, {'integration': obj})
return form_str
register.tag(MerchantExtension)
|
Revert "Revert "Added an extension to allow rendering of integration objects in a Jinja2 template.""
This reverts commit 94199d05795323c2747a7ce2e671d92f3c0be91f.from coffin.template import Library
from django.template.loader import render_to_string
from jinja2 import nodes
from jinja2.ext import Extension
register = Library()
class MerchantExtension(Extension):
tags = set(['render_integration'])
def parse(self, parser):
stream = parser.stream
lineno = stream.next().lineno
obj = parser.parse_expression()
call_node = self.call_method('render_integration', args=[obj])
return nodes.Output([call_node]).set_lineno(lineno)
@classmethod
def render_integration(self, obj):
form_str = render_to_string(obj.template, {'integration': obj})
return form_str
register.tag(MerchantExtension)
|
<commit_before><commit_msg>Revert "Revert "Added an extension to allow rendering of integration objects in a Jinja2 template.""
This reverts commit 94199d05795323c2747a7ce2e671d92f3c0be91f.<commit_after>from coffin.template import Library
from django.template.loader import render_to_string
from jinja2 import nodes
from jinja2.ext import Extension
register = Library()
class MerchantExtension(Extension):
tags = set(['render_integration'])
def parse(self, parser):
stream = parser.stream
lineno = stream.next().lineno
obj = parser.parse_expression()
call_node = self.call_method('render_integration', args=[obj])
return nodes.Output([call_node]).set_lineno(lineno)
@classmethod
def render_integration(self, obj):
form_str = render_to_string(obj.template, {'integration': obj})
return form_str
register.tag(MerchantExtension)
|
|
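A minimal, self-contained sketch of the same tag pattern in plain Jinja2 (no coffin or Django), shown only to clarify what the extension above does: the `_render` body is a hypothetical stand-in for `render_to_string`, and `next(parser.stream)` replaces the Python 2 `stream.next()` call used in the original.

from jinja2 import Environment, nodes
from jinja2.ext import Extension

class RenderIntegrationExtension(Extension):
    tags = {'render_integration'}

    def parse(self, parser):
        # consume the tag name token and remember its line number
        lineno = next(parser.stream).lineno
        # parse the single expression that follows, e.g. {% render_integration obj %}
        obj = parser.parse_expression()
        call_node = self.call_method('_render', args=[obj])
        return nodes.Output([call_node]).set_lineno(lineno)

    def _render(self, obj):
        # placeholder for render_to_string(obj.template, {'integration': obj})
        return '<form data-integration="%s"></form>' % obj

env = Environment(extensions=[RenderIntegrationExtension])
print(env.from_string('{% render_integration integration %}').render(integration='paypal'))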
062f62bdf4cd05c6f92f6a7c0a2e0c278a00f954
|
string/test3.py
|
string/test3.py
|
#!/usr/local/bin/python
#print 'Pirce is %d'% 43
#print 'Pirce is %x'% 43
#print 'Pirce is %o'% 43
#from math import pi
#print 'Pi is %.2f'%pi
#print 'Repr %r'%42L
#print 'Str %s'%42L
#print '%10.2f'% 1.334
#print '%-10.2f'% 2.334
#print '%+10.3f'% 3.334
#print '%010.2f'% 4.334
#print '% 10.2f'% 5.334
#print '%.*s' %(5,'Cplusplus hiee')
#print "hihihi ads sad fd a".find('ads')
#print "dasfja safj nf f".find('i')
#print 'asdhi!!! fa '.find('!!!',0,9)
#a=['1','2','3','4','5']
#b='|'.join(a)
#print b
#print 'afkafljAAAAA'.lower()
#print 'afsf'.title()
#c='abcdefg'
#print 'e' in c
#print 'avbdasd'.replace('a','x')
#print 'a|b|d|e'.split('|')
from string import maketrans
table=maketrans('acs','bde')
print 'acs acs'.translate(table,' ')
|
Use upper,split,translate and so on.
|
Use upper,split,translate and so on.
|
Python
|
apache-2.0
|
Vayne-Lover/Python
|
Use upper,split,translate and so on.
|
#!/usr/local/bin/python
#print 'Pirce is %d'% 43
#print 'Pirce is %x'% 43
#print 'Pirce is %o'% 43
#from math import pi
#print 'Pi is %.2f'%pi
#print 'Repr %r'%42L
#print 'Str %s'%42L
#print '%10.2f'% 1.334
#print '%-10.2f'% 2.334
#print '%+10.3f'% 3.334
#print '%010.2f'% 4.334
#print '% 10.2f'% 5.334
#print '%.*s' %(5,'Cplusplus hiee')
#print "hihihi ads sad fd a".find('ads')
#print "dasfja safj nf f".find('i')
#print 'asdhi!!! fa '.find('!!!',0,9)
#a=['1','2','3','4','5']
#b='|'.join(a)
#print b
#print 'afkafljAAAAA'.lower()
#print 'afsf'.title()
#c='abcdefg'
#print 'e' in c
#print 'avbdasd'.replace('a','x')
#print 'a|b|d|e'.split('|')
from string import maketrans
table=maketrans('acs','bde')
print 'acs acs'.translate(table,' ')
|
<commit_before><commit_msg>Use upper,split,translate and so on.<commit_after>
|
#!/usr/local/bin/python
#print 'Pirce is %d'% 43
#print 'Pirce is %x'% 43
#print 'Pirce is %o'% 43
#from math import pi
#print 'Pi is %.2f'%pi
#print 'Repr %r'%42L
#print 'Str %s'%42L
#print '%10.2f'% 1.334
#print '%-10.2f'% 2.334
#print '%+10.3f'% 3.334
#print '%010.2f'% 4.334
#print '% 10.2f'% 5.334
#print '%.*s' %(5,'Cplusplus hiee')
#print "hihihi ads sad fd a".find('ads')
#print "dasfja safj nf f".find('i')
#print 'asdhi!!! fa '.find('!!!',0,9)
#a=['1','2','3','4','5']
#b='|'.join(a)
#print b
#print 'afkafljAAAAA'.lower()
#print 'afsf'.title()
#c='abcdefg'
#print 'e' in c
#print 'avbdasd'.replace('a','x')
#print 'a|b|d|e'.split('|')
from string import maketrans
table=maketrans('acs','bde')
print 'acs acs'.translate(table,' ')
|
Use upper,split,translate and so on.#!/usr/local/bin/python
#print 'Pirce is %d'% 43
#print 'Pirce is %x'% 43
#print 'Pirce is %o'% 43
#from math import pi
#print 'Pi is %.2f'%pi
#print 'Repr %r'%42L
#print 'Str %s'%42L
#print '%10.2f'% 1.334
#print '%-10.2f'% 2.334
#print '%+10.3f'% 3.334
#print '%010.2f'% 4.334
#print '% 10.2f'% 5.334
#print '%.*s' %(5,'Cplusplus hiee')
#print "hihihi ads sad fd a".find('ads')
#print "dasfja safj nf f".find('i')
#print 'asdhi!!! fa '.find('!!!',0,9)
#a=['1','2','3','4','5']
#b='|'.join(a)
#print b
#print 'afkafljAAAAA'.lower()
#print 'afsf'.title()
#c='abcdefg'
#print 'e' in c
#print 'avbdasd'.replace('a','x')
#print 'a|b|d|e'.split('|')
from string import maketrans
table=maketrans('acs','bde')
print 'acs acs'.translate(table,' ')
|
<commit_before><commit_msg>Use upper,split,translate and so on.<commit_after>#!/usr/local/bin/python
#print 'Pirce is %d'% 43
#print 'Pirce is %x'% 43
#print 'Pirce is %o'% 43
#from math import pi
#print 'Pi is %.2f'%pi
#print 'Repr %r'%42L
#print 'Str %s'%42L
#print '%10.2f'% 1.334
#print '%-10.2f'% 2.334
#print '%+10.3f'% 3.334
#print '%010.2f'% 4.334
#print '% 10.2f'% 5.334
#print '%.*s' %(5,'Cplusplus hiee')
#print "hihihi ads sad fd a".find('ads')
#print "dasfja safj nf f".find('i')
#print 'asdhi!!! fa '.find('!!!',0,9)
#a=['1','2','3','4','5']
#b='|'.join(a)
#print b
#print 'afkafljAAAAA'.lower()
#print 'afsf'.title()
#c='abcdefg'
#print 'e' in c
#print 'avbdasd'.replace('a','x')
#print 'a|b|d|e'.split('|')
from string import maketrans
table=maketrans('acs','bde')
print 'acs acs'.translate(table,' ')
|
|
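The file above is Python 2 (string.maketrans plus a deletechars argument to translate()); on Python 3, where string.maketrans no longer exists, a roughly equivalent snippet, shown only for comparison, would be:

# str.maketrans takes the characters to delete as a third argument,
# so translate() no longer needs a separate deletechars parameter.
table = str.maketrans('acs', 'bde', ' ')
print('acs acs'.translate(table))  # prints 'bdebde'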
1b6af59ce27c16ae65620a5bf03dcd07d51659f4
|
fabfile/testbeds/testbed_jlab.py
|
fabfile/testbeds/testbed_jlab.py
|
from fabric.api import env
#Management ip addresses of hosts in the cluster
host1 = 'root@172.21.0.10'
host2 = 'root@172.21.0.13'
host3 = 'root@172.21.0.14'
host4 = 'root@172.21.1.12'
host5 = 'root@172.21.1.13'
#External routers if any
#for eg.
#ext_routers = [('mx1', '10.204.216.253')]
ext_routers = []
#Autonomous system number
router_asn = 64512
#Host from which the fab commands are triggered to install and provision
host_build = 'root@10.84.5.31'
#Role definition of the hosts.
env.roledefs = {
'all': [host1, host2, host3, host4, host5],
'cfgm': [host1],
'openstack': [host1],
'control': [host2, host3],
'compute': [host4, host5],
'collector': [host1, host2, host3],
'webui': [host1, host2, host3],
'database': [host1, host2, host3],
'build': [host_build],
}
env.hostnames = {
'all': ['z0', 'z3', 'z4','c2', 'c3']
}
#Openstack admin password
env.openstack_admin_password = 'chei9APh'
env.password = 'c0ntrail123'
#Passwords of each host
env.passwords = {
host1: 'c0ntrail123',
host2: 'c0ntrail123',
host3: 'c0ntrail123',
host4: 'c0ntrail123',
host5: 'c0ntrail123',
host_build: 'c0ntrail123',
}
#For reimage purpose
env.ostypes = {
host1: 'ubuntu',
host2: 'ubuntu',
host3: 'ubuntu',
host4: 'ubuntu',
host5: 'ubuntu',
}
env.test_repo_dir='/root/contrail-sanity/contrail-test'
|
Add OCS jlab testbed file
|
Add OCS jlab testbed file
|
Python
|
apache-2.0
|
Juniper/contrail-fabric-utils,Juniper/contrail-fabric-utils
|
Add OCS jlab testbed file
|
from fabric.api import env
#Management ip addresses of hosts in the cluster
host1 = 'root@172.21.0.10'
host2 = 'root@172.21.0.13'
host3 = 'root@172.21.0.14'
host4 = 'root@172.21.1.12'
host5 = 'root@172.21.1.13'
#External routers if any
#for eg.
#ext_routers = [('mx1', '10.204.216.253')]
ext_routers = []
#Autonomous system number
router_asn = 64512
#Host from which the fab commands are triggered to install and provision
host_build = 'root@10.84.5.31'
#Role definition of the hosts.
env.roledefs = {
'all': [host1, host2, host3, host4, host5],
'cfgm': [host1],
'openstack': [host1],
'control': [host2, host3],
'compute': [host4, host5],
'collector': [host1, host2, host3],
'webui': [host1, host2, host3],
'database': [host1, host2, host3],
'build': [host_build],
}
env.hostnames = {
'all': ['z0', 'z3', 'z4','c2', 'c3']
}
#Openstack admin password
env.openstack_admin_password = 'chei9APh'
env.password = 'c0ntrail123'
#Passwords of each host
env.passwords = {
host1: 'c0ntrail123',
host2: 'c0ntrail123',
host3: 'c0ntrail123',
host4: 'c0ntrail123',
host5: 'c0ntrail123',
host_build: 'c0ntrail123',
}
#For reimage purpose
env.ostypes = {
host1: 'ubuntu',
host2: 'ubuntu',
host3: 'ubuntu',
host4: 'ubuntu',
host5: 'ubuntu',
}
env.test_repo_dir='/root/contrail-sanity/contrail-test'
|
<commit_before><commit_msg>Add OCS jlab testbed file<commit_after>
|
from fabric.api import env
#Management ip addresses of hosts in the cluster
host1 = 'root@172.21.0.10'
host2 = 'root@172.21.0.13'
host3 = 'root@172.21.0.14'
host4 = 'root@172.21.1.12'
host5 = 'root@172.21.1.13'
#External routers if any
#for eg.
#ext_routers = [('mx1', '10.204.216.253')]
ext_routers = []
#Autonomous system number
router_asn = 64512
#Host from which the fab commands are triggered to install and provision
host_build = 'root@10.84.5.31'
#Role definition of the hosts.
env.roledefs = {
'all': [host1, host2, host3, host4, host5],
'cfgm': [host1],
'openstack': [host1],
'control': [host2, host3],
'compute': [host4, host5],
'collector': [host1, host2, host3],
'webui': [host1, host2, host3],
'database': [host1, host2, host3],
'build': [host_build],
}
env.hostnames = {
'all': ['z0', 'z3', 'z4','c2', 'c3']
}
#Openstack admin password
env.openstack_admin_password = 'chei9APh'
env.password = 'c0ntrail123'
#Passwords of each host
env.passwords = {
host1: 'c0ntrail123',
host2: 'c0ntrail123',
host3: 'c0ntrail123',
host4: 'c0ntrail123',
host5: 'c0ntrail123',
host_build: 'c0ntrail123',
}
#For reimage purpose
env.ostypes = {
host1: 'ubuntu',
host2: 'ubuntu',
host3: 'ubuntu',
host4: 'ubuntu',
host5: 'ubuntu',
}
env.test_repo_dir='/root/contrail-sanity/contrail-test'
|
Add OCS jlab testbed filefrom fabric.api import env
#Management ip addresses of hosts in the cluster
host1 = 'root@172.21.0.10'
host2 = 'root@172.21.0.13'
host3 = 'root@172.21.0.14'
host4 = 'root@172.21.1.12'
host5 = 'root@172.21.1.13'
#External routers if any
#for eg.
#ext_routers = [('mx1', '10.204.216.253')]
ext_routers = []
#Autonomous system number
router_asn = 64512
#Host from which the fab commands are triggered to install and provision
host_build = 'root@10.84.5.31'
#Role definition of the hosts.
env.roledefs = {
'all': [host1, host2, host3, host4, host5],
'cfgm': [host1],
'openstack': [host1],
'control': [host2, host3],
'compute': [host4, host5],
'collector': [host1, host2, host3],
'webui': [host1, host2, host3],
'database': [host1, host2, host3],
'build': [host_build],
}
env.hostnames = {
'all': ['z0', 'z3', 'z4','c2', 'c3']
}
#Openstack admin password
env.openstack_admin_password = 'chei9APh'
env.password = 'c0ntrail123'
#Passwords of each host
env.passwords = {
host1: 'c0ntrail123',
host2: 'c0ntrail123',
host3: 'c0ntrail123',
host4: 'c0ntrail123',
host5: 'c0ntrail123',
host_build: 'c0ntrail123',
}
#For reimage purpose
env.ostypes = {
host1: 'ubuntu',
host2: 'ubuntu',
host3: 'ubuntu',
host4: 'ubuntu',
host5: 'ubuntu',
}
env.test_repo_dir='/root/contrail-sanity/contrail-test'
|
<commit_before><commit_msg>Add OCS jlab testbed file<commit_after>from fabric.api import env
#Management ip addresses of hosts in the cluster
host1 = 'root@172.21.0.10'
host2 = 'root@172.21.0.13'
host3 = 'root@172.21.0.14'
host4 = 'root@172.21.1.12'
host5 = 'root@172.21.1.13'
#External routers if any
#for eg.
#ext_routers = [('mx1', '10.204.216.253')]
ext_routers = []
#Autonomous system number
router_asn = 64512
#Host from which the fab commands are triggered to install and provision
host_build = 'root@10.84.5.31'
#Role definition of the hosts.
env.roledefs = {
'all': [host1, host2, host3, host4, host5],
'cfgm': [host1],
'openstack': [host1],
'control': [host2, host3],
'compute': [host4, host5],
'collector': [host1, host2, host3],
'webui': [host1, host2, host3],
'database': [host1, host2, host3],
'build': [host_build],
}
env.hostnames = {
'all': ['z0', 'z3', 'z4','c2', 'c3']
}
#Openstack admin password
env.openstack_admin_password = 'chei9APh'
env.password = 'c0ntrail123'
#Passwords of each host
env.passwords = {
host1: 'c0ntrail123',
host2: 'c0ntrail123',
host3: 'c0ntrail123',
host4: 'c0ntrail123',
host5: 'c0ntrail123',
host_build: 'c0ntrail123',
}
#For reimage purpose
env.ostypes = {
host1: 'ubuntu',
host2: 'ubuntu',
host3: 'ubuntu',
host4: 'ubuntu',
host5: 'ubuntu',
}
env.test_repo_dir='/root/contrail-sanity/contrail-test'
|
|
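A hypothetical companion task, sketched here only to show how a Fabric 1.x fabfile would consume the role definitions above once this testbed module is loaded; the task name and the command it runs are made up.

from fabric.api import roles, run, task

@task
@roles('compute')
def show_kernel():
    # executed once per host in env.roledefs['compute'] (host4 and host5 above),
    # authenticating with the per-host entries in env.passwords
    run('uname -a')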
370cab9bd98c73fb14078fd74131cef84ee3c71a
|
utils/img2video.py
|
utils/img2video.py
|
# Copyright (C) 2016 Zhixian MA <zxma_sjtu@qq.com>
# MIT license
"""
A simple tool to translate a group of images to a video. In the gas track work,
it is used to gather the simulated gas images into a portable video.
References
----------
[1] OpenCV-Python Tutorials
https://opencv-python-toturials.readthedocs.io/en/latest/
[2] Solem, J.E.
"Programming Computer Vision with Python"
O'Reilly, 2012.
"""
import cv2
import os
import re
def img2video(input_dir, fileout, fps=4.0,imgwidth=(800,800),
fxpression = "snap"):
"""
Image to video transformation.
Parameters
----------
input_dir: string
Name of the folder holding those images.
fxpression: string
The regular expression of those images.
fileout: string
The filepath of output.
fps: float
Frames per second.
imgwidth: tuple
Width and height of the video frame.
Reference
---------
cv2.VideoWriter
https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/
py_gui/py_video_display/py_video_display.html?highlight=videowriter
"""
    # Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
output = cv2.VideoWriter(fileout,fourcc,fps,imgwidth)
# Detect pngfiles
files = os.listdir(input_dir)
files.sort()
for f in files:
if len(re.findall(r"snap",f)):
print(f)
image = cv2.imread(f)
output.write(image)
# Release
output.release()
|
Add a tool to transform multiple images into a video.
|
Add a tool to transform multiple images into a video.
|
Python
|
mit
|
myinxd/gastrack,myinxd/gastrack
|
Add a tool to transform multiple images into a video.
|
# Copyright (C) 2016 Zhixian MA <zxma_sjtu@qq.com>
# MIT license
"""
A simple tool to translate a group of images to a video. In the gas track work,
it is used to gather the simulated gas images into a portable video.
References
----------
[1] OpenCV-Python Tutorials
https://opencv-python-toturials.readthedocs.io/en/latest/
[2] Solem, J.E.
"Programming Computer Vision with Python"
O'Reilly, 2012.
"""
import cv2
import os
import re
def img2video(input_dir, fileout, fps=4.0,imgwidth=(800,800),
fxpression = "snap"):
"""
Image to video transformation.
Parameters
----------
input_dir: string
Name of the folder holding those images.
fxpression: string
The regular expression of those images.
fileout: string
The filepath of output.
fps: float
Frames per second.
imgwidth: tuple
Width and height of the video frame.
Reference
---------
cv2.VideoWriter
https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/
py_gui/py_video_display/py_video_display.html?highlight=videowriter
"""
    # Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
output = cv2.VideoWriter(fileout,fourcc,fps,imgwidth)
# Detect pngfiles
files = os.listdir(input_dir)
files.sort()
for f in files:
if len(re.findall(r"snap",f)):
print(f)
image = cv2.imread(f)
output.write(image)
# Release
output.release()
|
<commit_before><commit_msg>Add a tool to transform multiple images into a video.<commit_after>
|
# Copyright (C) 2016 Zhixian MA <zxma_sjtu@qq.com>
# MIT license
"""
A simple tool to translate a group of images to a video. In the gas track work,
it is used to gather the simulated gas images into a portable video.
References
----------
[1] OpenCV-Python Tutorials
https://opencv-python-toturials.readthedocs.io/en/latest/
[2] Solem, J.E.
"Programming Computer Vision with Python"
O'Reilly, 2012.
"""
import cv2
import os
import re
def img2video(input_dir, fileout, fps=4.0,imgwidth=(800,800),
fxpression = "snap"):
"""
Image to video transformation.
Parameters
----------
input_dir: string
Name of the folder holding those images.
fxpression: string
The regular expression of those images.
fileout: string
The filepath of output.
fps: float
Frames per second.
imgwidth: tuple
Width and height of the video frame.
Reference
---------
cv2.VideoWriter
https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/
py_gui/py_video_display/py_video_display.html?highlight=videowriter
"""
    # Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
output = cv2.VideoWriter(fileout,fourcc,fps,imgwidth)
# Detect pngfiles
files = os.listdir(input_dir)
files.sort()
for f in files:
if len(re.findall(r"snap",f)):
print(f)
image = cv2.imread(f)
output.write(image)
# Release
output.release()
|
Add a tool to transform multiple images into a video.# Copyright (C) 2016 Zhixian MA <zxma_sjtu@qq.com>
# MIT license
"""
A simple tool to translate a group of images to a video. In the gas track work,
it is used to gather the simulated gas images into a portable video.
References
----------
[1] OpenCV-Python Tutorials
https://opencv-python-toturials.readthedocs.io/en/latest/
[2] Solem, J.E.
"Programming Computer Vision with Python"
O'Reilly, 2012.
"""
import cv2
import os
import re
def img2video(input_dir, fileout, fps=4.0,imgwidth=(800,800),
fxpression = "snap"):
"""
Image to video transformation.
Parameters
----------
input_dir: string
Name of the folder holding those images.
fxpression: string
The regular expression of those images.
fileout: string
The filepath of output.
fps: float
Frames per second.
imgwidth: tuple
Width and height of the video frame.
Reference
---------
cv2.VideoWriter
https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/
py_gui/py_video_display/py_video_display.html?highlight=videowriter
"""
    # Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
output = cv2.VideoWriter(fileout,fourcc,fps,imgwidth)
# Detect pngfiles
files = os.listdir(input_dir)
files.sort()
for f in files:
if len(re.findall(r"snap",f)):
print(f)
image = cv2.imread(f)
output.write(image)
# Release
output.release()
|
<commit_before><commit_msg>Add a tool to transform multiple images into a video.<commit_after># Copyright (C) 2016 Zhixian MA <zxma_sjtu@qq.com>
# MIT license
"""
A simple tool to translate a group of images to a video. In the gas track work,
it is used to gather the simulated gas images into a portable video.
References
----------
[1] OpenCV-Python Tutorials
https://opencv-python-toturials.readthedocs.io/en/latest/
[2] Solem, J.E.
"Programming Computer Vision with Python"
O'Reilly, 2012.
"""
import cv2
import os
import re
def img2video(input_dir, fileout, fps=4.0,imgwidth=(800,800),
fxpression = "snap"):
"""
Image to video transformation.
Parameters
----------
input_dir: string
Name of the folder holding those images.
fxpression: string
The regular expression of those images.
fileout: string
The filepath of output.
fps: float
Frames per second.
imgwidth: tuple
Width and height of the video frame.
Reference
---------
cv2.VideoWriter
https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/
py_gui/py_video_display/py_video_display.html?highlight=videowriter
"""
    # Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
output = cv2.VideoWriter(fileout,fourcc,fps,imgwidth)
# Detect pngfiles
files = os.listdir(input_dir)
files.sort()
for f in files:
if len(re.findall(r"snap",f)):
print(f)
image = cv2.imread(f)
output.write(image)
# Release
output.release()
|
|
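A hypothetical invocation of the function above, with made-up directory and output names. Two details of the implementation matter when calling it: the loop matches the literal "snap" rather than the fxpression argument, and it passes bare filenames to cv2.imread(), so it is simplest to run from inside the snapshot directory; frames are also assumed to already be 800x800, since nothing resizes them.

import os
from utils.img2video import img2video  # assuming the repository root is on sys.path

os.chdir('gas_snapshots')              # hypothetical folder holding snap_*.png frames
img2video(input_dir='.',               # listed to apply the "snap" filename filter
          fileout='gas.avi',           # XVID codec, so an .avi container
          fps=4.0,
          imgwidth=(800, 800))         # must match the frame size; no resizing is done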
6332b285b805115d4a8d9aeacf81a836695e19c4
|
util/package.py
|
util/package.py
|
import bz2
import json
import optparse
import os
import shutil
import sys
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option("-t", "--trial-path", action="store", dest="trial_path", help="Path to the output from a benchmark run", default="/tmp/pybrig/trials")
(options, args) = parser.parse_args()
trial_base = options.trial_path
user = None
pwd = None
entries = os.listdir(trial_base)
output_aggregator = dict()
if 'gather.json' not in entries:
print "[ERROR] Invalid trial directory: unable to find 'gather.json' in directory contents."
print "[ERROR] Path was %s" % trial_base
sys.exit(-1)
if 'capture.json' not in entries:
print "[ERROR] Invalid trial directory: unable to find 'capture.json' in directory contents."
print "[ERROR] Path was: %s" % trial_base
sys.exit(-1)
curr = open(os.path.join(trial_base, 'capture.json'), 'r')
output_aggregator['system-info'] = json.load(curr)
curr = open(os.path.join(trial_base, 'gather.json'), 'r')
output_aggregator['gather-info'] = json.load(curr)
for entry in entries:
if not os.path.isdir(os.path.join(trial_base, entry)):
continue
curr_trial = os.path.join(trial_base, entry)
benchmark = os.path.join(curr_trial, 'benchmark.json')
profile = os.path.join(curr_trial, 'prof.json')
if not os.path.exists(benchmark):
print "[WARN] Malformed trial result - missing benchmark.json (%s)" % benchmark
continue
if not os.path.exists(profile):
print "[WARN] Malformed trial result - missing prof.json (%s)" % profile
continue
output_aggregator[entry] = dict()
curr = open(benchmark, 'r')
output_aggregator[entry]['benchmark'] = json.load(curr)
curr = open(profile, 'r')
output_aggregator[entry]['profile'] = json.load(curr)
output_file = bz2.BZ2File('benchmark-output.bz2', 'wb')
json.dump(output_aggregator, output_file, sort_keys=True, indent=4)
|
Package utility: takes all output and turns into single (compressed) output file suitable for easier transport.
|
Package utility: takes all output and turns into single (compressed) output file suitable for easier transport.
|
Python
|
bsd-3-clause
|
cubic1271/pybrig,cubic1271/pybrig
|
Package utility: takes all output and turns into single (compressed) output file suitable for easier transport.
|
import bz2
import json
import optparse
import os
import shutil
import sys
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option("-t", "--trial-path", action="store", dest="trial_path", help="Path to the output from a benchmark run", default="/tmp/pybrig/trials")
(options, args) = parser.parse_args()
trial_base = options.trial_path
user = None
pwd = None
entries = os.listdir(trial_base)
output_aggregator = dict()
if 'gather.json' not in entries:
print "[ERROR] Invalid trial directory: unable to find 'gather.json' in directory contents."
print "[ERROR] Path was %s" % trial_base
sys.exit(-1)
if 'capture.json' not in entries:
print "[ERROR] Invalid trial directory: unable to find 'capture.json' in directory contents."
print "[ERROR] Path was: %s" % trial_base
sys.exit(-1)
curr = open(os.path.join(trial_base, 'capture.json'), 'r')
output_aggregator['system-info'] = json.load(curr)
curr = open(os.path.join(trial_base, 'gather.json'), 'r')
output_aggregator['gather-info'] = json.load(curr)
for entry in entries:
if not os.path.isdir(os.path.join(trial_base, entry)):
continue
curr_trial = os.path.join(trial_base, entry)
benchmark = os.path.join(curr_trial, 'benchmark.json')
profile = os.path.join(curr_trial, 'prof.json')
if not os.path.exists(benchmark):
print "[WARN] Malformed trial result - missing benchmark.json (%s)" % benchmark
continue
if not os.path.exists(profile):
print "[WARN] Malformed trial result - missing prof.json (%s)" % profile
continue
output_aggregator[entry] = dict()
curr = open(benchmark, 'r')
output_aggregator[entry]['benchmark'] = json.load(curr)
curr = open(profile, 'r')
output_aggregator[entry]['profile'] = json.load(curr)
output_file = bz2.BZ2File('benchmark-output.bz2', 'wb')
json.dump(output_aggregator, output_file, sort_keys=True, indent=4)
|
<commit_before><commit_msg>Package utility: takes all output and turns into single (compressed) output file suitable for easier transport.<commit_after>
|
import bz2
import json
import optparse
import os
import shutil
import sys
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option("-t", "--trial-path", action="store", dest="trial_path", help="Path to the output from a benchmark run", default="/tmp/pybrig/trials")
(options, args) = parser.parse_args()
trial_base = options.trial_path
user = None
pwd = None
entries = os.listdir(trial_base)
output_aggregator = dict()
if 'gather.json' not in entries:
print "[ERROR] Invalid trial directory: unable to find 'gather.json' in directory contents."
print "[ERROR] Path was %s" % trial_base
sys.exit(-1)
if 'capture.json' not in entries:
print "[ERROR] Invalid trial directory: unable to find 'capture.json' in directory contents."
print "[ERROR] Path was: %s" % trial_base
sys.exit(-1)
curr = open(os.path.join(trial_base, 'capture.json'), 'r')
output_aggregator['system-info'] = json.load(curr)
curr = open(os.path.join(trial_base, 'gather.json'), 'r')
output_aggregator['gather-info'] = json.load(curr)
for entry in entries:
if not os.path.isdir(os.path.join(trial_base, entry)):
continue
curr_trial = os.path.join(trial_base, entry)
benchmark = os.path.join(curr_trial, 'benchmark.json')
profile = os.path.join(curr_trial, 'prof.json')
if not os.path.exists(benchmark):
print "[WARN] Malformed trial result - missing benchmark.json (%s)" % benchmark
continue
if not os.path.exists(profile):
print "[WARN] Malformed trial result - missing prof.json (%s)" % profile
continue
output_aggregator[entry] = dict()
curr = open(benchmark, 'r')
output_aggregator[entry]['benchmark'] = json.load(curr)
curr = open(profile, 'r')
output_aggregator[entry]['profile'] = json.load(curr)
output_file = bz2.BZ2File('benchmark-output.bz2', 'wb')
json.dump(output_aggregator, output_file, sort_keys=True, indent=4)
|
Package utility: takes all output and turns into single (compressed) output file suitable for easier transport.import bz2
import json
import optparse
import os
import shutil
import sys
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option("-t", "--trial-path", action="store", dest="trial_path", help="Path to the output from a benchmark run", default="/tmp/pybrig/trials")
(options, args) = parser.parse_args()
trial_base = options.trial_path
user = None
pwd = None
entries = os.listdir(trial_base)
output_aggregator = dict()
if 'gather.json' not in entries:
print "[ERROR] Invalid trial directory: unable to find 'gather.json' in directory contents."
print "[ERROR] Path was %s" % trial_base
sys.exit(-1)
if 'capture.json' not in entries:
print "[ERROR] Invalid trial directory: unable to find 'capture.json' in directory contents."
print "[ERROR] Path was: %s" % trial_base
sys.exit(-1)
curr = open(os.path.join(trial_base, 'capture.json'), 'r')
output_aggregator['system-info'] = json.load(curr)
curr = open(os.path.join(trial_base, 'gather.json'), 'r')
output_aggregator['gather-info'] = json.load(curr)
for entry in entries:
if not os.path.isdir(os.path.join(trial_base, entry)):
continue
curr_trial = os.path.join(trial_base, entry)
benchmark = os.path.join(curr_trial, 'benchmark.json')
profile = os.path.join(curr_trial, 'prof.json')
if not os.path.exists(benchmark):
print "[WARN] Malformed trial result - missing benchmark.json (%s)" % benchmark
continue
if not os.path.exists(profile):
print "[WARN] Malformed trial result - missing prof.json (%s)" % profile
continue
output_aggregator[entry] = dict()
curr = open(benchmark, 'r')
output_aggregator[entry]['benchmark'] = json.load(curr)
curr = open(profile, 'r')
output_aggregator[entry]['profile'] = json.load(curr)
output_file = bz2.BZ2File('benchmark-output.bz2', 'wb')
json.dump(output_aggregator, output_file, sort_keys=True, indent=4)
|
<commit_before><commit_msg>Package utility: takes all output and turns into single (compressed) output file suitable for easier transport.<commit_after>import bz2
import json
import optparse
import os
import shutil
import sys
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option("-t", "--trial-path", action="store", dest="trial_path", help="Path to the output from a benchmark run", default="/tmp/pybrig/trials")
(options, args) = parser.parse_args()
trial_base = options.trial_path
user = None
pwd = None
entries = os.listdir(trial_base)
output_aggregator = dict()
if 'gather.json' not in entries:
print "[ERROR] Invalid trial directory: unable to find 'gather.json' in directory contents."
print "[ERROR] Path was %s" % trial_base
sys.exit(-1)
if 'capture.json' not in entries:
print "[ERROR] Invalid trial directory: unable to find 'capture.json' in directory contents."
print "[ERROR] Path was: %s" % trial_base
sys.exit(-1)
curr = open(os.path.join(trial_base, 'capture.json'), 'r')
output_aggregator['system-info'] = json.load(curr)
curr = open(os.path.join(trial_base, 'gather.json'), 'r')
output_aggregator['gather-info'] = json.load(curr)
for entry in entries:
if not os.path.isdir(os.path.join(trial_base, entry)):
continue
curr_trial = os.path.join(trial_base, entry)
benchmark = os.path.join(curr_trial, 'benchmark.json')
profile = os.path.join(curr_trial, 'prof.json')
if not os.path.exists(benchmark):
print "[WARN] Malformed trial result - missing benchmark.json (%s)" % benchmark
continue
if not os.path.exists(profile):
print "[WARN] Malformed trial result - missing prof.json (%s)" % profile
continue
output_aggregator[entry] = dict()
curr = open(benchmark, 'r')
output_aggregator[entry]['benchmark'] = json.load(curr)
curr = open(profile, 'r')
output_aggregator[entry]['profile'] = json.load(curr)
output_file = bz2.BZ2File('benchmark-output.bz2', 'wb')
json.dump(output_aggregator, output_file, sort_keys=True, indent=4)
|
|
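Reading the archive back is plain standard-library usage; the filename is the one hard-coded in the script, and json.load() accepts the bytes a BZ2File yields on Python 3.6+ (or the str of Python 2, which matches the script's print syntax).

import bz2
import json

with bz2.BZ2File('benchmark-output.bz2', 'rb') as archive:
    results = json.load(archive)

# top-level keys: 'system-info', 'gather-info', plus one entry per trial directory
print(sorted(results.keys()))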
0b8a2a3a0f010538dd30ce04ca1ce943347a04a8
|
django_fixmystreet/fmsproxy/models.py
|
django_fixmystreet/fmsproxy/models.py
|
from django.db import models
import logging
logger = logging.getLogger(__name__)
class FMSProxy(models.Model):
name = models.CharField(max_length=20, unique=True)
def __unicode__(self):
return self.name
def get_assign_payload(report):
creator = report.get_creator()
payload = {
"application": report.contractor.fmsproxy.name.lower(),
"report":{
"id": report.id,
"created_at": report.created.isoformat(),
"modified_at": report.modified.isoformat(),
"category": report.display_category(),
"pdf_url": report.get_pdf_url_pro(),
"address": report.address,
"address_number": report.address_number,
"postal_code": report.postalcode,
"municipality": report.get_address_commune_name(),
"creator": {
"type": "pro" if report.is_pro() else "citizen",
"first_name": creator.first_name,
"last_name": creator.last_name,
"phone": creator.telephone,
"email": creator.email,
},
"comments": None,
},
}
comments = report.active_comments()
if comments:
payload["report"]["comments"] = []
for comment in comments:
payload["report"]["comments"].append({
"created_at": comment.created.isoformat(),
"name": comment.get_display_name(),
"text": comment.text,
})
return payload
|
from django.db import models
import logging
logger = logging.getLogger(__name__)
class FMSProxy(models.Model):
name = models.CharField(max_length=20, unique=True)
def __unicode__(self):
return self.name
def get_assign_payload(report):
creator = report.get_creator()
payload = {
"application": report.contractor.fmsproxy.name.lower(),
"report":{
"id": report.id,
"created_at": report.created.isoformat(),
"modified_at": report.modified.isoformat(),
"category": report.display_category(),
"pdf_url": report.get_pdf_url_pro(),
"address": report.address,
"address_number": report.address_number,
"postal_code": report.postalcode,
"municipality": report.get_address_commune_name(),
"creator": {
"type": "pro" if report.is_pro() else "citizen",
"first_name": creator.first_name,
"last_name": creator.last_name,
"phone": creator.telephone,
"email": creator.email,
},
"comments": [],
},
}
comments = report.active_attachments_pro()
for comment in comments:
payload["report"]["comments"].append({
"created_at": comment.created.isoformat(),
"name": comment.get_display_name(),
"text": comment.text,
})
return payload
|
Use `active_attachments_pro` instead of `active_comments`.
|
Fix: Use `active_attachments_pro` instead of `active_comments`.
|
Python
|
agpl-3.0
|
IMIO/django-fixmystreet,IMIO/django-fixmystreet,IMIO/django-fixmystreet,IMIO/django-fixmystreet
|
from django.db import models
import logging
logger = logging.getLogger(__name__)
class FMSProxy(models.Model):
name = models.CharField(max_length=20, unique=True)
def __unicode__(self):
return self.name
def get_assign_payload(report):
creator = report.get_creator()
payload = {
"application": report.contractor.fmsproxy.name.lower(),
"report":{
"id": report.id,
"created_at": report.created.isoformat(),
"modified_at": report.modified.isoformat(),
"category": report.display_category(),
"pdf_url": report.get_pdf_url_pro(),
"address": report.address,
"address_number": report.address_number,
"postal_code": report.postalcode,
"municipality": report.get_address_commune_name(),
"creator": {
"type": "pro" if report.is_pro() else "citizen",
"first_name": creator.first_name,
"last_name": creator.last_name,
"phone": creator.telephone,
"email": creator.email,
},
"comments": None,
},
}
comments = report.active_comments()
if comments:
payload["report"]["comments"] = []
for comment in comments:
payload["report"]["comments"].append({
"created_at": comment.created.isoformat(),
"name": comment.get_display_name(),
"text": comment.text,
})
return payload
Fix: Use `active_attachments_pro` instead of `active_comments`.
|
from django.db import models
import logging
logger = logging.getLogger(__name__)
class FMSProxy(models.Model):
name = models.CharField(max_length=20, unique=True)
def __unicode__(self):
return self.name
def get_assign_payload(report):
creator = report.get_creator()
payload = {
"application": report.contractor.fmsproxy.name.lower(),
"report":{
"id": report.id,
"created_at": report.created.isoformat(),
"modified_at": report.modified.isoformat(),
"category": report.display_category(),
"pdf_url": report.get_pdf_url_pro(),
"address": report.address,
"address_number": report.address_number,
"postal_code": report.postalcode,
"municipality": report.get_address_commune_name(),
"creator": {
"type": "pro" if report.is_pro() else "citizen",
"first_name": creator.first_name,
"last_name": creator.last_name,
"phone": creator.telephone,
"email": creator.email,
},
"comments": [],
},
}
comments = report.active_attachments_pro()
for comment in comments:
payload["report"]["comments"].append({
"created_at": comment.created.isoformat(),
"name": comment.get_display_name(),
"text": comment.text,
})
return payload
|
<commit_before>from django.db import models
import logging
logger = logging.getLogger(__name__)
class FMSProxy(models.Model):
name = models.CharField(max_length=20, unique=True)
def __unicode__(self):
return self.name
def get_assign_payload(report):
creator = report.get_creator()
payload = {
"application": report.contractor.fmsproxy.name.lower(),
"report":{
"id": report.id,
"created_at": report.created.isoformat(),
"modified_at": report.modified.isoformat(),
"category": report.display_category(),
"pdf_url": report.get_pdf_url_pro(),
"address": report.address,
"address_number": report.address_number,
"postal_code": report.postalcode,
"municipality": report.get_address_commune_name(),
"creator": {
"type": "pro" if report.is_pro() else "citizen",
"first_name": creator.first_name,
"last_name": creator.last_name,
"phone": creator.telephone,
"email": creator.email,
},
"comments": None,
},
}
comments = report.active_comments()
if comments:
payload["report"]["comments"] = []
for comment in comments:
payload["report"]["comments"].append({
"created_at": comment.created.isoformat(),
"name": comment.get_display_name(),
"text": comment.text,
})
return payload
<commit_msg>Fix: Use `active_attachments_pro` instead of `active_comments`.<commit_after>
|
from django.db import models
import logging
logger = logging.getLogger(__name__)
class FMSProxy(models.Model):
name = models.CharField(max_length=20, unique=True)
def __unicode__(self):
return self.name
def get_assign_payload(report):
creator = report.get_creator()
payload = {
"application": report.contractor.fmsproxy.name.lower(),
"report":{
"id": report.id,
"created_at": report.created.isoformat(),
"modified_at": report.modified.isoformat(),
"category": report.display_category(),
"pdf_url": report.get_pdf_url_pro(),
"address": report.address,
"address_number": report.address_number,
"postal_code": report.postalcode,
"municipality": report.get_address_commune_name(),
"creator": {
"type": "pro" if report.is_pro() else "citizen",
"first_name": creator.first_name,
"last_name": creator.last_name,
"phone": creator.telephone,
"email": creator.email,
},
"comments": [],
},
}
comments = report.active_attachments_pro()
for comment in comments:
payload["report"]["comments"].append({
"created_at": comment.created.isoformat(),
"name": comment.get_display_name(),
"text": comment.text,
})
return payload
|
from django.db import models
import logging
logger = logging.getLogger(__name__)
class FMSProxy(models.Model):
name = models.CharField(max_length=20, unique=True)
def __unicode__(self):
return self.name
def get_assign_payload(report):
creator = report.get_creator()
payload = {
"application": report.contractor.fmsproxy.name.lower(),
"report":{
"id": report.id,
"created_at": report.created.isoformat(),
"modified_at": report.modified.isoformat(),
"category": report.display_category(),
"pdf_url": report.get_pdf_url_pro(),
"address": report.address,
"address_number": report.address_number,
"postal_code": report.postalcode,
"municipality": report.get_address_commune_name(),
"creator": {
"type": "pro" if report.is_pro() else "citizen",
"first_name": creator.first_name,
"last_name": creator.last_name,
"phone": creator.telephone,
"email": creator.email,
},
"comments": None,
},
}
comments = report.active_comments()
if comments:
payload["report"]["comments"] = []
for comment in comments:
payload["report"]["comments"].append({
"created_at": comment.created.isoformat(),
"name": comment.get_display_name(),
"text": comment.text,
})
return payload
Fix: Use `active_attachments_pro` instead of `active_comments`.from django.db import models
import logging
logger = logging.getLogger(__name__)
class FMSProxy(models.Model):
name = models.CharField(max_length=20, unique=True)
def __unicode__(self):
return self.name
def get_assign_payload(report):
creator = report.get_creator()
payload = {
"application": report.contractor.fmsproxy.name.lower(),
"report":{
"id": report.id,
"created_at": report.created.isoformat(),
"modified_at": report.modified.isoformat(),
"category": report.display_category(),
"pdf_url": report.get_pdf_url_pro(),
"address": report.address,
"address_number": report.address_number,
"postal_code": report.postalcode,
"municipality": report.get_address_commune_name(),
"creator": {
"type": "pro" if report.is_pro() else "citizen",
"first_name": creator.first_name,
"last_name": creator.last_name,
"phone": creator.telephone,
"email": creator.email,
},
"comments": [],
},
}
comments = report.active_attachments_pro()
for comment in comments:
payload["report"]["comments"].append({
"created_at": comment.created.isoformat(),
"name": comment.get_display_name(),
"text": comment.text,
})
return payload
|
<commit_before>from django.db import models
import logging
logger = logging.getLogger(__name__)
class FMSProxy(models.Model):
name = models.CharField(max_length=20, unique=True)
def __unicode__(self):
return self.name
def get_assign_payload(report):
creator = report.get_creator()
payload = {
"application": report.contractor.fmsproxy.name.lower(),
"report":{
"id": report.id,
"created_at": report.created.isoformat(),
"modified_at": report.modified.isoformat(),
"category": report.display_category(),
"pdf_url": report.get_pdf_url_pro(),
"address": report.address,
"address_number": report.address_number,
"postal_code": report.postalcode,
"municipality": report.get_address_commune_name(),
"creator": {
"type": "pro" if report.is_pro() else "citizen",
"first_name": creator.first_name,
"last_name": creator.last_name,
"phone": creator.telephone,
"email": creator.email,
},
"comments": None,
},
}
comments = report.active_comments()
if comments:
payload["report"]["comments"] = []
for comment in comments:
payload["report"]["comments"].append({
"created_at": comment.created.isoformat(),
"name": comment.get_display_name(),
"text": comment.text,
})
return payload
<commit_msg>Fix: Use `active_attachments_pro` instead of `active_comments`.<commit_after>from django.db import models
import logging
logger = logging.getLogger(__name__)
class FMSProxy(models.Model):
name = models.CharField(max_length=20, unique=True)
def __unicode__(self):
return self.name
def get_assign_payload(report):
creator = report.get_creator()
payload = {
"application": report.contractor.fmsproxy.name.lower(),
"report":{
"id": report.id,
"created_at": report.created.isoformat(),
"modified_at": report.modified.isoformat(),
"category": report.display_category(),
"pdf_url": report.get_pdf_url_pro(),
"address": report.address,
"address_number": report.address_number,
"postal_code": report.postalcode,
"municipality": report.get_address_commune_name(),
"creator": {
"type": "pro" if report.is_pro() else "citizen",
"first_name": creator.first_name,
"last_name": creator.last_name,
"phone": creator.telephone,
"email": creator.email,
},
"comments": [],
},
}
comments = report.active_attachments_pro()
for comment in comments:
payload["report"]["comments"].append({
"created_at": comment.created.isoformat(),
"name": comment.get_display_name(),
"text": comment.text,
})
return payload
|
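To isolate what actually changed: the comments list is now seeded with [] and filled from active_attachments_pro(), so the payload always carries a list instead of sometimes carrying None. A small runnable sketch of just that fragment, with a hypothetical attachment stand-in:

from datetime import datetime

class FakeAttachment(object):
    """Stand-in for a pro attachment; only the fields the loop reads."""
    def __init__(self, name, text):
        self.created = datetime.now()
        self._name = name
        self.text = text
    def get_display_name(self):
        return self._name

def build_comments(attachments):
    comments = []                      # previously initialised to None
    for a in attachments:
        comments.append({
            'created_at': a.created.isoformat(),
            'name': a.get_display_name(),
            'text': a.text,
        })
    return comments

print(build_comments([]))                                  # -> [] rather than None
print(build_comments([FakeAttachment('Agent', 'Fixed')]))  # -> one comment dict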
829ccc3384126a48b8d54ac651a93e169e417176
|
dbaas/maintenance/admin/maintenance.py
|
dbaas/maintenance/admin/maintenance.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django_services import admin
from ..models import Maintenance
from ..service.maintenance import MaintenanceService
from ..forms import MaintenanceForm
class MaintenanceAdmin(admin.DjangoServicesAdmin):
service_class = MaintenanceService
search_fields = ("scheduled_for", "description", "maximum_workers", 'status')
list_display = ("scheduled_for", "description", "maximum_workers", 'status')
fields = ( "description", "scheduled_for", "main_script", "rollback_script",
"host_query","maximum_workers", "status", "celery_task_id",)
save_on_top = True
readonly_fields = ('status', 'celery_task_id')
form = MaintenanceForm
def change_view(self, request, object_id, form_url='', extra_context=None):
maintenance = Maintenance.objects.get(id=object_id)
if maintenance.celery_task_id:
self.readonly_fields = self.fields
return super(MaintenanceAdmin, self).change_view(request,
object_id, form_url, extra_context=extra_context)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django_services import admin
from ..service.maintenance import MaintenanceService
from ..forms import MaintenanceForm
class MaintenanceAdmin(admin.DjangoServicesAdmin):
service_class = MaintenanceService
search_fields = ("scheduled_for", "description", "maximum_workers", 'status')
list_display = ("scheduled_for", "description", "maximum_workers", 'status')
fields = ( "description", "scheduled_for", "main_script", "rollback_script",
"host_query","maximum_workers", "status", "celery_task_id",)
save_on_top = True
form = MaintenanceForm
def get_readonly_fields(self, request, obj=None):
maintenance = obj
if maintenance:
if maintenance.celery_task_id:
return self.fields
return ('status', 'celery_task_id',)
|
Add get_read_only and remove old change_view customization
|
Add get_read_only and remove old change_view customization
|
Python
|
bsd-3-clause
|
globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django_services import admin
from ..models import Maintenance
from ..service.maintenance import MaintenanceService
from ..forms import MaintenanceForm
class MaintenanceAdmin(admin.DjangoServicesAdmin):
service_class = MaintenanceService
search_fields = ("scheduled_for", "description", "maximum_workers", 'status')
list_display = ("scheduled_for", "description", "maximum_workers", 'status')
fields = ( "description", "scheduled_for", "main_script", "rollback_script",
"host_query","maximum_workers", "status", "celery_task_id",)
save_on_top = True
readonly_fields = ('status', 'celery_task_id')
form = MaintenanceForm
def change_view(self, request, object_id, form_url='', extra_context=None):
maintenance = Maintenance.objects.get(id=object_id)
if maintenance.celery_task_id:
self.readonly_fields = self.fields
return super(MaintenanceAdmin, self).change_view(request,
object_id, form_url, extra_context=extra_context)
Add get_read_only and remove old change_view customization
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django_services import admin
from ..service.maintenance import MaintenanceService
from ..forms import MaintenanceForm
class MaintenanceAdmin(admin.DjangoServicesAdmin):
service_class = MaintenanceService
search_fields = ("scheduled_for", "description", "maximum_workers", 'status')
list_display = ("scheduled_for", "description", "maximum_workers", 'status')
fields = ( "description", "scheduled_for", "main_script", "rollback_script",
"host_query","maximum_workers", "status", "celery_task_id",)
save_on_top = True
form = MaintenanceForm
def get_readonly_fields(self, request, obj=None):
maintenance = obj
if maintenance:
if maintenance.celery_task_id:
return self.fields
return ('status', 'celery_task_id',)
|
<commit_before># -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django_services import admin
from ..models import Maintenance
from ..service.maintenance import MaintenanceService
from ..forms import MaintenanceForm
class MaintenanceAdmin(admin.DjangoServicesAdmin):
service_class = MaintenanceService
search_fields = ("scheduled_for", "description", "maximum_workers", 'status')
list_display = ("scheduled_for", "description", "maximum_workers", 'status')
fields = ( "description", "scheduled_for", "main_script", "rollback_script",
"host_query","maximum_workers", "status", "celery_task_id",)
save_on_top = True
readonly_fields = ('status', 'celery_task_id')
form = MaintenanceForm
def change_view(self, request, object_id, form_url='', extra_context=None):
maintenance = Maintenance.objects.get(id=object_id)
if maintenance.celery_task_id:
self.readonly_fields = self.fields
return super(MaintenanceAdmin, self).change_view(request,
object_id, form_url, extra_context=extra_context)
<commit_msg>Add get_read_only and remove old change_view customization<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django_services import admin
from ..service.maintenance import MaintenanceService
from ..forms import MaintenanceForm
class MaintenanceAdmin(admin.DjangoServicesAdmin):
service_class = MaintenanceService
search_fields = ("scheduled_for", "description", "maximum_workers", 'status')
list_display = ("scheduled_for", "description", "maximum_workers", 'status')
fields = ( "description", "scheduled_for", "main_script", "rollback_script",
"host_query","maximum_workers", "status", "celery_task_id",)
save_on_top = True
form = MaintenanceForm
def get_readonly_fields(self, request, obj=None):
maintenance = obj
if maintenance:
if maintenance.celery_task_id:
return self.fields
return ('status', 'celery_task_id',)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django_services import admin
from ..models import Maintenance
from ..service.maintenance import MaintenanceService
from ..forms import MaintenanceForm
class MaintenanceAdmin(admin.DjangoServicesAdmin):
service_class = MaintenanceService
search_fields = ("scheduled_for", "description", "maximum_workers", 'status')
list_display = ("scheduled_for", "description", "maximum_workers", 'status')
fields = ( "description", "scheduled_for", "main_script", "rollback_script",
"host_query","maximum_workers", "status", "celery_task_id",)
save_on_top = True
readonly_fields = ('status', 'celery_task_id')
form = MaintenanceForm
def change_view(self, request, object_id, form_url='', extra_context=None):
maintenance = Maintenance.objects.get(id=object_id)
if maintenance.celery_task_id:
self.readonly_fields = self.fields
return super(MaintenanceAdmin, self).change_view(request,
object_id, form_url, extra_context=extra_context)
Add get_read_only and remove old change_view customization# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django_services import admin
from ..service.maintenance import MaintenanceService
from ..forms import MaintenanceForm
class MaintenanceAdmin(admin.DjangoServicesAdmin):
service_class = MaintenanceService
search_fields = ("scheduled_for", "description", "maximum_workers", 'status')
list_display = ("scheduled_for", "description", "maximum_workers", 'status')
fields = ( "description", "scheduled_for", "main_script", "rollback_script",
"host_query","maximum_workers", "status", "celery_task_id",)
save_on_top = True
form = MaintenanceForm
def get_readonly_fields(self, request, obj=None):
maintenance = obj
if maintenance:
if maintenance.celery_task_id:
return self.fields
return ('status', 'celery_task_id',)
|
<commit_before># -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django_services import admin
from ..models import Maintenance
from ..service.maintenance import MaintenanceService
from ..forms import MaintenanceForm
class MaintenanceAdmin(admin.DjangoServicesAdmin):
service_class = MaintenanceService
search_fields = ("scheduled_for", "description", "maximum_workers", 'status')
list_display = ("scheduled_for", "description", "maximum_workers", 'status')
fields = ( "description", "scheduled_for", "main_script", "rollback_script",
"host_query","maximum_workers", "status", "celery_task_id",)
save_on_top = True
readonly_fields = ('status', 'celery_task_id')
form = MaintenanceForm
def change_view(self, request, object_id, form_url='', extra_context=None):
maintenance = Maintenance.objects.get(id=object_id)
if maintenance.celery_task_id:
self.readonly_fields = self.fields
return super(MaintenanceAdmin, self).change_view(request,
object_id, form_url, extra_context=extra_context)
<commit_msg>Add get_read_only and remove old change_view customization<commit_after># -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django_services import admin
from ..service.maintenance import MaintenanceService
from ..forms import MaintenanceForm
class MaintenanceAdmin(admin.DjangoServicesAdmin):
service_class = MaintenanceService
search_fields = ("scheduled_for", "description", "maximum_workers", 'status')
list_display = ("scheduled_for", "description", "maximum_workers", 'status')
fields = ( "description", "scheduled_for", "main_script", "rollback_script",
"host_query","maximum_workers", "status", "celery_task_id",)
save_on_top = True
form = MaintenanceForm
def get_readonly_fields(self, request, obj=None):
maintenance = obj
if maintenance:
if maintenance.celery_task_id:
return self.fields
return ('status', 'celery_task_id',)
|
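The effect of the new get_readonly_fields() is easier to see with its decision logic pulled out; the stand-in object below is hypothetical, since instantiating the real admin needs a configured Django project.

ALL_FIELDS = ("description", "scheduled_for", "main_script", "rollback_script",
              "host_query", "maximum_workers", "status", "celery_task_id")

class FakeMaintenance(object):
    def __init__(self, celery_task_id=None):
        self.celery_task_id = celery_task_id

def readonly_fields_for(obj=None):
    # mirrors MaintenanceAdmin.get_readonly_fields(request, obj)
    if obj and obj.celery_task_id:
        return ALL_FIELDS                      # task already dispatched: whole form read-only
    return ("status", "celery_task_id")        # add view, or not yet dispatched

print(readonly_fields_for())                           # add view
print(readonly_fields_for(FakeMaintenance()))          # saved but not dispatched
print(readonly_fields_for(FakeMaintenance("abc-123"))) # dispatched to celery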
d5538e7daf5b3dbefa1ff0e76ced46eb194c836c
|
dodger/tests/test_osx_dodger_class.py
|
dodger/tests/test_osx_dodger_class.py
|
from unittest import TestCase
from ..dock_dodger import OSXDodger
class OSXDockDodgerTests(TestCase):
def test_applications_folder_is_correct(self):
"""
Test that the applications folder is
indeed `/Applications/`
"""
expected = "/Applications/"
result = OSXDodger().app_dir
self.assertEqual(result, expected)
|
from unittest import TestCase
from ..dock_dodger import OSXDodger
class OSXDockDodgerTests(TestCase):
def test_applications_folder_is_correct(self):
"""
Test that the applications folder is
indeed `/Applications/`
"""
expected = "/Applications/"
result = OSXDodger().app_dir
self.assertEqual(result, expected)
def test_allowed_system_is_mac(self):
"""
Allowed system to run this script should be
a machine running on OS X
"""
expected = "darwin"
result = OSXDodger().allowed_sys
self.assertEqual(result, expected)
|
Test that the only allowed system to execute this script is OS X
|
Test that the only allowed system to execute this script is OS X
|
Python
|
mit
|
yoda-yoda/osx-dock-dodger,denisKaranja/osx-dock-dodger
|
from unittest import TestCase
from ..dock_dodger import OSXDodger
class OSXDockDodgerTests(TestCase):
def test_applications_folder_is_correct(self):
"""
Test that the applications folder is
indeed `/Applications/`
"""
expected = "/Applications/"
result = OSXDodger().app_dir
self.assertEqual(result, expected)
Test that the only allowed system to execute this script is OS X
|
from unittest import TestCase
from ..dock_dodger import OSXDodger
class OSXDockDodgerTests(TestCase):
def test_applications_folder_is_correct(self):
"""
Test that the applications folder is
indeed `/Applications/`
"""
expected = "/Applications/"
result = OSXDodger().app_dir
self.assertEqual(result, expected)
def test_allowed_system_is_mac(self):
"""
Allowed system to run this script should be
a machine running on OS X
"""
expected = "darwin"
result = OSXDodger().allowed_sys
self.assertEqual(result, expected)
|
<commit_before>from unittest import TestCase
from ..dock_dodger import OSXDodger
class OSXDockDodgerTests(TestCase):
def test_applications_folder_is_correct(self):
"""
Test that the applications folder is
indeed `/Applications/`
"""
expected = "/Applications/"
result = OSXDodger().app_dir
self.assertEqual(result, expected)
<commit_msg>Test that the only allowed system to execute this script is OS X<commit_after>
|
from unittest import TestCase
from ..dock_dodger import OSXDodger
class OSXDockDodgerTests(TestCase):
def test_applications_folder_is_correct(self):
"""
Test that the applications folder is
indeed `/Applications/`
"""
expected = "/Applications/"
result = OSXDodger().app_dir
self.assertEqual(result, expected)
def test_allowed_system_is_mac(self):
"""
Allowed system to run this script should be
a machine running on OS X
"""
expected = "darwin"
result = OSXDodger().allowed_sys
self.assertEqual(result, expected)
|
from unittest import TestCase
from ..dock_dodger import OSXDodger
class OSXDockDodgerTests(TestCase):
def test_applications_folder_is_correct(self):
"""
Test that the applications folder is
indeed `/Applications/`
"""
expected = "/Applications/"
result = OSXDodger().app_dir
self.assertEqual(result, expected)
Test that the only allowed system to execute this script is OS Xfrom unittest import TestCase
from ..dock_dodger import OSXDodger
class OSXDockDodgerTests(TestCase):
def test_applications_folder_is_correct(self):
"""
Test that the applications folder is
indeed `/Applications/`
"""
expected = "/Applications/"
result = OSXDodger().app_dir
self.assertEqual(result, expected)
def test_allowed_system_is_mac(self):
"""
Allowed system to run this script should be
a machine running on OS X
"""
expected = "darwin"
result = OSXDodger().allowed_sys
self.assertEqual(result, expected)
|
<commit_before>from unittest import TestCase
from ..dock_dodger import OSXDodger
class OSXDockDodgerTests(TestCase):
def test_applications_folder_is_correct(self):
"""
Test that the applications folder is
indeed `/Applications/`
"""
expected = "/Applications/"
result = OSXDodger().app_dir
self.assertEqual(result, expected)
<commit_msg>Test that the only allowed system to execute this script is OS X<commit_after>from unittest import TestCase
from ..dock_dodger import OSXDodger
class OSXDockDodgerTests(TestCase):
def test_applications_folder_is_correct(self):
"""
Test that the applications folder is
indeed `/Applications/`
"""
expected = "/Applications/"
result = OSXDodger().app_dir
self.assertEqual(result, expected)
def test_allowed_system_is_mac(self):
"""
Allowed system to run this script should be
a machine running on OS X
"""
expected = "darwin"
result = OSXDodger().allowed_sys
self.assertEqual(result, expected)
|
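The dock_dodger module itself is not part of this record, so the attributes the two tests read are only implied; a minimal stub consistent with both assertions would look like the sketch below, and the suite would normally be run with `python -m unittest dodger.tests.test_osx_dodger_class` (assuming the usual package __init__.py files).

class OSXDodger(object):
    app_dir = "/Applications/"   # asserted by test_applications_folder_is_correct
    allowed_sys = "darwin"       # asserted by test_allowed_system_is_mac; sys.platform on OS X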
c583f139b5092c132cb738f8bcbb5f305a0204a9
|
evaluation/packages/relationGraph.py
|
evaluation/packages/relationGraph.py
|
"""@package Primitive
This module provides an abstraction of the relationGraph using networkX
"""
import networkx as nx
import packages.primitive as primitive
class RelationGraph(object):
def __init__(self,primArray, assignArray):
self.G=nx.Graph()
# First create the nodes
for p in primArray:
self.G.add_node(p.uid, w=0)
# Then their relations
for idx1, p1 in enumerate(primArray):
for idx2, p2 in enumerate(primArray):
if (idx2 > idx1):
self.G.add_edge(p1.uid, p2.uid)
# And finaly the node weights (number of samples)
# assignArray[][0] = point id
# assignArray[][1] = primitive uid
for a in assignArray:
if a[1] in self.G.node:
self.G.node[a[1]]['w'] += 1
#print "Number of primitives: ",self.G.number_of_nodes()
#print "Number of connections: ",self.G.number_of_edges()
def draw(self):
nx.draw_spectral(self.G)
#nx.draw_networkx_labels(self.G,pos=nx.spectral_layout(self.G))
|
"""@package Primitive
This module provides an abstraction of the relationGraph using networkX
"""
import networkx as nx
import packages.primitive as primitive
class RelationGraph(object):
def __init__(self,primArray, assignArray):
self.G=nx.Graph()
self.indexedPrimArray = {}
# First create the nodes
for p in primArray:
self.G.add_node(p.uid, w=0)
self.indexedPrimArray[p.uid] = p
# Then their relations
for idx1, p1 in enumerate(primArray):
for idx2, p2 in enumerate(primArray):
if (idx2 > idx1):
self.G.add_edge(p1.uid, p2.uid)
# And finaly the node weights (number of samples)
# assignArray[][0] = point id
# assignArray[][1] = primitive uid
for a in assignArray:
if a[1] in self.G.node:
self.G.node[a[1]]['w'] += 1
#print "Number of primitives: ",self.G.number_of_nodes()
#print "Number of connections: ",self.G.number_of_edges()
# Call the functor over the primitives connected by and edge
def processConnectedNodes(self, functor):
for e in self.G.edges():
functor( self.indexedPrimArray[e[0]], self.indexedPrimArray[e[1]])
def draw(self):
nx.draw_spectral(self.G)
#nx.draw_networkx_labels(self.G,pos=nx.spectral_layout(self.G))
|
Add a function taking a functor as input to process connected primitives
|
Add a function taking a functor as input to process connected primitives
|
Python
|
apache-2.0
|
amonszpart/globOpt,amonszpart/globOpt,NUAAXXY/globOpt,NUAAXXY/globOpt,amonszpart/globOpt,NUAAXXY/globOpt,amonszpart/globOpt,NUAAXXY/globOpt,NUAAXXY/globOpt,amonszpart/globOpt,NUAAXXY/globOpt,amonszpart/globOpt
|
"""@package Primitive
This module provides an abstraction of the relationGraph using networkX
"""
import networkx as nx
import packages.primitive as primitive
class RelationGraph(object):
def __init__(self,primArray, assignArray):
self.G=nx.Graph()
# First create the nodes
for p in primArray:
self.G.add_node(p.uid, w=0)
# Then their relations
for idx1, p1 in enumerate(primArray):
for idx2, p2 in enumerate(primArray):
if (idx2 > idx1):
self.G.add_edge(p1.uid, p2.uid)
# And finaly the node weights (number of samples)
# assignArray[][0] = point id
# assignArray[][1] = primitive uid
for a in assignArray:
if a[1] in self.G.node:
self.G.node[a[1]]['w'] += 1
#print "Number of primitives: ",self.G.number_of_nodes()
#print "Number of connections: ",self.G.number_of_edges()
def draw(self):
nx.draw_spectral(self.G)
#nx.draw_networkx_labels(self.G,pos=nx.spectral_layout(self.G))
Add a function taking a functor as input to process connected primitives
|
"""@package Primitive
This module provides an abstraction of the relationGraph using networkX
"""
import networkx as nx
import packages.primitive as primitive
class RelationGraph(object):
def __init__(self,primArray, assignArray):
self.G=nx.Graph()
self.indexedPrimArray = {}
# First create the nodes
for p in primArray:
self.G.add_node(p.uid, w=0)
self.indexedPrimArray[p.uid] = p
# Then their relations
for idx1, p1 in enumerate(primArray):
for idx2, p2 in enumerate(primArray):
if (idx2 > idx1):
self.G.add_edge(p1.uid, p2.uid)
# And finaly the node weights (number of samples)
# assignArray[][0] = point id
# assignArray[][1] = primitive uid
for a in assignArray:
if a[1] in self.G.node:
self.G.node[a[1]]['w'] += 1
#print "Number of primitives: ",self.G.number_of_nodes()
#print "Number of connections: ",self.G.number_of_edges()
# Call the functor over the primitives connected by and edge
def processConnectedNodes(self, functor):
for e in self.G.edges():
functor( self.indexedPrimArray[e[0]], self.indexedPrimArray[e[1]])
def draw(self):
nx.draw_spectral(self.G)
#nx.draw_networkx_labels(self.G,pos=nx.spectral_layout(self.G))
|
<commit_before>"""@package Primitive
This module provides an abstraction of the relationGraph using networkX
"""
import networkx as nx
import packages.primitive as primitive
class RelationGraph(object):
def __init__(self,primArray, assignArray):
self.G=nx.Graph()
# First create the nodes
for p in primArray:
self.G.add_node(p.uid, w=0)
# Then their relations
for idx1, p1 in enumerate(primArray):
for idx2, p2 in enumerate(primArray):
if (idx2 > idx1):
self.G.add_edge(p1.uid, p2.uid)
# And finaly the node weights (number of samples)
# assignArray[][0] = point id
# assignArray[][1] = primitive uid
for a in assignArray:
if a[1] in self.G.node:
self.G.node[a[1]]['w'] += 1
#print "Number of primitives: ",self.G.number_of_nodes()
#print "Number of connections: ",self.G.number_of_edges()
def draw(self):
nx.draw_spectral(self.G)
#nx.draw_networkx_labels(self.G,pos=nx.spectral_layout(self.G))
<commit_msg>Add a function taking a functor as input to process connected primitives<commit_after>
|
"""@package Primitive
This module provides an abstraction of the relationGraph using networkX
"""
import networkx as nx
import packages.primitive as primitive
class RelationGraph(object):
def __init__(self,primArray, assignArray):
self.G=nx.Graph()
self.indexedPrimArray = {}
# First create the nodes
for p in primArray:
self.G.add_node(p.uid, w=0)
self.indexedPrimArray[p.uid] = p
# Then their relations
for idx1, p1 in enumerate(primArray):
for idx2, p2 in enumerate(primArray):
if (idx2 > idx1):
self.G.add_edge(p1.uid, p2.uid)
# And finaly the node weights (number of samples)
# assignArray[][0] = point id
# assignArray[][1] = primitive uid
for a in assignArray:
if a[1] in self.G.node:
self.G.node[a[1]]['w'] += 1
#print "Number of primitives: ",self.G.number_of_nodes()
#print "Number of connections: ",self.G.number_of_edges()
# Call the functor over the primitives connected by and edge
def processConnectedNodes(self, functor):
for e in self.G.edges():
functor( self.indexedPrimArray[e[0]], self.indexedPrimArray[e[1]])
def draw(self):
nx.draw_spectral(self.G)
#nx.draw_networkx_labels(self.G,pos=nx.spectral_layout(self.G))
|
"""@package Primitive
This module provides an abstraction of the relationGraph using networkX
"""
import networkx as nx
import packages.primitive as primitive
class RelationGraph(object):
def __init__(self,primArray, assignArray):
self.G=nx.Graph()
# First create the nodes
for p in primArray:
self.G.add_node(p.uid, w=0)
# Then their relations
for idx1, p1 in enumerate(primArray):
for idx2, p2 in enumerate(primArray):
if (idx2 > idx1):
self.G.add_edge(p1.uid, p2.uid)
# And finaly the node weights (number of samples)
# assignArray[][0] = point id
# assignArray[][1] = primitive uid
for a in assignArray:
if a[1] in self.G.node:
self.G.node[a[1]]['w'] += 1
#print "Number of primitives: ",self.G.number_of_nodes()
#print "Number of connections: ",self.G.number_of_edges()
def draw(self):
nx.draw_spectral(self.G)
#nx.draw_networkx_labels(self.G,pos=nx.spectral_layout(self.G))
Add a function taking a functor as input to process connected primitives"""@package Primitive
This module provides an abstraction of the relationGraph using networkX
"""
import networkx as nx
import packages.primitive as primitive
class RelationGraph(object):
def __init__(self,primArray, assignArray):
self.G=nx.Graph()
self.indexedPrimArray = {}
# First create the nodes
for p in primArray:
self.G.add_node(p.uid, w=0)
self.indexedPrimArray[p.uid] = p
# Then their relations
for idx1, p1 in enumerate(primArray):
for idx2, p2 in enumerate(primArray):
if (idx2 > idx1):
self.G.add_edge(p1.uid, p2.uid)
# And finaly the node weights (number of samples)
# assignArray[][0] = point id
# assignArray[][1] = primitive uid
for a in assignArray:
if a[1] in self.G.node:
self.G.node[a[1]]['w'] += 1
#print "Number of primitives: ",self.G.number_of_nodes()
#print "Number of connections: ",self.G.number_of_edges()
# Call the functor over the primitives connected by and edge
def processConnectedNodes(self, functor):
for e in self.G.edges():
functor( self.indexedPrimArray[e[0]], self.indexedPrimArray[e[1]])
def draw(self):
nx.draw_spectral(self.G)
#nx.draw_networkx_labels(self.G,pos=nx.spectral_layout(self.G))
|
<commit_before>"""@package Primitive
This module provides an abstraction of the relationGraph using networkX
"""
import networkx as nx
import packages.primitive as primitive
class RelationGraph(object):
def __init__(self,primArray, assignArray):
self.G=nx.Graph()
# First create the nodes
for p in primArray:
self.G.add_node(p.uid, w=0)
# Then their relations
for idx1, p1 in enumerate(primArray):
for idx2, p2 in enumerate(primArray):
if (idx2 > idx1):
self.G.add_edge(p1.uid, p2.uid)
# And finaly the node weights (number of samples)
# assignArray[][0] = point id
# assignArray[][1] = primitive uid
for a in assignArray:
if a[1] in self.G.node:
self.G.node[a[1]]['w'] += 1
#print "Number of primitives: ",self.G.number_of_nodes()
#print "Number of connections: ",self.G.number_of_edges()
def draw(self):
nx.draw_spectral(self.G)
#nx.draw_networkx_labels(self.G,pos=nx.spectral_layout(self.G))
<commit_msg>Add a function taking a functor as input to process connected primitives<commit_after>"""@package Primitive
This module provides an abstraction of the relationGraph using networkX
"""
import networkx as nx
import packages.primitive as primitive
class RelationGraph(object):
def __init__(self,primArray, assignArray):
self.G=nx.Graph()
self.indexedPrimArray = {}
# First create the nodes
for p in primArray:
self.G.add_node(p.uid, w=0)
self.indexedPrimArray[p.uid] = p
# Then their relations
for idx1, p1 in enumerate(primArray):
for idx2, p2 in enumerate(primArray):
if (idx2 > idx1):
self.G.add_edge(p1.uid, p2.uid)
# And finaly the node weights (number of samples)
# assignArray[][0] = point id
# assignArray[][1] = primitive uid
for a in assignArray:
if a[1] in self.G.node:
self.G.node[a[1]]['w'] += 1
#print "Number of primitives: ",self.G.number_of_nodes()
#print "Number of connections: ",self.G.number_of_edges()
# Call the functor over the primitives connected by and edge
def processConnectedNodes(self, functor):
for e in self.G.edges():
functor( self.indexedPrimArray[e[0]], self.indexedPrimArray[e[1]])
def draw(self):
nx.draw_spectral(self.G)
#nx.draw_networkx_labels(self.G,pos=nx.spectral_layout(self.G))
|
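Note on the record above: the new `processConnectedNodes` method just applies a callable to the two primitives joined by each graph edge, using the `indexedPrimArray` lookup built in the constructor. A self-contained sketch of the same edge-visiting pattern with plain networkx (the `Prim` class below is a stand-in for the project's `packages.primitive`, which is not included in the record):

import networkx as nx

class Prim(object):
    def __init__(self, uid):
        self.uid = uid

prims = [Prim(i) for i in range(3)]
G = nx.Graph()
for p in prims:
    G.add_node(p.uid)
G.add_edge(prims[0].uid, prims[1].uid)
G.add_edge(prims[1].uid, prims[2].uid)

index = {p.uid: p for p in prims}   # uid -> primitive, like indexedPrimArray

def report(p1, p2):
    # called once per edge with the two connected primitives
    print(p1.uid, "<->", p2.uid)

for e in G.edges():
    report(index[e[0]], index[e[1]])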
12e924cd617811cb763857a9abf14e8b3487f5a1
|
ckanext/nhm/routes/bbcm.py
|
ckanext/nhm/routes/bbcm.py
|
# !/usr/bin/env python
# encoding: utf-8
#
# This file is part of ckanext-nhm
# Created by the Natural History Museum in London, UK
from flask import Blueprint
from ckan.plugins import toolkit
# bbcm = big butterfly count map :)
# create a flask blueprint with a prefix
blueprint = Blueprint(name=u'big-butterfly-count-map', import_name=__name__,
url_prefix=u'/big-butterfly-count-map')
@blueprint.route(u'')
@blueprint.route(u'/')
def bbcm():
'''
Render the big butterfly count map page.
'''
return toolkit.render(u'bbcm.html', {})
|
# !/usr/bin/env python
# encoding: utf-8
#
# This file is part of ckanext-nhm
# Created by the Natural History Museum in London, UK
from flask import Blueprint
from ckan.plugins import toolkit
# bbcm = big butterfly count map :)
# create a flask blueprint
blueprint = Blueprint(name=u'big-butterfly-count-map', import_name=__name__)
@blueprint.route(u'/big-butterfly-count-map')
def bbcm():
'''
Render the big butterfly count map page.
'''
return toolkit.render(u'bbcm.html', {})
|
Allow the url to be accessed with or without a / on the end
|
Allow the url to be accessed with or without a / on the end
|
Python
|
mit
|
NaturalHistoryMuseum/ckanext-nhm,NaturalHistoryMuseum/ckanext-nhm,NaturalHistoryMuseum/ckanext-nhm
|
# !/usr/bin/env python
# encoding: utf-8
#
# This file is part of ckanext-nhm
# Created by the Natural History Museum in London, UK
from flask import Blueprint
from ckan.plugins import toolkit
# bbcm = big butterfly count map :)
# create a flask blueprint with a prefix
blueprint = Blueprint(name=u'big-butterfly-count-map', import_name=__name__,
url_prefix=u'/big-butterfly-count-map')
@blueprint.route(u'')
@blueprint.route(u'/')
def bbcm():
'''
Render the big butterfly count map page.
'''
return toolkit.render(u'bbcm.html', {})
Allow the url to be accessed with or without a / on the end
|
# !/usr/bin/env python
# encoding: utf-8
#
# This file is part of ckanext-nhm
# Created by the Natural History Museum in London, UK
from flask import Blueprint
from ckan.plugins import toolkit
# bbcm = big butterfly count map :)
# create a flask blueprint
blueprint = Blueprint(name=u'big-butterfly-count-map', import_name=__name__)
@blueprint.route(u'/big-butterfly-count-map')
def bbcm():
'''
Render the big butterfly count map page.
'''
return toolkit.render(u'bbcm.html', {})
|
<commit_before># !/usr/bin/env python
# encoding: utf-8
#
# This file is part of ckanext-nhm
# Created by the Natural History Museum in London, UK
from flask import Blueprint
from ckan.plugins import toolkit
# bbcm = big butterfly count map :)
# create a flask blueprint with a prefix
blueprint = Blueprint(name=u'big-butterfly-count-map', import_name=__name__,
url_prefix=u'/big-butterfly-count-map')
@blueprint.route(u'')
@blueprint.route(u'/')
def bbcm():
'''
Render the big butterfly count map page.
'''
return toolkit.render(u'bbcm.html', {})
<commit_msg>Allow the url to be accessed with or without a / on the end<commit_after>
|
# !/usr/bin/env python
# encoding: utf-8
#
# This file is part of ckanext-nhm
# Created by the Natural History Museum in London, UK
from flask import Blueprint
from ckan.plugins import toolkit
# bbcm = big butterfly count map :)
# create a flask blueprint
blueprint = Blueprint(name=u'big-butterfly-count-map', import_name=__name__)
@blueprint.route(u'/big-butterfly-count-map')
def bbcm():
'''
Render the big butterfly count map page.
'''
return toolkit.render(u'bbcm.html', {})
|
# !/usr/bin/env python
# encoding: utf-8
#
# This file is part of ckanext-nhm
# Created by the Natural History Museum in London, UK
from flask import Blueprint
from ckan.plugins import toolkit
# bbcm = big butterfly count map :)
# create a flask blueprint with a prefix
blueprint = Blueprint(name=u'big-butterfly-count-map', import_name=__name__,
url_prefix=u'/big-butterfly-count-map')
@blueprint.route(u'')
@blueprint.route(u'/')
def bbcm():
'''
Render the big butterfly count map page.
'''
return toolkit.render(u'bbcm.html', {})
Allow the url to be accessed with or without a / on the end# !/usr/bin/env python
# encoding: utf-8
#
# This file is part of ckanext-nhm
# Created by the Natural History Museum in London, UK
from flask import Blueprint
from ckan.plugins import toolkit
# bbcm = big butterfly count map :)
# create a flask blueprint
blueprint = Blueprint(name=u'big-butterfly-count-map', import_name=__name__)
@blueprint.route(u'/big-butterfly-count-map')
def bbcm():
'''
Render the big butterfly count map page.
'''
return toolkit.render(u'bbcm.html', {})
|
<commit_before># !/usr/bin/env python
# encoding: utf-8
#
# This file is part of ckanext-nhm
# Created by the Natural History Museum in London, UK
from flask import Blueprint
from ckan.plugins import toolkit
# bbcm = big butterfly count map :)
# create a flask blueprint with a prefix
blueprint = Blueprint(name=u'big-butterfly-count-map', import_name=__name__,
url_prefix=u'/big-butterfly-count-map')
@blueprint.route(u'')
@blueprint.route(u'/')
def bbcm():
'''
Render the big butterfly count map page.
'''
return toolkit.render(u'bbcm.html', {})
<commit_msg>Allow the url to be accessed with or without a / on the end<commit_after># !/usr/bin/env python
# encoding: utf-8
#
# This file is part of ckanext-nhm
# Created by the Natural History Museum in London, UK
from flask import Blueprint
from ckan.plugins import toolkit
# bbcm = big butterfly count map :)
# create a flask blueprint
blueprint = Blueprint(name=u'big-butterfly-count-map', import_name=__name__)
@blueprint.route(u'/big-butterfly-count-map')
def bbcm():
'''
Render the big butterfly count map page.
'''
return toolkit.render(u'bbcm.html', {})
|
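Note on the record above: the change replaces a `url_prefix` plus two routes with one absolute route. In Flask, slashes are strict by default, so a rule without a trailing slash only matches the exact path; accepting both forms from a single rule is usually done with `strict_slashes=False`. A minimal illustrative app (not the extension's blueprint):

from flask import Flask

app = Flask(__name__)

@app.route('/big-butterfly-count-map', strict_slashes=False)
def bbcm():
    # matches /big-butterfly-count-map and /big-butterfly-count-map/
    return 'big butterfly count map'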
7c034802338c78ccb895b7a362e0d4ed11b6b4da
|
.offlineimap.py
|
.offlineimap.py
|
#!/usr/bin/python
import re, os
def get_password_emacs(machine, login, port):
s = "machine %s login %s port %s password ([^ ]*)\n" % (machine, login, port)
p = re.compile(s)
authinfo = os.popen("gpg -q -d ~/.authinfo.gpg").read()
return p.search(authinfo).group(1)
|
#!/usr/bin/python
import re, os
def get_password_emacs(machine, login, port):
"""Return password for the given machine/login/port.
Your .authinfo.gpg file had better follow the following order, or
you will not get a result.
"""
s = "machine %s login %s port %s password ([^ ]*)\n" % (machine, login, port)
p = re.compile(s)
authinfo = os.popen("gpg -q -d ~/.authinfo.gpg").read()
return p.search(authinfo).group(1)
|
Add a comment for the get_password_emacs function
|
Add a comment for the get_password_emacs function
Comment necessary because the format of authinfo needs to match the
semi-brittle regex (ah, regexes...)
This also moves the file to a proper dotfile, similar to commit
42f2b513a7949edf901b18233c1229bfcc24b706
|
Python
|
mit
|
olive42/dotfiles,olive42/dotfiles
|
#!/usr/bin/python
import re, os
def get_password_emacs(machine, login, port):
s = "machine %s login %s port %s password ([^ ]*)\n" % (machine, login, port)
p = re.compile(s)
authinfo = os.popen("gpg -q -d ~/.authinfo.gpg").read()
return p.search(authinfo).group(1)
Add a comment for the get_password_emacs function
Comment necessary because the format of authinfo needs to match the
semi-brittle regex (ah, regexes...)
This also moves the file to a proper dotfile, similar to commit
42f2b513a7949edf901b18233c1229bfcc24b706
|
#!/usr/bin/python
import re, os
def get_password_emacs(machine, login, port):
"""Return password for the given machine/login/port.
Your .authinfo.gpg file had better follow the following order, or
you will not get a result.
"""
s = "machine %s login %s port %s password ([^ ]*)\n" % (machine, login, port)
p = re.compile(s)
authinfo = os.popen("gpg -q -d ~/.authinfo.gpg").read()
return p.search(authinfo).group(1)
|
<commit_before>#!/usr/bin/python
import re, os
def get_password_emacs(machine, login, port):
s = "machine %s login %s port %s password ([^ ]*)\n" % (machine, login, port)
p = re.compile(s)
authinfo = os.popen("gpg -q -d ~/.authinfo.gpg").read()
return p.search(authinfo).group(1)
<commit_msg>Add a comment for the get_password_emacs function
Comment necessary because the format of authinfo needs to match the
semi-brittle regex (ah, regexes...)
This also moves the file to a proper dotfile, similar to commit
42f2b513a7949edf901b18233c1229bfcc24b706<commit_after>
|
#!/usr/bin/python
import re, os
def get_password_emacs(machine, login, port):
"""Return password for the given machine/login/port.
Your .authinfo.gpg file had better follow the following order, or
you will not get a result.
"""
s = "machine %s login %s port %s password ([^ ]*)\n" % (machine, login, port)
p = re.compile(s)
authinfo = os.popen("gpg -q -d ~/.authinfo.gpg").read()
return p.search(authinfo).group(1)
|
#!/usr/bin/python
import re, os
def get_password_emacs(machine, login, port):
s = "machine %s login %s port %s password ([^ ]*)\n" % (machine, login, port)
p = re.compile(s)
authinfo = os.popen("gpg -q -d ~/.authinfo.gpg").read()
return p.search(authinfo).group(1)
Add a comment for the get_password_emacs function
Comment necessary because the format of authinfo needs to match the
semi-brittle regex (ah, regexes...)
This also moves the file to a proper dotfile, similar to commit
42f2b513a7949edf901b18233c1229bfcc24b706#!/usr/bin/python
import re, os
def get_password_emacs(machine, login, port):
"""Return password for the given machine/login/port.
Your .authinfo.gpg file had better follow the following order, or
you will not get a result.
"""
s = "machine %s login %s port %s password ([^ ]*)\n" % (machine, login, port)
p = re.compile(s)
authinfo = os.popen("gpg -q -d ~/.authinfo.gpg").read()
return p.search(authinfo).group(1)
|
<commit_before>#!/usr/bin/python
import re, os
def get_password_emacs(machine, login, port):
s = "machine %s login %s port %s password ([^ ]*)\n" % (machine, login, port)
p = re.compile(s)
authinfo = os.popen("gpg -q -d ~/.authinfo.gpg").read()
return p.search(authinfo).group(1)
<commit_msg>Add a comment for the get_password_emacs function
Comment necessary because the format of authinfo needs to match the
semi-brittle regex (ah, regexes...)
This also moves the file to a proper dotfile, similar to commit
42f2b513a7949edf901b18233c1229bfcc24b706<commit_after>#!/usr/bin/python
import re, os
def get_password_emacs(machine, login, port):
"""Return password for the given machine/login/port.
Your .authinfo.gpg file had better follow the following order, or
you will not get a result.
"""
s = "machine %s login %s port %s password ([^ ]*)\n" % (machine, login, port)
p = re.compile(s)
authinfo = os.popen("gpg -q -d ~/.authinfo.gpg").read()
return p.search(authinfo).group(1)
|
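Note on the record above: the helper builds a regex from the machine/login/port fields and pulls the password out of decrypted ~/.authinfo.gpg output, which is why the added docstring warns that the file must follow that exact field order. A small illustration against an in-memory string instead of gpg output (the credentials are made up):

import re

authinfo = "machine imap.example.org login alice port 993 password s3cret\n"
pattern = re.compile(
    r"machine %s login %s port %s password ([^ ]*)\n"
    % ("imap.example.org", "alice", "993"))
print(pattern.search(authinfo).group(1))  # -> s3cret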
e79010f0aedf6f832ef14a72f435ddba33068e35
|
kindergarten-garden/kindergarten_garden.py
|
kindergarten-garden/kindergarten_garden.py
|
CHILDREN = ["Alice", "Bob", "Charlie", "David", "Eve", "Fred",
"Ginny", "Harriet", "Ileana", "Joseph", "Kincaid", "Larry"]
PLANTS = {"C": "Clover", "G": "Grass", "R": "Radishes", "V": "Violets"}
class Garden(object):
def __init__(self, garden, students=CHILDREN):
self.students = sorted(students)
rows = garden.split()
patches = [rows[0][i:i+2] + rows[1][i:i+2]
for i in range(0,2*len(self.students),2)]
self._garden = {s: [PLANTS[ch] for ch in p]
for s, p in zip(self.students, patches)}
def plants(self, student):
return self._garden[student]
|
CHILDREN = ["Alice", "Bob", "Charlie", "David", "Eve", "Fred",
"Ginny", "Harriet", "Ileana", "Joseph", "Kincaid", "Larry"]
PLANTS = {"C": "Clover", "G": "Grass", "R": "Radishes", "V": "Violets"}
class Garden(object):
def __init__(self, garden, students=CHILDREN):
self.students = sorted(students)
row1, row2 = garden.split()
patches = [row1[i:i+2] + row2[i:i+2]
for i in range(0,2*len(self.students),2)]
self._garden = {s: [PLANTS[ch] for ch in p]
for s, p in zip(self.students, patches)}
def plants(self, student):
return self._garden[student]
|
Use unpacking for simpler code
|
Use unpacking for simpler code
|
Python
|
agpl-3.0
|
CubicComet/exercism-python-solutions
|
CHILDREN = ["Alice", "Bob", "Charlie", "David", "Eve", "Fred",
"Ginny", "Harriet", "Ileana", "Joseph", "Kincaid", "Larry"]
PLANTS = {"C": "Clover", "G": "Grass", "R": "Radishes", "V": "Violets"}
class Garden(object):
def __init__(self, garden, students=CHILDREN):
self.students = sorted(students)
rows = garden.split()
patches = [rows[0][i:i+2] + rows[1][i:i+2]
for i in range(0,2*len(self.students),2)]
self._garden = {s: [PLANTS[ch] for ch in p]
for s, p in zip(self.students, patches)}
def plants(self, student):
return self._garden[student]
Use unpacking for simpler code
|
CHILDREN = ["Alice", "Bob", "Charlie", "David", "Eve", "Fred",
"Ginny", "Harriet", "Ileana", "Joseph", "Kincaid", "Larry"]
PLANTS = {"C": "Clover", "G": "Grass", "R": "Radishes", "V": "Violets"}
class Garden(object):
def __init__(self, garden, students=CHILDREN):
self.students = sorted(students)
row1, row2 = garden.split()
patches = [row1[i:i+2] + row2[i:i+2]
for i in range(0,2*len(self.students),2)]
self._garden = {s: [PLANTS[ch] for ch in p]
for s, p in zip(self.students, patches)}
def plants(self, student):
return self._garden[student]
|
<commit_before>CHILDREN = ["Alice", "Bob", "Charlie", "David", "Eve", "Fred",
"Ginny", "Harriet", "Ileana", "Joseph", "Kincaid", "Larry"]
PLANTS = {"C": "Clover", "G": "Grass", "R": "Radishes", "V": "Violets"}
class Garden(object):
def __init__(self, garden, students=CHILDREN):
self.students = sorted(students)
rows = garden.split()
patches = [rows[0][i:i+2] + rows[1][i:i+2]
for i in range(0,2*len(self.students),2)]
self._garden = {s: [PLANTS[ch] for ch in p]
for s, p in zip(self.students, patches)}
def plants(self, student):
return self._garden[student]
<commit_msg>Use unpacking for simpler code<commit_after>
|
CHILDREN = ["Alice", "Bob", "Charlie", "David", "Eve", "Fred",
"Ginny", "Harriet", "Ileana", "Joseph", "Kincaid", "Larry"]
PLANTS = {"C": "Clover", "G": "Grass", "R": "Radishes", "V": "Violets"}
class Garden(object):
def __init__(self, garden, students=CHILDREN):
self.students = sorted(students)
row1, row2 = garden.split()
patches = [row1[i:i+2] + row2[i:i+2]
for i in range(0,2*len(self.students),2)]
self._garden = {s: [PLANTS[ch] for ch in p]
for s, p in zip(self.students, patches)}
def plants(self, student):
return self._garden[student]
|
CHILDREN = ["Alice", "Bob", "Charlie", "David", "Eve", "Fred",
"Ginny", "Harriet", "Ileana", "Joseph", "Kincaid", "Larry"]
PLANTS = {"C": "Clover", "G": "Grass", "R": "Radishes", "V": "Violets"}
class Garden(object):
def __init__(self, garden, students=CHILDREN):
self.students = sorted(students)
rows = garden.split()
patches = [rows[0][i:i+2] + rows[1][i:i+2]
for i in range(0,2*len(self.students),2)]
self._garden = {s: [PLANTS[ch] for ch in p]
for s, p in zip(self.students, patches)}
def plants(self, student):
return self._garden[student]
Use unpacking for simpler codeCHILDREN = ["Alice", "Bob", "Charlie", "David", "Eve", "Fred",
"Ginny", "Harriet", "Ileana", "Joseph", "Kincaid", "Larry"]
PLANTS = {"C": "Clover", "G": "Grass", "R": "Radishes", "V": "Violets"}
class Garden(object):
def __init__(self, garden, students=CHILDREN):
self.students = sorted(students)
row1, row2 = garden.split()
patches = [row1[i:i+2] + row2[i:i+2]
for i in range(0,2*len(self.students),2)]
self._garden = {s: [PLANTS[ch] for ch in p]
for s, p in zip(self.students, patches)}
def plants(self, student):
return self._garden[student]
|
<commit_before>CHILDREN = ["Alice", "Bob", "Charlie", "David", "Eve", "Fred",
"Ginny", "Harriet", "Ileana", "Joseph", "Kincaid", "Larry"]
PLANTS = {"C": "Clover", "G": "Grass", "R": "Radishes", "V": "Violets"}
class Garden(object):
def __init__(self, garden, students=CHILDREN):
self.students = sorted(students)
rows = garden.split()
patches = [rows[0][i:i+2] + rows[1][i:i+2]
for i in range(0,2*len(self.students),2)]
self._garden = {s: [PLANTS[ch] for ch in p]
for s, p in zip(self.students, patches)}
def plants(self, student):
return self._garden[student]
<commit_msg>Use unpacking for simpler code<commit_after>CHILDREN = ["Alice", "Bob", "Charlie", "David", "Eve", "Fred",
"Ginny", "Harriet", "Ileana", "Joseph", "Kincaid", "Larry"]
PLANTS = {"C": "Clover", "G": "Grass", "R": "Radishes", "V": "Violets"}
class Garden(object):
def __init__(self, garden, students=CHILDREN):
self.students = sorted(students)
row1, row2 = garden.split()
patches = [row1[i:i+2] + row2[i:i+2]
for i in range(0,2*len(self.students),2)]
self._garden = {s: [PLANTS[ch] for ch in p]
for s, p in zip(self.students, patches)}
def plants(self, student):
return self._garden[student]
|
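Note on the record above: swapping `rows[0]`/`rows[1]` indexing for tuple unpacking reads better and also fails loudly if the garden string does not contain exactly two rows. The idiom in isolation:

garden = "VRCG\nVVRC"
row1, row2 = garden.split()   # raises ValueError unless there are exactly two rows
patches = [row1[i:i + 2] + row2[i:i + 2] for i in range(0, len(row1), 2)]
print(patches)  # ['VRVV', 'CGRC']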
e01d45e3ee39023814bca75b1344477e42865b0b
|
ds_max_priority_queue.py
|
ds_max_priority_queue.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class MaxPriorityQueue(object):
"""Max Priority Queue."""
def __init__(self):
pass
def main():
pass
if __name__ == '__main__':
main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def parent(i):
return i // 2
def left(i):
return 2 * i
def right(i):
return 2 * i + 1
class MaxPriorityQueue(object):
"""Max Priority Queue."""
def __init__(self):
pass
def main():
pass
if __name__ == '__main__':
main()
|
Add parent(), left() & right()
|
Add parent(), left() & right()
|
Python
|
bsd-2-clause
|
bowen0701/algorithms_data_structures
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class MaxPriorityQueue(object):
"""Max Priority Queue."""
def __init__(self):
pass
def main():
pass
if __name__ == '__main__':
main()
Add parent(), left() & right()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def parent(i):
return i // 2
def left(i):
return 2 * i
def right(i):
return 2 * i + 1
class MaxPriorityQueue(object):
"""Max Priority Queue."""
def __init__(self):
pass
def main():
pass
if __name__ == '__main__':
main()
|
<commit_before>from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class MaxPriorityQueue(object):
"""Max Priority Queue."""
def __init__(self):
pass
def main():
pass
if __name__ == '__main__':
main()
<commit_msg>Add parent(), left() & right()<commit_after>
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def parent(i):
return i // 2
def left(i):
return 2 * i
def right(i):
return 2 * i + 1
class MaxPriorityQueue(object):
"""Max Priority Queue."""
def __init__(self):
pass
def main():
pass
if __name__ == '__main__':
main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class MaxPriorityQueue(object):
"""Max Priority Queue."""
def __init__(self):
pass
def main():
pass
if __name__ == '__main__':
main()
Add parent(), left() & right()from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def parent(i):
return i // 2
def left(i):
return 2 * i
def right(i):
return 2 * i + 1
class MaxPriorityQueue(object):
"""Max Priority Queue."""
def __init__(self):
pass
def main():
pass
if __name__ == '__main__':
main()
|
<commit_before>from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class MaxPriorityQueue(object):
"""Max Priority Queue."""
def __init__(self):
pass
def main():
pass
if __name__ == '__main__':
main()
<commit_msg>Add parent(), left() & right()<commit_after>from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def parent(i):
return i // 2
def left(i):
return 2 * i
def right(i):
return 2 * i + 1
class MaxPriorityQueue(object):
"""Max Priority Queue."""
def __init__(self):
pass
def main():
pass
if __name__ == '__main__':
main()
|
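Note on the record above: the added helpers are the classic 1-based binary-heap index arithmetic, with the root at index 1: node i has its parent at i // 2 and its children at 2*i and 2*i + 1. A quick self-check of those relations:

def parent(i):
    return i // 2

def left(i):
    return 2 * i

def right(i):
    return 2 * i + 1

assert (left(1), right(1)) == (2, 3)
assert parent(left(5)) == 5
assert parent(right(5)) == 5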
e3425433db1a60a598f39b85cd438a0ef0659f87
|
bhpssh.py
|
bhpssh.py
|
#!/ usr/bin/python
#Black Hat Python
#SSH with Paramiko
#pg 26
import threading, paramiko, subprocess
def ssh_command(ip, user, passwd, command):
client = paramiko.SSHClient()
#client.load_host_keys('/home/justin/.ssh/known_hosts')
client.set_missing_host_key_policy(paramoko.AutoAddPolicy())
client.connect(ip, usernmae= user, password= passwd)
ssh_session = client.get_transport().open_session()
if ssh_session.active:
ssh_session.exec_command(command)
print ssh_session.recv(1024) #read banner
while True:
command = ssh_session.recv(1024) #get the command from the SSH server
try:
cmd_output = subprocess.check_output(command, shell=True)
ssh_session.send(cmd_output)
excelpt: Exceptoin,e:
ssh_session.send(str(e))
client.close()
return
ssh_command('192.168.100.131', 'justin', 'lovesthepython','id')
#End of Program
|
#!/ usr/bin/python
#Black Hat Python SSH with Paramiko pg 26
#TODO: ADD FUNCTIONS AND ARGUMENTS, AND DONT FORGET TO DEBUG.
import threading, paramiko, subprocess
def ssh_command(ip, user, passwd, command):
client = paramiko.SSHClient()
#client.load_host_keys('/home/justin/.ssh/known_hosts')
client.set_missing_host_key_policy(paramoko.AutoAddPolicy())
client.connect(ip, usernmae= user, password= passwd)
ssh_session = client.get_transport().open_session()
if ssh_session.active:
ssh_session.exec_command(command)
print ssh_session.recv(1024) #read banner
while True:
command = ssh_session.recv(1024) #get the command from the SSH server
try:
cmd_output = subprocess.check_output(command, shell=True)
ssh_session.send(cmd_output)
excelpt: Exceptoin,e:
ssh_session.send(str(e))
client.close()
return
ssh_command('192.168.100.131', 'justin', 'lovesthepython','id')
#End of Program
|
ADD FUNCTIONS AND ARGUMENTS. YOU'LL PROBABLY NEED TO DEBUG
|
TODO: ADD FUNCTIONS AND ARGUMENTS. YOU'LL PROBABLY NEED TO DEBUG
|
Python
|
mit
|
n1cfury/BlackHatPython
|
#!/ usr/bin/python
#Black Hat Python
#SSH with Paramiko
#pg 26
import threading, paramiko, subprocess
def ssh_command(ip, user, passwd, command):
client = paramiko.SSHClient()
#client.load_host_keys('/home/justin/.ssh/known_hosts')
client.set_missing_host_key_policy(paramoko.AutoAddPolicy())
client.connect(ip, usernmae= user, password= passwd)
ssh_session = client.get_transport().open_session()
if ssh_session.active:
ssh_session.exec_command(command)
print ssh_session.recv(1024) #read banner
while True:
command = ssh_session.recv(1024) #get the command from the SSH server
try:
cmd_output = subprocess.check_output(command, shell=True)
ssh_session.send(cmd_output)
excelpt: Exceptoin,e:
ssh_session.send(str(e))
client.close()
return
ssh_command('192.168.100.131', 'justin', 'lovesthepython','id')
#End of ProgramTODO: ADD FUNCTIONS AND ARGUMENTS. YOU'LL PROBABLY NEED TO DEBUG
|
#!/ usr/bin/python
#Black Hat Python SSH with Paramiko pg 26
#TODO: ADD FUNCTIONS AND ARGUMENTS, AND DONT FORGET TO DEBUG.
import threading, paramiko, subprocess
def ssh_command(ip, user, passwd, command):
client = paramiko.SSHClient()
#client.load_host_keys('/home/justin/.ssh/known_hosts')
client.set_missing_host_key_policy(paramoko.AutoAddPolicy())
client.connect(ip, usernmae= user, password= passwd)
ssh_session = client.get_transport().open_session()
if ssh_session.active:
ssh_session.exec_command(command)
print ssh_session.recv(1024) #read banner
while True:
command = ssh_session.recv(1024) #get the command from the SSH server
try:
cmd_output = subprocess.check_output(command, shell=True)
ssh_session.send(cmd_output)
excelpt: Exceptoin,e:
ssh_session.send(str(e))
client.close()
return
ssh_command('192.168.100.131', 'justin', 'lovesthepython','id')
#End of Program
|
<commit_before> #!/ usr/bin/python
#Black Hat Python
#SSH with Paramiko
#pg 26
import threading, paramiko, subprocess
def ssh_command(ip, user, passwd, command):
client = paramiko.SSHClient()
#client.load_host_keys('/home/justin/.ssh/known_hosts')
client.set_missing_host_key_policy(paramoko.AutoAddPolicy())
client.connect(ip, usernmae= user, password= passwd)
ssh_session = client.get_transport().open_session()
if ssh_session.active:
ssh_session.exec_command(command)
print ssh_session.recv(1024) #read banner
while True:
command = ssh_session.recv(1024) #get the command from the SSH server
try:
cmd_output = subprocess.check_output(command, shell=True)
ssh_session.send(cmd_output)
excelpt: Exceptoin,e:
ssh_session.send(str(e))
client.close()
return
ssh_command('192.168.100.131', 'justin', 'lovesthepython','id')
#End of Program<commit_msg>TODO: ADD FUNCTIONS AND ARGUMENTS. YOU'LL PROBABLY NEED TO DEBUG<commit_after>
|
#!/ usr/bin/python
#Black Hat Python SSH with Paramiko pg 26
#TODO: ADD FUNCTIONS AND ARGUMENTS, AND DONT FORGET TO DEBUG.
import threading, paramiko, subprocess
def ssh_command(ip, user, passwd, command):
client = paramiko.SSHClient()
#client.load_host_keys('/home/justin/.ssh/known_hosts')
client.set_missing_host_key_policy(paramoko.AutoAddPolicy())
client.connect(ip, usernmae= user, password= passwd)
ssh_session = client.get_transport().open_session()
if ssh_session.active:
ssh_session.exec_command(command)
print ssh_session.recv(1024) #read banner
while True:
command = ssh_session.recv(1024) #get the command from the SSH server
try:
cmd_output = subprocess.check_output(command, shell=True)
ssh_session.send(cmd_output)
excelpt: Exceptoin,e:
ssh_session.send(str(e))
client.close()
return
ssh_command('192.168.100.131', 'justin', 'lovesthepython','id')
#End of Program
|
#!/ usr/bin/python
#Black Hat Python
#SSH with Paramiko
#pg 26
import threading, paramiko, subprocess
def ssh_command(ip, user, passwd, command):
client = paramiko.SSHClient()
#client.load_host_keys('/home/justin/.ssh/known_hosts')
client.set_missing_host_key_policy(paramoko.AutoAddPolicy())
client.connect(ip, usernmae= user, password= passwd)
ssh_session = client.get_transport().open_session()
if ssh_session.active:
ssh_session.exec_command(command)
print ssh_session.recv(1024) #read banner
while True:
command = ssh_session.recv(1024) #get the command from the SSH server
try:
cmd_output = subprocess.check_output(command, shell=True)
ssh_session.send(cmd_output)
excelpt: Exceptoin,e:
ssh_session.send(str(e))
client.close()
return
ssh_command('192.168.100.131', 'justin', 'lovesthepython','id')
#End of ProgramTODO: ADD FUNCTIONS AND ARGUMENTS. YOU'LL PROBABLY NEED TO DEBUG #!/ usr/bin/python
#Black Hat Python SSH with Paramiko pg 26
#TODO: ADD FUNCTIONS AND ARGUMENTS, AND DONT FORGET TO DEBUG.
import threading, paramiko, subprocess
def ssh_command(ip, user, passwd, command):
client = paramiko.SSHClient()
#client.load_host_keys('/home/justin/.ssh/known_hosts')
client.set_missing_host_key_policy(paramoko.AutoAddPolicy())
client.connect(ip, usernmae= user, password= passwd)
ssh_session = client.get_transport().open_session()
if ssh_session.active:
ssh_session.exec_command(command)
print ssh_session.recv(1024) #read banner
while True:
command = ssh_session.recv(1024) #get the command from the SSH server
try:
cmd_output = subprocess.check_output(command, shell=True)
ssh_session.send(cmd_output)
excelpt: Exceptoin,e:
ssh_session.send(str(e))
client.close()
return
ssh_command('192.168.100.131', 'justin', 'lovesthepython','id')
#End of Program
|
<commit_before> #!/ usr/bin/python
#Black Hat Python
#SSH with Paramiko
#pg 26
import threading, paramiko, subprocess
def ssh_command(ip, user, passwd, command):
client = paramiko.SSHClient()
#client.load_host_keys('/home/justin/.ssh/known_hosts')
client.set_missing_host_key_policy(paramoko.AutoAddPolicy())
client.connect(ip, usernmae= user, password= passwd)
ssh_session = client.get_transport().open_session()
if ssh_session.active:
ssh_session.exec_command(command)
print ssh_session.recv(1024) #read banner
while True:
command = ssh_session.recv(1024) #get the command from the SSH server
try:
cmd_output = subprocess.check_output(command, shell=True)
ssh_session.send(cmd_output)
excelpt: Exceptoin,e:
ssh_session.send(str(e))
client.close()
return
ssh_command('192.168.100.131', 'justin', 'lovesthepython','id')
#End of Program<commit_msg>TODO: ADD FUNCTIONS AND ARGUMENTS. YOU'LL PROBABLY NEED TO DEBUG<commit_after> #!/ usr/bin/python
#Black Hat Python SSH with Paramiko pg 26
#TODO: ADD FUNCTIONS AND ARGUMENTS, AND DONT FORGET TO DEBUG.
import threading, paramiko, subprocess
def ssh_command(ip, user, passwd, command):
client = paramiko.SSHClient()
#client.load_host_keys('/home/justin/.ssh/known_hosts')
client.set_missing_host_key_policy(paramoko.AutoAddPolicy())
client.connect(ip, usernmae= user, password= passwd)
ssh_session = client.get_transport().open_session()
if ssh_session.active:
ssh_session.exec_command(command)
print ssh_session.recv(1024) #read banner
while True:
command = ssh_session.recv(1024) #get the command from the SSH server
try:
cmd_output = subprocess.check_output(command, shell=True)
ssh_session.send(cmd_output)
excelpt: Exceptoin,e:
ssh_session.send(str(e))
client.close()
return
ssh_command('192.168.100.131', 'justin', 'lovesthepython','id')
#End of Program
|
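Note on the record above: the stored snippet is deliberately unfinished; its own TODO says functions, arguments and debugging are still needed, and "paramoko", "usernmae", "excelpt" and "Exceptoin" are typos kept verbatim from the source. A hedged sketch of what the client-side call usually looks like with paramiko (host and credentials are the placeholder values from the record itself):

import paramiko

def ssh_command(ip, user, passwd, command):
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(ip, username=user, password=passwd)
    ssh_session = client.get_transport().open_session()
    if ssh_session.active:
        ssh_session.exec_command(command)
        print(ssh_session.recv(1024))  # read the command output / banner
    client.close()

# ssh_command('192.168.100.131', 'justin', 'lovesthepython', 'id')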
73d59df8b94f72e83b978c00518afa01967faac9
|
mle/test_package.py
|
mle/test_package.py
|
def test_distribution():
from mle import Normal, var, par
import theano.tensor as T
x = var('x')
mu = par('mu')
sigma = par('sigma')
dist = Normal(x, mu, sigma)
assert(len(dist.get_vars()) == 1)
assert(len(dist.get_params()) == 2)
assert(len(dist.get_dists()) == 0)
|
def test_formula_transform():
"""
Check if variables can be added/multiplied/transformed.
The result should be a formula that can be plugged into a model.
"""
from mle import var, par
x = var('x')
a = par('a')
b = par('b')
formula = a * x**2 + b
def test_simple_fit():
"""
Check if generating/fitting Gaussian data works
"""
from mle import Normal, var, par
import theano.tensor as T
import numpy as np
x = var('x')
mu = par('mu')
sigma = par('sigma')
dist = Normal(x, mu, sigma)
np.random.seed(42)
data = dist.sample(1e6, {'mu': 0, 'sigma': 1})
results = dist.fit({'x': data}, {'mu': 1, 'sigma': 2}, method='L-BFGS-B')
def test_linear_regression():
"""
Check if fitting a linear model works
"""
from mle import Normal, var, par
import theano.tensor as T
import numpy as np
x = var('x')
y = var('y')
a = par('a')
b = par('b')
sigma = par('sigma')
dist = Normal(y, a * x + b, sigma)
np.random.seed(42)
xs = linspace(0, 1, 20)
ys = dist.sample(20, {'x': xs, 'a': 1, 'b': 0, 'sigma': 0.5})
results = dist.fit({'x': xs, 'y': ys}, {'a': 2, 'b': 1, 'sigma': 1})
|
Add some tests that don't pass yet
|
Add some tests that don't pass yet
|
Python
|
mit
|
ibab/python-mle
|
def test_distribution():
from mle import Normal, var, par
import theano.tensor as T
x = var('x')
mu = par('mu')
sigma = par('sigma')
dist = Normal(x, mu, sigma)
assert(len(dist.get_vars()) == 1)
assert(len(dist.get_params()) == 2)
assert(len(dist.get_dists()) == 0)
Add some tests that don't pass yet
|
def test_formula_transform():
"""
Check if variables can be added/multiplied/transformed.
The result should be a formula that can be plugged into a model.
"""
from mle import var, par
x = var('x')
a = par('a')
b = par('b')
formula = a * x**2 + b
def test_simple_fit():
"""
Check if generating/fitting Gaussian data works
"""
from mle import Normal, var, par
import theano.tensor as T
import numpy as np
x = var('x')
mu = par('mu')
sigma = par('sigma')
dist = Normal(x, mu, sigma)
np.random.seed(42)
data = dist.sample(1e6, {'mu': 0, 'sigma': 1})
results = dist.fit({'x': data}, {'mu': 1, 'sigma': 2}, method='L-BFGS-B')
def test_linear_regression():
"""
Check if fitting a linear model works
"""
from mle import Normal, var, par
import theano.tensor as T
import numpy as np
x = var('x')
y = var('y')
a = par('a')
b = par('b')
sigma = par('sigma')
dist = Normal(y, a * x + b, sigma)
np.random.seed(42)
xs = linspace(0, 1, 20)
ys = dist.sample(20, {'x': xs, 'a': 1, 'b': 0, 'sigma': 0.5})
results = dist.fit({'x': xs, 'y': ys}, {'a': 2, 'b': 1, 'sigma': 1})
|
<commit_before>def test_distribution():
from mle import Normal, var, par
import theano.tensor as T
x = var('x')
mu = par('mu')
sigma = par('sigma')
dist = Normal(x, mu, sigma)
assert(len(dist.get_vars()) == 1)
assert(len(dist.get_params()) == 2)
assert(len(dist.get_dists()) == 0)
<commit_msg>Add some tests that don't pass yet<commit_after>
|
def test_formula_transform():
"""
Check if variables can be added/multiplied/transformed.
The result should be a formula that can be plugged into a model.
"""
from mle import var, par
x = var('x')
a = par('a')
b = par('b')
formula = a * x**2 + b
def test_simple_fit():
"""
Check if generating/fitting Gaussian data works
"""
from mle import Normal, var, par
import theano.tensor as T
import numpy as np
x = var('x')
mu = par('mu')
sigma = par('sigma')
dist = Normal(x, mu, sigma)
np.random.seed(42)
data = dist.sample(1e6, {'mu': 0, 'sigma': 1})
results = dist.fit({'x': data}, {'mu': 1, 'sigma': 2}, method='L-BFGS-B')
def test_linear_regression():
"""
Check if fitting a linear model works
"""
from mle import Normal, var, par
import theano.tensor as T
import numpy as np
x = var('x')
y = var('y')
a = par('a')
b = par('b')
sigma = par('sigma')
dist = Normal(y, a * x + b, sigma)
np.random.seed(42)
xs = linspace(0, 1, 20)
ys = dist.sample(20, {'x': xs, 'a': 1, 'b': 0, 'sigma': 0.5})
results = dist.fit({'x': xs, 'y': ys}, {'a': 2, 'b': 1, 'sigma': 1})
|
def test_distribution():
from mle import Normal, var, par
import theano.tensor as T
x = var('x')
mu = par('mu')
sigma = par('sigma')
dist = Normal(x, mu, sigma)
assert(len(dist.get_vars()) == 1)
assert(len(dist.get_params()) == 2)
assert(len(dist.get_dists()) == 0)
Add some tests that don't pass yet
def test_formula_transform():
"""
Check if variables can be added/multiplied/transformed.
The result should be a formula that can be plugged into a model.
"""
from mle import var, par
x = var('x')
a = par('a')
b = par('b')
formula = a * x**2 + b
def test_simple_fit():
"""
Check if generating/fitting Gaussian data works
"""
from mle import Normal, var, par
import theano.tensor as T
import numpy as np
x = var('x')
mu = par('mu')
sigma = par('sigma')
dist = Normal(x, mu, sigma)
np.random.seed(42)
data = dist.sample(1e6, {'mu': 0, 'sigma': 1})
results = dist.fit({'x': data}, {'mu': 1, 'sigma': 2}, method='L-BFGS-B')
def test_linear_regression():
"""
Check if fitting a linear model works
"""
from mle import Normal, var, par
import theano.tensor as T
import numpy as np
x = var('x')
y = var('y')
a = par('a')
b = par('b')
sigma = par('sigma')
dist = Normal(y, a * x + b, sigma)
np.random.seed(42)
xs = linspace(0, 1, 20)
ys = dist.sample(20, {'x': xs, 'a': 1, 'b': 0, 'sigma': 0.5})
results = dist.fit({'x': xs, 'y': ys}, {'a': 2, 'b': 1, 'sigma': 1})
|
<commit_before>def test_distribution():
from mle import Normal, var, par
import theano.tensor as T
x = var('x')
mu = par('mu')
sigma = par('sigma')
dist = Normal(x, mu, sigma)
assert(len(dist.get_vars()) == 1)
assert(len(dist.get_params()) == 2)
assert(len(dist.get_dists()) == 0)
<commit_msg>Add some tests that don't pass yet<commit_after>
def test_formula_transform():
"""
Check if variables can be added/multiplied/transformed.
The result should be a formula that can be plugged into a model.
"""
from mle import var, par
x = var('x')
a = par('a')
b = par('b')
formula = a * x**2 + b
def test_simple_fit():
"""
Check if generating/fitting Gaussian data works
"""
from mle import Normal, var, par
import theano.tensor as T
import numpy as np
x = var('x')
mu = par('mu')
sigma = par('sigma')
dist = Normal(x, mu, sigma)
np.random.seed(42)
data = dist.sample(1e6, {'mu': 0, 'sigma': 1})
results = dist.fit({'x': data}, {'mu': 1, 'sigma': 2}, method='L-BFGS-B')
def test_linear_regression():
"""
Check if fitting a linear model works
"""
from mle import Normal, var, par
import theano.tensor as T
import numpy as np
x = var('x')
y = var('y')
a = par('a')
b = par('b')
sigma = par('sigma')
dist = Normal(y, a * x + b, sigma)
np.random.seed(42)
xs = linspace(0, 1, 20)
ys = dist.sample(20, {'x': xs, 'a': 1, 'b': 0, 'sigma': 0.5})
results = dist.fit({'x': xs, 'y': ys}, {'a': 2, 'b': 1, 'sigma': 1})
|
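Note on the record above: the new tests sketch the intended mle workflow — declare variables with var() and parameters with par(), build a Normal distribution, draw data with sample(), then recover the parameters with fit(). They are explicitly marked as not passing yet; one visible reason is that the last test calls linspace without importing it from numpy. The same sample-then-fit round trip can be illustrated with numpy and scipy alone (a rough analogue, not the mle API):

import numpy as np
from scipy import stats

np.random.seed(42)
data = stats.norm(loc=0.0, scale=1.0).rvs(size=100000)  # "sample"
mu_hat, sigma_hat = stats.norm.fit(data)                # maximum-likelihood "fit"
print(mu_hat, sigma_hat)  # both close to the true 0 and 1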
17db6b7a7236abdc5199a40f98e4862724929c38
|
conanfile.py
|
conanfile.py
|
from conans import ConanFile
from conans.tools import download, unzip
import os
VERSION = "0.0.6"
class ToolingCMakeUtilConan(ConanFile):
name = "tooling-cmake-util"
version = os.environ.get("CONAN_VERSION_OVERRIDE", VERSION)
generators = "cmake"
requires = ("cmake-include-guard/master@smspillaz/cmake-include-guard",
"cmake-unit/master@smspillaz/cmake-unit",
"cmake-header-language/master@smspillaz/cmake-header-language")
url = "http://github.com/polysquare/tooling-cmake-util"
license = "MIT"
def source(self):
zip_name = "tooling-cmake-util.zip"
download("https://github.com/polysquare/"
"tooling-cmake-util/archive/{version}.zip"
"".format(version="v" + VERSION),
zip_name)
unzip(zip_name)
os.unlink(zip_name)
def package(self):
self.copy(pattern="*.cmake",
dst="cmake/tooling-cmake-util",
src="tooling-cmake-util-" + VERSION,
keep_path=True)
|
from conans import ConanFile
from conans.tools import download, unzip
import os
VERSION = "0.0.7"
class ToolingCMakeUtilConan(ConanFile):
name = "tooling-cmake-util"
version = os.environ.get("CONAN_VERSION_OVERRIDE", VERSION)
generators = "cmake"
requires = ("cmake-include-guard/master@smspillaz/cmake-include-guard",
"cmake-unit/master@smspillaz/cmake-unit",
"cmake-header-language/master@smspillaz/cmake-header-language")
url = "http://github.com/polysquare/tooling-cmake-util"
license = "MIT"
def source(self):
zip_name = "tooling-cmake-util.zip"
download("https://github.com/polysquare/"
"tooling-cmake-util/archive/{version}.zip"
"".format(version="v" + VERSION),
zip_name)
unzip(zip_name)
os.unlink(zip_name)
def package(self):
self.copy(pattern="*.cmake",
dst="cmake/tooling-cmake-util",
src="tooling-cmake-util-" + VERSION,
keep_path=True)
|
Bump version: 0.0.6 -> 0.0.7
|
Bump version: 0.0.6 -> 0.0.7
[ci skip]
|
Python
|
mit
|
polysquare/tooling-cmake-util,polysquare/tooling-cmake-util
|
from conans import ConanFile
from conans.tools import download, unzip
import os
VERSION = "0.0.6"
class ToolingCMakeUtilConan(ConanFile):
name = "tooling-cmake-util"
version = os.environ.get("CONAN_VERSION_OVERRIDE", VERSION)
generators = "cmake"
requires = ("cmake-include-guard/master@smspillaz/cmake-include-guard",
"cmake-unit/master@smspillaz/cmake-unit",
"cmake-header-language/master@smspillaz/cmake-header-language")
url = "http://github.com/polysquare/tooling-cmake-util"
license = "MIT"
def source(self):
zip_name = "tooling-cmake-util.zip"
download("https://github.com/polysquare/"
"tooling-cmake-util/archive/{version}.zip"
"".format(version="v" + VERSION),
zip_name)
unzip(zip_name)
os.unlink(zip_name)
def package(self):
self.copy(pattern="*.cmake",
dst="cmake/tooling-cmake-util",
src="tooling-cmake-util-" + VERSION,
keep_path=True)
Bump version: 0.0.6 -> 0.0.7
[ci skip]
|
from conans import ConanFile
from conans.tools import download, unzip
import os
VERSION = "0.0.7"
class ToolingCMakeUtilConan(ConanFile):
name = "tooling-cmake-util"
version = os.environ.get("CONAN_VERSION_OVERRIDE", VERSION)
generators = "cmake"
requires = ("cmake-include-guard/master@smspillaz/cmake-include-guard",
"cmake-unit/master@smspillaz/cmake-unit",
"cmake-header-language/master@smspillaz/cmake-header-language")
url = "http://github.com/polysquare/tooling-cmake-util"
license = "MIT"
def source(self):
zip_name = "tooling-cmake-util.zip"
download("https://github.com/polysquare/"
"tooling-cmake-util/archive/{version}.zip"
"".format(version="v" + VERSION),
zip_name)
unzip(zip_name)
os.unlink(zip_name)
def package(self):
self.copy(pattern="*.cmake",
dst="cmake/tooling-cmake-util",
src="tooling-cmake-util-" + VERSION,
keep_path=True)
|
<commit_before>from conans import ConanFile
from conans.tools import download, unzip
import os
VERSION = "0.0.6"
class ToolingCMakeUtilConan(ConanFile):
name = "tooling-cmake-util"
version = os.environ.get("CONAN_VERSION_OVERRIDE", VERSION)
generators = "cmake"
requires = ("cmake-include-guard/master@smspillaz/cmake-include-guard",
"cmake-unit/master@smspillaz/cmake-unit",
"cmake-header-language/master@smspillaz/cmake-header-language")
url = "http://github.com/polysquare/tooling-cmake-util"
license = "MIT"
def source(self):
zip_name = "tooling-cmake-util.zip"
download("https://github.com/polysquare/"
"tooling-cmake-util/archive/{version}.zip"
"".format(version="v" + VERSION),
zip_name)
unzip(zip_name)
os.unlink(zip_name)
def package(self):
self.copy(pattern="*.cmake",
dst="cmake/tooling-cmake-util",
src="tooling-cmake-util-" + VERSION,
keep_path=True)
<commit_msg>Bump version: 0.0.6 -> 0.0.7
[ci skip]<commit_after>
|
from conans import ConanFile
from conans.tools import download, unzip
import os
VERSION = "0.0.7"
class ToolingCMakeUtilConan(ConanFile):
name = "tooling-cmake-util"
version = os.environ.get("CONAN_VERSION_OVERRIDE", VERSION)
generators = "cmake"
requires = ("cmake-include-guard/master@smspillaz/cmake-include-guard",
"cmake-unit/master@smspillaz/cmake-unit",
"cmake-header-language/master@smspillaz/cmake-header-language")
url = "http://github.com/polysquare/tooling-cmake-util"
license = "MIT"
def source(self):
zip_name = "tooling-cmake-util.zip"
download("https://github.com/polysquare/"
"tooling-cmake-util/archive/{version}.zip"
"".format(version="v" + VERSION),
zip_name)
unzip(zip_name)
os.unlink(zip_name)
def package(self):
self.copy(pattern="*.cmake",
dst="cmake/tooling-cmake-util",
src="tooling-cmake-util-" + VERSION,
keep_path=True)
|
from conans import ConanFile
from conans.tools import download, unzip
import os
VERSION = "0.0.6"
class ToolingCMakeUtilConan(ConanFile):
name = "tooling-cmake-util"
version = os.environ.get("CONAN_VERSION_OVERRIDE", VERSION)
generators = "cmake"
requires = ("cmake-include-guard/master@smspillaz/cmake-include-guard",
"cmake-unit/master@smspillaz/cmake-unit",
"cmake-header-language/master@smspillaz/cmake-header-language")
url = "http://github.com/polysquare/tooling-cmake-util"
license = "MIT"
def source(self):
zip_name = "tooling-cmake-util.zip"
download("https://github.com/polysquare/"
"tooling-cmake-util/archive/{version}.zip"
"".format(version="v" + VERSION),
zip_name)
unzip(zip_name)
os.unlink(zip_name)
def package(self):
self.copy(pattern="*.cmake",
dst="cmake/tooling-cmake-util",
src="tooling-cmake-util-" + VERSION,
keep_path=True)
Bump version: 0.0.6 -> 0.0.7
[ci skip]from conans import ConanFile
from conans.tools import download, unzip
import os
VERSION = "0.0.7"
class ToolingCMakeUtilConan(ConanFile):
name = "tooling-cmake-util"
version = os.environ.get("CONAN_VERSION_OVERRIDE", VERSION)
generators = "cmake"
requires = ("cmake-include-guard/master@smspillaz/cmake-include-guard",
"cmake-unit/master@smspillaz/cmake-unit",
"cmake-header-language/master@smspillaz/cmake-header-language")
url = "http://github.com/polysquare/tooling-cmake-util"
license = "MIT"
def source(self):
zip_name = "tooling-cmake-util.zip"
download("https://github.com/polysquare/"
"tooling-cmake-util/archive/{version}.zip"
"".format(version="v" + VERSION),
zip_name)
unzip(zip_name)
os.unlink(zip_name)
def package(self):
self.copy(pattern="*.cmake",
dst="cmake/tooling-cmake-util",
src="tooling-cmake-util-" + VERSION,
keep_path=True)
|
<commit_before>from conans import ConanFile
from conans.tools import download, unzip
import os
VERSION = "0.0.6"
class ToolingCMakeUtilConan(ConanFile):
name = "tooling-cmake-util"
version = os.environ.get("CONAN_VERSION_OVERRIDE", VERSION)
generators = "cmake"
requires = ("cmake-include-guard/master@smspillaz/cmake-include-guard",
"cmake-unit/master@smspillaz/cmake-unit",
"cmake-header-language/master@smspillaz/cmake-header-language")
url = "http://github.com/polysquare/tooling-cmake-util"
license = "MIT"
def source(self):
zip_name = "tooling-cmake-util.zip"
download("https://github.com/polysquare/"
"tooling-cmake-util/archive/{version}.zip"
"".format(version="v" + VERSION),
zip_name)
unzip(zip_name)
os.unlink(zip_name)
def package(self):
self.copy(pattern="*.cmake",
dst="cmake/tooling-cmake-util",
src="tooling-cmake-util-" + VERSION,
keep_path=True)
<commit_msg>Bump version: 0.0.6 -> 0.0.7
[ci skip]<commit_after>from conans import ConanFile
from conans.tools import download, unzip
import os
VERSION = "0.0.7"
class ToolingCMakeUtilConan(ConanFile):
name = "tooling-cmake-util"
version = os.environ.get("CONAN_VERSION_OVERRIDE", VERSION)
generators = "cmake"
requires = ("cmake-include-guard/master@smspillaz/cmake-include-guard",
"cmake-unit/master@smspillaz/cmake-unit",
"cmake-header-language/master@smspillaz/cmake-header-language")
url = "http://github.com/polysquare/tooling-cmake-util"
license = "MIT"
def source(self):
zip_name = "tooling-cmake-util.zip"
download("https://github.com/polysquare/"
"tooling-cmake-util/archive/{version}.zip"
"".format(version="v" + VERSION),
zip_name)
unzip(zip_name)
os.unlink(zip_name)
def package(self):
self.copy(pattern="*.cmake",
dst="cmake/tooling-cmake-util",
src="tooling-cmake-util-" + VERSION,
keep_path=True)
|
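Note on the record above: besides bumping VERSION from 0.0.6 to 0.0.7, the recipe keeps reading CONAN_VERSION_OVERRIDE from the environment, so the effective package version can be pinned at build time without editing the file. The lookup is a plain os.environ.get with a default:

import os

VERSION = "0.0.7"
version = os.environ.get("CONAN_VERSION_OVERRIDE", VERSION)
print(version)  # "0.0.7" unless CONAN_VERSION_OVERRIDE is set in the environment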
e3c6b5ce00502077f56ea7033132356ff88a1a55
|
app/soc/mapreduce/gci_insert_dummy_data.py
|
app/soc/mapreduce/gci_insert_dummy_data.py
|
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapreduce to insert dummy data for GCI student data for safe-harboring."""
from google.appengine.ext import blobstore
from google.appengine.ext import db
from mapreduce import context
from mapreduce import operation
from soc.modules.gci.logic import profile as profile_logic
def process(student_info):
ctx = context.get()
params = ctx.mapreduce_spec.mapper.params
program_key_str = params['program_key']
program_key = db.Key.from_path('GCIProgram', program_key_str)
# We can skip the student info entity not belonging to the given program.
if student_info.program.key() != program_key:
return
entities, blobs = profile_logic.insertDummyData(student_info)
blobstore.delete(filter(lambda blob: blob, blobs))
for entity in entities:
yield operation.db.Put(entity)
yield operation.counters.Increment("profile dummy data inserted")
|
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapreduce to insert dummy data for GCI student data for safe-harboring."""
from google.appengine.ext import blobstore
from google.appengine.ext import db
from mapreduce import context
from mapreduce import operation
from soc.modules.gci.logic import profile as profile_logic
def process(student_info):
ctx = context.get()
params = ctx.mapreduce_spec.mapper.params
program_key_str = params['program_key']
program_key = db.Key.from_path('GCIProgram', program_key_str)
# We can skip the student info entity not belonging to the given program.
if student_info.program.key() != program_key:
return
entities, blobs = profile_logic.insertDummyData(student_info)
blobstore.delete(filter(bool, blobs))
for entity in entities:
yield operation.db.Put(entity)
yield operation.counters.Increment("profile dummy data inserted")
|
Replace "lambda blob: blob" with "bool".
|
Replace "lambda blob: blob" with "bool".
This is legitimate (and even an improvement) since the function is
passed to filter, which will use its return value in a boolean
context anyway.
This also cleans up a lint warning.
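For illustration only (not part of the original commit message), a minimal sketch of the equivalence this relies on: filter() only checks whether the predicate's return value is truthy, so a predicate that returns the item itself selects exactly the same elements as bool(). The blob_keys list below is made up for the demo.

blob_keys = ['blob-key-1', None, '', 'blob-key-2']
kept_lambda = list(filter(lambda blob: blob, blob_keys))   # identity-style predicate
kept_bool = list(filter(bool, blob_keys))                  # built-in predicate
assert kept_lambda == kept_bool == ['blob-key-1', 'blob-key-2']  # falsy entries dropped either way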
|
Python
|
apache-2.0
|
rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son
|
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapreduce to insert dummy data for GCI student data for safe-harboring."""
from google.appengine.ext import blobstore
from google.appengine.ext import db
from mapreduce import context
from mapreduce import operation
from soc.modules.gci.logic import profile as profile_logic
def process(student_info):
ctx = context.get()
params = ctx.mapreduce_spec.mapper.params
program_key_str = params['program_key']
program_key = db.Key.from_path('GCIProgram', program_key_str)
# We can skip the student info entity not belonging to the given program.
if student_info.program.key() != program_key:
return
entities, blobs = profile_logic.insertDummyData(student_info)
blobstore.delete(filter(lambda blob: blob, blobs))
for entity in entities:
yield operation.db.Put(entity)
yield operation.counters.Increment("profile dummy data inserted")
Replace "lambda blob: blob" with "bool".
This is legitimate (and even an improvement) since the function is
passed to filter, which will use its return value in a boolean
context anyway.
This also cleans up a lint warning.
|
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapreduce to insert dummy data for GCI student data for safe-harboring."""
from google.appengine.ext import blobstore
from google.appengine.ext import db
from mapreduce import context
from mapreduce import operation
from soc.modules.gci.logic import profile as profile_logic
def process(student_info):
ctx = context.get()
params = ctx.mapreduce_spec.mapper.params
program_key_str = params['program_key']
program_key = db.Key.from_path('GCIProgram', program_key_str)
# We can skip the student info entity not belonging to the given program.
if student_info.program.key() != program_key:
return
entities, blobs = profile_logic.insertDummyData(student_info)
blobstore.delete(filter(bool, blobs))
for entity in entities:
yield operation.db.Put(entity)
yield operation.counters.Increment("profile dummy data inserted")
|
<commit_before># Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapreduce to insert dummy data for GCI student data for safe-harboring."""
from google.appengine.ext import blobstore
from google.appengine.ext import db
from mapreduce import context
from mapreduce import operation
from soc.modules.gci.logic import profile as profile_logic
def process(student_info):
ctx = context.get()
params = ctx.mapreduce_spec.mapper.params
program_key_str = params['program_key']
program_key = db.Key.from_path('GCIProgram', program_key_str)
# We can skip the student info entity not belonging to the given program.
if student_info.program.key() != program_key:
return
entities, blobs = profile_logic.insertDummyData(student_info)
blobstore.delete(filter(lambda blob: blob, blobs))
for entity in entities:
yield operation.db.Put(entity)
yield operation.counters.Increment("profile dummy data inserted")
<commit_msg>Replace "lambda blob: blob" with "bool".
This is legitimate (and even an improvement) since the function is
passed to filter, which will use its return value in a boolean
context anyway.
This also cleans up a lint warning.<commit_after>
|
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapreduce to insert dummy data for GCI student data for safe-harboring."""
from google.appengine.ext import blobstore
from google.appengine.ext import db
from mapreduce import context
from mapreduce import operation
from soc.modules.gci.logic import profile as profile_logic
def process(student_info):
ctx = context.get()
params = ctx.mapreduce_spec.mapper.params
program_key_str = params['program_key']
program_key = db.Key.from_path('GCIProgram', program_key_str)
# We can skip the student info entity not belonging to the given program.
if student_info.program.key() != program_key:
return
entities, blobs = profile_logic.insertDummyData(student_info)
blobstore.delete(filter(bool, blobs))
for entity in entities:
yield operation.db.Put(entity)
yield operation.counters.Increment("profile dummy data inserted")
|
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapreduce to insert dummy data for GCI student data for safe-harboring."""
from google.appengine.ext import blobstore
from google.appengine.ext import db
from mapreduce import context
from mapreduce import operation
from soc.modules.gci.logic import profile as profile_logic
def process(student_info):
ctx = context.get()
params = ctx.mapreduce_spec.mapper.params
program_key_str = params['program_key']
program_key = db.Key.from_path('GCIProgram', program_key_str)
# We can skip the student info entity not belonging to the given program.
if student_info.program.key() != program_key:
return
entities, blobs = profile_logic.insertDummyData(student_info)
blobstore.delete(filter(lambda blob: blob, blobs))
for entity in entities:
yield operation.db.Put(entity)
yield operation.counters.Increment("profile dummy data inserted")
Replace "lambda blob: blob" with "bool".
This is legitimate (and even an improvement) since the function is
passed to filter, which will use its return value in a boolean
context anyway.
This also cleans up a lint warning.# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapreduce to insert dummy data for GCI student data for safe-harboring."""
from google.appengine.ext import blobstore
from google.appengine.ext import db
from mapreduce import context
from mapreduce import operation
from soc.modules.gci.logic import profile as profile_logic
def process(student_info):
ctx = context.get()
params = ctx.mapreduce_spec.mapper.params
program_key_str = params['program_key']
program_key = db.Key.from_path('GCIProgram', program_key_str)
# We can skip the student info entity not belonging to the given program.
if student_info.program.key() != program_key:
return
entities, blobs = profile_logic.insertDummyData(student_info)
blobstore.delete(filter(bool, blobs))
for entity in entities:
yield operation.db.Put(entity)
yield operation.counters.Increment("profile dummy data inserted")
|
<commit_before># Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapreduce to insert dummy data for GCI student data for safe-harboring."""
from google.appengine.ext import blobstore
from google.appengine.ext import db
from mapreduce import context
from mapreduce import operation
from soc.modules.gci.logic import profile as profile_logic
def process(student_info):
ctx = context.get()
params = ctx.mapreduce_spec.mapper.params
program_key_str = params['program_key']
program_key = db.Key.from_path('GCIProgram', program_key_str)
# We can skip the student info entity not belonging to the given program.
if student_info.program.key() != program_key:
return
entities, blobs = profile_logic.insertDummyData(student_info)
blobstore.delete(filter(lambda blob: blob, blobs))
for entity in entities:
yield operation.db.Put(entity)
yield operation.counters.Increment("profile dummy data inserted")
<commit_msg>Replace "lambda blob: blob" with "bool".
This is legitimate (and even an improvement) since the function is
passed to filter, which will use its return value in a boolean
context anyway.
This also cleans up a lint warning.<commit_after># Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapreduce to insert dummy data for GCI student data for safe-harboring."""
from google.appengine.ext import blobstore
from google.appengine.ext import db
from mapreduce import context
from mapreduce import operation
from soc.modules.gci.logic import profile as profile_logic
def process(student_info):
ctx = context.get()
params = ctx.mapreduce_spec.mapper.params
program_key_str = params['program_key']
program_key = db.Key.from_path('GCIProgram', program_key_str)
# We can skip the student info entity not belonging to the given program.
if student_info.program.key() != program_key:
return
entities, blobs = profile_logic.insertDummyData(student_info)
blobstore.delete(filter(bool, blobs))
for entity in entities:
yield operation.db.Put(entity)
yield operation.counters.Increment("profile dummy data inserted")
|
21d45c0830f9634beb04dcbc05b43ae77780a713
|
examples/explicit_serializer.py
|
examples/explicit_serializer.py
|
import time
from osbrain import run_nameserver
from osbrain import run_agent
# Simple handler for example purposes
def set_received(agent, message, topic=None):
agent.received = message
if __name__ == '__main__':
# System deployment
run_nameserver()
a0 = run_agent('a0')
a1 = run_agent('a1')
# Bind to a socket, specifying the serializer type
addr = a1.bind('PULL', handler=set_received, serializer='pickle')
    # Establish a connection. Note that the serializer is not needed, since it is
    # automatically chosen appropriately.
a0.connect(addr, 'push')
a0.send('push', 'Hello world')
while not a1.get_attr('received'):
time.sleep(0.1)
print('Message received!')
|
import time
from osbrain import run_nameserver
from osbrain import run_agent
# Simple handler for example purposes
def set_received(agent, message, topic=None):
agent.received = message
if __name__ == '__main__':
# System deployment
run_nameserver()
a0 = run_agent('a0')
a1 = run_agent('a1')
# Bind to a socket, specifying the serializer type
addr = a1.bind('PULL', handler=set_received, serializer='pickle')
    # Establish a connection. Note that the serializer is not needed, since it is
    # automatically chosen appropriately.
a0.connect(addr, 'push')
a0.send('push', 'Hello world')
while not a1.get_attr('received'):
time.sleep(0.1)
print('Message received!')
|
Add line at the end of example
|
Add line at the end of example
|
Python
|
apache-2.0
|
opensistemas-hub/osbrain
|
import time
from osbrain import run_nameserver
from osbrain import run_agent
# Simple handler for example purposes
def set_received(agent, message, topic=None):
agent.received = message
if __name__ == '__main__':
# System deployment
run_nameserver()
a0 = run_agent('a0')
a1 = run_agent('a1')
# Bind to a socket, specifying the serializer type
addr = a1.bind('PULL', handler=set_received, serializer='pickle')
    # Establish a connection. Note that the serializer is not needed, since it is
    # automatically chosen appropriately.
a0.connect(addr, 'push')
a0.send('push', 'Hello world')
while not a1.get_attr('received'):
time.sleep(0.1)
print('Message received!')Add line at the end of example
|
import time
from osbrain import run_nameserver
from osbrain import run_agent
# Simple handler for example purposes
def set_received(agent, message, topic=None):
agent.received = message
if __name__ == '__main__':
# System deployment
run_nameserver()
a0 = run_agent('a0')
a1 = run_agent('a1')
# Bind to a socket, specifying the serializer type
addr = a1.bind('PULL', handler=set_received, serializer='pickle')
    # Establish a connection. Note that the serializer is not needed, since it is
    # automatically chosen appropriately.
a0.connect(addr, 'push')
a0.send('push', 'Hello world')
while not a1.get_attr('received'):
time.sleep(0.1)
print('Message received!')
|
<commit_before>import time
from osbrain import run_nameserver
from osbrain import run_agent
# Simple handler for example purposes
def set_received(agent, message, topic=None):
agent.received = message
if __name__ == '__main__':
# System deployment
run_nameserver()
a0 = run_agent('a0')
a1 = run_agent('a1')
# Bind to a socket, specifying the serializer type
addr = a1.bind('PULL', handler=set_received, serializer='pickle')
    # Establish a connection. Note that the serializer is not needed, since it is
    # automatically chosen appropriately.
a0.connect(addr, 'push')
a0.send('push', 'Hello world')
while not a1.get_attr('received'):
time.sleep(0.1)
print('Message received!')<commit_msg>Add line at the end of example<commit_after>
|
import time
from osbrain import run_nameserver
from osbrain import run_agent
# Simple handler for example purposes
def set_received(agent, message, topic=None):
agent.received = message
if __name__ == '__main__':
# System deployment
run_nameserver()
a0 = run_agent('a0')
a1 = run_agent('a1')
# Bind to a socket, specifying the serializer type
addr = a1.bind('PULL', handler=set_received, serializer='pickle')
    # Establish a connection. Note that the serializer is not needed, since it is
    # automatically chosen appropriately.
a0.connect(addr, 'push')
a0.send('push', 'Hello world')
while not a1.get_attr('received'):
time.sleep(0.1)
print('Message received!')
|
import time
from osbrain import run_nameserver
from osbrain import run_agent
# Simple handler for example purposes
def set_received(agent, message, topic=None):
agent.received = message
if __name__ == '__main__':
# System deployment
run_nameserver()
a0 = run_agent('a0')
a1 = run_agent('a1')
# Bind to a socket, specifying the serializer type
addr = a1.bind('PULL', handler=set_received, serializer='pickle')
    # Establish a connection. Note that the serializer is not needed, since it is
    # automatically chosen appropriately.
a0.connect(addr, 'push')
a0.send('push', 'Hello world')
while not a1.get_attr('received'):
time.sleep(0.1)
print('Message received!')Add line at the end of exampleimport time
from osbrain import run_nameserver
from osbrain import run_agent
# Simple handler for example purposes
def set_received(agent, message, topic=None):
agent.received = message
if __name__ == '__main__':
# System deployment
run_nameserver()
a0 = run_agent('a0')
a1 = run_agent('a1')
# Bind to a socket, specifying the serializer type
addr = a1.bind('PULL', handler=set_received, serializer='pickle')
    # Establish a connection. Note that the serializer is not needed, since it is
    # automatically chosen appropriately.
a0.connect(addr, 'push')
a0.send('push', 'Hello world')
while not a1.get_attr('received'):
time.sleep(0.1)
print('Message received!')
|
<commit_before>import time
from osbrain import run_nameserver
from osbrain import run_agent
# Simple handler for example purposes
def set_received(agent, message, topic=None):
agent.received = message
if __name__ == '__main__':
# System deployment
run_nameserver()
a0 = run_agent('a0')
a1 = run_agent('a1')
# Bind to a socket, specifying the serializer type
addr = a1.bind('PULL', handler=set_received, serializer='pickle')
    # Establish a connection. Note that the serializer is not needed, since it is
    # automatically chosen appropriately.
a0.connect(addr, 'push')
a0.send('push', 'Hello world')
while not a1.get_attr('received'):
time.sleep(0.1)
print('Message received!')<commit_msg>Add line at the end of example<commit_after>import time
from osbrain import run_nameserver
from osbrain import run_agent
# Simple handler for example purposes
def set_received(agent, message, topic=None):
agent.received = message
if __name__ == '__main__':
# System deployment
run_nameserver()
a0 = run_agent('a0')
a1 = run_agent('a1')
# Bind to a socket, specifying the serializer type
addr = a1.bind('PULL', handler=set_received, serializer='pickle')
    # Establish a connection. Note that the serializer is not needed, since it is
    # automatically chosen appropriately.
a0.connect(addr, 'push')
a0.send('push', 'Hello world')
while not a1.get_attr('received'):
time.sleep(0.1)
print('Message received!')
|
593fb7d6db4a5fe35a80fcad300eb43bb93ba3bb
|
social_core/tests/backends/test_udata.py
|
social_core/tests/backends/test_udata.py
|
import json
from six.moves.urllib_parse import urlencode
from .oauth import OAuth2Test
class DatagouvfrOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.udata.DatagouvfrOAuth2'
user_data_url = 'https://www.data.gouv.fr/api/1/me/'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer'
})
request_token_body = urlencode({
'oauth_token_secret': 'foobar-secret',
'oauth_token': 'foobar',
'oauth_callback_confirmed': 'true'
})
user_data_body = json.dumps({})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
|
import json
from six.moves.urllib_parse import urlencode
from .oauth import OAuth2Test
class DatagouvfrOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.udata.DatagouvfrOAuth2'
user_data_url = 'https://www.data.gouv.fr/api/1/me/'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer',
'first_name': 'foobar',
'email': 'foobar@example.com'
})
request_token_body = urlencode({
'oauth_token_secret': 'foobar-secret',
'oauth_token': 'foobar',
'oauth_callback_confirmed': 'true'
})
user_data_body = json.dumps({})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
|
Fix tests for udata/datagouvfr backend
|
Fix tests for udata/datagouvfr backend
|
Python
|
bsd-3-clause
|
python-social-auth/social-core,python-social-auth/social-core
|
import json
from six.moves.urllib_parse import urlencode
from .oauth import OAuth2Test
class DatagouvfrOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.udata.DatagouvfrOAuth2'
user_data_url = 'https://www.data.gouv.fr/api/1/me/'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer'
})
request_token_body = urlencode({
'oauth_token_secret': 'foobar-secret',
'oauth_token': 'foobar',
'oauth_callback_confirmed': 'true'
})
user_data_body = json.dumps({})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
Fix tests for udata/datagouvfr backend
|
import json
from six.moves.urllib_parse import urlencode
from .oauth import OAuth2Test
class DatagouvfrOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.udata.DatagouvfrOAuth2'
user_data_url = 'https://www.data.gouv.fr/api/1/me/'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer',
'first_name': 'foobar',
'email': 'foobar@example.com'
})
request_token_body = urlencode({
'oauth_token_secret': 'foobar-secret',
'oauth_token': 'foobar',
'oauth_callback_confirmed': 'true'
})
user_data_body = json.dumps({})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
|
<commit_before>import json
from six.moves.urllib_parse import urlencode
from .oauth import OAuth2Test
class DatagouvfrOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.udata.DatagouvfrOAuth2'
user_data_url = 'https://www.data.gouv.fr/api/1/me/'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer'
})
request_token_body = urlencode({
'oauth_token_secret': 'foobar-secret',
'oauth_token': 'foobar',
'oauth_callback_confirmed': 'true'
})
user_data_body = json.dumps({})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
<commit_msg>Fix tests for udata/datagouvfr backend<commit_after>
|
import json
from six.moves.urllib_parse import urlencode
from .oauth import OAuth2Test
class DatagouvfrOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.udata.DatagouvfrOAuth2'
user_data_url = 'https://www.data.gouv.fr/api/1/me/'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer',
'first_name': 'foobar',
'email': 'foobar@example.com'
})
request_token_body = urlencode({
'oauth_token_secret': 'foobar-secret',
'oauth_token': 'foobar',
'oauth_callback_confirmed': 'true'
})
user_data_body = json.dumps({})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
|
import json
from six.moves.urllib_parse import urlencode
from .oauth import OAuth2Test
class DatagouvfrOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.udata.DatagouvfrOAuth2'
user_data_url = 'https://www.data.gouv.fr/api/1/me/'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer'
})
request_token_body = urlencode({
'oauth_token_secret': 'foobar-secret',
'oauth_token': 'foobar',
'oauth_callback_confirmed': 'true'
})
user_data_body = json.dumps({})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
Fix tests for udata/datagouvfr backendimport json
from six.moves.urllib_parse import urlencode
from .oauth import OAuth2Test
class DatagouvfrOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.udata.DatagouvfrOAuth2'
user_data_url = 'https://www.data.gouv.fr/api/1/me/'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer',
'first_name': 'foobar',
'email': 'foobar@example.com'
})
request_token_body = urlencode({
'oauth_token_secret': 'foobar-secret',
'oauth_token': 'foobar',
'oauth_callback_confirmed': 'true'
})
user_data_body = json.dumps({})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
|
<commit_before>import json
from six.moves.urllib_parse import urlencode
from .oauth import OAuth2Test
class DatagouvfrOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.udata.DatagouvfrOAuth2'
user_data_url = 'https://www.data.gouv.fr/api/1/me/'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer'
})
request_token_body = urlencode({
'oauth_token_secret': 'foobar-secret',
'oauth_token': 'foobar',
'oauth_callback_confirmed': 'true'
})
user_data_body = json.dumps({})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
<commit_msg>Fix tests for udata/datagouvfr backend<commit_after>import json
from six.moves.urllib_parse import urlencode
from .oauth import OAuth2Test
class DatagouvfrOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.udata.DatagouvfrOAuth2'
user_data_url = 'https://www.data.gouv.fr/api/1/me/'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer',
'first_name': 'foobar',
'email': 'foobar@example.com'
})
request_token_body = urlencode({
'oauth_token_secret': 'foobar-secret',
'oauth_token': 'foobar',
'oauth_callback_confirmed': 'true'
})
user_data_body = json.dumps({})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
|
55cd1bc079017945c2b8f48542c491d6a7d5153f
|
tests/test_cl_json.py
|
tests/test_cl_json.py
|
from kqml import cl_json, KQMLList
def test_parse():
json_dict = {'a': 1, 'b': 2,
'c': ['foo', {'bar': None, 'done': False}],
'this_is_json': True}
res = cl_json._cl_from_json(json_dict)
assert isinstance(res, KQMLList)
assert len(res) == 2*len(json_dict.keys())
back_dict = cl_json.cl_to_json(res)
assert len(back_dict) == len(json_dict)
# TODO: Should test for equality.
|
from kqml import cl_json, KQMLList
def _equal(json_val, back_json_val):
if json_val is False and back_json_val is None:
return True
if type(json_val) != type(back_json_val):
return False
if isinstance(json_val, dict):
ret = True
for key, value in json_val.items():
if not _equal(value, back_json_val[key]):
ret = False
break
elif isinstance(json_val, list):
ret = True
for i, value in enumerate(json_val):
if not _equal(value, back_json_val[i]):
ret = False
break
else:
ret = (json_val == back_json_val)
return ret
def test_parse():
json_dict = {'a': 1, 'b': 2,
'c': ['foo', {'bar': None, 'done': False}],
'this_is_json': True}
res = cl_json._cl_from_json(json_dict)
assert isinstance(res, KQMLList)
assert len(res) == 2*len(json_dict.keys())
back_dict = cl_json.cl_to_json(res)
assert len(back_dict) == len(json_dict)
assert _equal(json_dict, back_dict)
|
Write deeper test of equality for recovered dict.
|
Write deeper test of equality for recovered dict.
|
Python
|
bsd-2-clause
|
bgyori/pykqml
|
from kqml import cl_json, KQMLList
def test_parse():
json_dict = {'a': 1, 'b': 2,
'c': ['foo', {'bar': None, 'done': False}],
'this_is_json': True}
res = cl_json._cl_from_json(json_dict)
assert isinstance(res, KQMLList)
assert len(res) == 2*len(json_dict.keys())
back_dict = cl_json.cl_to_json(res)
assert len(back_dict) == len(json_dict)
# TODO: Should test for equality.
Write deeper test of equality for recovered dict.
|
from kqml import cl_json, KQMLList
def _equal(json_val, back_json_val):
if json_val is False and back_json_val is None:
return True
if type(json_val) != type(back_json_val):
return False
if isinstance(json_val, dict):
ret = True
for key, value in json_val.items():
if not _equal(value, back_json_val[key]):
ret = False
break
elif isinstance(json_val, list):
ret = True
for i, value in enumerate(json_val):
if not _equal(value, back_json_val[i]):
ret = False
break
else:
ret = (json_val == back_json_val)
return ret
def test_parse():
json_dict = {'a': 1, 'b': 2,
'c': ['foo', {'bar': None, 'done': False}],
'this_is_json': True}
res = cl_json._cl_from_json(json_dict)
assert isinstance(res, KQMLList)
assert len(res) == 2*len(json_dict.keys())
back_dict = cl_json.cl_to_json(res)
assert len(back_dict) == len(json_dict)
assert _equal(json_dict, back_dict)
|
<commit_before>from kqml import cl_json, KQMLList
def test_parse():
json_dict = {'a': 1, 'b': 2,
'c': ['foo', {'bar': None, 'done': False}],
'this_is_json': True}
res = cl_json._cl_from_json(json_dict)
assert isinstance(res, KQMLList)
assert len(res) == 2*len(json_dict.keys())
back_dict = cl_json.cl_to_json(res)
assert len(back_dict) == len(json_dict)
# TODO: Should test for equality.
<commit_msg>Write deeper test of equality for recovered dict.<commit_after>
|
from kqml import cl_json, KQMLList
def _equal(json_val, back_json_val):
if json_val is False and back_json_val is None:
return True
if type(json_val) != type(back_json_val):
return False
if isinstance(json_val, dict):
ret = True
for key, value in json_val.items():
if not _equal(value, back_json_val[key]):
ret = False
break
elif isinstance(json_val, list):
ret = True
for i, value in enumerate(json_val):
if not _equal(value, back_json_val[i]):
ret = False
break
else:
ret = (json_val == back_json_val)
return ret
def test_parse():
json_dict = {'a': 1, 'b': 2,
'c': ['foo', {'bar': None, 'done': False}],
'this_is_json': True}
res = cl_json._cl_from_json(json_dict)
assert isinstance(res, KQMLList)
assert len(res) == 2*len(json_dict.keys())
back_dict = cl_json.cl_to_json(res)
assert len(back_dict) == len(json_dict)
assert _equal(json_dict, back_dict)
|
from kqml import cl_json, KQMLList
def test_parse():
json_dict = {'a': 1, 'b': 2,
'c': ['foo', {'bar': None, 'done': False}],
'this_is_json': True}
res = cl_json._cl_from_json(json_dict)
assert isinstance(res, KQMLList)
assert len(res) == 2*len(json_dict.keys())
back_dict = cl_json.cl_to_json(res)
assert len(back_dict) == len(json_dict)
# TODO: Should test for equality.
Write deeper test of equality for recovered dict.from kqml import cl_json, KQMLList
def _equal(json_val, back_json_val):
if json_val is False and back_json_val is None:
return True
if type(json_val) != type(back_json_val):
return False
if isinstance(json_val, dict):
ret = True
for key, value in json_val.items():
if not _equal(value, back_json_val[key]):
ret = False
break
elif isinstance(json_val, list):
ret = True
for i, value in enumerate(json_val):
if not _equal(value, back_json_val[i]):
ret = False
break
else:
ret = (json_val == back_json_val)
return ret
def test_parse():
json_dict = {'a': 1, 'b': 2,
'c': ['foo', {'bar': None, 'done': False}],
'this_is_json': True}
res = cl_json._cl_from_json(json_dict)
assert isinstance(res, KQMLList)
assert len(res) == 2*len(json_dict.keys())
back_dict = cl_json.cl_to_json(res)
assert len(back_dict) == len(json_dict)
assert _equal(json_dict, back_dict)
|
<commit_before>from kqml import cl_json, KQMLList
def test_parse():
json_dict = {'a': 1, 'b': 2,
'c': ['foo', {'bar': None, 'done': False}],
'this_is_json': True}
res = cl_json._cl_from_json(json_dict)
assert isinstance(res, KQMLList)
assert len(res) == 2*len(json_dict.keys())
back_dict = cl_json.cl_to_json(res)
assert len(back_dict) == len(json_dict)
# TODO: Should test for equality.
<commit_msg>Write deeper test of equality for recovered dict.<commit_after>from kqml import cl_json, KQMLList
def _equal(json_val, back_json_val):
if json_val is False and back_json_val is None:
return True
if type(json_val) != type(back_json_val):
return False
if isinstance(json_val, dict):
ret = True
for key, value in json_val.items():
if not _equal(value, back_json_val[key]):
ret = False
break
elif isinstance(json_val, list):
ret = True
for i, value in enumerate(json_val):
if not _equal(value, back_json_val[i]):
ret = False
break
else:
ret = (json_val == back_json_val)
return ret
def test_parse():
json_dict = {'a': 1, 'b': 2,
'c': ['foo', {'bar': None, 'done': False}],
'this_is_json': True}
res = cl_json._cl_from_json(json_dict)
assert isinstance(res, KQMLList)
assert len(res) == 2*len(json_dict.keys())
back_dict = cl_json.cl_to_json(res)
assert len(back_dict) == len(json_dict)
assert _equal(json_dict, back_dict)
|
6dd546d97710c99201af17c19e0f48a8c4702f72
|
tests/test_patspec.py
|
tests/test_patspec.py
|
import pymorph
import numpy as np
def test_patspec():
f = np.array([
[0,0,0,0,0,0,0,0],
[0,0,1,1,1,1,0,0],
[0,1,0,1,1,1,0,0],
[0,0,1,1,1,1,0,0],
[1,1,0,0,0,0,0,0]], bool)
assert pymorph.patspec(f).sum() == (f > 0).sum()
|
import pymorph
import numpy as np
def test_patspec():
f = np.array([
[0,0,0,0,0,0,0,0],
[0,0,1,1,1,1,0,0],
[0,1,0,1,1,1,0,0],
[0,0,1,1,1,1,0,0],
[1,1,0,0,0,0,0,0]], bool)
assert pymorph.patspec(f).sum() == (f > 0).sum()
def test_linear_h():
f = np.arange(9).reshape((3,3)) % 3 > 0
# This crashed in 0.95
# reported by Alexandre Harano
g = pymorph.patspec(f, 'linear-h')
|
Test case for newly reported bug
|
TST: Test case for newly reported bug
This was reported by Alexandre Harano.
|
Python
|
bsd-3-clause
|
luispedro/pymorph
|
import pymorph
import numpy as np
def test_patspec():
f = np.array([
[0,0,0,0,0,0,0,0],
[0,0,1,1,1,1,0,0],
[0,1,0,1,1,1,0,0],
[0,0,1,1,1,1,0,0],
[1,1,0,0,0,0,0,0]], bool)
assert pymorph.patspec(f).sum() == (f > 0).sum()
TST: Test case for newly reported bug
This was reported by Alexandre Harano.
|
import pymorph
import numpy as np
def test_patspec():
f = np.array([
[0,0,0,0,0,0,0,0],
[0,0,1,1,1,1,0,0],
[0,1,0,1,1,1,0,0],
[0,0,1,1,1,1,0,0],
[1,1,0,0,0,0,0,0]], bool)
assert pymorph.patspec(f).sum() == (f > 0).sum()
def test_linear_h():
f = np.arange(9).reshape((3,3)) % 3 > 0
# This crashed in 0.95
# reported by Alexandre Harano
g = pymorph.patspec(f, 'linear-h')
|
<commit_before>import pymorph
import numpy as np
def test_patspec():
f = np.array([
[0,0,0,0,0,0,0,0],
[0,0,1,1,1,1,0,0],
[0,1,0,1,1,1,0,0],
[0,0,1,1,1,1,0,0],
[1,1,0,0,0,0,0,0]], bool)
assert pymorph.patspec(f).sum() == (f > 0).sum()
<commit_msg>TST: Test case for newly reported bug
This was reported by Alexandre Harano.<commit_after>
|
import pymorph
import numpy as np
def test_patspec():
f = np.array([
[0,0,0,0,0,0,0,0],
[0,0,1,1,1,1,0,0],
[0,1,0,1,1,1,0,0],
[0,0,1,1,1,1,0,0],
[1,1,0,0,0,0,0,0]], bool)
assert pymorph.patspec(f).sum() == (f > 0).sum()
def test_linear_h():
f = np.arange(9).reshape((3,3)) % 3 > 0
# This crashed in 0.95
# reported by Alexandre Harano
g = pymorph.patspec(f, 'linear-h')
|
import pymorph
import numpy as np
def test_patspec():
f = np.array([
[0,0,0,0,0,0,0,0],
[0,0,1,1,1,1,0,0],
[0,1,0,1,1,1,0,0],
[0,0,1,1,1,1,0,0],
[1,1,0,0,0,0,0,0]], bool)
assert pymorph.patspec(f).sum() == (f > 0).sum()
TST: Test case for newly reported bug
This was reported by Alexandre Harano.import pymorph
import numpy as np
def test_patspec():
f = np.array([
[0,0,0,0,0,0,0,0],
[0,0,1,1,1,1,0,0],
[0,1,0,1,1,1,0,0],
[0,0,1,1,1,1,0,0],
[1,1,0,0,0,0,0,0]], bool)
assert pymorph.patspec(f).sum() == (f > 0).sum()
def test_linear_h():
f = np.arange(9).reshape((3,3)) % 3 > 0
# This crashed in 0.95
# reported by Alexandre Harano
g = pymorph.patspec(f, 'linear-h')
|
<commit_before>import pymorph
import numpy as np
def test_patspec():
f = np.array([
[0,0,0,0,0,0,0,0],
[0,0,1,1,1,1,0,0],
[0,1,0,1,1,1,0,0],
[0,0,1,1,1,1,0,0],
[1,1,0,0,0,0,0,0]], bool)
assert pymorph.patspec(f).sum() == (f > 0).sum()
<commit_msg>TST: Test case for newly reported bug
This was reported by Alexandre Harano.<commit_after>import pymorph
import numpy as np
def test_patspec():
f = np.array([
[0,0,0,0,0,0,0,0],
[0,0,1,1,1,1,0,0],
[0,1,0,1,1,1,0,0],
[0,0,1,1,1,1,0,0],
[1,1,0,0,0,0,0,0]], bool)
assert pymorph.patspec(f).sum() == (f > 0).sum()
def test_linear_h():
f = np.arange(9).reshape((3,3)) % 3 > 0
# This crashed in 0.95
# reported by Alexandre Harano
g = pymorph.patspec(f, 'linear-h')
|
d971fbb4dc3b69e012b212cd54b6e8511571e1f5
|
graphene/core/classtypes/uniontype.py
|
graphene/core/classtypes/uniontype.py
|
import six
from graphql.core.type import GraphQLUnionType
from .base import FieldsClassType, FieldsClassTypeMeta, FieldsOptions
class UnionTypeOptions(FieldsOptions):
def __init__(self, *args, **kwargs):
super(UnionTypeOptions, self).__init__(*args, **kwargs)
self.types = []
class UnionTypeMeta(FieldsClassTypeMeta):
options_class = UnionTypeOptions
def get_options(cls, meta):
return cls.options_class(meta, types=[])
class UnionType(six.with_metaclass(UnionTypeMeta, FieldsClassType)):
class Meta:
abstract = True
@classmethod
def _resolve_type(cls, schema, instance, *args):
return schema.T(instance.__class__)
@classmethod
def internal_type(cls, schema):
if cls._meta.abstract:
raise Exception("Abstract ObjectTypes don't have a specific type.")
return GraphQLUnionType(
cls._meta.type_name,
types=list(map(schema.T, cls._meta.types)),
resolve_type=lambda instance, info: cls._resolve_type(schema, instance, info),
description=cls._meta.description,
)
|
from functools import partial
import six
from graphql.core.type import GraphQLUnionType
from .base import FieldsClassType, FieldsClassTypeMeta, FieldsOptions
class UnionTypeOptions(FieldsOptions):
def __init__(self, *args, **kwargs):
super(UnionTypeOptions, self).__init__(*args, **kwargs)
self.types = []
class UnionTypeMeta(FieldsClassTypeMeta):
options_class = UnionTypeOptions
def get_options(cls, meta):
return cls.options_class(meta, types=[])
class UnionType(six.with_metaclass(UnionTypeMeta, FieldsClassType)):
class Meta:
abstract = True
@classmethod
def _resolve_type(cls, schema, instance, *args):
return schema.T(instance.__class__)
@classmethod
def internal_type(cls, schema):
if cls._meta.abstract:
raise Exception("Abstract ObjectTypes don't have a specific type.")
return GraphQLUnionType(
cls._meta.type_name,
types=list(map(schema.T, cls._meta.types)),
resolve_type=partial(cls._resolve_type, schema),
description=cls._meta.description,
)
|
Update to use partial instead of lambda function
|
Update to use partial instead of lambda function
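For illustration only (not part of the original commit message), a hedged sketch of why the two forms are interchangeable: functools.partial pre-binds the leading schema argument, just as the lambda closed over it, so both callables receive (instance, info) and forward the same three arguments. demo_resolve and demo_schema below are hypothetical stand-ins for cls._resolve_type and the schema object.

from functools import partial

def demo_resolve(schema, instance, info):
    # stand-in for cls._resolve_type(schema, instance, info)
    return (schema, type(instance), info)

demo_schema = object()
as_lambda = lambda instance, info: demo_resolve(demo_schema, instance, info)
as_partial = partial(demo_resolve, demo_schema)
assert as_lambda('x', None) == as_partial('x', None)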
|
Python
|
mit
|
sjhewitt/graphene,graphql-python/graphene,sjhewitt/graphene,Globegitter/graphene,graphql-python/graphene,Globegitter/graphene
|
import six
from graphql.core.type import GraphQLUnionType
from .base import FieldsClassType, FieldsClassTypeMeta, FieldsOptions
class UnionTypeOptions(FieldsOptions):
def __init__(self, *args, **kwargs):
super(UnionTypeOptions, self).__init__(*args, **kwargs)
self.types = []
class UnionTypeMeta(FieldsClassTypeMeta):
options_class = UnionTypeOptions
def get_options(cls, meta):
return cls.options_class(meta, types=[])
class UnionType(six.with_metaclass(UnionTypeMeta, FieldsClassType)):
class Meta:
abstract = True
@classmethod
def _resolve_type(cls, schema, instance, *args):
return schema.T(instance.__class__)
@classmethod
def internal_type(cls, schema):
if cls._meta.abstract:
raise Exception("Abstract ObjectTypes don't have a specific type.")
return GraphQLUnionType(
cls._meta.type_name,
types=list(map(schema.T, cls._meta.types)),
resolve_type=lambda instance, info: cls._resolve_type(schema, instance, info),
description=cls._meta.description,
)
Update to use partial instead of lambda function
|
from functools import partial
import six
from graphql.core.type import GraphQLUnionType
from .base import FieldsClassType, FieldsClassTypeMeta, FieldsOptions
class UnionTypeOptions(FieldsOptions):
def __init__(self, *args, **kwargs):
super(UnionTypeOptions, self).__init__(*args, **kwargs)
self.types = []
class UnionTypeMeta(FieldsClassTypeMeta):
options_class = UnionTypeOptions
def get_options(cls, meta):
return cls.options_class(meta, types=[])
class UnionType(six.with_metaclass(UnionTypeMeta, FieldsClassType)):
class Meta:
abstract = True
@classmethod
def _resolve_type(cls, schema, instance, *args):
return schema.T(instance.__class__)
@classmethod
def internal_type(cls, schema):
if cls._meta.abstract:
raise Exception("Abstract ObjectTypes don't have a specific type.")
return GraphQLUnionType(
cls._meta.type_name,
types=list(map(schema.T, cls._meta.types)),
resolve_type=partial(cls._resolve_type, schema),
description=cls._meta.description,
)
|
<commit_before>import six
from graphql.core.type import GraphQLUnionType
from .base import FieldsClassType, FieldsClassTypeMeta, FieldsOptions
class UnionTypeOptions(FieldsOptions):
def __init__(self, *args, **kwargs):
super(UnionTypeOptions, self).__init__(*args, **kwargs)
self.types = []
class UnionTypeMeta(FieldsClassTypeMeta):
options_class = UnionTypeOptions
def get_options(cls, meta):
return cls.options_class(meta, types=[])
class UnionType(six.with_metaclass(UnionTypeMeta, FieldsClassType)):
class Meta:
abstract = True
@classmethod
def _resolve_type(cls, schema, instance, *args):
return schema.T(instance.__class__)
@classmethod
def internal_type(cls, schema):
if cls._meta.abstract:
raise Exception("Abstract ObjectTypes don't have a specific type.")
return GraphQLUnionType(
cls._meta.type_name,
types=list(map(schema.T, cls._meta.types)),
resolve_type=lambda instance, info: cls._resolve_type(schema, instance, info),
description=cls._meta.description,
)
<commit_msg>Update to use partial instead of lambda function<commit_after>
|
from functools import partial
import six
from graphql.core.type import GraphQLUnionType
from .base import FieldsClassType, FieldsClassTypeMeta, FieldsOptions
class UnionTypeOptions(FieldsOptions):
def __init__(self, *args, **kwargs):
super(UnionTypeOptions, self).__init__(*args, **kwargs)
self.types = []
class UnionTypeMeta(FieldsClassTypeMeta):
options_class = UnionTypeOptions
def get_options(cls, meta):
return cls.options_class(meta, types=[])
class UnionType(six.with_metaclass(UnionTypeMeta, FieldsClassType)):
class Meta:
abstract = True
@classmethod
def _resolve_type(cls, schema, instance, *args):
return schema.T(instance.__class__)
@classmethod
def internal_type(cls, schema):
if cls._meta.abstract:
raise Exception("Abstract ObjectTypes don't have a specific type.")
return GraphQLUnionType(
cls._meta.type_name,
types=list(map(schema.T, cls._meta.types)),
resolve_type=partial(cls._resolve_type, schema),
description=cls._meta.description,
)
|
import six
from graphql.core.type import GraphQLUnionType
from .base import FieldsClassType, FieldsClassTypeMeta, FieldsOptions
class UnionTypeOptions(FieldsOptions):
def __init__(self, *args, **kwargs):
super(UnionTypeOptions, self).__init__(*args, **kwargs)
self.types = []
class UnionTypeMeta(FieldsClassTypeMeta):
options_class = UnionTypeOptions
def get_options(cls, meta):
return cls.options_class(meta, types=[])
class UnionType(six.with_metaclass(UnionTypeMeta, FieldsClassType)):
class Meta:
abstract = True
@classmethod
def _resolve_type(cls, schema, instance, *args):
return schema.T(instance.__class__)
@classmethod
def internal_type(cls, schema):
if cls._meta.abstract:
raise Exception("Abstract ObjectTypes don't have a specific type.")
return GraphQLUnionType(
cls._meta.type_name,
types=list(map(schema.T, cls._meta.types)),
resolve_type=lambda instance, info: cls._resolve_type(schema, instance, info),
description=cls._meta.description,
)
Update to use partial instead of lambda functionfrom functools import partial
import six
from graphql.core.type import GraphQLUnionType
from .base import FieldsClassType, FieldsClassTypeMeta, FieldsOptions
class UnionTypeOptions(FieldsOptions):
def __init__(self, *args, **kwargs):
super(UnionTypeOptions, self).__init__(*args, **kwargs)
self.types = []
class UnionTypeMeta(FieldsClassTypeMeta):
options_class = UnionTypeOptions
def get_options(cls, meta):
return cls.options_class(meta, types=[])
class UnionType(six.with_metaclass(UnionTypeMeta, FieldsClassType)):
class Meta:
abstract = True
@classmethod
def _resolve_type(cls, schema, instance, *args):
return schema.T(instance.__class__)
@classmethod
def internal_type(cls, schema):
if cls._meta.abstract:
raise Exception("Abstract ObjectTypes don't have a specific type.")
return GraphQLUnionType(
cls._meta.type_name,
types=list(map(schema.T, cls._meta.types)),
resolve_type=partial(cls._resolve_type, schema),
description=cls._meta.description,
)
|
<commit_before>import six
from graphql.core.type import GraphQLUnionType
from .base import FieldsClassType, FieldsClassTypeMeta, FieldsOptions
class UnionTypeOptions(FieldsOptions):
def __init__(self, *args, **kwargs):
super(UnionTypeOptions, self).__init__(*args, **kwargs)
self.types = []
class UnionTypeMeta(FieldsClassTypeMeta):
options_class = UnionTypeOptions
def get_options(cls, meta):
return cls.options_class(meta, types=[])
class UnionType(six.with_metaclass(UnionTypeMeta, FieldsClassType)):
class Meta:
abstract = True
@classmethod
def _resolve_type(cls, schema, instance, *args):
return schema.T(instance.__class__)
@classmethod
def internal_type(cls, schema):
if cls._meta.abstract:
raise Exception("Abstract ObjectTypes don't have a specific type.")
return GraphQLUnionType(
cls._meta.type_name,
types=list(map(schema.T, cls._meta.types)),
resolve_type=lambda instance, info: cls._resolve_type(schema, instance, info),
description=cls._meta.description,
)
<commit_msg>Update to use partial instead of lambda function<commit_after>from functools import partial
import six
from graphql.core.type import GraphQLUnionType
from .base import FieldsClassType, FieldsClassTypeMeta, FieldsOptions
class UnionTypeOptions(FieldsOptions):
def __init__(self, *args, **kwargs):
super(UnionTypeOptions, self).__init__(*args, **kwargs)
self.types = []
class UnionTypeMeta(FieldsClassTypeMeta):
options_class = UnionTypeOptions
def get_options(cls, meta):
return cls.options_class(meta, types=[])
class UnionType(six.with_metaclass(UnionTypeMeta, FieldsClassType)):
class Meta:
abstract = True
@classmethod
def _resolve_type(cls, schema, instance, *args):
return schema.T(instance.__class__)
@classmethod
def internal_type(cls, schema):
if cls._meta.abstract:
raise Exception("Abstract ObjectTypes don't have a specific type.")
return GraphQLUnionType(
cls._meta.type_name,
types=list(map(schema.T, cls._meta.types)),
resolve_type=partial(cls._resolve_type, schema),
description=cls._meta.description,
)
|
068d44af407da3835bc96717700bd174480060ec
|
apps/maps/json_view.py
|
apps/maps/json_view.py
|
from django.core.serializers.json import DjangoJSONEncoder
from django.views.decorators.cache import cache_page
from django.views.generic import View
from django.http import JsonResponse, HttpResponse
from django.conf import settings
class DjangoJSONEncoder2(DjangoJSONEncoder):
"""A json encoder to deal with the python objects we may want to encode"""
def default(self, obj):
if isinstance(obj, timedelta):
ARGS = ('days', 'seconds', 'microseconds')
return {'__type__': 'datetime.timedelta',
'args': [getattr(obj, a) for a in ARGS]}
return DjangoJSONEncoder.default(self, obj)
class JsonView(View):
"""Quickly serve a python data structure as json"""
cache_timeout = 5 * 60 * 60 * 24
def get_cache_timeout(self):
return self.cache_timeout
def dispatch(self, *args, **kwargs):
def _dispatch(request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
return cache_page(self.get_cache_timeout())(_dispatch)(*args, **kwargs)
def render_to_response(self, context, **kwargs):
return JsonResponse(context, encoder=DjangoJSONEncoder2)
|
from datetime import timedelta
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.query import ValuesQuerySet
from django.views.decorators.cache import cache_page
from django.views.generic import View
from django.http import JsonResponse, HttpResponse
from django.conf import settings
class DjangoJSONEncoder2(DjangoJSONEncoder):
"""A json encoder to deal with the python objects we may want to encode"""
def default(self, obj):
if isinstance(obj, timedelta):
ARGS = ('days', 'seconds', 'microseconds')
return {'__type__': 'datetime.timedelta',
'args': [getattr(obj, a) for a in ARGS]}
if isinstance(obj, ValuesQuerySet):
return [item for item in obj]
return DjangoJSONEncoder.default(self, obj)
class JsonView(View):
"""Quickly serve a python data structure as json"""
cache_timeout = 5 * 60 * 60 * 24
def get_cache_timeout(self):
return self.cache_timeout
def dispatch(self, *args, **kwargs):
def _dispatch(request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
return cache_page(self.get_cache_timeout())(_dispatch)(*args, **kwargs)
def render_to_response(self, context, **kwargs):
return JsonResponse(context, encoder=DjangoJSONEncoder2)
|
Allow querysets to be jsonified
|
Allow querysets to be jsonified
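Not part of the original commit: a minimal, self-contained sketch of the pattern the new isinstance branch relies on, namely expanding a lazily-evaluated queryset into a plain list inside the encoder's default() hook. _QuerySetLike and SketchEncoder are hypothetical stand-ins for ValuesQuerySet and DjangoJSONEncoder2, so the example runs without Django.

import json

class _QuerySetLike:
    """Hypothetical stand-in for a lazy queryset, just for this sketch."""
    def __init__(self, rows):
        self._rows = rows
    def __iter__(self):
        return iter(self._rows)

class SketchEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, _QuerySetLike):
            return [item for item in obj]   # expand lazily-evaluated rows into a JSON-friendly list
        return json.JSONEncoder.default(self, obj)

print(json.dumps({'rows': _QuerySetLike([{'country': 'PE'}])}, cls=SketchEncoder))
# -> {"rows": [{"country": "PE"}]}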
|
Python
|
agpl-3.0
|
IQSS/gentb-site,IQSS/gentb-site,IQSS/gentb-site,IQSS/gentb-site,IQSS/gentb-site,IQSS/gentb-site,IQSS/gentb-site,IQSS/gentb-site
|
from django.core.serializers.json import DjangoJSONEncoder
from django.views.decorators.cache import cache_page
from django.views.generic import View
from django.http import JsonResponse, HttpResponse
from django.conf import settings
class DjangoJSONEncoder2(DjangoJSONEncoder):
"""A json encoder to deal with the python objects we may want to encode"""
def default(self, obj):
if isinstance(obj, timedelta):
ARGS = ('days', 'seconds', 'microseconds')
return {'__type__': 'datetime.timedelta',
'args': [getattr(obj, a) for a in ARGS]}
return DjangoJSONEncoder.default(self, obj)
class JsonView(View):
"""Quickly serve a python data structure as json"""
cache_timeout = 5 * 60 * 60 * 24
def get_cache_timeout(self):
return self.cache_timeout
def dispatch(self, *args, **kwargs):
def _dispatch(request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
return cache_page(self.get_cache_timeout())(_dispatch)(*args, **kwargs)
def render_to_response(self, context, **kwargs):
return JsonResponse(context, encoder=DjangoJSONEncoder2)
Allow querysets to be jsonified
|
from datetime import timedelta
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.query import ValuesQuerySet
from django.views.decorators.cache import cache_page
from django.views.generic import View
from django.http import JsonResponse, HttpResponse
from django.conf import settings
class DjangoJSONEncoder2(DjangoJSONEncoder):
"""A json encoder to deal with the python objects we may want to encode"""
def default(self, obj):
if isinstance(obj, timedelta):
ARGS = ('days', 'seconds', 'microseconds')
return {'__type__': 'datetime.timedelta',
'args': [getattr(obj, a) for a in ARGS]}
if isinstance(obj, ValuesQuerySet):
return [item for item in obj]
return DjangoJSONEncoder.default(self, obj)
class JsonView(View):
"""Quickly serve a python data structure as json"""
cache_timeout = 5 * 60 * 60 * 24
def get_cache_timeout(self):
return self.cache_timeout
def dispatch(self, *args, **kwargs):
def _dispatch(request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
return cache_page(self.get_cache_timeout())(_dispatch)(*args, **kwargs)
def render_to_response(self, context, **kwargs):
return JsonResponse(context, encoder=DjangoJSONEncoder2)
|
<commit_before>
from django.core.serializers.json import DjangoJSONEncoder
from django.views.decorators.cache import cache_page
from django.views.generic import View
from django.http import JsonResponse, HttpResponse
from django.conf import settings
class DjangoJSONEncoder2(DjangoJSONEncoder):
"""A json encoder to deal with the python objects we may want to encode"""
def default(self, obj):
if isinstance(obj, timedelta):
ARGS = ('days', 'seconds', 'microseconds')
return {'__type__': 'datetime.timedelta',
'args': [getattr(obj, a) for a in ARGS]}
return DjangoJSONEncoder.default(self, obj)
class JsonView(View):
"""Quickly serve a python data structure as json"""
cache_timeout = 5 * 60 * 60 * 24
def get_cache_timeout(self):
return self.cache_timeout
def dispatch(self, *args, **kwargs):
def _dispatch(request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
return cache_page(self.get_cache_timeout())(_dispatch)(*args, **kwargs)
def render_to_response(self, context, **kwargs):
return JsonResponse(context, encoder=DjangoJSONEncoder2)
<commit_msg>Allow querysets to be jsonified<commit_after>
|
from datetime import timedelta
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.query import ValuesQuerySet
from django.views.decorators.cache import cache_page
from django.views.generic import View
from django.http import JsonResponse, HttpResponse
from django.conf import settings
class DjangoJSONEncoder2(DjangoJSONEncoder):
"""A json encoder to deal with the python objects we may want to encode"""
def default(self, obj):
if isinstance(obj, timedelta):
ARGS = ('days', 'seconds', 'microseconds')
return {'__type__': 'datetime.timedelta',
'args': [getattr(obj, a) for a in ARGS]}
if isinstance(obj, ValuesQuerySet):
return [item for item in obj]
return DjangoJSONEncoder.default(self, obj)
class JsonView(View):
"""Quickly serve a python data structure as json"""
cache_timeout = 5 * 60 * 60 * 24
def get_cache_timeout(self):
return self.cache_timeout
def dispatch(self, *args, **kwargs):
def _dispatch(request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
return cache_page(self.get_cache_timeout())(_dispatch)(*args, **kwargs)
def render_to_response(self, context, **kwargs):
return JsonResponse(context, encoder=DjangoJSONEncoder2)
|
from django.core.serializers.json import DjangoJSONEncoder
from django.views.decorators.cache import cache_page
from django.views.generic import View
from django.http import JsonResponse, HttpResponse
from django.conf import settings
class DjangoJSONEncoder2(DjangoJSONEncoder):
"""A json encoder to deal with the python objects we may want to encode"""
def default(self, obj):
if isinstance(obj, timedelta):
ARGS = ('days', 'seconds', 'microseconds')
return {'__type__': 'datetime.timedelta',
'args': [getattr(obj, a) for a in ARGS]}
return DjangoJSONEncoder.default(self, obj)
class JsonView(View):
"""Quickly serve a python data structure as json"""
cache_timeout = 5 * 60 * 60 * 24
def get_cache_timeout(self):
return self.cache_timeout
def dispatch(self, *args, **kwargs):
def _dispatch(request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
return cache_page(self.get_cache_timeout())(_dispatch)(*args, **kwargs)
def render_to_response(self, context, **kwargs):
return JsonResponse(context, encoder=DjangoJSONEncoder2)
Allow querysets to be jsonified
from datetime import timedelta
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.query import ValuesQuerySet
from django.views.decorators.cache import cache_page
from django.views.generic import View
from django.http import JsonResponse, HttpResponse
from django.conf import settings
class DjangoJSONEncoder2(DjangoJSONEncoder):
"""A json encoder to deal with the python objects we may want to encode"""
def default(self, obj):
if isinstance(obj, timedelta):
ARGS = ('days', 'seconds', 'microseconds')
return {'__type__': 'datetime.timedelta',
'args': [getattr(obj, a) for a in ARGS]}
if isinstance(obj, ValuesQuerySet):
return [item for item in obj]
return DjangoJSONEncoder.default(self, obj)
class JsonView(View):
"""Quickly serve a python data structure as json"""
cache_timeout = 5 * 60 * 60 * 24
def get_cache_timeout(self):
return self.cache_timeout
def dispatch(self, *args, **kwargs):
def _dispatch(request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
return cache_page(self.get_cache_timeout())(_dispatch)(*args, **kwargs)
def render_to_response(self, context, **kwargs):
return JsonResponse(context, encoder=DjangoJSONEncoder2)
|
<commit_before>
from django.core.serializers.json import DjangoJSONEncoder
from django.views.decorators.cache import cache_page
from django.views.generic import View
from django.http import JsonResponse, HttpResponse
from django.conf import settings
class DjangoJSONEncoder2(DjangoJSONEncoder):
"""A json encoder to deal with the python objects we may want to encode"""
def default(self, obj):
if isinstance(obj, timedelta):
ARGS = ('days', 'seconds', 'microseconds')
return {'__type__': 'datetime.timedelta',
'args': [getattr(obj, a) for a in ARGS]}
return DjangoJSONEncoder.default(self, obj)
class JsonView(View):
"""Quickly serve a python data structure as json"""
cache_timeout = 5 * 60 * 60 * 24
def get_cache_timeout(self):
return self.cache_timeout
def dispatch(self, *args, **kwargs):
def _dispatch(request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
return cache_page(self.get_cache_timeout())(_dispatch)(*args, **kwargs)
def render_to_response(self, context, **kwargs):
return JsonResponse(context, encoder=DjangoJSONEncoder2)
<commit_msg>Allow querysets to be jsonified<commit_after>
from datetime import timedelta
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.query import ValuesQuerySet
from django.views.decorators.cache import cache_page
from django.views.generic import View
from django.http import JsonResponse, HttpResponse
from django.conf import settings
class DjangoJSONEncoder2(DjangoJSONEncoder):
"""A json encoder to deal with the python objects we may want to encode"""
def default(self, obj):
if isinstance(obj, timedelta):
ARGS = ('days', 'seconds', 'microseconds')
return {'__type__': 'datetime.timedelta',
'args': [getattr(obj, a) for a in ARGS]}
if isinstance(obj, ValuesQuerySet):
return [item for item in obj]
return DjangoJSONEncoder.default(self, obj)
class JsonView(View):
"""Quickly serve a python data structure as json"""
cache_timeout = 5 * 60 * 60 * 24
def get_cache_timeout(self):
return self.cache_timeout
def dispatch(self, *args, **kwargs):
def _dispatch(request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
return cache_page(self.get_cache_timeout())(_dispatch)(*args, **kwargs)
def render_to_response(self, context, **kwargs):
return JsonResponse(context, encoder=DjangoJSONEncoder2)
|
134fb48961a03bc17b34154b54875b543f1f27b8
|
legcoscraper/scripts/report-summary.py
|
legcoscraper/scripts/report-summary.py
|
#!/usr/bin/env python
#
# Give a quick summary of data which has been retrieved
#
import argparse
import json
from collections import Counter
from pprint import pprint
parser = argparse.ArgumentParser()
parser.add_argument("json_file", type=str, help="JSON data file from scraper")
args = parser.parse_args()
type_count = Counter()
infile = open(args.json_file, 'r')
for line in infile.readlines():
try:
data_obj = json.loads(line)
except:
pass
type_count[data_obj['type']] += 1
pprint(dict(type_count))
|
#!/usr/bin/env python
#
# Give a quick summary of data which has been retrieved
#
import argparse
import json
from collections import Counter
from pprint import pprint
import re
parser = argparse.ArgumentParser()
parser.add_argument("json_file", type=str, help="JSON data file from scraper")
args = parser.parse_args()
type_count = Counter()
year_count = Counter()
year_regex = re.compile('(?P<year>\d\d\d\d)')
infile = open(args.json_file, 'r')
for line in infile.readlines():
try:
data_obj = json.loads(line)
except:
pass
type_count[data_obj['type']] += 1
if data_obj['type'] == 'HansardRecord':
if data_obj.has_key('date'):
print data_obj['date'].encode('utf-8')
match = year_regex.search(data_obj['date'])
year = int(match.groupdict()['year'])
year_count[year] += 1
else:
print "NO DATE"
pprint(data_obj)
pprint(dict(type_count))
pprint(dict(year_count))
|
Print count summaries for Hansard by year
|
Print count summaries for Hansard by year
|
Python
|
mit
|
comsaint/legco-watch,legco-watch/legco-watch,comsaint/legco-watch,legco-watch/legco-watch,legco-watch/legco-watch,comsaint/legco-watch,comsaint/legco-watch,legco-watch/legco-watch
|
#!/usr/bin/env python
#
# Give a quick summary of data which has been retrieved
#
import argparse
import json
from collections import Counter
from pprint import pprint
parser = argparse.ArgumentParser()
parser.add_argument("json_file", type=str, help="JSON data file from scraper")
args = parser.parse_args()
type_count = Counter()
infile = open(args.json_file, 'r')
for line in infile.readlines():
try:
data_obj = json.loads(line)
except:
pass
type_count[data_obj['type']] += 1
pprint(dict(type_count))
Print count summaries for Hansard by year
|
#!/usr/bin/env python
#
# Give a quick summary of data which has been retrieved
#
import argparse
import json
from collections import Counter
from pprint import pprint
import re
parser = argparse.ArgumentParser()
parser.add_argument("json_file", type=str, help="JSON data file from scraper")
args = parser.parse_args()
type_count = Counter()
year_count = Counter()
year_regex = re.compile('(?P<year>\d\d\d\d)')
infile = open(args.json_file, 'r')
for line in infile.readlines():
try:
data_obj = json.loads(line)
except:
pass
type_count[data_obj['type']] += 1
if data_obj['type'] == 'HansardRecord':
if data_obj.has_key('date'):
print data_obj['date'].encode('utf-8')
match = year_regex.search(data_obj['date'])
year = int(match.groupdict()['year'])
year_count[year] += 1
else:
print "NO DATE"
pprint(data_obj)
pprint(dict(type_count))
pprint(dict(year_count))
|
<commit_before>#!/usr/bin/env python
#
# Give a quick summary of data which has been retrieved
#
import argparse
import json
from collections import Counter
from pprint import pprint
parser = argparse.ArgumentParser()
parser.add_argument("json_file", type=str, help="JSON data file from scraper")
args = parser.parse_args()
type_count = Counter()
infile = open(args.json_file, 'r')
for line in infile.readlines():
try:
data_obj = json.loads(line)
except:
pass
type_count[data_obj['type']] += 1
pprint(dict(type_count))
<commit_msg>Print count summaries for Hansard by year<commit_after>
|
#!/usr/bin/env python
#
# Give a quick summary of data which has been retrieved
#
import argparse
import json
from collections import Counter
from pprint import pprint
import re
parser = argparse.ArgumentParser()
parser.add_argument("json_file", type=str, help="JSON data file from scraper")
args = parser.parse_args()
type_count = Counter()
year_count = Counter()
year_regex = re.compile('(?P<year>\d\d\d\d)')
infile = open(args.json_file, 'r')
for line in infile.readlines():
try:
data_obj = json.loads(line)
except:
pass
type_count[data_obj['type']] += 1
if data_obj['type'] == 'HansardRecord':
if data_obj.has_key('date'):
print data_obj['date'].encode('utf-8')
match = year_regex.search(data_obj['date'])
year = int(match.groupdict()['year'])
year_count[year] += 1
else:
print "NO DATE"
pprint(data_obj)
pprint(dict(type_count))
pprint(dict(year_count))
|
#!/usr/bin/env python
#
# Give a quick summary of data which has been retrieved
#
import argparse
import json
from collections import Counter
from pprint import pprint
parser = argparse.ArgumentParser()
parser.add_argument("json_file", type=str, help="JSON data file from scraper")
args = parser.parse_args()
type_count = Counter()
infile = open(args.json_file, 'r')
for line in infile.readlines():
try:
data_obj = json.loads(line)
except:
pass
type_count[data_obj['type']] += 1
pprint(dict(type_count))
Print count summaries for Hansard by year#!/usr/bin/env python
#
# Give a quick summary of data which has been retrieved
#
import argparse
import json
from collections import Counter
from pprint import pprint
import re
parser = argparse.ArgumentParser()
parser.add_argument("json_file", type=str, help="JSON data file from scraper")
args = parser.parse_args()
type_count = Counter()
year_count = Counter()
year_regex = re.compile('(?P<year>\d\d\d\d)')
infile = open(args.json_file, 'r')
for line in infile.readlines():
try:
data_obj = json.loads(line)
except:
pass
type_count[data_obj['type']] += 1
if data_obj['type'] == 'HansardRecord':
if data_obj.has_key('date'):
print data_obj['date'].encode('utf-8')
match = year_regex.search(data_obj['date'])
year = int(match.groupdict()['year'])
year_count[year] += 1
else:
print "NO DATE"
pprint(data_obj)
pprint(dict(type_count))
pprint(dict(year_count))
|
<commit_before>#!/usr/bin/env python
#
# Give a quick summary of data which has been retrieved
#
import argparse
import json
from collections import Counter
from pprint import pprint
parser = argparse.ArgumentParser()
parser.add_argument("json_file", type=str, help="JSON data file from scraper")
args = parser.parse_args()
type_count = Counter()
infile = open(args.json_file, 'r')
for line in infile.readlines():
try:
data_obj = json.loads(line)
except:
pass
type_count[data_obj['type']] += 1
pprint(dict(type_count))
<commit_msg>Print count summaries for Hansard by year<commit_after>#!/usr/bin/env python
#
# Give a quick summary of data which has been retrieved
#
import argparse
import json
from collections import Counter
from pprint import pprint
import re
parser = argparse.ArgumentParser()
parser.add_argument("json_file", type=str, help="JSON data file from scraper")
args = parser.parse_args()
type_count = Counter()
year_count = Counter()
year_regex = re.compile('(?P<year>\d\d\d\d)')
infile = open(args.json_file, 'r')
for line in infile.readlines():
try:
data_obj = json.loads(line)
except:
pass
type_count[data_obj['type']] += 1
if data_obj['type'] == 'HansardRecord':
if data_obj.has_key('date'):
print data_obj['date'].encode('utf-8')
match = year_regex.search(data_obj['date'])
year = int(match.groupdict()['year'])
year_count[year] += 1
else:
print "NO DATE"
pprint(data_obj)
pprint(dict(type_count))
pprint(dict(year_count))
|
b6c44fb951950c72e09004b4478e497e8dcfa2b0
|
mysite/urls.py
|
mysite/urls.py
|
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^search/$', 'mysite.search.views.index'),
(r'^search/query/(?P<query>\w+)/$', 'mysite.search.views.query'),
(r'^search/query_json/(?P<query>\w+)/$', 'mysite.search.views.query_json'),
(r'^admin/(.*)', admin.site.root),
)
|
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^search/$', 'mysite.search.views.index'),
# (r'^search/query/(?P<query>\w+)/$', 'mysite.search.views.query'),
# (r'^search/query_json/(?P<query>\w+)/$', 'mysite.search.views.query_json'),
(r'^admin/(.*)', admin.site.root),
)
|
Disable broken ("for now") views
|
Disable broken ("for now") views
|
Python
|
agpl-3.0
|
campbe13/openhatch,eeshangarg/oh-mainline,waseem18/oh-mainline,campbe13/openhatch,ojengwa/oh-mainline,heeraj123/oh-mainline,waseem18/oh-mainline,willingc/oh-mainline,eeshangarg/oh-mainline,SnappleCap/oh-mainline,ojengwa/oh-mainline,onceuponatimeforever/oh-mainline,mzdaniel/oh-mainline,SnappleCap/oh-mainline,jledbetter/openhatch,nirmeshk/oh-mainline,nirmeshk/oh-mainline,ojengwa/oh-mainline,ehashman/oh-mainline,SnappleCap/oh-mainline,eeshangarg/oh-mainline,openhatch/oh-mainline,jledbetter/openhatch,willingc/oh-mainline,Changaco/oh-mainline,sudheesh001/oh-mainline,jledbetter/openhatch,campbe13/openhatch,jledbetter/openhatch,vipul-sharma20/oh-mainline,eeshangarg/oh-mainline,openhatch/oh-mainline,Changaco/oh-mainline,eeshangarg/oh-mainline,vipul-sharma20/oh-mainline,mzdaniel/oh-mainline,heeraj123/oh-mainline,sudheesh001/oh-mainline,onceuponatimeforever/oh-mainline,moijes12/oh-mainline,campbe13/openhatch,Changaco/oh-mainline,mzdaniel/oh-mainline,ojengwa/oh-mainline,onceuponatimeforever/oh-mainline,onceuponatimeforever/oh-mainline,heeraj123/oh-mainline,campbe13/openhatch,moijes12/oh-mainline,nirmeshk/oh-mainline,openhatch/oh-mainline,nirmeshk/oh-mainline,vipul-sharma20/oh-mainline,waseem18/oh-mainline,ehashman/oh-mainline,ehashman/oh-mainline,moijes12/oh-mainline,willingc/oh-mainline,Changaco/oh-mainline,nirmeshk/oh-mainline,mzdaniel/oh-mainline,heeraj123/oh-mainline,ojengwa/oh-mainline,willingc/oh-mainline,SnappleCap/oh-mainline,jledbetter/openhatch,moijes12/oh-mainline,sudheesh001/oh-mainline,ehashman/oh-mainline,vipul-sharma20/oh-mainline,openhatch/oh-mainline,vipul-sharma20/oh-mainline,openhatch/oh-mainline,sudheesh001/oh-mainline,mzdaniel/oh-mainline,moijes12/oh-mainline,mzdaniel/oh-mainline,SnappleCap/oh-mainline,ehashman/oh-mainline,willingc/oh-mainline,waseem18/oh-mainline,mzdaniel/oh-mainline,sudheesh001/oh-mainline,Changaco/oh-mainline,waseem18/oh-mainline,onceuponatimeforever/oh-mainline,heeraj123/oh-mainline
|
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^search/$', 'mysite.search.views.index'),
(r'^search/query/(?P<query>\w+)/$', 'mysite.search.views.query'),
(r'^search/query_json/(?P<query>\w+)/$', 'mysite.search.views.query_json'),
(r'^admin/(.*)', admin.site.root),
)
Disable broken ("for now") views
|
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^search/$', 'mysite.search.views.index'),
# (r'^search/query/(?P<query>\w+)/$', 'mysite.search.views.query'),
# (r'^search/query_json/(?P<query>\w+)/$', 'mysite.search.views.query_json'),
(r'^admin/(.*)', admin.site.root),
)
|
<commit_before>from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^search/$', 'mysite.search.views.index'),
(r'^search/query/(?P<query>\w+)/$', 'mysite.search.views.query'),
(r'^search/query_json/(?P<query>\w+)/$', 'mysite.search.views.query_json'),
(r'^admin/(.*)', admin.site.root),
)
<commit_msg>Disable broken ("for now") views<commit_after>
|
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^search/$', 'mysite.search.views.index'),
# (r'^search/query/(?P<query>\w+)/$', 'mysite.search.views.query'),
# (r'^search/query_json/(?P<query>\w+)/$', 'mysite.search.views.query_json'),
(r'^admin/(.*)', admin.site.root),
)
|
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^search/$', 'mysite.search.views.index'),
(r'^search/query/(?P<query>\w+)/$', 'mysite.search.views.query'),
(r'^search/query_json/(?P<query>\w+)/$', 'mysite.search.views.query_json'),
(r'^admin/(.*)', admin.site.root),
)
Disable broken ("for now") viewsfrom django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^search/$', 'mysite.search.views.index'),
# (r'^search/query/(?P<query>\w+)/$', 'mysite.search.views.query'),
# (r'^search/query_json/(?P<query>\w+)/$', 'mysite.search.views.query_json'),
(r'^admin/(.*)', admin.site.root),
)
|
<commit_before>from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^search/$', 'mysite.search.views.index'),
(r'^search/query/(?P<query>\w+)/$', 'mysite.search.views.query'),
(r'^search/query_json/(?P<query>\w+)/$', 'mysite.search.views.query_json'),
(r'^admin/(.*)', admin.site.root),
)
<commit_msg>Disable broken ("for now") views<commit_after>from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^search/$', 'mysite.search.views.index'),
# (r'^search/query/(?P<query>\w+)/$', 'mysite.search.views.query'),
# (r'^search/query_json/(?P<query>\w+)/$', 'mysite.search.views.query_json'),
(r'^admin/(.*)', admin.site.root),
)
|
87cbd9f4a69cb4895dc7baec84e9c1044d4b6ad4
|
bridge/mavlink-zmq-bridge.py
|
bridge/mavlink-zmq-bridge.py
|
import zmq
from argparse import ArgumentParser
from pymavlink import mavutil
def main():
parser = ArgumentParser()
parser.add_argument("--device", help="MAVLink device to add to zmq", required=True)
parser.add_argument("--zmq", help="zmq url", required=True)
args = parser.parse_args()
try:
msrc = mavutil.mavlink_connection(args.device, planner_format=False,
notimestamps=True, robust_parsing=True)
except Exception, e:
print 'Could not connect to mavlink device at %s' % args.device
print e
return False
context = zmq.Context()
zmq_socket = context.socket(zmq.PUB)
try:
zmq_socket.connect(args.zmq)
except Exception, e:
print 'Failed to establish connection with zmq gateway'
print e
#send messages from mavlink connection to zmq gateway
try:
while True:
mav_msg = msrc.recv_match()
if mav_msg is not None:
topic = mav_msg.get_type()
print topic
zmq_socket.send(topic,zmq.SNDMORE)
zmq_socket.send_pyobj(mav_msg)
except Exception, e:
print 'Bridge failed'
print e
zmq_socket.close()
context.term()
if __name__ == "__main__":
main()
|
import zmq
from argparse import ArgumentParser
from pymavlink import mavutil
def main():
parser = ArgumentParser()
parser.add_argument("--device", help="MAVLink device to add to zmq", required=True)
parser.add_argument("--zmq", help="zmq url", required=True)
args = parser.parse_args()
try:
msrc = mavutil.mavlink_connection(args.device, planner_format=False,
notimestamps=True, robust_parsing=True)
except Exception, e:
print 'Could not connect to mavlink device at %s' % args.device
print e
return False
context = zmq.Context()
zmq_socket = context.socket(zmq.PUB)
try:
zmq_socket.connect(args.zmq)
except Exception, e:
print 'Failed to establish connection with zmq gateway'
print e
#send messages from mavlink connection to zmq gateway
try:
while True:
mav_msg = msrc.recv_match()
if mav_msg is not None:
topic = mav_msg.get_type()
zmq_socket.send(topic,zmq.SNDMORE)
zmq_socket.send_pyobj(mav_msg)
except Exception, e:
print 'Bridge failed'
print e
zmq_socket.close()
context.term()
if __name__ == "__main__":
main()
|
Remove printing of topics in bridge
|
Remove printing of topics in bridge
|
Python
|
bsd-2-clause
|
btashton/mavlink-zmq,btashton/mavlink-zmq
|
import zmq
from argparse import ArgumentParser
from pymavlink import mavutil
def main():
parser = ArgumentParser()
parser.add_argument("--device", help="MAVLink device to add to zmq", required=True)
parser.add_argument("--zmq", help="zmq url", required=True)
args = parser.parse_args()
try:
msrc = mavutil.mavlink_connection(args.device, planner_format=False,
notimestamps=True, robust_parsing=True)
except Exception, e:
print 'Could not connect to mavlink device at %s' % args.device
print e
return False
context = zmq.Context()
zmq_socket = context.socket(zmq.PUB)
try:
zmq_socket.connect(args.zmq)
except Exception, e:
print 'Failed to establish connection with zmq gateway'
print e
#send messages from mavlink connection to zmq gateway
try:
while True:
mav_msg = msrc.recv_match()
if mav_msg is not None:
topic = mav_msg.get_type()
print topic
zmq_socket.send(topic,zmq.SNDMORE)
zmq_socket.send_pyobj(mav_msg)
except Exception, e:
print 'Bridge failed'
print e
zmq_socket.close()
context.term()
if __name__ == "__main__":
main()
Remove printing of topics in bridge
|
import zmq
from argparse import ArgumentParser
from pymavlink import mavutil
def main():
parser = ArgumentParser()
parser.add_argument("--device", help="MAVLink device to add to zmq", required=True)
parser.add_argument("--zmq", help="zmq url", required=True)
args = parser.parse_args()
try:
msrc = mavutil.mavlink_connection(args.device, planner_format=False,
notimestamps=True, robust_parsing=True)
except Exception, e:
print 'Could not connect to mavlink device at %s' % args.device
print e
return False
context = zmq.Context()
zmq_socket = context.socket(zmq.PUB)
try:
zmq_socket.connect(args.zmq)
except Exception, e:
print 'Failed to establish connection with zmq gateway'
print e
#send messages from mavlink connection to zmq gateway
try:
while True:
mav_msg = msrc.recv_match()
if mav_msg is not None:
topic = mav_msg.get_type()
zmq_socket.send(topic,zmq.SNDMORE)
zmq_socket.send_pyobj(mav_msg)
except Exception, e:
print 'Bridge failed'
print e
zmq_socket.close()
context.term()
if __name__ == "__main__":
main()
|
<commit_before>import zmq
from argparse import ArgumentParser
from pymavlink import mavutil
def main():
parser = ArgumentParser()
parser.add_argument("--device", help="MAVLink device to add to zmq", required=True)
parser.add_argument("--zmq", help="zmq url", required=True)
args = parser.parse_args()
try:
msrc = mavutil.mavlink_connection(args.device, planner_format=False,
notimestamps=True, robust_parsing=True)
except Exception, e:
print 'Could not connect to mavlink device at %s' % args.device
print e
return False
context = zmq.Context()
zmq_socket = context.socket(zmq.PUB)
try:
zmq_socket.connect(args.zmq)
except Exception, e:
print 'Failed to establish connection with zmq gateway'
print e
#send messages from mavlink connection to zmq gateway
try:
while True:
mav_msg = msrc.recv_match()
if mav_msg is not None:
topic = mav_msg.get_type()
print topic
zmq_socket.send(topic,zmq.SNDMORE)
zmq_socket.send_pyobj(mav_msg)
except Exception, e:
print 'Bridge failed'
print e
zmq_socket.close()
context.term()
if __name__ == "__main__":
main()
<commit_msg>Remove printing of topics in bridge<commit_after>
|
import zmq
from argparse import ArgumentParser
from pymavlink import mavutil
def main():
parser = ArgumentParser()
parser.add_argument("--device", help="MAVLink device to add to zmq", required=True)
parser.add_argument("--zmq", help="zmq url", required=True)
args = parser.parse_args()
try:
msrc = mavutil.mavlink_connection(args.device, planner_format=False,
notimestamps=True, robust_parsing=True)
except Exception, e:
print 'Could not connect to mavlink device at %s' % args.device
print e
return False
context = zmq.Context()
zmq_socket = context.socket(zmq.PUB)
try:
zmq_socket.connect(args.zmq)
except Exception, e:
print 'Failed to establish connection with zmq gateway'
print e
#send messages from mavlink connection to zmq gateway
try:
while True:
mav_msg = msrc.recv_match()
if mav_msg is not None:
topic = mav_msg.get_type()
zmq_socket.send(topic,zmq.SNDMORE)
zmq_socket.send_pyobj(mav_msg)
except Exception, e:
print 'Bridge failed'
print e
zmq_socket.close()
context.term()
if __name__ == "__main__":
main()
|
import zmq
from argparse import ArgumentParser
from pymavlink import mavutil
def main():
parser = ArgumentParser()
parser.add_argument("--device", help="MAVLink device to add to zmq", required=True)
parser.add_argument("--zmq", help="zmq url", required=True)
args = parser.parse_args()
try:
msrc = mavutil.mavlink_connection(args.device, planner_format=False,
notimestamps=True, robust_parsing=True)
except Exception, e:
print 'Could not connect to mavlink device at %s' % args.device
print e
return False
context = zmq.Context()
zmq_socket = context.socket(zmq.PUB)
try:
zmq_socket.connect(args.zmq)
except Exception, e:
print 'Failed to establish connection with zmq gateway'
print e
#send messages from mavlink connection to zmq gateway
try:
while True:
mav_msg = msrc.recv_match()
if mav_msg is not None:
topic = mav_msg.get_type()
print topic
zmq_socket.send(topic,zmq.SNDMORE)
zmq_socket.send_pyobj(mav_msg)
except Exception, e:
print 'Bridge failed'
print e
zmq_socket.close()
context.term()
if __name__ == "__main__":
main()
Remove printing of topics in bridgeimport zmq
from argparse import ArgumentParser
from pymavlink import mavutil
def main():
parser = ArgumentParser()
parser.add_argument("--device", help="MAVLink device to add to zmq", required=True)
parser.add_argument("--zmq", help="zmq url", required=True)
args = parser.parse_args()
try:
msrc = mavutil.mavlink_connection(args.device, planner_format=False,
notimestamps=True, robust_parsing=True)
except Exception, e:
print 'Could not connect to mavlink device at %s' % args.device
print e
return False
context = zmq.Context()
zmq_socket = context.socket(zmq.PUB)
try:
zmq_socket.connect(args.zmq)
except Exception, e:
print 'Failed to establish connection with zmq gateway'
print e
#send messages from mavlink connection to zmq gateway
try:
while True:
mav_msg = msrc.recv_match()
if mav_msg is not None:
topic = mav_msg.get_type()
zmq_socket.send(topic,zmq.SNDMORE)
zmq_socket.send_pyobj(mav_msg)
except Exception, e:
print 'Bridge failed'
print e
zmq_socket.close()
context.term()
if __name__ == "__main__":
main()
|
<commit_before>import zmq
from argparse import ArgumentParser
from pymavlink import mavutil
def main():
parser = ArgumentParser()
parser.add_argument("--device", help="MAVLink device to add to zmq", required=True)
parser.add_argument("--zmq", help="zmq url", required=True)
args = parser.parse_args()
try:
msrc = mavutil.mavlink_connection(args.device, planner_format=False,
notimestamps=True, robust_parsing=True)
except Exception, e:
print 'Could not connect to mavlink device at %s' % args.device
print e
return False
context = zmq.Context()
zmq_socket = context.socket(zmq.PUB)
try:
zmq_socket.connect(args.zmq)
except Exception, e:
print 'Failed to establish connection with zmq gateway'
print e
#send messages from mavlink connection to zmq gateway
try:
while True:
mav_msg = msrc.recv_match()
if mav_msg is not None:
topic = mav_msg.get_type()
print topic
zmq_socket.send(topic,zmq.SNDMORE)
zmq_socket.send_pyobj(mav_msg)
except Exception, e:
print 'Bridge failed'
print e
zmq_socket.close()
context.term()
if __name__ == "__main__":
main()
<commit_msg>Remove printing of topics in bridge<commit_after>import zmq
from argparse import ArgumentParser
from pymavlink import mavutil
def main():
parser = ArgumentParser()
parser.add_argument("--device", help="MAVLink device to add to zmq", required=True)
parser.add_argument("--zmq", help="zmq url", required=True)
args = parser.parse_args()
try:
msrc = mavutil.mavlink_connection(args.device, planner_format=False,
notimestamps=True, robust_parsing=True)
except Exception, e:
print 'Could not connect to mavlink device at %s' % args.device
print e
return False
context = zmq.Context()
zmq_socket = context.socket(zmq.PUB)
try:
zmq_socket.connect(args.zmq)
except Exception, e:
print 'Failed to establish connection with zmq gateway'
print e
#send messages from mavlink connection to zmq gateway
try:
while True:
mav_msg = msrc.recv_match()
if mav_msg is not None:
topic = mav_msg.get_type()
zmq_socket.send(topic,zmq.SNDMORE)
zmq_socket.send_pyobj(mav_msg)
except Exception, e:
print 'Bridge failed'
print e
zmq_socket.close()
context.term()
if __name__ == "__main__":
main()
|
cb099abc5a59d3824e767e5dd094cfea6f066a0a
|
libqtile/command.py
|
libqtile/command.py
|
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
The deprecated lazy command objects
"""
import warnings
from libqtile.command_client import InteractiveCommandClient
from libqtile.lazy import LazyCommandObject
class _LazyTree(InteractiveCommandClient):
def __getattr__(self, name: str) -> InteractiveCommandClient:
"""Get the child element of the currently selected object"""
warnings.warn("libqtile.command.lazy is deprecated, use libqtile.lazy.lazy", DeprecationWarning)
return super().__getattr__(name)
lazy = _LazyTree(LazyCommandObject())
|
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
The deprecated lazy command objects
"""
import warnings
from libqtile.command_client import InteractiveCommandClient
from libqtile.lazy import LazyCommandInterface
class _LazyTree(InteractiveCommandClient):
def __getattr__(self, name: str) -> InteractiveCommandClient:
"""Get the child element of the currently selected object"""
warnings.warn("libqtile.command.lazy is deprecated, use libqtile.lazy.lazy", DeprecationWarning)
return super().__getattr__(name)
lazy = _LazyTree(LazyCommandInterface())
|
Fix up deprecated lazy import
|
Fix up deprecated lazy import
|
Python
|
mit
|
ramnes/qtile,qtile/qtile,ramnes/qtile,tych0/qtile,qtile/qtile,tych0/qtile
|
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
The deprecated lazy command objects
"""
import warnings
from libqtile.command_client import InteractiveCommandClient
from libqtile.lazy import LazyCommandObject
class _LazyTree(InteractiveCommandClient):
def __getattr__(self, name: str) -> InteractiveCommandClient:
"""Get the child element of the currently selected object"""
warnings.warn("libqtile.command.lazy is deprecated, use libqtile.lazy.lazy", DeprecationWarning)
return super().__getattr__(name)
lazy = _LazyTree(LazyCommandObject())
Fix up deprecated lazy import
|
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
The deprecated lazy command objects
"""
import warnings
from libqtile.command_client import InteractiveCommandClient
from libqtile.lazy import LazyCommandInterface
class _LazyTree(InteractiveCommandClient):
def __getattr__(self, name: str) -> InteractiveCommandClient:
"""Get the child element of the currently selected object"""
warnings.warn("libqtile.command.lazy is deprecated, use libqtile.lazy.lazy", DeprecationWarning)
return super().__getattr__(name)
lazy = _LazyTree(LazyCommandInterface())
|
<commit_before># Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
The deprecated lazy command objects
"""
import warnings
from libqtile.command_client import InteractiveCommandClient
from libqtile.lazy import LazyCommandObject
class _LazyTree(InteractiveCommandClient):
def __getattr__(self, name: str) -> InteractiveCommandClient:
"""Get the child element of the currently selected object"""
warnings.warn("libqtile.command.lazy is deprecated, use libqtile.lazy.lazy", DeprecationWarning)
return super().__getattr__(name)
lazy = _LazyTree(LazyCommandObject())
<commit_msg>Fix up deprecated lazy import<commit_after>
|
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
The deprecated lazy command objects
"""
import warnings
from libqtile.command_client import InteractiveCommandClient
from libqtile.lazy import LazyCommandInterface
class _LazyTree(InteractiveCommandClient):
def __getattr__(self, name: str) -> InteractiveCommandClient:
"""Get the child element of the currently selected object"""
warnings.warn("libqtile.command.lazy is deprecated, use libqtile.lazy.lazy", DeprecationWarning)
return super().__getattr__(name)
lazy = _LazyTree(LazyCommandInterface())
|
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
The deprecated lazy command objects
"""
import warnings
from libqtile.command_client import InteractiveCommandClient
from libqtile.lazy import LazyCommandObject
class _LazyTree(InteractiveCommandClient):
def __getattr__(self, name: str) -> InteractiveCommandClient:
"""Get the child element of the currently selected object"""
warnings.warn("libqtile.command.lazy is deprecated, use libqtile.lazy.lazy", DeprecationWarning)
return super().__getattr__(name)
lazy = _LazyTree(LazyCommandObject())
Fix up deprecated lazy import# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
The deprecated lazy command objects
"""
import warnings
from libqtile.command_client import InteractiveCommandClient
from libqtile.lazy import LazyCommandInterface
class _LazyTree(InteractiveCommandClient):
def __getattr__(self, name: str) -> InteractiveCommandClient:
"""Get the child element of the currently selected object"""
warnings.warn("libqtile.command.lazy is deprecated, use libqtile.lazy.lazy", DeprecationWarning)
return super().__getattr__(name)
lazy = _LazyTree(LazyCommandInterface())
|
<commit_before># Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
The deprecated lazy command objects
"""
import warnings
from libqtile.command_client import InteractiveCommandClient
from libqtile.lazy import LazyCommandObject
class _LazyTree(InteractiveCommandClient):
def __getattr__(self, name: str) -> InteractiveCommandClient:
"""Get the child element of the currently selected object"""
warnings.warn("libqtile.command.lazy is deprecated, use libqtile.lazy.lazy", DeprecationWarning)
return super().__getattr__(name)
lazy = _LazyTree(LazyCommandObject())
<commit_msg>Fix up deprecated lazy import<commit_after># Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
The deprecated lazy command objects
"""
import warnings
from libqtile.command_client import InteractiveCommandClient
from libqtile.lazy import LazyCommandInterface
class _LazyTree(InteractiveCommandClient):
def __getattr__(self, name: str) -> InteractiveCommandClient:
"""Get the child element of the currently selected object"""
warnings.warn("libqtile.command.lazy is deprecated, use libqtile.lazy.lazy", DeprecationWarning)
return super().__getattr__(name)
lazy = _LazyTree(LazyCommandInterface())
|