| column | type | value statistics |
|---|---|---|
| commit | string | lengths 40 to 40 |
| old_file | string | lengths 4 to 118 |
| new_file | string | lengths 4 to 118 |
| old_contents | string | lengths 0 to 2.94k |
| new_contents | string | lengths 1 to 4.43k |
| subject | string | lengths 15 to 444 |
| message | string | lengths 16 to 3.45k |
| lang | string | 1 class |
| license | string | 13 classes |
| repos | string | lengths 5 to 43.2k |
| prompt | string | lengths 17 to 4.58k |
| response | string | lengths 1 to 4.43k |
| prompt_tagged | string | lengths 58 to 4.62k |
| response_tagged | string | lengths 1 to 4.43k |
| text | string | lengths 132 to 7.29k |
| text_tagged | string | lengths 173 to 7.33k |
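As a quick orientation to the schema above, the sketch below shows how a dataset with these columns could be loaded and one row inspected with the Hugging Face `datasets` library. The repository id in the example is a hypothetical placeholder, not the actual dataset name.

```python
# Minimal sketch, assuming the data is published as a Hugging Face dataset.
# The repository id below is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("your-org/python-commit-pairs", split="train")

row = ds[0]
print(row["commit"])              # 40-character commit SHA
print(row["new_file"])            # path of the changed file
print(row["subject"])             # one-line commit subject
print(row["message"])             # full commit message
print(row["new_contents"][:200])  # start of the post-commit file contents
```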
0c7a2dfa1890f6332021dd77c6dc119f9fdf6f31
|
nova/tests/test_sqlalchemy.py
|
nova/tests/test_sqlalchemy.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for SQLAlchemy specific code."""
from eventlet import db_pool
try:
import MySQLdb
except ImportError:
MySQLdb = None
from nova import context
from nova.db.sqlalchemy import session
from nova import test
class DbPoolTestCase(test.TestCase):
def setUp(self):
super(DbPoolTestCase, self).setUp()
self.flags(sql_dbpool_enable=True)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
if not MySQLdb:
self.skipTest("Unable to test due to lack of MySQLdb")
def test_db_pool_option(self):
self.flags(sql_idle_timeout=11, sql_min_pool_size=21,
sql_max_pool_size=42)
info = {}
class FakeConnectionPool(db_pool.ConnectionPool):
def __init__(self, mod_name, **kwargs):
info['module'] = mod_name
info['kwargs'] = kwargs
super(FakeConnectionPool, self).__init__(mod_name,
**kwargs)
def connect(self, *args, **kwargs):
raise test.TestingException()
self.stubs.Set(db_pool, 'ConnectionPool',
FakeConnectionPool)
sql_connection = 'mysql://user:pass@127.0.0.1/nova'
self.assertRaises(test.TestingException, session.create_engine,
sql_connection)
self.assertEqual(info['module'], MySQLdb)
self.assertEqual(info['kwargs']['max_idle'], 11)
self.assertEqual(info['kwargs']['min_size'], 21)
self.assertEqual(info['kwargs']['max_size'], 42)
|
Add eventlet db_pool use for mysql
|
Add eventlet db_pool use for mysql
This adds the use of eventlet's db_pool module so that we can make mysql
calls without blocking the whole process.
New config options are introduced:
sql_dbpool_enable -- Enables the use of eventlet's db_pool
sql_min_pool_size -- Set the minimum number of SQL connections
The default for sql_dbpool_enable is False for now, so there is
no forced behavior changes for those using mysql. sql_min_pool_size
is defaulted to 1 to match behavior if not using db_pool.
Adds a new test module for our sqlalchemy code, testing this new option
as much as is possible without requiring mysql server to be running.
DocImpact
Change-Id: I99833f447df05c1beba5a3925b201dfccca72cae
|
Python
|
apache-2.0
|
n0ano/ganttclient
|
Add eventlet db_pool use for mysql
This adds the use of eventlet's db_pool module so that we can make mysql
calls without blocking the whole process.
New config options are introduced:
sql_dbpool_enable -- Enables the use of eventlet's db_pool
sql_min_pool_size -- Set the minimum number of SQL connections
The default for sql_dbpool_enable is False for now, so there is
no forced behavior changes for those using mysql. sql_min_pool_size
is defaulted to 1 to match behavior if not using db_pool.
Adds a new test module for our sqlalchemy code, testing this new option
as much as is possible without requiring mysql server to be running.
DocImpact
Change-Id: I99833f447df05c1beba5a3925b201dfccca72cae
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for SQLAlchemy specific code."""
from eventlet import db_pool
try:
import MySQLdb
except ImportError:
MySQLdb = None
from nova import context
from nova.db.sqlalchemy import session
from nova import test
class DbPoolTestCase(test.TestCase):
def setUp(self):
super(DbPoolTestCase, self).setUp()
self.flags(sql_dbpool_enable=True)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
if not MySQLdb:
self.skipTest("Unable to test due to lack of MySQLdb")
def test_db_pool_option(self):
self.flags(sql_idle_timeout=11, sql_min_pool_size=21,
sql_max_pool_size=42)
info = {}
class FakeConnectionPool(db_pool.ConnectionPool):
def __init__(self, mod_name, **kwargs):
info['module'] = mod_name
info['kwargs'] = kwargs
super(FakeConnectionPool, self).__init__(mod_name,
**kwargs)
def connect(self, *args, **kwargs):
raise test.TestingException()
self.stubs.Set(db_pool, 'ConnectionPool',
FakeConnectionPool)
sql_connection = 'mysql://user:pass@127.0.0.1/nova'
self.assertRaises(test.TestingException, session.create_engine,
sql_connection)
self.assertEqual(info['module'], MySQLdb)
self.assertEqual(info['kwargs']['max_idle'], 11)
self.assertEqual(info['kwargs']['min_size'], 21)
self.assertEqual(info['kwargs']['max_size'], 42)
|
<commit_before><commit_msg>Add eventlet db_pool use for mysql
This adds the use of eventlet's db_pool module so that we can make mysql
calls without blocking the whole process.
New config options are introduced:
sql_dbpool_enable -- Enables the use of eventlet's db_pool
sql_min_pool_size -- Set the minimum number of SQL connections
The default for sql_dbpool_enable is False for now, so there is
no forced behavior changes for those using mysql. sql_min_pool_size
is defaulted to 1 to match behavior if not using db_pool.
Adds a new test module for our sqlalchemy code, testing this new option
as much as is possible without requiring mysql server to be running.
DocImpact
Change-Id: I99833f447df05c1beba5a3925b201dfccca72cae<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for SQLAlchemy specific code."""
from eventlet import db_pool
try:
import MySQLdb
except ImportError:
MySQLdb = None
from nova import context
from nova.db.sqlalchemy import session
from nova import test
class DbPoolTestCase(test.TestCase):
def setUp(self):
super(DbPoolTestCase, self).setUp()
self.flags(sql_dbpool_enable=True)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
if not MySQLdb:
self.skipTest("Unable to test due to lack of MySQLdb")
def test_db_pool_option(self):
self.flags(sql_idle_timeout=11, sql_min_pool_size=21,
sql_max_pool_size=42)
info = {}
class FakeConnectionPool(db_pool.ConnectionPool):
def __init__(self, mod_name, **kwargs):
info['module'] = mod_name
info['kwargs'] = kwargs
super(FakeConnectionPool, self).__init__(mod_name,
**kwargs)
def connect(self, *args, **kwargs):
raise test.TestingException()
self.stubs.Set(db_pool, 'ConnectionPool',
FakeConnectionPool)
sql_connection = 'mysql://user:pass@127.0.0.1/nova'
self.assertRaises(test.TestingException, session.create_engine,
sql_connection)
self.assertEqual(info['module'], MySQLdb)
self.assertEqual(info['kwargs']['max_idle'], 11)
self.assertEqual(info['kwargs']['min_size'], 21)
self.assertEqual(info['kwargs']['max_size'], 42)
|
Add eventlet db_pool use for mysql
This adds the use of eventlet's db_pool module so that we can make mysql
calls without blocking the whole process.
New config options are introduced:
sql_dbpool_enable -- Enables the use of eventlet's db_pool
sql_min_pool_size -- Set the minimum number of SQL connections
The default for sql_dbpool_enable is False for now, so there is
no forced behavior changes for those using mysql. sql_min_pool_size
is defaulted to 1 to match behavior if not using db_pool.
Adds a new test module for our sqlalchemy code, testing this new option
as much as is possible without requiring mysql server to be running.
DocImpact
Change-Id: I99833f447df05c1beba5a3925b201dfccca72cae# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for SQLAlchemy specific code."""
from eventlet import db_pool
try:
import MySQLdb
except ImportError:
MySQLdb = None
from nova import context
from nova.db.sqlalchemy import session
from nova import test
class DbPoolTestCase(test.TestCase):
def setUp(self):
super(DbPoolTestCase, self).setUp()
self.flags(sql_dbpool_enable=True)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
if not MySQLdb:
self.skipTest("Unable to test due to lack of MySQLdb")
def test_db_pool_option(self):
self.flags(sql_idle_timeout=11, sql_min_pool_size=21,
sql_max_pool_size=42)
info = {}
class FakeConnectionPool(db_pool.ConnectionPool):
def __init__(self, mod_name, **kwargs):
info['module'] = mod_name
info['kwargs'] = kwargs
super(FakeConnectionPool, self).__init__(mod_name,
**kwargs)
def connect(self, *args, **kwargs):
raise test.TestingException()
self.stubs.Set(db_pool, 'ConnectionPool',
FakeConnectionPool)
sql_connection = 'mysql://user:pass@127.0.0.1/nova'
self.assertRaises(test.TestingException, session.create_engine,
sql_connection)
self.assertEqual(info['module'], MySQLdb)
self.assertEqual(info['kwargs']['max_idle'], 11)
self.assertEqual(info['kwargs']['min_size'], 21)
self.assertEqual(info['kwargs']['max_size'], 42)
|
<commit_before><commit_msg>Add eventlet db_pool use for mysql
This adds the use of eventlet's db_pool module so that we can make mysql
calls without blocking the whole process.
New config options are introduced:
sql_dbpool_enable -- Enables the use of eventlet's db_pool
sql_min_pool_size -- Set the minimum number of SQL connections
The default for sql_dbpool_enable is False for now, so there is
no forced behavior changes for those using mysql. sql_min_pool_size
is defaulted to 1 to match behavior if not using db_pool.
Adds a new test module for our sqlalchemy code, testing this new option
as much as is possible without requiring mysql server to be running.
DocImpact
Change-Id: I99833f447df05c1beba5a3925b201dfccca72cae<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for SQLAlchemy specific code."""
from eventlet import db_pool
try:
import MySQLdb
except ImportError:
MySQLdb = None
from nova import context
from nova.db.sqlalchemy import session
from nova import test
class DbPoolTestCase(test.TestCase):
def setUp(self):
super(DbPoolTestCase, self).setUp()
self.flags(sql_dbpool_enable=True)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
if not MySQLdb:
self.skipTest("Unable to test due to lack of MySQLdb")
def test_db_pool_option(self):
self.flags(sql_idle_timeout=11, sql_min_pool_size=21,
sql_max_pool_size=42)
info = {}
class FakeConnectionPool(db_pool.ConnectionPool):
def __init__(self, mod_name, **kwargs):
info['module'] = mod_name
info['kwargs'] = kwargs
super(FakeConnectionPool, self).__init__(mod_name,
**kwargs)
def connect(self, *args, **kwargs):
raise test.TestingException()
self.stubs.Set(db_pool, 'ConnectionPool',
FakeConnectionPool)
sql_connection = 'mysql://user:pass@127.0.0.1/nova'
self.assertRaises(test.TestingException, session.create_engine,
sql_connection)
self.assertEqual(info['module'], MySQLdb)
self.assertEqual(info['kwargs']['max_idle'], 11)
self.assertEqual(info['kwargs']['min_size'], 21)
self.assertEqual(info['kwargs']['max_size'], 42)
|
|
fa830a7a8f7a2b34df311b8d85788ccd5531bb30
|
gen_noise.py
|
gen_noise.py
|
#!/usr/bin/env python3
"""Generates smooth noise images."""
import argparse
import numpy as np
from PIL import Image
from num_utils import resize
def main():
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--output', '-o', default='noise.png', help='the output filename')
parser.add_argument('--size', '-s', type=int, nargs=2, metavar=('W', 'H'),
default=[512, 512], help='the image size to output')
parser.add_argument('--power', type=float, default=2, help='the bias toward large/small scales')
parser.add_argument('--mean', type=float, default=0.5, help='the output mean')
parser.add_argument('--std', type=float, default=0.1, help='the output standard deviation')
args = parser.parse_args()
scales = np.int32(max(np.ceil(np.log2(args.size)))) + 1
for scale in range(scales):
if scale == 0:
canvas = np.zeros((1, 1), np.float32)
else:
canvas = resize(canvas, (2**scale, 2**scale))
canvas += np.random.uniform(-1, 1, canvas.shape) / (scale+1)**2
crop = canvas[:args.size[1], :args.size[0]]
crop *= args.std / np.std(crop)
crop += args.mean - np.mean(crop)
Image.fromarray(np.uint8(np.clip(crop * 255, 0, 255))).save(args.output)
if __name__ == '__main__':
main()
|
Add noise image generator script
|
Add noise image generator script
|
Python
|
mit
|
crowsonkb/style_transfer,crowsonkb/style_transfer,crowsonkb/style_transfer,crowsonkb/style_transfer
|
Add noise image generator script
|
#!/usr/bin/env python3
"""Generates smooth noise images."""
import argparse
import numpy as np
from PIL import Image
from num_utils import resize
def main():
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--output', '-o', default='noise.png', help='the output filename')
parser.add_argument('--size', '-s', type=int, nargs=2, metavar=('W', 'H'),
default=[512, 512], help='the image size to output')
parser.add_argument('--power', type=float, default=2, help='the bias toward large/small scales')
parser.add_argument('--mean', type=float, default=0.5, help='the output mean')
parser.add_argument('--std', type=float, default=0.1, help='the output standard deviation')
args = parser.parse_args()
scales = np.int32(max(np.ceil(np.log2(args.size)))) + 1
for scale in range(scales):
if scale == 0:
canvas = np.zeros((1, 1), np.float32)
else:
canvas = resize(canvas, (2**scale, 2**scale))
canvas += np.random.uniform(-1, 1, canvas.shape) / (scale+1)**2
crop = canvas[:args.size[1], :args.size[0]]
crop *= args.std / np.std(crop)
crop += args.mean - np.mean(crop)
Image.fromarray(np.uint8(np.clip(crop * 255, 0, 255))).save(args.output)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add noise image generator script<commit_after>
|
#!/usr/bin/env python3
"""Generates smooth noise images."""
import argparse
import numpy as np
from PIL import Image
from num_utils import resize
def main():
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--output', '-o', default='noise.png', help='the output filename')
parser.add_argument('--size', '-s', type=int, nargs=2, metavar=('W', 'H'),
default=[512, 512], help='the image size to output')
parser.add_argument('--power', type=float, default=2, help='the bias toward large/small scales')
parser.add_argument('--mean', type=float, default=0.5, help='the output mean')
parser.add_argument('--std', type=float, default=0.1, help='the output standard deviation')
args = parser.parse_args()
scales = np.int32(max(np.ceil(np.log2(args.size)))) + 1
for scale in range(scales):
if scale == 0:
canvas = np.zeros((1, 1), np.float32)
else:
canvas = resize(canvas, (2**scale, 2**scale))
canvas += np.random.uniform(-1, 1, canvas.shape) / (scale+1)**2
crop = canvas[:args.size[1], :args.size[0]]
crop *= args.std / np.std(crop)
crop += args.mean - np.mean(crop)
Image.fromarray(np.uint8(np.clip(crop * 255, 0, 255))).save(args.output)
if __name__ == '__main__':
main()
|
Add noise image generator script#!/usr/bin/env python3
"""Generates smooth noise images."""
import argparse
import numpy as np
from PIL import Image
from num_utils import resize
def main():
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--output', '-o', default='noise.png', help='the output filename')
parser.add_argument('--size', '-s', type=int, nargs=2, metavar=('W', 'H'),
default=[512, 512], help='the image size to output')
parser.add_argument('--power', type=float, default=2, help='the bias toward large/small scales')
parser.add_argument('--mean', type=float, default=0.5, help='the output mean')
parser.add_argument('--std', type=float, default=0.1, help='the output standard deviation')
args = parser.parse_args()
scales = np.int32(max(np.ceil(np.log2(args.size)))) + 1
for scale in range(scales):
if scale == 0:
canvas = np.zeros((1, 1), np.float32)
else:
canvas = resize(canvas, (2**scale, 2**scale))
canvas += np.random.uniform(-1, 1, canvas.shape) / (scale+1)**2
crop = canvas[:args.size[1], :args.size[0]]
crop *= args.std / np.std(crop)
crop += args.mean - np.mean(crop)
Image.fromarray(np.uint8(np.clip(crop * 255, 0, 255))).save(args.output)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add noise image generator script<commit_after>#!/usr/bin/env python3
"""Generates smooth noise images."""
import argparse
import numpy as np
from PIL import Image
from num_utils import resize
def main():
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--output', '-o', default='noise.png', help='the output filename')
parser.add_argument('--size', '-s', type=int, nargs=2, metavar=('W', 'H'),
default=[512, 512], help='the image size to output')
parser.add_argument('--power', type=float, default=2, help='the bias toward large/small scales')
parser.add_argument('--mean', type=float, default=0.5, help='the output mean')
parser.add_argument('--std', type=float, default=0.1, help='the output standard deviation')
args = parser.parse_args()
scales = np.int32(max(np.ceil(np.log2(args.size)))) + 1
for scale in range(scales):
if scale == 0:
canvas = np.zeros((1, 1), np.float32)
else:
canvas = resize(canvas, (2**scale, 2**scale))
canvas += np.random.uniform(-1, 1, canvas.shape) / (scale+1)**2
crop = canvas[:args.size[1], :args.size[0]]
crop *= args.std / np.std(crop)
crop += args.mean - np.mean(crop)
Image.fromarray(np.uint8(np.clip(crop * 255, 0, 255))).save(args.output)
if __name__ == '__main__':
main()
|
|
af1d2e6e3e5df1ad08f977e83208de29d22cd391
|
experiments/func_from_partial_order/func_from_partial_order.py
|
experiments/func_from_partial_order/func_from_partial_order.py
|
# Goal:
#
# Given:
# - a set of samples (S)
# - an unknown evaluation function f of S (f: S -> [0; 1])
# - a partial order c of f over S:
# - comparisons of pairs of samples (C(i, j) in {-1,0,1}):
# c(i,j) = -1 iff f(S_i) < f(S_j)
# c(i,j) = 0 iff f(S_i) = f(S_j)
# c(i,j) = 1 iff f(S_i) > f(S_j)
# ie. c(i, j) = sign(f(S_i) - f(S_j))
#
# we'd like to estimate the function values of f at samples S
# that best fit with the partial order c.
#
# While it is possible that some comparisons in c would be conflicting,
# the goal is to minimize the error.
#
#
# notes:
# - we could compute g(S_i) = sum(c(i, j) for j in S if i != j)
# - the range of g would be [-N; N] for N = len(S)
# - in order to squash g to [0;1] we could use the logistic function
# - in order words:
# - samples compared mostly as higher then others would get high value of f
# and vice versa
import numpy as np
def partial_order(a, b, eps=0.2):
'''
a, b - values of a function with range [0;1]
'''
# return sign(a - b)
return eps_sign(a - b, eps)
# note: for continuous partial order use: f(a) - f(b)
def eps_sign(x, eps):
'''signum function with 0 value for abs(x) < eps'''
return sign(sign(x) * np.maximum(abs(x) - eps, 0))
def f(x):
return np.exp(-0.5 * x**2)
def random_samples(n, a=0, b=1):
return (b - a) * np.random.random(n) + a
def reconstruct(x, f, eps=0.2):
y_0 = f(x)
def rescale(v):
return (v + 1) * 0.5
# return v
def normalize(v):
return (v - np.min(v)) / (np.max(v) - np.min(v))
n = len(x)
return normalize(sum(partial_order(y_0, f(x[i]), eps) for i in range(0, n)))
|
Make an experiment for reconstructing a function from partial ordering of its values at samples points.
|
Make an experiment for reconstructing a function from partial ordering of its values at samples points.
|
Python
|
mit
|
bzamecnik/tfr,bzamecnik/tfr
|
Make an experiment for reconstructing a function from partial ordering of its values at samples points.
|
# Goal:
#
# Given:
# - a set of samples (S)
# - an unknown evaluation function f of S (f: S -> [0; 1])
# - a partial order c of f over S:
# - comparisons of pairs of samples (C(i, j) in {-1,0,1}):
# c(i,j) = -1 iff f(S_i) < f(S_j)
# c(i,j) = 0 iff f(S_i) = f(S_j)
# c(i,j) = 1 iff f(S_i) > f(S_j)
# ie. c(i, j) = sign(f(S_i) - f(S_j))
#
# we'd like to estimate the function values of f at samples S
# that best fit with the partial order c.
#
# While it is possible that some comparisons in c would be conflicting,
# the goal is to minimize the error.
#
#
# notes:
# - we could compute g(S_i) = sum(c(i, j) for j in S if i != j)
# - the range of g would be [-N; N] for N = len(S)
# - in order to squash g to [0;1] we could use the logistic function
# - in order words:
# - samples compared mostly as higher then others would get high value of f
# and vice versa
import numpy as np
def partial_order(a, b, eps=0.2):
'''
a, b - values of a function with range [0;1]
'''
# return sign(a - b)
return eps_sign(a - b, eps)
# note: for continuous partial order use: f(a) - f(b)
def eps_sign(x, eps):
'''signum function with 0 value for abs(x) < eps'''
return sign(sign(x) * np.maximum(abs(x) - eps, 0))
def f(x):
return np.exp(-0.5 * x**2)
def random_samples(n, a=0, b=1):
return (b - a) * np.random.random(n) + a
def reconstruct(x, f, eps=0.2):
y_0 = f(x)
def rescale(v):
return (v + 1) * 0.5
# return v
def normalize(v):
return (v - np.min(v)) / (np.max(v) - np.min(v))
n = len(x)
return normalize(sum(partial_order(y_0, f(x[i]), eps) for i in range(0, n)))
|
<commit_before><commit_msg>Make an experiment for reconstructing a function from partial ordering of its values at samples points.<commit_after>
|
# Goal:
#
# Given:
# - a set of samples (S)
# - an unknown evaluation function f of S (f: S -> [0; 1])
# - a partial order c of f over S:
# - comparisons of pairs of samples (C(i, j) in {-1,0,1}):
# c(i,j) = -1 iff f(S_i) < f(S_j)
# c(i,j) = 0 iff f(S_i) = f(S_j)
# c(i,j) = 1 iff f(S_i) > f(S_j)
# ie. c(i, j) = sign(f(S_i) - f(S_j))
#
# we'd like to estimate the function values of f at samples S
# that best fit with the partial order c.
#
# While it is possible that some comparisons in c would be conflicting,
# the goal is to minimize the error.
#
#
# notes:
# - we could compute g(S_i) = sum(c(i, j) for j in S if i != j)
# - the range of g would be [-N; N] for N = len(S)
# - in order to squash g to [0;1] we could use the logistic function
# - in order words:
# - samples compared mostly as higher then others would get high value of f
# and vice versa
import numpy as np
def partial_order(a, b, eps=0.2):
'''
a, b - values of a function with range [0;1]
'''
# return sign(a - b)
return eps_sign(a - b, eps)
# note: for continuous partial order use: f(a) - f(b)
def eps_sign(x, eps):
'''signum function with 0 value for abs(x) < eps'''
return sign(sign(x) * np.maximum(abs(x) - eps, 0))
def f(x):
return np.exp(-0.5 * x**2)
def random_samples(n, a=0, b=1):
return (b - a) * np.random.random(n) + a
def reconstruct(x, f, eps=0.2):
y_0 = f(x)
def rescale(v):
return (v + 1) * 0.5
# return v
def normalize(v):
return (v - np.min(v)) / (np.max(v) - np.min(v))
n = len(x)
return normalize(sum(partial_order(y_0, f(x[i]), eps) for i in range(0, n)))
|
Make an experiment for reconstructing a function from partial ordering of its values at samples points.# Goal:
#
# Given:
# - a set of samples (S)
# - an unknown evaluation function f of S (f: S -> [0; 1])
# - a partial order c of f over S:
# - comparisons of pairs of samples (C(i, j) in {-1,0,1}):
# c(i,j) = -1 iff f(S_i) < f(S_j)
# c(i,j) = 0 iff f(S_i) = f(S_j)
# c(i,j) = 1 iff f(S_i) > f(S_j)
# ie. c(i, j) = sign(f(S_i) - f(S_j))
#
# we'd like to estimate the function values of f at samples S
# that best fit with the partial order c.
#
# While it is possible that some comparisons in c would be conflicting,
# the goal is to minimize the error.
#
#
# notes:
# - we could compute g(S_i) = sum(c(i, j) for j in S if i != j)
# - the range of g would be [-N; N] for N = len(S)
# - in order to squash g to [0;1] we could use the logistic function
# - in order words:
# - samples compared mostly as higher then others would get high value of f
# and vice versa
import numpy as np
def partial_order(a, b, eps=0.2):
'''
a, b - values of a function with range [0;1]
'''
# return sign(a - b)
return eps_sign(a - b, eps)
# note: for continuous partial order use: f(a) - f(b)
def eps_sign(x, eps):
'''signum function with 0 value for abs(x) < eps'''
return sign(sign(x) * np.maximum(abs(x) - eps, 0))
def f(x):
return np.exp(-0.5 * x**2)
def random_samples(n, a=0, b=1):
return (b - a) * np.random.random(n) + a
def reconstruct(x, f, eps=0.2):
y_0 = f(x)
def rescale(v):
return (v + 1) * 0.5
# return v
def normalize(v):
return (v - np.min(v)) / (np.max(v) - np.min(v))
n = len(x)
return normalize(sum(partial_order(y_0, f(x[i]), eps) for i in range(0, n)))
|
<commit_before><commit_msg>Make an experiment for reconstructing a function from partial ordering of its values at samples points.<commit_after># Goal:
#
# Given:
# - a set of samples (S)
# - an unknown evaluation function f of S (f: S -> [0; 1])
# - a partial order c of f over S:
# - comparisons of pairs of samples (C(i, j) in {-1,0,1}):
# c(i,j) = -1 iff f(S_i) < f(S_j)
# c(i,j) = 0 iff f(S_i) = f(S_j)
# c(i,j) = 1 iff f(S_i) > f(S_j)
# ie. c(i, j) = sign(f(S_i) - f(S_j))
#
# we'd like to estimate the function values of f at samples S
# that best fit with the partial order c.
#
# While it is possible that some comparisons in c would be conflicting,
# the goal is to minimize the error.
#
#
# notes:
# - we could compute g(S_i) = sum(c(i, j) for j in S if i != j)
# - the range of g would be [-N; N] for N = len(S)
# - in order to squash g to [0;1] we could use the logistic function
# - in order words:
# - samples compared mostly as higher then others would get high value of f
# and vice versa
import numpy as np
def partial_order(a, b, eps=0.2):
'''
a, b - values of a function with range [0;1]
'''
# return sign(a - b)
return eps_sign(a - b, eps)
# note: for continuous partial order use: f(a) - f(b)
def eps_sign(x, eps):
'''signum function with 0 value for abs(x) < eps'''
return sign(sign(x) * np.maximum(abs(x) - eps, 0))
def f(x):
return np.exp(-0.5 * x**2)
def random_samples(n, a=0, b=1):
return (b - a) * np.random.random(n) + a
def reconstruct(x, f, eps=0.2):
y_0 = f(x)
def rescale(v):
return (v + 1) * 0.5
# return v
def normalize(v):
return (v - np.min(v)) / (np.max(v) - np.min(v))
n = len(x)
return normalize(sum(partial_order(y_0, f(x[i]), eps) for i in range(0, n)))
|
|
8389b23b3eefa3a4eeb84fa58837271ea1548514
|
validate.py
|
validate.py
|
import sys
import torch
import visdom
import argparse
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from torch.autograd import Variable
from torch.utils import data
from tqdm import tqdm
from ptsemseg.loader import get_loader, get_data_path
from ptsemseg.metrics import scores
def train(args):
# Setup Dataloader
data_loader = get_loader(args.dataset)
data_path = get_data_path(args.dataset)
loader = data_loader(data_path, split=args.split, is_transform=True, img_size=(args.img_rows, args.img_cols))
n_classes = loader.n_classes
valloader = data.DataLoader(loader, batch_size=args.batch_size, num_workers=4)
# Setup Model
model = torch.load(args.model_path)
if torch.cuda.is_available():
model.cuda(0)
gts, preds = [], []
for i, (images, labels) in tqdm(enumerate(valloader)):
if torch.cuda.is_available():
images = Variable(images.cuda(0))
labels = Variable(labels.cuda(0))
else:
images = Variable(images)
labels = Variable(labels)
outputs = model(images)
pred = np.squeeze(outputs.data.max(1)[1].cpu().numpy(), axis=1)
gt = labels.data.cpu().numpy()
for gt_, pred_ in zip(gt, pred):
gts.append(gt_)
preds.append(pred_)
score, class_iou = scores(gts, preds, n_class=n_classes)
for k, v in score.items():
print k, v
for i in range(n_classes):
print i, class_iou[i]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('--model_path', nargs='?', type=str, default='fcn8s_pascal_1_26.pkl',
help='Path to the saved model')
parser.add_argument('--dataset', nargs='?', type=str, default='pascal',
help='Dataset to use [\'pascal, camvid, ade20k etc\']')
parser.add_argument('--img_rows', nargs='?', type=int, default=256,
help='Height of the input image')
parser.add_argument('--img_cols', nargs='?', type=int, default=256,
help='Height of the input image')
parser.add_argument('--batch_size', nargs='?', type=int, default=1,
help='Batch Size')
parser.add_argument('--split', nargs='?', type=str, default='val',
help='Split of dataset to test on')
args = parser.parse_args()
train(args)
|
Make validation and test separate scripts
|
Make validation and test separate scripts
|
Python
|
mit
|
meetshah1995/pytorch-semseg,hzh8311/project,ibadami/pytorch-semseg
|
Make validation and test separate scripts
|
import sys
import torch
import visdom
import argparse
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from torch.autograd import Variable
from torch.utils import data
from tqdm import tqdm
from ptsemseg.loader import get_loader, get_data_path
from ptsemseg.metrics import scores
def train(args):
# Setup Dataloader
data_loader = get_loader(args.dataset)
data_path = get_data_path(args.dataset)
loader = data_loader(data_path, split=args.split, is_transform=True, img_size=(args.img_rows, args.img_cols))
n_classes = loader.n_classes
valloader = data.DataLoader(loader, batch_size=args.batch_size, num_workers=4)
# Setup Model
model = torch.load(args.model_path)
if torch.cuda.is_available():
model.cuda(0)
gts, preds = [], []
for i, (images, labels) in tqdm(enumerate(valloader)):
if torch.cuda.is_available():
images = Variable(images.cuda(0))
labels = Variable(labels.cuda(0))
else:
images = Variable(images)
labels = Variable(labels)
outputs = model(images)
pred = np.squeeze(outputs.data.max(1)[1].cpu().numpy(), axis=1)
gt = labels.data.cpu().numpy()
for gt_, pred_ in zip(gt, pred):
gts.append(gt_)
preds.append(pred_)
score, class_iou = scores(gts, preds, n_class=n_classes)
for k, v in score.items():
print k, v
for i in range(n_classes):
print i, class_iou[i]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('--model_path', nargs='?', type=str, default='fcn8s_pascal_1_26.pkl',
help='Path to the saved model')
parser.add_argument('--dataset', nargs='?', type=str, default='pascal',
help='Dataset to use [\'pascal, camvid, ade20k etc\']')
parser.add_argument('--img_rows', nargs='?', type=int, default=256,
help='Height of the input image')
parser.add_argument('--img_cols', nargs='?', type=int, default=256,
help='Height of the input image')
parser.add_argument('--batch_size', nargs='?', type=int, default=1,
help='Batch Size')
parser.add_argument('--split', nargs='?', type=str, default='val',
help='Split of dataset to test on')
args = parser.parse_args()
train(args)
|
<commit_before><commit_msg>Make validation and test separate scripts<commit_after>
|
import sys
import torch
import visdom
import argparse
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from torch.autograd import Variable
from torch.utils import data
from tqdm import tqdm
from ptsemseg.loader import get_loader, get_data_path
from ptsemseg.metrics import scores
def train(args):
# Setup Dataloader
data_loader = get_loader(args.dataset)
data_path = get_data_path(args.dataset)
loader = data_loader(data_path, split=args.split, is_transform=True, img_size=(args.img_rows, args.img_cols))
n_classes = loader.n_classes
valloader = data.DataLoader(loader, batch_size=args.batch_size, num_workers=4)
# Setup Model
model = torch.load(args.model_path)
if torch.cuda.is_available():
model.cuda(0)
gts, preds = [], []
for i, (images, labels) in tqdm(enumerate(valloader)):
if torch.cuda.is_available():
images = Variable(images.cuda(0))
labels = Variable(labels.cuda(0))
else:
images = Variable(images)
labels = Variable(labels)
outputs = model(images)
pred = np.squeeze(outputs.data.max(1)[1].cpu().numpy(), axis=1)
gt = labels.data.cpu().numpy()
for gt_, pred_ in zip(gt, pred):
gts.append(gt_)
preds.append(pred_)
score, class_iou = scores(gts, preds, n_class=n_classes)
for k, v in score.items():
print k, v
for i in range(n_classes):
print i, class_iou[i]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('--model_path', nargs='?', type=str, default='fcn8s_pascal_1_26.pkl',
help='Path to the saved model')
parser.add_argument('--dataset', nargs='?', type=str, default='pascal',
help='Dataset to use [\'pascal, camvid, ade20k etc\']')
parser.add_argument('--img_rows', nargs='?', type=int, default=256,
help='Height of the input image')
parser.add_argument('--img_cols', nargs='?', type=int, default=256,
help='Height of the input image')
parser.add_argument('--batch_size', nargs='?', type=int, default=1,
help='Batch Size')
parser.add_argument('--split', nargs='?', type=str, default='val',
help='Split of dataset to test on')
args = parser.parse_args()
train(args)
|
Make validation and test separate scriptsimport sys
import torch
import visdom
import argparse
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from torch.autograd import Variable
from torch.utils import data
from tqdm import tqdm
from ptsemseg.loader import get_loader, get_data_path
from ptsemseg.metrics import scores
def train(args):
# Setup Dataloader
data_loader = get_loader(args.dataset)
data_path = get_data_path(args.dataset)
loader = data_loader(data_path, split=args.split, is_transform=True, img_size=(args.img_rows, args.img_cols))
n_classes = loader.n_classes
valloader = data.DataLoader(loader, batch_size=args.batch_size, num_workers=4)
# Setup Model
model = torch.load(args.model_path)
if torch.cuda.is_available():
model.cuda(0)
gts, preds = [], []
for i, (images, labels) in tqdm(enumerate(valloader)):
if torch.cuda.is_available():
images = Variable(images.cuda(0))
labels = Variable(labels.cuda(0))
else:
images = Variable(images)
labels = Variable(labels)
outputs = model(images)
pred = np.squeeze(outputs.data.max(1)[1].cpu().numpy(), axis=1)
gt = labels.data.cpu().numpy()
for gt_, pred_ in zip(gt, pred):
gts.append(gt_)
preds.append(pred_)
score, class_iou = scores(gts, preds, n_class=n_classes)
for k, v in score.items():
print k, v
for i in range(n_classes):
print i, class_iou[i]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('--model_path', nargs='?', type=str, default='fcn8s_pascal_1_26.pkl',
help='Path to the saved model')
parser.add_argument('--dataset', nargs='?', type=str, default='pascal',
help='Dataset to use [\'pascal, camvid, ade20k etc\']')
parser.add_argument('--img_rows', nargs='?', type=int, default=256,
help='Height of the input image')
parser.add_argument('--img_cols', nargs='?', type=int, default=256,
help='Height of the input image')
parser.add_argument('--batch_size', nargs='?', type=int, default=1,
help='Batch Size')
parser.add_argument('--split', nargs='?', type=str, default='val',
help='Split of dataset to test on')
args = parser.parse_args()
train(args)
|
<commit_before><commit_msg>Make validation and test separate scripts<commit_after>import sys
import torch
import visdom
import argparse
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from torch.autograd import Variable
from torch.utils import data
from tqdm import tqdm
from ptsemseg.loader import get_loader, get_data_path
from ptsemseg.metrics import scores
def train(args):
# Setup Dataloader
data_loader = get_loader(args.dataset)
data_path = get_data_path(args.dataset)
loader = data_loader(data_path, split=args.split, is_transform=True, img_size=(args.img_rows, args.img_cols))
n_classes = loader.n_classes
valloader = data.DataLoader(loader, batch_size=args.batch_size, num_workers=4)
# Setup Model
model = torch.load(args.model_path)
if torch.cuda.is_available():
model.cuda(0)
gts, preds = [], []
for i, (images, labels) in tqdm(enumerate(valloader)):
if torch.cuda.is_available():
images = Variable(images.cuda(0))
labels = Variable(labels.cuda(0))
else:
images = Variable(images)
labels = Variable(labels)
outputs = model(images)
pred = np.squeeze(outputs.data.max(1)[1].cpu().numpy(), axis=1)
gt = labels.data.cpu().numpy()
for gt_, pred_ in zip(gt, pred):
gts.append(gt_)
preds.append(pred_)
score, class_iou = scores(gts, preds, n_class=n_classes)
for k, v in score.items():
print k, v
for i in range(n_classes):
print i, class_iou[i]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('--model_path', nargs='?', type=str, default='fcn8s_pascal_1_26.pkl',
help='Path to the saved model')
parser.add_argument('--dataset', nargs='?', type=str, default='pascal',
help='Dataset to use [\'pascal, camvid, ade20k etc\']')
parser.add_argument('--img_rows', nargs='?', type=int, default=256,
help='Height of the input image')
parser.add_argument('--img_cols', nargs='?', type=int, default=256,
help='Height of the input image')
parser.add_argument('--batch_size', nargs='?', type=int, default=1,
help='Batch Size')
parser.add_argument('--split', nargs='?', type=str, default='val',
help='Split of dataset to test on')
args = parser.parse_args()
train(args)
|
|
e5cd42bf40ae66e22aa6ce486e84cf8f833ed338
|
opentreemap/treemap/migrations/0037_fix_plot_add_delete_permission_labels.py
|
opentreemap/treemap/migrations/0037_fix_plot_add_delete_permission_labels.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def change_labels(apps, term):
Permission = apps.get_model('auth', 'Permission')
add_plot = Permission.objects.filter(codename='add_plot')
delete_plot = Permission.objects.filter(codename='delete_plot')
add_plot.update(name='Can add {}'.format(term))
delete_plot.update(name='Can delete {}'.format(term))
def fix_labels(apps, schema_editor):
change_labels(apps, 'planting site')
def revert_labels(apps, schema_editor):
change_labels(apps, 'plot')
class Migration(migrations.Migration):
dependencies = [
('treemap', '0036_assign_role_add_delete_permissions'),
]
operations = [
migrations.RunPython(fix_labels, revert_labels),
]
|
Apply model permissions - plot permission name
|
Apply model permissions - plot permission name
Data migration to fix plot permissions to say can add or delete
**planting site**, rather than **plot**.
|
Python
|
agpl-3.0
|
maurizi/otm-core,maurizi/otm-core,maurizi/otm-core,maurizi/otm-core
|
Apply model permissions - plot permission name
Data migration to fix plot permissions to say can add or delete
**planting site**, rather than **plot**.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def change_labels(apps, term):
Permission = apps.get_model('auth', 'Permission')
add_plot = Permission.objects.filter(codename='add_plot')
delete_plot = Permission.objects.filter(codename='delete_plot')
add_plot.update(name='Can add {}'.format(term))
delete_plot.update(name='Can delete {}'.format(term))
def fix_labels(apps, schema_editor):
change_labels(apps, 'planting site')
def revert_labels(apps, schema_editor):
change_labels(apps, 'plot')
class Migration(migrations.Migration):
dependencies = [
('treemap', '0036_assign_role_add_delete_permissions'),
]
operations = [
migrations.RunPython(fix_labels, revert_labels),
]
|
<commit_before><commit_msg>Apply model permissions - plot permission name
Data migration to fix plot permissions to say can add or delete
**planting site**, rather than **plot**.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def change_labels(apps, term):
Permission = apps.get_model('auth', 'Permission')
add_plot = Permission.objects.filter(codename='add_plot')
delete_plot = Permission.objects.filter(codename='delete_plot')
add_plot.update(name='Can add {}'.format(term))
delete_plot.update(name='Can delete {}'.format(term))
def fix_labels(apps, schema_editor):
change_labels(apps, 'planting site')
def revert_labels(apps, schema_editor):
change_labels(apps, 'plot')
class Migration(migrations.Migration):
dependencies = [
('treemap', '0036_assign_role_add_delete_permissions'),
]
operations = [
migrations.RunPython(fix_labels, revert_labels),
]
|
Apply model permissions - plot permission name
Data migration to fix plot permissions to say can add or delete
**planting site**, rather than **plot**.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def change_labels(apps, term):
Permission = apps.get_model('auth', 'Permission')
add_plot = Permission.objects.filter(codename='add_plot')
delete_plot = Permission.objects.filter(codename='delete_plot')
add_plot.update(name='Can add {}'.format(term))
delete_plot.update(name='Can delete {}'.format(term))
def fix_labels(apps, schema_editor):
change_labels(apps, 'planting site')
def revert_labels(apps, schema_editor):
change_labels(apps, 'plot')
class Migration(migrations.Migration):
dependencies = [
('treemap', '0036_assign_role_add_delete_permissions'),
]
operations = [
migrations.RunPython(fix_labels, revert_labels),
]
|
<commit_before><commit_msg>Apply model permissions - plot permission name
Data migration to fix plot permissions to say can add or delete
**planting site**, rather than **plot**.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def change_labels(apps, term):
Permission = apps.get_model('auth', 'Permission')
add_plot = Permission.objects.filter(codename='add_plot')
delete_plot = Permission.objects.filter(codename='delete_plot')
add_plot.update(name='Can add {}'.format(term))
delete_plot.update(name='Can delete {}'.format(term))
def fix_labels(apps, schema_editor):
change_labels(apps, 'planting site')
def revert_labels(apps, schema_editor):
change_labels(apps, 'plot')
class Migration(migrations.Migration):
dependencies = [
('treemap', '0036_assign_role_add_delete_permissions'),
]
operations = [
migrations.RunPython(fix_labels, revert_labels),
]
|
|
f0c0edcbb6d88cb9f7215c16128dd2471ee16789
|
imagersite/imager_images/migrations/0004_auto_20150728_1555.py
|
imagersite/imager_images/migrations/0004_auto_20150728_1555.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('imager_images', '0003_auto_20150726_1224'),
]
operations = [
migrations.AlterField(
model_name='album',
name='date_published',
field=models.DateField(null=True, blank=True),
),
migrations.AlterField(
model_name='album',
name='description',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='photo',
name='date_published',
field=models.DateField(null=True, blank=True),
),
migrations.AlterField(
model_name='photo',
name='description',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='photo',
name='title',
field=models.CharField(max_length=256, blank=True),
),
]
|
Update photo and album models for non-required fields
|
Update photo and album models for non-required fields
|
Python
|
mit
|
jesseklein406/django-imager,jesseklein406/django-imager,jesseklein406/django-imager
|
Update photo and album models for non-required fields
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('imager_images', '0003_auto_20150726_1224'),
]
operations = [
migrations.AlterField(
model_name='album',
name='date_published',
field=models.DateField(null=True, blank=True),
),
migrations.AlterField(
model_name='album',
name='description',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='photo',
name='date_published',
field=models.DateField(null=True, blank=True),
),
migrations.AlterField(
model_name='photo',
name='description',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='photo',
name='title',
field=models.CharField(max_length=256, blank=True),
),
]
|
<commit_before><commit_msg>Update photo and album models for non-required fields<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('imager_images', '0003_auto_20150726_1224'),
]
operations = [
migrations.AlterField(
model_name='album',
name='date_published',
field=models.DateField(null=True, blank=True),
),
migrations.AlterField(
model_name='album',
name='description',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='photo',
name='date_published',
field=models.DateField(null=True, blank=True),
),
migrations.AlterField(
model_name='photo',
name='description',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='photo',
name='title',
field=models.CharField(max_length=256, blank=True),
),
]
|
Update photo and album models for non-required fields# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('imager_images', '0003_auto_20150726_1224'),
]
operations = [
migrations.AlterField(
model_name='album',
name='date_published',
field=models.DateField(null=True, blank=True),
),
migrations.AlterField(
model_name='album',
name='description',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='photo',
name='date_published',
field=models.DateField(null=True, blank=True),
),
migrations.AlterField(
model_name='photo',
name='description',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='photo',
name='title',
field=models.CharField(max_length=256, blank=True),
),
]
|
<commit_before><commit_msg>Update photo and album models for non-required fields<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('imager_images', '0003_auto_20150726_1224'),
]
operations = [
migrations.AlterField(
model_name='album',
name='date_published',
field=models.DateField(null=True, blank=True),
),
migrations.AlterField(
model_name='album',
name='description',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='photo',
name='date_published',
field=models.DateField(null=True, blank=True),
),
migrations.AlterField(
model_name='photo',
name='description',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='photo',
name='title',
field=models.CharField(max_length=256, blank=True),
),
]
|
|
7e7817fc5a90adf7b2fa4b8947dd46a75bc6e818
|
pystereovisiontoolkit.py
|
pystereovisiontoolkit.py
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Application to capture, and calibrate stereo cameras
#
#
# External dependencies
#
import argparse
import Calibration
import CvViewer
#
# Command line argument parser
#
parser = argparse.ArgumentParser( description='Camera calibration toolkit.' )
parser.add_argument( '-live', action='store_true', default=False, help='Stereo camera live display' )
parser.add_argument( '-rows', action='store', default=15, help='Number of rows in the chessboard pattern' )
parser.add_argument( '-cols', action='store', default=10, help='Number of columns in the chessboard pattern' )
parser.add_argument( '-grid', action='store_true', default=False, help='Asymmetric circles grid pattern' )
parser.add_argument( '-debug', action='store_true', default=False, help='Display the chessboard on each image' )
parser.add_argument( '-mono', action='store', help='Image file for mono camera calibration' )
parser.add_argument( '-stereo', action='store', nargs=2, metavar=('cam1', 'cam2'), help='Image file for stereo camera calibration' )
args = parser.parse_args()
#
# Calibration pattern setup
#
if args.grid : Calibration.pattern_type = 'CirclesGrid'
Calibration.pattern_size = ( int(args.rows), int(args.cols) )
#
# Stereo camera live capture
#
if args.live :
# Stereo camera viewer
CvViewer.VmbStereoViewer()
#
# Mono camera calibration
#
elif args.mono :
# Launch calibration
Calibration.CameraCalibration( args.mono, args.debug )
|
Introduce a single program to rule them all...
|
Introduce a single program to rule them all...
|
Python
|
mit
|
microy/VisionToolkit,microy/VisionToolkit,microy/StereoVision,microy/PyStereoVisionToolkit,microy/StereoVision,microy/PyStereoVisionToolkit
|
Introduce a single program to rule them all...
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Application to capture, and calibrate stereo cameras
#
#
# External dependencies
#
import argparse
import Calibration
import CvViewer
#
# Command line argument parser
#
parser = argparse.ArgumentParser( description='Camera calibration toolkit.' )
parser.add_argument( '-live', action='store_true', default=False, help='Stereo camera live display' )
parser.add_argument( '-rows', action='store', default=15, help='Number of rows in the chessboard pattern' )
parser.add_argument( '-cols', action='store', default=10, help='Number of columns in the chessboard pattern' )
parser.add_argument( '-grid', action='store_true', default=False, help='Asymmetric circles grid pattern' )
parser.add_argument( '-debug', action='store_true', default=False, help='Display the chessboard on each image' )
parser.add_argument( '-mono', action='store', help='Image file for mono camera calibration' )
parser.add_argument( '-stereo', action='store', nargs=2, metavar=('cam1', 'cam2'), help='Image file for stereo camera calibration' )
args = parser.parse_args()
#
# Calibration pattern setup
#
if args.grid : Calibration.pattern_type = 'CirclesGrid'
Calibration.pattern_size = ( int(args.rows), int(args.cols) )
#
# Stereo camera live capture
#
if args.live :
# Stereo camera viewer
CvViewer.VmbStereoViewer()
#
# Mono camera calibration
#
elif args.mono :
# Launch calibration
Calibration.CameraCalibration( args.mono, args.debug )
|
<commit_before><commit_msg>Introduce a single program to rule them all...<commit_after>
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Application to capture, and calibrate stereo cameras
#
#
# External dependencies
#
import argparse
import Calibration
import CvViewer
#
# Command line argument parser
#
parser = argparse.ArgumentParser( description='Camera calibration toolkit.' )
parser.add_argument( '-live', action='store_true', default=False, help='Stereo camera live display' )
parser.add_argument( '-rows', action='store', default=15, help='Number of rows in the chessboard pattern' )
parser.add_argument( '-cols', action='store', default=10, help='Number of columns in the chessboard pattern' )
parser.add_argument( '-grid', action='store_true', default=False, help='Asymmetric circles grid pattern' )
parser.add_argument( '-debug', action='store_true', default=False, help='Display the chessboard on each image' )
parser.add_argument( '-mono', action='store', help='Image file for mono camera calibration' )
parser.add_argument( '-stereo', action='store', nargs=2, metavar=('cam1', 'cam2'), help='Image file for stereo camera calibration' )
args = parser.parse_args()
#
# Calibration pattern setup
#
if args.grid : Calibration.pattern_type = 'CirclesGrid'
Calibration.pattern_size = ( int(args.rows), int(args.cols) )
#
# Stereo camera live capture
#
if args.live :
# Stereo camera viewer
CvViewer.VmbStereoViewer()
#
# Mono camera calibration
#
elif args.mono :
# Launch calibration
Calibration.CameraCalibration( args.mono, args.debug )
|
Introduce a single program to rule them all...#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Application to capture, and calibrate stereo cameras
#
#
# External dependencies
#
import argparse
import Calibration
import CvViewer
#
# Command line argument parser
#
parser = argparse.ArgumentParser( description='Camera calibration toolkit.' )
parser.add_argument( '-live', action='store_true', default=False, help='Stereo camera live display' )
parser.add_argument( '-rows', action='store', default=15, help='Number of rows in the chessboard pattern' )
parser.add_argument( '-cols', action='store', default=10, help='Number of columns in the chessboard pattern' )
parser.add_argument( '-grid', action='store_true', default=False, help='Asymmetric circles grid pattern' )
parser.add_argument( '-debug', action='store_true', default=False, help='Display the chessboard on each image' )
parser.add_argument( '-mono', action='store', help='Image file for mono camera calibration' )
parser.add_argument( '-stereo', action='store', nargs=2, metavar=('cam1', 'cam2'), help='Image file for stereo camera calibration' )
args = parser.parse_args()
#
# Calibration pattern setup
#
if args.grid : Calibration.pattern_type = 'CirclesGrid'
Calibration.pattern_size = ( int(args.rows), int(args.cols) )
#
# Stereo camera live capture
#
if args.live :
# Stereo camera viewer
CvViewer.VmbStereoViewer()
#
# Mono camera calibration
#
elif args.mono :
# Launch calibration
Calibration.CameraCalibration( args.mono, args.debug )
|
<commit_before><commit_msg>Introduce a single program to rule them all...<commit_after>#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Application to capture, and calibrate stereo cameras
#
#
# External dependencies
#
import argparse
import Calibration
import CvViewer
#
# Command line argument parser
#
parser = argparse.ArgumentParser( description='Camera calibration toolkit.' )
parser.add_argument( '-live', action='store_true', default=False, help='Stereo camera live display' )
parser.add_argument( '-rows', action='store', default=15, help='Number of rows in the chessboard pattern' )
parser.add_argument( '-cols', action='store', default=10, help='Number of columns in the chessboard pattern' )
parser.add_argument( '-grid', action='store_true', default=False, help='Asymmetric circles grid pattern' )
parser.add_argument( '-debug', action='store_true', default=False, help='Display the chessboard on each image' )
parser.add_argument( '-mono', action='store', help='Image file for mono camera calibration' )
parser.add_argument( '-stereo', action='store', nargs=2, metavar=('cam1', 'cam2'), help='Image file for stereo camera calibration' )
args = parser.parse_args()
#
# Calibration pattern setup
#
if args.grid : Calibration.pattern_type = 'CirclesGrid'
Calibration.pattern_size = ( int(args.rows), int(args.cols) )
#
# Stereo camera live capture
#
if args.live :
# Stereo camera viewer
CvViewer.VmbStereoViewer()
#
# Mono camera calibration
#
elif args.mono :
# Launch calibration
Calibration.CameraCalibration( args.mono, args.debug )
|
|
384e6ffcef5844b451dccd2fedbfdaef38851e1e
|
accelerator/migrations/0029_add_help_text_on_image_field.py
|
accelerator/migrations/0029_add_help_text_on_image_field.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-12-17 21:05
from __future__ import unicode_literals
from django.db import migrations
import sorl.thumbnail.fields
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0028_change_profile_help_texts'),
]
operations = [
migrations.AlterField(
model_name='entrepreneurprofile',
name='image',
field=sorl.thumbnail.fields.ImageField(blank=True, help_text='Suggested size: <400px on the short side', upload_to='profile_pics', verbose_name='Profile Picture'),
),
migrations.AlterField(
model_name='expertprofile',
name='image',
field=sorl.thumbnail.fields.ImageField(blank=True, help_text='Suggested size: <400px on the short side', upload_to='profile_pics', verbose_name='Profile Picture'),
),
migrations.AlterField(
model_name='memberprofile',
name='image',
field=sorl.thumbnail.fields.ImageField(blank=True, help_text='Suggested size: <400px on the short side', upload_to='profile_pics', verbose_name='Profile Picture'),
),
]
|
Add migration for image field
|
[AC-6187] Add migration for image field
|
Python
|
mit
|
masschallenge/django-accelerator,masschallenge/django-accelerator
|
[AC-6187] Add migration for image field
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-12-17 21:05
from __future__ import unicode_literals
from django.db import migrations
import sorl.thumbnail.fields
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0028_change_profile_help_texts'),
]
operations = [
migrations.AlterField(
model_name='entrepreneurprofile',
name='image',
field=sorl.thumbnail.fields.ImageField(blank=True, help_text='Suggested size: <400px on the short side', upload_to='profile_pics', verbose_name='Profile Picture'),
),
migrations.AlterField(
model_name='expertprofile',
name='image',
field=sorl.thumbnail.fields.ImageField(blank=True, help_text='Suggested size: <400px on the short side', upload_to='profile_pics', verbose_name='Profile Picture'),
),
migrations.AlterField(
model_name='memberprofile',
name='image',
field=sorl.thumbnail.fields.ImageField(blank=True, help_text='Suggested size: <400px on the short side', upload_to='profile_pics', verbose_name='Profile Picture'),
),
]
|
<commit_before><commit_msg>[AC-6187] Add migration for image field<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-12-17 21:05
from __future__ import unicode_literals
from django.db import migrations
import sorl.thumbnail.fields
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0028_change_profile_help_texts'),
]
operations = [
migrations.AlterField(
model_name='entrepreneurprofile',
name='image',
field=sorl.thumbnail.fields.ImageField(blank=True, help_text='Suggested size: <400px on the short side', upload_to='profile_pics', verbose_name='Profile Picture'),
),
migrations.AlterField(
model_name='expertprofile',
name='image',
field=sorl.thumbnail.fields.ImageField(blank=True, help_text='Suggested size: <400px on the short side', upload_to='profile_pics', verbose_name='Profile Picture'),
),
migrations.AlterField(
model_name='memberprofile',
name='image',
field=sorl.thumbnail.fields.ImageField(blank=True, help_text='Suggested size: <400px on the short side', upload_to='profile_pics', verbose_name='Profile Picture'),
),
]
|
[AC-6187] Add migration for image field# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-12-17 21:05
from __future__ import unicode_literals
from django.db import migrations
import sorl.thumbnail.fields
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0028_change_profile_help_texts'),
]
operations = [
migrations.AlterField(
model_name='entrepreneurprofile',
name='image',
field=sorl.thumbnail.fields.ImageField(blank=True, help_text='Suggested size: <400px on the short side', upload_to='profile_pics', verbose_name='Profile Picture'),
),
migrations.AlterField(
model_name='expertprofile',
name='image',
field=sorl.thumbnail.fields.ImageField(blank=True, help_text='Suggested size: <400px on the short side', upload_to='profile_pics', verbose_name='Profile Picture'),
),
migrations.AlterField(
model_name='memberprofile',
name='image',
field=sorl.thumbnail.fields.ImageField(blank=True, help_text='Suggested size: <400px on the short side', upload_to='profile_pics', verbose_name='Profile Picture'),
),
]
|
<commit_before><commit_msg>[AC-6187] Add migration for image field<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-12-17 21:05
from __future__ import unicode_literals
from django.db import migrations
import sorl.thumbnail.fields
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0028_change_profile_help_texts'),
]
operations = [
migrations.AlterField(
model_name='entrepreneurprofile',
name='image',
field=sorl.thumbnail.fields.ImageField(blank=True, help_text='Suggested size: <400px on the short side', upload_to='profile_pics', verbose_name='Profile Picture'),
),
migrations.AlterField(
model_name='expertprofile',
name='image',
field=sorl.thumbnail.fields.ImageField(blank=True, help_text='Suggested size: <400px on the short side', upload_to='profile_pics', verbose_name='Profile Picture'),
),
migrations.AlterField(
model_name='memberprofile',
name='image',
field=sorl.thumbnail.fields.ImageField(blank=True, help_text='Suggested size: <400px on the short side', upload_to='profile_pics', verbose_name='Profile Picture'),
),
]
|
|
6e278c7e621b0bf1aa580cac128d6698dece7c93
|
vida/vida/migrations/0011_personlocationhistory.py
|
vida/vida/migrations/0011_personlocationhistory.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
('vida', '0010_person_geom'),
]
operations = [
migrations.CreateModel(
name='PersonLocationHistory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('geom', django.contrib.gis.db.models.fields.PointField(default=b'POINT(0.0 0.0)', srid=4326)),
('start_date', models.DateTimeField(null=True)),
('stop_date', models.DateTimeField(null=True)),
('person', models.ForeignKey(to='vida.Person', on_delete=django.db.models.deletion.PROTECT)),
('shelter', models.ForeignKey(to='vida.Shelter', on_delete=django.db.models.deletion.PROTECT)),
],
),
]
|
Revert "Revert "Created the model and migration for person_location_history table""
|
Revert "Revert "Created the model and migration for person_location_history table""
This reverts commit baf53bd5b1fdcb0c1b39c60a2337d3f3d58cad95.
oops
|
Python
|
mit
|
ROGUE-JCTD/vida,ROGUE-JCTD/vida,ROGUE-JCTD/vida,ROGUE-JCTD/vida,ROGUE-JCTD/vida
|
Revert "Revert "Created the model and migration for person_location_history table""
This reverts commit baf53bd5b1fdcb0c1b39c60a2337d3f3d58cad95.
oops
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
('vida', '0010_person_geom'),
]
operations = [
migrations.CreateModel(
name='PersonLocationHistory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('geom', django.contrib.gis.db.models.fields.PointField(default=b'POINT(0.0 0.0)', srid=4326)),
('start_date', models.DateTimeField(null=True)),
('stop_date', models.DateTimeField(null=True)),
('person', models.ForeignKey(to='vida.Person', on_delete=django.db.models.deletion.PROTECT)),
('shelter', models.ForeignKey(to='vida.Shelter', on_delete=django.db.models.deletion.PROTECT)),
],
),
]
|
<commit_before><commit_msg>Revert "Revert "Created the model and migration for person_location_history table""
This reverts commit baf53bd5b1fdcb0c1b39c60a2337d3f3d58cad95.
oops<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
('vida', '0010_person_geom'),
]
operations = [
migrations.CreateModel(
name='PersonLocationHistory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('geom', django.contrib.gis.db.models.fields.PointField(default=b'POINT(0.0 0.0)', srid=4326)),
('start_date', models.DateTimeField(null=True)),
('stop_date', models.DateTimeField(null=True)),
('person', models.ForeignKey(to='vida.Person', on_delete=django.db.models.deletion.PROTECT)),
('shelter', models.ForeignKey(to='vida.Shelter', on_delete=django.db.models.deletion.PROTECT)),
],
),
]
|
Revert "Revert "Created the model and migration for person_location_history table""
This reverts commit baf53bd5b1fdcb0c1b39c60a2337d3f3d58cad95.
oops# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
('vida', '0010_person_geom'),
]
operations = [
migrations.CreateModel(
name='PersonLocationHistory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('geom', django.contrib.gis.db.models.fields.PointField(default=b'POINT(0.0 0.0)', srid=4326)),
('start_date', models.DateTimeField(null=True)),
('stop_date', models.DateTimeField(null=True)),
('person', models.ForeignKey(to='vida.Person', on_delete=django.db.models.deletion.PROTECT)),
('shelter', models.ForeignKey(to='vida.Shelter', on_delete=django.db.models.deletion.PROTECT)),
],
),
]
|
<commit_before><commit_msg>Revert "Revert "Created the model and migration for person_location_history table""
This reverts commit baf53bd5b1fdcb0c1b39c60a2337d3f3d58cad95.
oops<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
('vida', '0010_person_geom'),
]
operations = [
migrations.CreateModel(
name='PersonLocationHistory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('geom', django.contrib.gis.db.models.fields.PointField(default=b'POINT(0.0 0.0)', srid=4326)),
('start_date', models.DateTimeField(null=True)),
('stop_date', models.DateTimeField(null=True)),
('person', models.ForeignKey(to='vida.Person', on_delete=django.db.models.deletion.PROTECT)),
('shelter', models.ForeignKey(to='vida.Shelter', on_delete=django.db.models.deletion.PROTECT)),
],
),
]
|
|
68f6f74dd439eabaa80fc107adcd3dc04ebf477d
|
tests/test_now.py
|
tests/test_now.py
|
# -*- coding: utf-8 -*-
import pytest
from jinja2 import Environment
@pytest.fixture(scope='session')
def environment():
return Environment(extensions=['jinja2_time.TimeExtension'])
def test_foobar(environment):
assert environment
|
Create a new test module along with a session scoped env fixture
|
Create a new test module along with a session scoped env fixture
|
Python
|
mit
|
hackebrot/jinja2-time
|
Create a new test module along with a session scoped env fixture
|
# -*- coding: utf-8 -*-
import pytest
from jinja2 import Environment
@pytest.fixture(scope='session')
def environment():
return Environment(extensions=['jinja2_time.TimeExtension'])
def test_foobar(environment):
assert environment
|
<commit_before><commit_msg>Create a new test module along with a session scoped env fixture<commit_after>
|
# -*- coding: utf-8 -*-
import pytest
from jinja2 import Environment
@pytest.fixture(scope='session')
def environment():
return Environment(extensions=['jinja2_time.TimeExtension'])
def test_foobar(environment):
assert environment
|
Create a new test module along with a session scoped env fixture# -*- coding: utf-8 -*-
import pytest
from jinja2 import Environment
@pytest.fixture(scope='session')
def environment():
return Environment(extensions=['jinja2_time.TimeExtension'])
def test_foobar(environment):
assert environment
|
<commit_before><commit_msg>Create a new test module along with a session scoped env fixture<commit_after># -*- coding: utf-8 -*-
import pytest
from jinja2 import Environment
@pytest.fixture(scope='session')
def environment():
return Environment(extensions=['jinja2_time.TimeExtension'])
def test_foobar(environment):
assert environment
|
|
ba1a80d866748426a06785a2efaa4b6aad3c95a5
|
turing_machine.py
|
turing_machine.py
|
class State():
def __init__(self, ??):
self.end_of_tape = ??
self.write_false = ??
self.move_false = ??
self.write_true = ??
self.move_true = ??
class TuringMachine():
def __init__(self, n):
# ?? Magic that translates int n into a starting tape and a collection of states.
self.init_tape(??)
self.init_states(??)
def init_tape(self, ??):
self.tape = [None]
self.head = 0
# turn ?? into tape contents
def init_states(self, ??):
self.state = 0
self.states = State(??)
def current_input(self):
return self.tape[self.head]
def is_end_of_tape(self):
return self.current_input == None
def write(self, boolean):
# Check that this isn't the end of the tape, and that we are passed a proper bool.
self.tape[self.head] = boolean
def move(self, direction):
if direction == None:
pass
elif direction == False:
self.head -= 1
elif direction == True:
self.head += 1
else:
raise Exception
# If head is out of bounds, raise Exception
def state(self, state_index):
params = self.states[state_index]
if self.current_input() == None:
# Can't overwrite the end of the tape.
self.move(params.end_of_tape)
elif self.current_input() == False:
self.write(params.write_false)
self.move(params.move_false)
elif self.current_input() == True:
self.write(params.write_true)
self.move(params.move_true)
else:
raise Exception
# ?? goto another state or halt
|
Add Turing machine class before integer-to-TM conversion.
|
Add Turing machine class before integer-to-TM conversion.
|
Python
|
mit
|
alexaltair/solomonoff-induction
|
Add Turing machine class before integer-to-TM conversion.
|
class State():
def __init__(self, ??):
self.end_of_tape = ??
self.write_false = ??
self.move_false = ??
self.write_true = ??
self.move_true = ??
class TuringMachine():
def __init__(self, n):
# ?? Magic that translates int n into a starting tape and a collection of states.
self.init_tape(??)
self.init_states(??)
def init_tape(self, ??):
self.tape = [None]
self.head = 0
# turn ?? into tape contents
def init_states(self, ??):
self.state = 0
self.states = State(??)
def current_input(self):
return self.tape[self.head]
def is_end_of_tape(self):
return self.current_input == None
def write(self, boolean):
# Check that this isn't the end of the tape, and that we are passed a proper bool.
self.tape[self.head] = boolean
def move(self, direction):
if direction == None:
pass
elif direction == False:
self.head -= 1
elif direction == True:
self.head += 1
else:
raise Exception
# If head is out of bounds, raise Exception
def state(self, state_index):
params = self.states[state_index]
if self.current_input() == None:
# Can't overwrite the end of the tape.
self.move(params.end_of_tape)
elif self.current_input() == False:
self.write(params.write_false)
self.move(params.move_false)
elif self.current_input() == True:
self.write(params.write_true)
self.move(params.move_true)
else:
raise Exception
# ?? goto another state or halt
|
<commit_before><commit_msg>Add Turing machine class before integer-to-TM conversion.<commit_after>
|
class State():
def __init__(self, ??):
self.end_of_tape = ??
self.write_false = ??
self.move_false = ??
self.write_true = ??
self.move_true = ??
class TuringMachine():
def __init__(self, n):
# ?? Magic that translates int n into a starting tape and a collection of states.
self.init_tape(??)
self.init_states(??)
def init_tape(self, ??):
self.tape = [None]
self.head = 0
# turn ?? into tape contents
def init_states(self, ??):
self.state = 0
self.states = State(??)
def current_input(self):
return self.tape[self.head]
def is_end_of_tape(self):
return self.current_input == None
def write(self, boolean):
# Check that this isn't the end of the tape, and that we are passed a proper bool.
self.tape[self.head] = boolean
def move(self, direction):
if direction == None:
pass
elif direction == False:
self.head -= 1
elif direction == True:
self.head += 1
else:
raise Exception
# If head is out of bounds, raise Exception
def state(self, state_index):
params = self.states[state_index]
if self.current_input() == None:
# Can't overwrite the end of the tape.
self.move(params.end_of_tape)
elif self.current_input() == False:
self.write(params.write_false)
self.move(params.move_false)
elif self.current_input() == True:
self.write(params.write_true)
self.move(params.move_true)
else:
raise Exception
# ?? goto another state or halt
|
Add Turing machine class before integer-to-TM conversion.class State():
def __init__(self, ??):
self.end_of_tape = ??
self.write_false = ??
self.move_false = ??
self.write_true = ??
self.move_true = ??
class TuringMachine():
def __init__(self, n):
# ?? Magic that translates int n into a starting tape and a collection of states.
self.init_tape(??)
self.init_states(??)
def init_tape(self, ??):
self.tape = [None]
self.head = 0
# turn ?? into tape contents
def init_states(self, ??):
self.state = 0
self.states = State(??)
def current_input(self):
return self.tape[self.head]
def is_end_of_tape(self):
return self.current_input == None
def write(self, boolean):
# Check that this isn't the end of the tape, and that we are passed a proper bool.
self.tape[self.head] = boolean
def move(self, direction):
if direction == None:
pass
elif direction == False:
self.head -= 1
elif direction == True:
self.head += 1
else:
raise Exception
# If head is out of bounds, raise Exception
def state(self, state_index):
params = self.states[state_index]
if self.current_input() == None:
# Can't overwrite the end of the tape.
self.move(params.end_of_tape)
elif self.current_input() == False:
self.write(params.write_false)
self.move(params.move_false)
elif self.current_input() == True:
self.write(params.write_true)
self.move(params.move_true)
else:
raise Exception
# ?? goto another state or halt
|
<commit_before><commit_msg>Add Turing machine class before integer-to-TM conversion.<commit_after>class State():
def __init__(self, ??):
self.end_of_tape = ??
self.write_false = ??
self.move_false = ??
self.write_true = ??
self.move_true = ??
class TuringMachine():
def __init__(self, n):
# ?? Magic that translates int n into a starting tape and a collection of states.
self.init_tape(??)
self.init_states(??)
def init_tape(self, ??):
self.tape = [None]
self.head = 0
# turn ?? into tape contents
def init_states(self, ??):
self.state = 0
self.states = State(??)
def current_input(self):
return self.tape[self.head]
def is_end_of_tape(self):
return self.current_input == None
def write(self, boolean):
# Check that this isn't the end of the tape, and that we are passed a proper bool.
self.tape[self.head] = boolean
def move(self, direction):
if direction == None:
pass
elif direction == False:
self.head -= 1
elif direction == True:
self.head += 1
else:
raise Exception
# If head is out of bounds, raise Exception
def state(self, state_index):
params = self.states[state_index]
if self.current_input() == None:
# Can't overwrite the end of the tape.
self.move(params.end_of_tape)
elif self.current_input() == False:
self.write(params.write_false)
self.move(params.move_false)
elif self.current_input() == True:
self.write(params.write_true)
self.move(params.move_true)
else:
raise Exception
# ?? goto another state or halt
|
|
a2a7197834dab6cbdd1d402f10382a6a3514af8f
|
dev/scripts/inject_InChI.py
|
dev/scripts/inject_InChI.py
|
import CoolProp
from chemspipy import ChemSpider
from chemspipy_key import key # private file with the key (DO NOT COMMIT!!)
import glob, json
cs = ChemSpider(key)
# Map from name to Chemspider ID
backup_map = {
'Propyne': 6095,
'R236EA': 71342,
'R245ca': 62827,
'trans-2-Butene': 56442,
'Oxygen': 952,
'Fluorine': 22932,
'Hydrogen': 762,
'Deuterium': 22931,
'HFE143m': 66577
}
# Make sure the key works
c = cs.get_compound(2157)
assert(c.inchikey == 'BSYNRYMUTXBXSQ-UHFFFAOYAW')
for fname in glob.glob('../fluids/*.json'):
with open(fname,'r') as fp:
jj = json.load(fp)
fluid = jj['INFO']['NAME']
def doset(result):
jj['INFO']['INCHI_STRING'] = result.inchi
jj['INFO']['INCHI_KEY'] = result.inchikey
jj['INFO']['CHEMSPIDER_ID'] = result.csid
jj['INFO']['2DPNG_URL'] = result.image_url
jj['INFO']['SMILES'] = result.smiles
CAS = CoolProp.CoolProp.get_fluid_param_string(fluid, "CAS")
if '.' not in CAS:
results = cs.search(CAS)
results.wait()
if len(results) == 1:
doset(results[0])
elif fluid in backup_map:
results = cs.search(backup_map[fluid])
results.wait()
assert(len(results) == 1)
doset(results[0])
else:
print fluid, CAS, '!!failure!!', len(results)
for result in results:
spectra = cs.get_compound_spectra(result.csid)
if spectra and '##CAS REGISTRY NO='+CAS in spectra[0].data:
doset(result)
print ('GOT IT!!')
break
print result.common_name, result.inchikey, result.stdinchi, cs.get_extended_compound_info(result.csid)
print ''
with open(fname,'w') as fp:
json.dump(jj, fp, indent = 2, sort_keys = True)
del jj, fp
|
Add script for injecting InChI keys and string (and others)
|
Add script for injecting InChI keys and string (and others)
|
Python
|
mit
|
JonWel/CoolProp,CoolProp/CoolProp,CoolProp/CoolProp,dcprojects/CoolProp,JonWel/CoolProp,DANA-Laboratory/CoolProp,JonWel/CoolProp,JonWel/CoolProp,CoolProp/CoolProp,DANA-Laboratory/CoolProp,henningjp/CoolProp,CoolProp/CoolProp,DANA-Laboratory/CoolProp,JonWel/CoolProp,dcprojects/CoolProp,henningjp/CoolProp,JonWel/CoolProp,dcprojects/CoolProp,DANA-Laboratory/CoolProp,CoolProp/CoolProp,DANA-Laboratory/CoolProp,henningjp/CoolProp,henningjp/CoolProp,henningjp/CoolProp,dcprojects/CoolProp,dcprojects/CoolProp,CoolProp/CoolProp,JonWel/CoolProp,dcprojects/CoolProp,DANA-Laboratory/CoolProp,CoolProp/CoolProp,henningjp/CoolProp,henningjp/CoolProp,DANA-Laboratory/CoolProp,CoolProp/CoolProp,henningjp/CoolProp,dcprojects/CoolProp
|
Add script for injecting InChI keys and string (and others)
|
import CoolProp
from chemspipy import ChemSpider
from chemspipy_key import key # private file with the key (DO NOT COMMIT!!)
import glob, json
cs = ChemSpider(key)
# Map from name to Chemspider ID
backup_map = {
'Propyne': 6095,
'R236EA': 71342,
'R245ca': 62827,
'trans-2-Butene': 56442,
'Oxygen': 952,
'Fluorine': 22932,
'Hydrogen': 762,
'Deuterium': 22931,
'HFE143m': 66577
}
# Make sure the key works
c = cs.get_compound(2157)
assert(c.inchikey == 'BSYNRYMUTXBXSQ-UHFFFAOYAW')
for fname in glob.glob('../fluids/*.json'):
with open(fname,'r') as fp:
jj = json.load(fp)
fluid = jj['INFO']['NAME']
def doset(result):
jj['INFO']['INCHI_STRING'] = result.inchi
jj['INFO']['INCHI_KEY'] = result.inchikey
jj['INFO']['CHEMSPIDER_ID'] = result.csid
jj['INFO']['2DPNG_URL'] = result.image_url
jj['INFO']['SMILES'] = result.smiles
CAS = CoolProp.CoolProp.get_fluid_param_string(fluid, "CAS")
if '.' not in CAS:
results = cs.search(CAS)
results.wait()
if len(results) == 1:
doset(results[0])
elif fluid in backup_map:
results = cs.search(backup_map[fluid])
results.wait()
assert(len(results) == 1)
doset(results[0])
else:
print fluid, CAS, '!!failure!!', len(results)
for result in results:
spectra = cs.get_compound_spectra(result.csid)
if spectra and '##CAS REGISTRY NO='+CAS in spectra[0].data:
doset(result)
print ('GOT IT!!')
break
print result.common_name, result.inchikey, result.stdinchi, cs.get_extended_compound_info(result.csid)
print ''
with open(fname,'w') as fp:
json.dump(jj, fp, indent = 2, sort_keys = True)
del jj, fp
|
<commit_before><commit_msg>Add script for injecting InChI keys and string (and others)<commit_after>
|
import CoolProp
from chemspipy import ChemSpider
from chemspipy_key import key # private file with the key (DO NOT COMMIT!!)
import glob, json
cs = ChemSpider(key)
# Map from name to Chemspider ID
backup_map = {
'Propyne': 6095,
'R236EA': 71342,
'R245ca': 62827,
'trans-2-Butene': 56442,
'Oxygen': 952,
'Fluorine': 22932,
'Hydrogen': 762,
'Deuterium': 22931,
'HFE143m': 66577
}
# Make sure the key works
c = cs.get_compound(2157)
assert(c.inchikey == 'BSYNRYMUTXBXSQ-UHFFFAOYAW')
for fname in glob.glob('../fluids/*.json'):
with open(fname,'r') as fp:
jj = json.load(fp)
fluid = jj['INFO']['NAME']
def doset(result):
jj['INFO']['INCHI_STRING'] = result.inchi
jj['INFO']['INCHI_KEY'] = result.inchikey
jj['INFO']['CHEMSPIDER_ID'] = result.csid
jj['INFO']['2DPNG_URL'] = result.image_url
jj['INFO']['SMILES'] = result.smiles
CAS = CoolProp.CoolProp.get_fluid_param_string(fluid, "CAS")
if '.' not in CAS:
results = cs.search(CAS)
results.wait()
if len(results) == 1:
doset(results[0])
elif fluid in backup_map:
results = cs.search(backup_map[fluid])
results.wait()
assert(len(results) == 1)
doset(results[0])
else:
print fluid, CAS, '!!failure!!', len(results)
for result in results:
spectra = cs.get_compound_spectra(result.csid)
if spectra and '##CAS REGISTRY NO='+CAS in spectra[0].data:
doset(result)
print ('GOT IT!!')
break
print result.common_name, result.inchikey, result.stdinchi, cs.get_extended_compound_info(result.csid)
print ''
with open(fname,'w') as fp:
json.dump(jj, fp, indent = 2, sort_keys = True)
del jj, fp
|
Add script for injecting InChI keys and string (and others)import CoolProp
from chemspipy import ChemSpider
from chemspipy_key import key # private file with the key (DO NOT COMMIT!!)
import glob, json
cs = ChemSpider(key)
# Map from name to Chemspider ID
backup_map = {
'Propyne': 6095,
'R236EA': 71342,
'R245ca': 62827,
'trans-2-Butene': 56442,
'Oxygen': 952,
'Fluorine': 22932,
'Hydrogen': 762,
'Deuterium': 22931,
'HFE143m': 66577
}
# Make sure the key works
c = cs.get_compound(2157)
assert(c.inchikey == 'BSYNRYMUTXBXSQ-UHFFFAOYAW')
for fname in glob.glob('../fluids/*.json'):
with open(fname,'r') as fp:
jj = json.load(fp)
fluid = jj['INFO']['NAME']
def doset(result):
jj['INFO']['INCHI_STRING'] = result.inchi
jj['INFO']['INCHI_KEY'] = result.inchikey
jj['INFO']['CHEMSPIDER_ID'] = result.csid
jj['INFO']['2DPNG_URL'] = result.image_url
jj['INFO']['SMILES'] = result.smiles
CAS = CoolProp.CoolProp.get_fluid_param_string(fluid, "CAS")
if '.' not in CAS:
results = cs.search(CAS)
results.wait()
if len(results) == 1:
doset(results[0])
elif fluid in backup_map:
results = cs.search(backup_map[fluid])
results.wait()
assert(len(results) == 1)
doset(results[0])
else:
print fluid, CAS, '!!failure!!', len(results)
for result in results:
spectra = cs.get_compound_spectra(result.csid)
if spectra and '##CAS REGISTRY NO='+CAS in spectra[0].data:
doset(result)
print ('GOT IT!!')
break
print result.common_name, result.inchikey, result.stdinchi, cs.get_extended_compound_info(result.csid)
print ''
with open(fname,'w') as fp:
json.dump(jj, fp, indent = 2, sort_keys = True)
del jj, fp
|
<commit_before><commit_msg>Add script for injecting InChI keys and string (and others)<commit_after>import CoolProp
from chemspipy import ChemSpider
from chemspipy_key import key # private file with the key (DO NOT COMMIT!!)
import glob, json
cs = ChemSpider(key)
# Map from name to Chemspider ID
backup_map = {
'Propyne': 6095,
'R236EA': 71342,
'R245ca': 62827,
'trans-2-Butene': 56442,
'Oxygen': 952,
'Fluorine': 22932,
'Hydrogen': 762,
'Deuterium': 22931,
'HFE143m': 66577
}
# Make sure the key works
c = cs.get_compound(2157)
assert(c.inchikey == 'BSYNRYMUTXBXSQ-UHFFFAOYAW')
for fname in glob.glob('../fluids/*.json'):
with open(fname,'r') as fp:
jj = json.load(fp)
fluid = jj['INFO']['NAME']
def doset(result):
jj['INFO']['INCHI_STRING'] = result.inchi
jj['INFO']['INCHI_KEY'] = result.inchikey
jj['INFO']['CHEMSPIDER_ID'] = result.csid
jj['INFO']['2DPNG_URL'] = result.image_url
jj['INFO']['SMILES'] = result.smiles
CAS = CoolProp.CoolProp.get_fluid_param_string(fluid, "CAS")
if '.' not in CAS:
results = cs.search(CAS)
results.wait()
if len(results) == 1:
doset(results[0])
elif fluid in backup_map:
results = cs.search(backup_map[fluid])
results.wait()
assert(len(results) == 1)
doset(results[0])
else:
print fluid, CAS, '!!failure!!', len(results)
for result in results:
spectra = cs.get_compound_spectra(result.csid)
if spectra and '##CAS REGISTRY NO='+CAS in spectra[0].data:
doset(result)
print ('GOT IT!!')
break
print result.common_name, result.inchikey, result.stdinchi, cs.get_extended_compound_info(result.csid)
print ''
with open(fname,'w') as fp:
json.dump(jj, fp, indent = 2, sort_keys = True)
del jj, fp
|
|
1652d1c425ae85a356fe78f71d3942351b983f58
|
altair/vegalite/v2/examples/scatter_linked_brush.py
|
altair/vegalite/v2/examples/scatter_linked_brush.py
|
"""
Faceted Scatter Plot with Linked Brushing
-----------------------------------------
This is an example of using an interval selection to control the color of
points across multiple facets.
"""
import altair as alt
from vega_datasets import data
cars = data.cars()
brush = alt.selection(type='interval', resolve='global')
base = alt.Chart(cars).mark_point().encode(
y='Miles_per_Gallon',
color=brush.condition('Origin',
alt.ColorValue('gray'))
).properties(
selection=brush,
width=300,
height=300
)
chart = base.encode(x='Horsepower') | base.encode(x='Acceleration')
|
Add scatter linked-brush selection example
|
Add scatter linked-brush selection example
|
Python
|
bsd-3-clause
|
ellisonbg/altair,altair-viz/altair,jakevdp/altair
|
Add scatter linked-brush selection example
|
"""
Faceted Scatter Plot with Linked Brushing
-----------------------------------------
This is an example of using an interval selection to control the color of
points across multiple facets.
"""
import altair as alt
from vega_datasets import data
cars = data.cars()
brush = alt.selection(type='interval', resolve='global')
base = alt.Chart(cars).mark_point().encode(
y='Miles_per_Gallon',
color=brush.condition('Origin',
alt.ColorValue('gray'))
).properties(
selection=brush,
width=300,
height=300
)
chart = base.encode(x='Horsepower') | base.encode(x='Acceleration')
|
<commit_before><commit_msg>Add scatter linked-brush selection example<commit_after>
|
"""
Faceted Scatter Plot with Linked Brushing
-----------------------------------------
This is an example of using an interval selection to control the color of
points across multiple facets.
"""
import altair as alt
from vega_datasets import data
cars = data.cars()
brush = alt.selection(type='interval', resolve='global')
base = alt.Chart(cars).mark_point().encode(
y='Miles_per_Gallon',
color=brush.condition('Origin',
alt.ColorValue('gray'))
).properties(
selection=brush,
width=300,
height=300
)
chart = base.encode(x='Horsepower') | base.encode(x='Acceleration')
|
Add scatter linked-brush selection example"""
Faceted Scatter Plot with Linked Brushing
-----------------------------------------
This is an example of using an interval selection to control the color of
points across multiple facets.
"""
import altair as alt
from vega_datasets import data
cars = data.cars()
brush = alt.selection(type='interval', resolve='global')
base = alt.Chart(cars).mark_point().encode(
y='Miles_per_Gallon',
color=brush.condition('Origin',
alt.ColorValue('gray'))
).properties(
selection=brush,
width=300,
height=300
)
chart = base.encode(x='Horsepower') | base.encode(x='Acceleration')
|
<commit_before><commit_msg>Add scatter linked-brush selection example<commit_after>"""
Faceted Scatter Plot with Linked Brushing
-----------------------------------------
This is an example of using an interval selection to control the color of
points across multiple facets.
"""
import altair as alt
from vega_datasets import data
cars = data.cars()
brush = alt.selection(type='interval', resolve='global')
base = alt.Chart(cars).mark_point().encode(
y='Miles_per_Gallon',
color=brush.condition('Origin',
alt.ColorValue('gray'))
).properties(
selection=brush,
width=300,
height=300
)
chart = base.encode(x='Horsepower') | base.encode(x='Acceleration')
|
|
12281dbedc0bd43ef4b85b936cb662f5092d8a72
|
cum60.py
|
cum60.py
|
#!/usr/bin/env python3
import json
import zmq
import zconfig
import zutils
def main():
logger = zutils.getLogger(__name__)
ctx = zmq.Context()
subsock = ctx.socket(zmq.SUB)
subsock.connect("tcp://{}:{}".format(zconfig.IP_ADDR,
zconfig.PROXY_PUB_PORT))
subsock.setsockopt(zmq.SUBSCRIBE, b"raw")
logger.info("Started cum60 service")
while True:
try:
[topic, message] = subsock.recv_multipart()
topic = topic.decode()
message = json.loads(message.decode())
except zmq.ZMQError as e:
if e.errno == zmq.ETERM:
break
else:
raise
except KeyboardInterrupt:
logger.info("Recevied interrupt to stop cum60 service")
break
if __name__ == "__main__":
main()
|
Add cumulative 60 module (L2)
|
Add cumulative 60 module (L2)
|
Python
|
mit
|
mangalaman93/pstickle
|
Add cumulative 60 module (L2)
|
#!/usr/bin/env python3
import json
import zmq
import zconfig
import zutils
def main():
logger = zutils.getLogger(__name__)
ctx = zmq.Context()
subsock = ctx.socket(zmq.SUB)
subsock.connect("tcp://{}:{}".format(zconfig.IP_ADDR,
zconfig.PROXY_PUB_PORT))
subsock.setsockopt(zmq.SUBSCRIBE, b"raw")
logger.info("Started cum60 service")
while True:
try:
[topic, message] = subsock.recv_multipart()
topic = topic.decode()
message = json.loads(message.decode())
except zmq.ZMQError as e:
if e.errno == zmq.ETERM:
break
else:
raise
except KeyboardInterrupt:
logger.info("Recevied interrupt to stop cum60 service")
break
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add cumulative 60 module (L2)<commit_after>
|
#!/usr/bin/env python3
import json
import zmq
import zconfig
import zutils
def main():
logger = zutils.getLogger(__name__)
ctx = zmq.Context()
subsock = ctx.socket(zmq.SUB)
subsock.connect("tcp://{}:{}".format(zconfig.IP_ADDR,
zconfig.PROXY_PUB_PORT))
subsock.setsockopt(zmq.SUBSCRIBE, b"raw")
logger.info("Started cum60 service")
while True:
try:
[topic, message] = subsock.recv_multipart()
topic = topic.decode()
message = json.loads(message.decode())
except zmq.ZMQError as e:
if e.errno == zmq.ETERM:
break
else:
raise
except KeyboardInterrupt:
logger.info("Recevied interrupt to stop cum60 service")
break
if __name__ == "__main__":
main()
|
Add cumulative 60 module (L2)#!/usr/bin/env python3
import json
import zmq
import zconfig
import zutils
def main():
logger = zutils.getLogger(__name__)
ctx = zmq.Context()
subsock = ctx.socket(zmq.SUB)
subsock.connect("tcp://{}:{}".format(zconfig.IP_ADDR,
zconfig.PROXY_PUB_PORT))
subsock.setsockopt(zmq.SUBSCRIBE, b"raw")
logger.info("Started cum60 service")
while True:
try:
[topic, message] = subsock.recv_multipart()
topic = topic.decode()
message = json.loads(message.decode())
except zmq.ZMQError as e:
if e.errno == zmq.ETERM:
break
else:
raise
except KeyboardInterrupt:
logger.info("Recevied interrupt to stop cum60 service")
break
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add cumulative 60 module (L2)<commit_after>#!/usr/bin/env python3
import json
import zmq
import zconfig
import zutils
def main():
logger = zutils.getLogger(__name__)
ctx = zmq.Context()
subsock = ctx.socket(zmq.SUB)
subsock.connect("tcp://{}:{}".format(zconfig.IP_ADDR,
zconfig.PROXY_PUB_PORT))
subsock.setsockopt(zmq.SUBSCRIBE, b"raw")
logger.info("Started cum60 service")
while True:
try:
[topic, message] = subsock.recv_multipart()
topic = topic.decode()
message = json.loads(message.decode())
except zmq.ZMQError as e:
if e.errno == zmq.ETERM:
break
else:
raise
except KeyboardInterrupt:
logger.info("Recevied interrupt to stop cum60 service")
break
if __name__ == "__main__":
main()
|
|
aace23c5cd0d420f05affab431c71ff9b384154f
|
jobmon/test/test_command_server.py
|
jobmon/test/test_command_server.py
|
import os
import select
import socket
import time
import unittest
from jobmon.protocol import *
from jobmon import command_server, protocol, transport
PORT = 9999
class CommandServerRecorder:
def __init__(self):
self.commands = []
def start_job(self, job):
self.commands.append(('start', job))
return protocol.SuccessResponse(job)
def stop_job(self, job):
self.commands.append(('stop', job))
return protocol.SuccessResponse(job)
def get_status(self, job):
self.commands.append(('status', job))
return protocol.StatusResponse(job, True)
def list_jobs(self):
self.commands.append('list')
return protocol.JobListResponse({'a': True, 'b': False})
def terminate(self):
self.commands.append('terminate')
class TestCommandServer(unittest.TestCase):
def test_command_server(self):
command_recorder = CommandServerRecorder()
command_svr = command_server.CommandServer(PORT, command_recorder)
command_svr.start()
command_pipe = transport.CommandPipe(PORT)
try:
responses = [
None,
None,
True,
{
'a': True,
'b': False,
},
None
]
real_responses = [
command_pipe.start_job('some_job'),
command_pipe.stop_job('some_job'),
command_pipe.is_running('some_job'),
command_pipe.get_jobs(),
command_pipe.terminate(),
]
time.sleep(5) # Give the server time to call our terminate method
# and record it
self.assertEqual(real_responses, responses)
self.assertEqual(command_recorder.commands,
[('start', 'some_job'),
('stop', 'some_job'),
('status', 'some_job'),
'list',
'terminate'])
finally:
command_svr.terminate()
command_pipe.destroy()
|
Write tests for the command server
|
Write tests for the command server
|
Python
|
bsd-2-clause
|
adamnew123456/jobmon
|
Write tests for the command server
|
import os
import select
import socket
import time
import unittest
from jobmon.protocol import *
from jobmon import command_server, protocol, transport
PORT = 9999
class CommandServerRecorder:
def __init__(self):
self.commands = []
def start_job(self, job):
self.commands.append(('start', job))
return protocol.SuccessResponse(job)
def stop_job(self, job):
self.commands.append(('stop', job))
return protocol.SuccessResponse(job)
def get_status(self, job):
self.commands.append(('status', job))
return protocol.StatusResponse(job, True)
def list_jobs(self):
self.commands.append('list')
return protocol.JobListResponse({'a': True, 'b': False})
def terminate(self):
self.commands.append('terminate')
class TestCommandServer(unittest.TestCase):
def test_command_server(self):
command_recorder = CommandServerRecorder()
command_svr = command_server.CommandServer(PORT, command_recorder)
command_svr.start()
command_pipe = transport.CommandPipe(PORT)
try:
responses = [
None,
None,
True,
{
'a': True,
'b': False,
},
None
]
real_responses = [
command_pipe.start_job('some_job'),
command_pipe.stop_job('some_job'),
command_pipe.is_running('some_job'),
command_pipe.get_jobs(),
command_pipe.terminate(),
]
time.sleep(5) # Give the server time to call our terminate method
# and record it
self.assertEqual(real_responses, responses)
self.assertEqual(command_recorder.commands,
[('start', 'some_job'),
('stop', 'some_job'),
('status', 'some_job'),
'list',
'terminate'])
finally:
command_svr.terminate()
command_pipe.destroy()
|
<commit_before><commit_msg>Write tests for the command server<commit_after>
|
import os
import select
import socket
import time
import unittest
from jobmon.protocol import *
from jobmon import command_server, protocol, transport
PORT = 9999
class CommandServerRecorder:
def __init__(self):
self.commands = []
def start_job(self, job):
self.commands.append(('start', job))
return protocol.SuccessResponse(job)
def stop_job(self, job):
self.commands.append(('stop', job))
return protocol.SuccessResponse(job)
def get_status(self, job):
self.commands.append(('status', job))
return protocol.StatusResponse(job, True)
def list_jobs(self):
self.commands.append('list')
return protocol.JobListResponse({'a': True, 'b': False})
def terminate(self):
self.commands.append('terminate')
class TestCommandServer(unittest.TestCase):
def test_command_server(self):
command_recorder = CommandServerRecorder()
command_svr = command_server.CommandServer(PORT, command_recorder)
command_svr.start()
command_pipe = transport.CommandPipe(PORT)
try:
responses = [
None,
None,
True,
{
'a': True,
'b': False,
},
None
]
real_responses = [
command_pipe.start_job('some_job'),
command_pipe.stop_job('some_job'),
command_pipe.is_running('some_job'),
command_pipe.get_jobs(),
command_pipe.terminate(),
]
time.sleep(5) # Give the server time to call our terminate method
# and record it
self.assertEqual(real_responses, responses)
self.assertEqual(command_recorder.commands,
[('start', 'some_job'),
('stop', 'some_job'),
('status', 'some_job'),
'list',
'terminate'])
finally:
command_svr.terminate()
command_pipe.destroy()
|
Write tests for the command serverimport os
import select
import socket
import time
import unittest
from jobmon.protocol import *
from jobmon import command_server, protocol, transport
PORT = 9999
class CommandServerRecorder:
def __init__(self):
self.commands = []
def start_job(self, job):
self.commands.append(('start', job))
return protocol.SuccessResponse(job)
def stop_job(self, job):
self.commands.append(('stop', job))
return protocol.SuccessResponse(job)
def get_status(self, job):
self.commands.append(('status', job))
return protocol.StatusResponse(job, True)
def list_jobs(self):
self.commands.append('list')
return protocol.JobListResponse({'a': True, 'b': False})
def terminate(self):
self.commands.append('terminate')
class TestCommandServer(unittest.TestCase):
def test_command_server(self):
command_recorder = CommandServerRecorder()
command_svr = command_server.CommandServer(PORT, command_recorder)
command_svr.start()
command_pipe = transport.CommandPipe(PORT)
try:
responses = [
None,
None,
True,
{
'a': True,
'b': False,
},
None
]
real_responses = [
command_pipe.start_job('some_job'),
command_pipe.stop_job('some_job'),
command_pipe.is_running('some_job'),
command_pipe.get_jobs(),
command_pipe.terminate(),
]
time.sleep(5) # Give the server time to call our terminate method
# and record it
self.assertEqual(real_responses, responses)
self.assertEqual(command_recorder.commands,
[('start', 'some_job'),
('stop', 'some_job'),
('status', 'some_job'),
'list',
'terminate'])
finally:
command_svr.terminate()
command_pipe.destroy()
|
<commit_before><commit_msg>Write tests for the command server<commit_after>import os
import select
import socket
import time
import unittest
from jobmon.protocol import *
from jobmon import command_server, protocol, transport
PORT = 9999
class CommandServerRecorder:
def __init__(self):
self.commands = []
def start_job(self, job):
self.commands.append(('start', job))
return protocol.SuccessResponse(job)
def stop_job(self, job):
self.commands.append(('stop', job))
return protocol.SuccessResponse(job)
def get_status(self, job):
self.commands.append(('status', job))
return protocol.StatusResponse(job, True)
def list_jobs(self):
self.commands.append('list')
return protocol.JobListResponse({'a': True, 'b': False})
def terminate(self):
self.commands.append('terminate')
class TestCommandServer(unittest.TestCase):
def test_command_server(self):
command_recorder = CommandServerRecorder()
command_svr = command_server.CommandServer(PORT, command_recorder)
command_svr.start()
command_pipe = transport.CommandPipe(PORT)
try:
responses = [
None,
None,
True,
{
'a': True,
'b': False,
},
None
]
real_responses = [
command_pipe.start_job('some_job'),
command_pipe.stop_job('some_job'),
command_pipe.is_running('some_job'),
command_pipe.get_jobs(),
command_pipe.terminate(),
]
time.sleep(5) # Give the server time to call our terminate method
# and record it
self.assertEqual(real_responses, responses)
self.assertEqual(command_recorder.commands,
[('start', 'some_job'),
('stop', 'some_job'),
('status', 'some_job'),
'list',
'terminate'])
finally:
command_svr.terminate()
command_pipe.destroy()
|
|
d4f6de5bf5fc7ccf2d50d90f7e3b9b43b2715aff
|
python/misc/toutv-rename.py
|
python/misc/toutv-rename.py
|
#!/usr/bin/env python
# coding=utf8
from enum import Enum
import os
import os.path
import re
import sys
def main():
VideoTypes = Enum('VideoType', 'emission film miniserie')
filename_chars = 'àÀâÂçÇéÉèÈêÊëîÎôÔ\w\-\'\.\(\)'
pattern = re.compile('([{0}]+)\.(S([\d]+)E[\d]+)\.([{0}]+)\.[\d]+kbps\.ts'.format(filename_chars))
# pattern = re.compile('([{0}]+)\.(S[\d]+E[\d]+)\.([{0}]+)'.format(filename_chars))
files_to_rename = {}
video_type = None
def parse_filename(filename):
nonlocal video_type
match = pattern.search(filename)
if match:
show = match.group(1).replace('.', ' ')
episode = match.group(2)
season = match.group(3)
title = match.group(4).replace('.', ' ')
if show.lower() == title.lower():
video_type = VideoTypes.film
if (len(season) == 4) and (video_type != VideoTypes.film):
while True:
response = input('Is this a miniseries? (y/n) ')
if response.lower() == 'y':
video_type = VideoTypes.miniserie
break
elif response.lower() == 'n':
video_type = VideoTypes.film
break
if video_type == VideoTypes.miniserie:
episode = 'Partie {0}'.format(episode[episode.find('E') + 1:])
return show, episode, season, title
else:
sys.exit('ERROR: Unrecognized character in {0}\n'.format(filename))
for filename in sorted(os.listdir(os.getcwd())):
if not filename.endswith('.ts'):
continue
show, episode, season, title = parse_filename(filename)
if video_type == VideoTypes.film:
renamed_filename = '{} ({}).mp4'.format(title, season)
else:
renamed_filename = '{} - {} - {}.mp4'.format(show, episode, title)
print(filename)
print('\t{}'.format(renamed_filename))
files_to_rename[filename] = renamed_filename
response = input('Rename files? (y/n) ')
if response == 'y':
for filename in files_to_rename:
os.rename(
os.path.join(
os.getcwd(),
filename
),
os.path.join(
os.getcwd(),
files_to_rename[filename]
)
)
if __name__ == '__main__':
main()
|
Add random script for renaming toutv files
|
Add random script for renaming toutv files
|
Python
|
mit
|
bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile
|
Add random script for renaming toutv files
|
#!/usr/bin/env python
# coding=utf8
from enum import Enum
import os
import os.path
import re
import sys
def main():
VideoTypes = Enum('VideoType', 'emission film miniserie')
filename_chars = 'àÀâÂçÇéÉèÈêÊëîÎôÔ\w\-\'\.\(\)'
pattern = re.compile('([{0}]+)\.(S([\d]+)E[\d]+)\.([{0}]+)\.[\d]+kbps\.ts'.format(filename_chars))
# pattern = re.compile('([{0}]+)\.(S[\d]+E[\d]+)\.([{0}]+)'.format(filename_chars))
files_to_rename = {}
video_type = None
def parse_filename(filename):
nonlocal video_type
match = pattern.search(filename)
if match:
show = match.group(1).replace('.', ' ')
episode = match.group(2)
season = match.group(3)
title = match.group(4).replace('.', ' ')
if show.lower() == title.lower():
video_type = VideoTypes.film
if (len(season) == 4) and (video_type != VideoTypes.film):
while True:
response = input('Is this a miniseries? (y/n) ')
if response.lower() == 'y':
video_type = VideoTypes.miniserie
break
elif response.lower() == 'n':
video_type = VideoTypes.film
break
if video_type == VideoTypes.miniserie:
episode = 'Partie {0}'.format(episode[episode.find('E') + 1:])
return show, episode, season, title
else:
sys.exit('ERROR: Unrecognized character in {0}\n'.format(filename))
for filename in sorted(os.listdir(os.getcwd())):
if not filename.endswith('.ts'):
continue
show, episode, season, title = parse_filename(filename)
if video_type == VideoTypes.film:
renamed_filename = '{} ({}).mp4'.format(title, season)
else:
renamed_filename = '{} - {} - {}.mp4'.format(show, episode, title)
print(filename)
print('\t{}'.format(renamed_filename))
files_to_rename[filename] = renamed_filename
response = input('Rename files? (y/n) ')
if response == 'y':
for filename in files_to_rename:
os.rename(
os.path.join(
os.getcwd(),
filename
),
os.path.join(
os.getcwd(),
files_to_rename[filename]
)
)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add random script for renaming toutv files<commit_after>
|
#!/usr/bin/env python
# coding=utf8
from enum import Enum
import os
import os.path
import re
import sys
def main():
VideoTypes = Enum('VideoType', 'emission film miniserie')
filename_chars = 'àÀâÂçÇéÉèÈêÊëîÎôÔ\w\-\'\.\(\)'
pattern = re.compile('([{0}]+)\.(S([\d]+)E[\d]+)\.([{0}]+)\.[\d]+kbps\.ts'.format(filename_chars))
# pattern = re.compile('([{0}]+)\.(S[\d]+E[\d]+)\.([{0}]+)'.format(filename_chars))
files_to_rename = {}
video_type = None
def parse_filename(filename):
nonlocal video_type
match = pattern.search(filename)
if match:
show = match.group(1).replace('.', ' ')
episode = match.group(2)
season = match.group(3)
title = match.group(4).replace('.', ' ')
if show.lower() == title.lower():
video_type = VideoTypes.film
if (len(season) == 4) and (video_type != VideoTypes.film):
while True:
response = input('Is this a miniseries? (y/n) ')
if response.lower() == 'y':
video_type = VideoTypes.miniserie
break
elif response.lower() == 'n':
video_type = VideoTypes.film
break
if video_type == VideoTypes.miniserie:
episode = 'Partie {0}'.format(episode[episode.find('E') + 1:])
return show, episode, season, title
else:
sys.exit('ERROR: Unrecognized character in {0}\n'.format(filename))
for filename in sorted(os.listdir(os.getcwd())):
if not filename.endswith('.ts'):
continue
show, episode, season, title = parse_filename(filename)
if video_type == VideoTypes.film:
renamed_filename = '{} ({}).mp4'.format(title, season)
else:
renamed_filename = '{} - {} - {}.mp4'.format(show, episode, title)
print(filename)
print('\t{}'.format(renamed_filename))
files_to_rename[filename] = renamed_filename
response = input('Rename files? (y/n) ')
if response == 'y':
for filename in files_to_rename:
os.rename(
os.path.join(
os.getcwd(),
filename
),
os.path.join(
os.getcwd(),
files_to_rename[filename]
)
)
if __name__ == '__main__':
main()
|
Add random script for renaming toutv files#!/usr/bin/env python
# coding=utf8
from enum import Enum
import os
import os.path
import re
import sys
def main():
VideoTypes = Enum('VideoType', 'emission film miniserie')
filename_chars = 'àÀâÂçÇéÉèÈêÊëîÎôÔ\w\-\'\.\(\)'
pattern = re.compile('([{0}]+)\.(S([\d]+)E[\d]+)\.([{0}]+)\.[\d]+kbps\.ts'.format(filename_chars))
# pattern = re.compile('([{0}]+)\.(S[\d]+E[\d]+)\.([{0}]+)'.format(filename_chars))
files_to_rename = {}
video_type = None
def parse_filename(filename):
nonlocal video_type
match = pattern.search(filename)
if match:
show = match.group(1).replace('.', ' ')
episode = match.group(2)
season = match.group(3)
title = match.group(4).replace('.', ' ')
if show.lower() == title.lower():
video_type = VideoTypes.film
if (len(season) == 4) and (video_type != VideoTypes.film):
while True:
response = input('Is this a miniseries? (y/n) ')
if response.lower() == 'y':
video_type = VideoTypes.miniserie
break
elif response.lower() == 'n':
video_type = VideoTypes.film
break
if video_type == VideoTypes.miniserie:
episode = 'Partie {0}'.format(episode[episode.find('E') + 1:])
return show, episode, season, title
else:
sys.exit('ERROR: Unrecognized character in {0}\n'.format(filename))
for filename in sorted(os.listdir(os.getcwd())):
if not filename.endswith('.ts'):
continue
show, episode, season, title = parse_filename(filename)
if video_type == VideoTypes.film:
renamed_filename = '{} ({}).mp4'.format(title, season)
else:
renamed_filename = '{} - {} - {}.mp4'.format(show, episode, title)
print(filename)
print('\t{}'.format(renamed_filename))
files_to_rename[filename] = renamed_filename
response = input('Rename files? (y/n) ')
if response == 'y':
for filename in files_to_rename:
os.rename(
os.path.join(
os.getcwd(),
filename
),
os.path.join(
os.getcwd(),
files_to_rename[filename]
)
)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add random script for renaming toutv files<commit_after>#!/usr/bin/env python
# coding=utf8
from enum import Enum
import os
import os.path
import re
import sys
def main():
VideoTypes = Enum('VideoType', 'emission film miniserie')
filename_chars = 'àÀâÂçÇéÉèÈêÊëîÎôÔ\w\-\'\.\(\)'
pattern = re.compile('([{0}]+)\.(S([\d]+)E[\d]+)\.([{0}]+)\.[\d]+kbps\.ts'.format(filename_chars))
# pattern = re.compile('([{0}]+)\.(S[\d]+E[\d]+)\.([{0}]+)'.format(filename_chars))
files_to_rename = {}
video_type = None
def parse_filename(filename):
nonlocal video_type
match = pattern.search(filename)
if match:
show = match.group(1).replace('.', ' ')
episode = match.group(2)
season = match.group(3)
title = match.group(4).replace('.', ' ')
if show.lower() == title.lower():
video_type = VideoTypes.film
if (len(season) == 4) and (video_type != VideoTypes.film):
while True:
response = input('Is this a miniseries? (y/n) ')
if response.lower() == 'y':
video_type = VideoTypes.miniserie
break
elif response.lower() == 'n':
video_type = VideoTypes.film
break
if video_type == VideoTypes.miniserie:
episode = 'Partie {0}'.format(episode[episode.find('E') + 1:])
return show, episode, season, title
else:
sys.exit('ERROR: Unrecognized character in {0}\n'.format(filename))
for filename in sorted(os.listdir(os.getcwd())):
if not filename.endswith('.ts'):
continue
show, episode, season, title = parse_filename(filename)
if video_type == VideoTypes.film:
renamed_filename = '{} ({}).mp4'.format(title, season)
else:
renamed_filename = '{} - {} - {}.mp4'.format(show, episode, title)
print(filename)
print('\t{}'.format(renamed_filename))
files_to_rename[filename] = renamed_filename
response = input('Rename files? (y/n) ')
if response == 'y':
for filename in files_to_rename:
os.rename(
os.path.join(
os.getcwd(),
filename
),
os.path.join(
os.getcwd(),
files_to_rename[filename]
)
)
if __name__ == '__main__':
main()
|
|
04919ac67c187380c43e9f1f557167e5eb6d02e4
|
tests/test_html_formatter.py
|
tests/test_html_formatter.py
|
# -*- coding: utf-8 -*-
"""
Pygments HTML formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2006 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import unittest
import StringIO
import random
from pygments import lexers, formatters
from pygments.token import _TokenType
class HtmlFormatterTest(unittest.TestCase):
def test_external_css(self):
# TODO: write this test.
pass
|
Add a reminder to write a HTML formatter test.
|
[svn] Add a reminder to write a HTML formatter test.
--HG--
branch : trunk
|
Python
|
bsd-2-clause
|
Khan/pygments,dbrgn/pygments-mirror,dbrgn/pygments-mirror,nsfmc/pygments,nsfmc/pygments,nex3/pygments,dbrgn/pygments-mirror,Khan/pygments,kirbyfan64/pygments-unofficial,Khan/pygments,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,dbrgn/pygments-mirror,dbrgn/pygments-mirror,nex3/pygments,nsfmc/pygments,kirbyfan64/pygments-unofficial,nex3/pygments,Khan/pygments,dbrgn/pygments-mirror,kirbyfan64/pygments-unofficial,nsfmc/pygments,kirbyfan64/pygments-unofficial,nsfmc/pygments,dbrgn/pygments-mirror,Khan/pygments,Khan/pygments,nsfmc/pygments,Khan/pygments,nex3/pygments,nex3/pygments,Khan/pygments,nsfmc/pygments,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,nex3/pygments,nsfmc/pygments,dbrgn/pygments-mirror,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,nsfmc/pygments,nsfmc/pygments,Khan/pygments,dbrgn/pygments-mirror,nsfmc/pygments,dbrgn/pygments-mirror,dbrgn/pygments-mirror,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,nsfmc/pygments,kirbyfan64/pygments-unofficial,nsfmc/pygments,nsfmc/pygments,kirbyfan64/pygments-unofficial,Khan/pygments,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,Khan/pygments,nex3/pygments,nex3/pygments,Khan/pygments,dbrgn/pygments-mirror,Khan/pygments,nex3/pygments,Khan/pygments,Khan/pygments,Khan/pygments,dbrgn/pygments-mirror,nsfmc/pygments,nex3/pygments,kirbyfan64/pygments-unofficial,nsfmc/pygments,kirbyfan64/pygments-unofficial,nex3/pygments,kirbyfan64/pygments-unofficial
|
[svn] Add a reminder to write an HTML formatter test.
--HG--
branch : trunk
|
# -*- coding: utf-8 -*-
"""
Pygments HTML formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2006 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import unittest
import StringIO
import random
from pygments import lexers, formatters
from pygments.token import _TokenType
class HtmlFormatterTest(unittest.TestCase):
def test_external_css(self):
# TODO: write this test.
pass
|
<commit_before><commit_msg>[svn] Add a reminder to write an HTML formatter test.
--HG--
branch : trunk<commit_after>
|
# -*- coding: utf-8 -*-
"""
Pygments HTML formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2006 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import unittest
import StringIO
import random
from pygments import lexers, formatters
from pygments.token import _TokenType
class HtmlFormatterTest(unittest.TestCase):
def test_external_css(self):
# TODO: write this test.
pass
|
[svn] Add a reminder to write an HTML formatter test.
--HG--
branch : trunk# -*- coding: utf-8 -*-
"""
Pygments HTML formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2006 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import unittest
import StringIO
import random
from pygments import lexers, formatters
from pygments.token import _TokenType
class HtmlFormatterTest(unittest.TestCase):
def test_external_css(self):
# TODO: write this test.
pass
|
<commit_before><commit_msg>[svn] Add a reminder to write an HTML formatter test.
--HG--
branch : trunk<commit_after># -*- coding: utf-8 -*-
"""
Pygments HTML formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2006 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import unittest
import StringIO
import random
from pygments import lexers, formatters
from pygments.token import _TokenType
class HtmlFormatterTest(unittest.TestCase):
def test_external_css(self):
# TODO: write this test.
pass
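A minimal sketch of what the missing external-CSS check could look like, assuming only the long-standing HtmlFormatter options (cssclass and get_style_defs); it is an illustration, not the test the author had in mind:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
def check_external_css():
    # illustrative sketch: the markup carries the CSS class, the styles are generated separately
    fmt = HtmlFormatter(cssclass='source')
    html = highlight('print 1', PythonLexer(), fmt)
    css = fmt.get_style_defs('.source')
    assert '<div class="source"' in html
    assert '.source' in css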
|
|
ca9a19f721b06b619ffdab5bda1814667294d505
|
tests/test_librato_uptime.py
|
tests/test_librato_uptime.py
|
import os
import sys
import unittest
import json
sys.path.insert(0, os.path.abspath('./situation'))
sys.path.insert(0, os.path.abspath('./'))
from situation.librato_uptime import Calculator
from google.appengine.ext import testbed
import mock
UPTIME_CFG = {
'root_uri': 'https://metrics-api.librato.com/v1/metrics/',
'username': 'MOCK_USERNAME',
'password': 'MOCK_PWD',
'services': {
'API': {
'SOURCE': '*bapi-live*',
'TOTAL_TARGETS': [
'MOCK_TOTAL_TARGET_A',
'MOCK_TOTAL_TARGET_B',
],
'ERROR_TARGETS': [
'MOCK_ERROR_TARGET_A',
'MOCK_ERROR_TARGET_B',
'MOCK_ERROR_TARGET_C',
]
},
}
}
class TestLibratoUptime(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
def tearDown(self):
self.testbed.deactivate()
def test_refresh(self):
index = [0]
resps = [
# TOTAL A
dict(
measurements=dict(
foobar=[dict(count=1), dict(count=7788), dict(count=123)],
),
query=dict(next_time=999),
),
dict(
measurements=dict(
foobar=[dict(count=888)],
),
),
# TOTAL B
dict(
measurements=dict(
foobar=[dict(count=3), dict(count=4)],
)
),
# ERROR A
dict(
measurements=dict(
foobar=[dict(count=5)],
barfoo=[dict(count=78)],
)
),
# ERROR B
dict(
measurements=dict(
foobar=[dict(count=6)],
barfoo=[dict(count=78)],
)
),
# ERROR C
dict(
measurements=dict(
foobar=[dict(count=0)],
)
),
]
def read():
result = resps[index[0]]
index[0] += 1
return json.dumps(result)
calculator = Calculator(**UPTIME_CFG)
calculator.opener = mock.Mock()
calculator.opener.open.return_value = mock.Mock()
calculator.opener.open.return_value.read.side_effect = read
result = calculator._for_service(calculator.services['API'], 5)
total_number = (
(1 + 7788 + 123) +
(888) +
(3 + 4)
)
error_number = (
(5 + 78) +
(6 + 78) +
(0)
)
expected_result = ((total_number - float(error_number)) / total_number) * 100
self.assertEqual(result, expected_result)
|
Add test for librato uptime
|
Add test for librato uptime
|
Python
|
mit
|
balanced/status.balancedpayments.com,chriskuehl/kloudless-status,chriskuehl/kloudless-status,balanced/status.balancedpayments.com,balanced/status.balancedpayments.com,chriskuehl/kloudless-status
|
Add test for librato uptime
|
import os
import sys
import unittest
import json
sys.path.insert(0, os.path.abspath('./situation'))
sys.path.insert(0, os.path.abspath('./'))
from situation.librato_uptime import Calculator
from google.appengine.ext import testbed
import mock
UPTIME_CFG = {
'root_uri': 'https://metrics-api.librato.com/v1/metrics/',
'username': 'MOCK_USERNAME',
'password': 'MOCK_PWD',
'services': {
'API': {
'SOURCE': '*bapi-live*',
'TOTAL_TARGETS': [
'MOCK_TOTAL_TARGET_A',
'MOCK_TOTAL_TARGET_B',
],
'ERROR_TARGETS': [
'MOCK_ERROR_TARGET_A',
'MOCK_ERROR_TARGET_B',
'MOCK_ERROR_TARGET_C',
]
},
}
}
class TestLibratoUptime(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
def tearDown(self):
self.testbed.deactivate()
def test_refresh(self):
index = [0]
resps = [
# TOTAL A
dict(
measurements=dict(
foobar=[dict(count=1), dict(count=7788), dict(count=123)],
),
query=dict(next_time=999),
),
dict(
measurements=dict(
foobar=[dict(count=888)],
),
),
# TOTAL B
dict(
measurements=dict(
foobar=[dict(count=3), dict(count=4)],
)
),
# ERROR A
dict(
measurements=dict(
foobar=[dict(count=5)],
barfoo=[dict(count=78)],
)
),
# ERROR B
dict(
measurements=dict(
foobar=[dict(count=6)],
barfoo=[dict(count=78)],
)
),
# ERROR C
dict(
measurements=dict(
foobar=[dict(count=0)],
)
),
]
def read():
result = resps[index[0]]
index[0] += 1
return json.dumps(result)
calculator = Calculator(**UPTIME_CFG)
calculator.opener = mock.Mock()
calculator.opener.open.return_value = mock.Mock()
calculator.opener.open.return_value.read.side_effect = read
result = calculator._for_service(calculator.services['API'], 5)
total_number = (
(1 + 7788 + 123) +
(888) +
(3 + 4)
)
error_number = (
(5 + 78) +
(6 + 78) +
(0)
)
expected_result = ((total_number - float(error_number)) / total_number) * 100
self.assertEqual(result, expected_result)
|
<commit_before><commit_msg>Add test for librato uptime<commit_after>
|
import os
import sys
import unittest
import json
sys.path.insert(0, os.path.abspath('./situation'))
sys.path.insert(0, os.path.abspath('./'))
from situation.librato_uptime import Calculator
from google.appengine.ext import testbed
import mock
UPTIME_CFG = {
'root_uri': 'https://metrics-api.librato.com/v1/metrics/',
'username': 'MOCK_USERNAME',
'password': 'MOCK_PWD',
'services': {
'API': {
'SOURCE': '*bapi-live*',
'TOTAL_TARGETS': [
'MOCK_TOTAL_TARGET_A',
'MOCK_TOTAL_TARGET_B',
],
'ERROR_TARGETS': [
'MOCK_ERROR_TARGET_A',
'MOCK_ERROR_TARGET_B',
'MOCK_ERROR_TARGET_C',
]
},
}
}
class TestLibratoUptime(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
def tearDown(self):
self.testbed.deactivate()
def test_refresh(self):
index = [0]
resps = [
# TOTAL A
dict(
measurements=dict(
foobar=[dict(count=1), dict(count=7788), dict(count=123)],
),
query=dict(next_time=999),
),
dict(
measurements=dict(
foobar=[dict(count=888)],
),
),
# TOTAL B
dict(
measurements=dict(
foobar=[dict(count=3), dict(count=4)],
)
),
# ERROR A
dict(
measurements=dict(
foobar=[dict(count=5)],
barfoo=[dict(count=78)],
)
),
# ERROR B
dict(
measurements=dict(
foobar=[dict(count=6)],
barfoo=[dict(count=78)],
)
),
# ERROR C
dict(
measurements=dict(
foobar=[dict(count=0)],
)
),
]
def read():
result = resps[index[0]]
index[0] += 1
return json.dumps(result)
calculator = Calculator(**UPTIME_CFG)
calculator.opener = mock.Mock()
calculator.opener.open.return_value = mock.Mock()
calculator.opener.open.return_value.read.side_effect = read
result = calculator._for_service(calculator.services['API'], 5)
total_number = (
(1 + 7788 + 123) +
(888) +
(3 + 4)
)
error_number = (
(5 + 78) +
(6 + 78) +
(0)
)
expected_result = ((total_number - float(error_number)) / total_number) * 100
self.assertEqual(result, expected_result)
|
Add test for librato uptimeimport os
import sys
import unittest
import json
sys.path.insert(0, os.path.abspath('./situation'))
sys.path.insert(0, os.path.abspath('./'))
from situation.librato_uptime import Calculator
from google.appengine.ext import testbed
import mock
UPTIME_CFG = {
'root_uri': 'https://metrics-api.librato.com/v1/metrics/',
'username': 'MOCK_USERNAME',
'password': 'MOCK_PWD',
'services': {
'API': {
'SOURCE': '*bapi-live*',
'TOTAL_TARGETS': [
'MOCK_TOTAL_TARGET_A',
'MOCK_TOTAL_TARGET_B',
],
'ERROR_TARGETS': [
'MOCK_ERROR_TARGET_A',
'MOCK_ERROR_TARGET_B',
'MOCK_ERROR_TARGET_C',
]
},
}
}
class TestLibratoUptime(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
def tearDown(self):
self.testbed.deactivate()
def test_refresh(self):
index = [0]
resps = [
# TOTAL A
dict(
measurements=dict(
foobar=[dict(count=1), dict(count=7788), dict(count=123)],
),
query=dict(next_time=999),
),
dict(
measurements=dict(
foobar=[dict(count=888)],
),
),
# TOTAL B
dict(
measurements=dict(
foobar=[dict(count=3), dict(count=4)],
)
),
# ERROR A
dict(
measurements=dict(
foobar=[dict(count=5)],
barfoo=[dict(count=78)],
)
),
# ERROR B
dict(
measurements=dict(
foobar=[dict(count=6)],
barfoo=[dict(count=78)],
)
),
# ERROR C
dict(
measurements=dict(
foobar=[dict(count=0)],
)
),
]
def read():
result = resps[index[0]]
index[0] += 1
return json.dumps(result)
calculator = Calculator(**UPTIME_CFG)
calculator.opener = mock.Mock()
calculator.opener.open.return_value = mock.Mock()
calculator.opener.open.return_value.read.side_effect = read
result = calculator._for_service(calculator.services['API'], 5)
total_number = (
(1 + 7788 + 123) +
(888) +
(3 + 4)
)
error_number = (
(5 + 78) +
(6 + 78) +
(0)
)
expected_result = ((total_number - float(error_number)) / total_number) * 100
self.assertEqual(result, expected_result)
|
<commit_before><commit_msg>Add test for librato uptime<commit_after>import os
import sys
import unittest
import json
sys.path.insert(0, os.path.abspath('./situation'))
sys.path.insert(0, os.path.abspath('./'))
from situation.librato_uptime import Calculator
from google.appengine.ext import testbed
import mock
UPTIME_CFG = {
'root_uri': 'https://metrics-api.librato.com/v1/metrics/',
'username': 'MOCK_USERNAME',
'password': 'MOCK_PWD',
'services': {
'API': {
'SOURCE': '*bapi-live*',
'TOTAL_TARGETS': [
'MOCK_TOTAL_TARGET_A',
'MOCK_TOTAL_TARGET_B',
],
'ERROR_TARGETS': [
'MOCK_ERROR_TARGET_A',
'MOCK_ERROR_TARGET_B',
'MOCK_ERROR_TARGET_C',
]
},
}
}
class TestLibratoUptime(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
def tearDown(self):
self.testbed.deactivate()
def test_refresh(self):
index = [0]
resps = [
# TOTAL A
dict(
measurements=dict(
foobar=[dict(count=1), dict(count=7788), dict(count=123)],
),
query=dict(next_time=999),
),
dict(
measurements=dict(
foobar=[dict(count=888)],
),
),
# TOTAL B
dict(
measurements=dict(
foobar=[dict(count=3), dict(count=4)],
)
),
# ERROR A
dict(
measurements=dict(
foobar=[dict(count=5)],
barfoo=[dict(count=78)],
)
),
# ERROR B
dict(
measurements=dict(
foobar=[dict(count=6)],
barfoo=[dict(count=78)],
)
),
# ERROR C
dict(
measurements=dict(
foobar=[dict(count=0)],
)
),
]
def read():
result = resps[index[0]]
index[0] += 1
return json.dumps(result)
calculator = Calculator(**UPTIME_CFG)
calculator.opener = mock.Mock()
calculator.opener.open.return_value = mock.Mock()
calculator.opener.open.return_value.read.side_effect = read
result = calculator._for_service(calculator.services['API'], 5)
total_number = (
(1 + 7788 + 123) +
(888) +
(3 + 4)
)
error_number = (
(5 + 78) +
(6 + 78) +
(0)
)
expected_result = ((total_number - float(error_number)) / total_number) * 100
self.assertEqual(result, expected_result)
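For readability, the expected figure above works out as follows (counts taken straight from the mocked responses):
total_count = (1 + 7788 + 123) + 888 + (3 + 4)   # 8807 measurements across the TOTAL targets
error_count = (5 + 78) + (6 + 78) + 0            # 167 measurements across the ERROR targets
uptime_pct = (total_count - float(error_count)) / total_count * 100
print(round(uptime_pct, 2))                      # 98.1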
|
|
204f195891fa8f071009f0015fb81c280c3c5cbe
|
userconf.py
|
userconf.py
|
# Additional configuration
# User defined global variables
# Snack calling method
# Choices are 'exe', 'python', 'tcl'
# Setting it to None, uses the default for your operating system
user_default_snack_method = None
|
Add config file for global user-defined variables
|
Add config file for global user-defined variables
|
Python
|
apache-2.0
|
voicesauce/opensauce-python,voicesauce/opensauce-python,voicesauce/opensauce-python
|
Add config file for global user-defined variables
|
# Additional configuration
# User defined global variables
# Snack calling method
# Choices are 'exe', 'python', 'tcl'
# Setting it to None, uses the default for your operating system
user_default_snack_method = None
|
<commit_before><commit_msg>Add config file for global user-defined variables<commit_after>
|
# Additional configuration
# User defined global variables
# Snack calling method
# Choices are 'exe', 'python', 'tcl'
# Setting it to None, uses the default for your operating system
user_default_snack_method = None
|
Add config file for global user-defined variables# Additional configuration
# User defined global variables
# Snack calling method
# Choices are 'exe', 'python', 'tcl'
# Setting it to None, uses the default for your operating system
user_default_snack_method = None
|
<commit_before><commit_msg>Add config file for global user-defined variables<commit_after># Additional configuration
# User defined global variables
# Snack calling method
# Choices are 'exe', 'python', 'tcl'
# Setting it to None, uses the default for your operating system
user_default_snack_method = None
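A hypothetical sketch of how a caller might consume this setting; the fallback value is an assumption for illustration only, not part of the commit:
import userconf
snack_method = userconf.user_default_snack_method
if snack_method is None:
    snack_method = 'exe'   # assumed OS default, purely illustrative
assert snack_method in ('exe', 'python', 'tcl')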
|
|
f5d418c229ea240b097c091ceb00d5d07275b56a
|
dakota/tests/test_dakota_utils.py
|
dakota/tests/test_dakota_utils.py
|
#!/usr/bin/env python
#
# Tests for dakota.dakota_utils module.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
import os
from nose.tools import *
from dakota.dakota_utils import *
# Global variables
start_dir = os.getcwd()
data_dir = os.path.join(start_dir, 'dakota', 'tests', 'data')
parameters_file = os.path.join(data_dir, 'vector_parameter_study_params.in')
response_labels = ['Qs_median']
model = 'hydrotrend'
output_file = 'HYDROASCII.QS'
response_statistic = 'median'
# Tests ----------------------------------------------------------------
def test_is_dakota_installed():
"""Test whether Dakota is installed."""
assert_true(is_dakota_installed())
def test_get_response_descriptors():
"""Test the get_response_descriptors function."""
assert_equal(response_labels, get_response_descriptors(parameters_file))
def test_get_response_descriptors_unknown_file():
"""Test get_response_descriptors when parameters file not found."""
assert_is_none(get_response_descriptors('foo.in'))
def test_get_analysis_components():
"""Test the get_analysis_components function."""
ac = get_analysis_components(parameters_file)
assert_equal(model, ac.pop(0))
response = ac.pop(0)
assert_equal(response['file'], output_file)
assert_equal(response['statistic'], response_statistic)
def test_get_analysis_components_unknown_file():
"""Test get_analysis_components when parameters file not found."""
assert_is_none(get_analysis_components('foo.in'))
|
Add unit tests for dakota.dakota_utils
|
Add unit tests for dakota.dakota_utils
|
Python
|
mit
|
csdms/dakota,csdms/dakota
|
Add unit tests for dakota.dakota_utils
|
#!/usr/bin/env python
#
# Tests for dakota.dakota_utils module.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
import os
from nose.tools import *
from dakota.dakota_utils import *
# Global variables
start_dir = os.getcwd()
data_dir = os.path.join(start_dir, 'dakota', 'tests', 'data')
parameters_file = os.path.join(data_dir, 'vector_parameter_study_params.in')
response_labels = ['Qs_median']
model = 'hydrotrend'
output_file = 'HYDROASCII.QS'
response_statistic = 'median'
# Tests ----------------------------------------------------------------
def test_is_dakota_installed():
"""Test whether Dakota is installed."""
assert_true(is_dakota_installed())
def test_get_response_descriptors():
"""Test the get_response_descriptors function."""
assert_equal(response_labels, get_response_descriptors(parameters_file))
def test_get_response_descriptors_unknown_file():
"""Test get_response_descriptors when parameters file not found."""
assert_is_none(get_response_descriptors('foo.in'))
def test_get_analysis_components():
"""Test the get_analysis_components function."""
ac = get_analysis_components(parameters_file)
assert_equal(model, ac.pop(0))
response = ac.pop(0)
assert_equal(response['file'], output_file)
assert_equal(response['statistic'], response_statistic)
def test_get_analysis_components_unknown_file():
"""Test get_analysis_components when parameters file not found."""
assert_is_none(get_analysis_components('foo.in'))
|
<commit_before><commit_msg>Add unit tests for dakota.dakota_utils<commit_after>
|
#!/usr/bin/env python
#
# Tests for dakota.dakota_utils module.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
import os
from nose.tools import *
from dakota.dakota_utils import *
# Global variables
start_dir = os.getcwd()
data_dir = os.path.join(start_dir, 'dakota', 'tests', 'data')
parameters_file = os.path.join(data_dir, 'vector_parameter_study_params.in')
response_labels = ['Qs_median']
model = 'hydrotrend'
output_file = 'HYDROASCII.QS'
response_statistic = 'median'
# Tests ----------------------------------------------------------------
def test_is_dakota_installed():
"""Test whether Dakota is installed."""
assert_true(is_dakota_installed())
def test_get_response_descriptors():
"""Test the get_response_descriptors function."""
assert_equal(response_labels, get_response_descriptors(parameters_file))
def test_get_response_descriptors_unknown_file():
"""Test get_response_descriptors when parameters file not found."""
assert_is_none(get_response_descriptors('foo.in'))
def test_get_analysis_components():
"""Test the get_analysis_components function."""
ac = get_analysis_components(parameters_file)
assert_equal(model, ac.pop(0))
response = ac.pop(0)
assert_equal(response['file'], output_file)
assert_equal(response['statistic'], response_statistic)
def test_get_analysis_components_unknown_file():
"""Test get_analysis_components when parameters file not found."""
assert_is_none(get_analysis_components('foo.in'))
|
Add unit tests for dakota.dakota_utils#!/usr/bin/env python
#
# Tests for dakota.dakota_utils module.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
import os
from nose.tools import *
from dakota.dakota_utils import *
# Global variables
start_dir = os.getcwd()
data_dir = os.path.join(start_dir, 'dakota', 'tests', 'data')
parameters_file = os.path.join(data_dir, 'vector_parameter_study_params.in')
response_labels = ['Qs_median']
model = 'hydrotrend'
output_file = 'HYDROASCII.QS'
response_statistic = 'median'
# Tests ----------------------------------------------------------------
def test_is_dakota_installed():
"""Test whether Dakota is installed."""
assert_true(is_dakota_installed())
def test_get_response_descriptors():
"""Test the get_response_descriptors function."""
assert_equal(response_labels, get_response_descriptors(parameters_file))
def test_get_response_descriptors_unknown_file():
"""Test get_response_descriptors when parameters file not found."""
assert_is_none(get_response_descriptors('foo.in'))
def test_get_analysis_components():
"""Test the get_analysis_components function."""
ac = get_analysis_components(parameters_file)
assert_equal(model, ac.pop(0))
response = ac.pop(0)
assert_equal(response['file'], output_file)
assert_equal(response['statistic'], response_statistic)
def test_get_analysis_components_unknown_file():
"""Test get_analysis_components when parameters file not found."""
assert_is_none(get_analysis_components('foo.in'))
|
<commit_before><commit_msg>Add unit tests for dakota.dakota_utils<commit_after>#!/usr/bin/env python
#
# Tests for dakota.dakota_utils module.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
import os
from nose.tools import *
from dakota.dakota_utils import *
# Global variables
start_dir = os.getcwd()
data_dir = os.path.join(start_dir, 'dakota', 'tests', 'data')
parameters_file = os.path.join(data_dir, 'vector_parameter_study_params.in')
response_labels = ['Qs_median']
model = 'hydrotrend'
output_file = 'HYDROASCII.QS'
response_statistic = 'median'
# Tests ----------------------------------------------------------------
def test_is_dakota_installed():
"""Test whether Dakota is installed."""
assert_true(is_dakota_installed())
def test_get_response_descriptors():
"""Test the get_response_descriptors function."""
assert_equal(response_labels, get_response_descriptors(parameters_file))
def test_get_response_descriptors_unknown_file():
"""Test get_response_descriptors when parameters file not found."""
assert_is_none(get_response_descriptors('foo.in'))
def test_get_analysis_components():
"""Test the get_analysis_components function."""
ac = get_analysis_components(parameters_file)
assert_equal(model, ac.pop(0))
response = ac.pop(0)
assert_equal(response['file'], output_file)
assert_equal(response['statistic'], response_statistic)
def test_get_analysis_components_unknown_file():
"""Test get_analysis_components when parameters file not found."""
assert_is_none(get_analysis_components('foo.in'))
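For reference, the return shapes these assertions pin down (values copied from the test globals; the real data comes from the parameters file under dakota/tests/data):
descriptors = ['Qs_median']                       # what get_response_descriptors(parameters_file) yields
components = ['hydrotrend',                       # what get_analysis_components(parameters_file) yields
              {'file': 'HYDROASCII.QS', 'statistic': 'median'}]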
|
|
cc744124bafd3b26cec54d8ef8dc5b08ad7e0f9f
|
olympiad/christmas_tree.py
|
olympiad/christmas_tree.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generate a christmas tree in ASCII.
#
# Keyword arguments:
# z -- the height of the christmas tree minus the base (height = z + 1)
# Returns a multiple line string containing the ASCII christmas tree.
def tree(z):
# This is evil!
return "\n".join("".join(l) \
for l in map((lambda y: list(("*", "-")[x < y or x > 2*z-y-2] \
for x in range(0, 2*z-1))), reversed([z-1]+list(range(0, z)))))
if __name__ == "__main__":
print(tree(int(input())))
|
Add solution for problem 1a
|
Add solution for problem 1a
|
Python
|
apache-2.0
|
fabianm/olympiad,fabianm/olympiad,fabianm/olympiad
|
Add solution for problem 1a
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generate a christmas tree in ASCII.
#
# Keyword arguments:
# z -- the height of the christmas tree minus the base (height = z + 1)
# Returns a multiple line string containing the ASCII christmas tree.
def tree(z):
# This is evil!
return "\n".join("".join(l) \
for l in map((lambda y: list(("*", "-")[x < y or x > 2*z-y-2] \
for x in range(0, 2*z-1))), reversed([z-1]+list(range(0, z)))))
if __name__ == "__main__":
print(tree(int(input())))
|
<commit_before><commit_msg>Add solution for problem 1a<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generate a christmas tree in ASCII.
#
# Keyword arguments:
# z -- the height of the christmas tree minus the base (height = z + 1)
# Returns a multiple line string containing the ASCII christmas tree.
def tree(z):
# This is evil!
return "\n".join("".join(l) \
for l in map((lambda y: list(("*", "-")[x < y or x > 2*z-y-2] \
for x in range(0, 2*z-1))), reversed([z-1]+list(range(0, z)))))
if __name__ == "__main__":
print(tree(int(input())))
|
Add solution for problem 1a#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generate a christmas tree in ASCII.
#
# Keyword arguments:
# z -- the height of the christmas tree minus the base (height = z + 1)
# Returns a multiple line string containing the ASCII christmas tree.
def tree(z):
# This is evil!
return "\n".join("".join(l) \
for l in map((lambda y: list(("*", "-")[x < y or x > 2*z-y-2] \
for x in range(0, 2*z-1))), reversed([z-1]+list(range(0, z)))))
if __name__ == "__main__":
print(tree(int(input())))
|
<commit_before><commit_msg>Add solution for problem 1a<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generate a christmas tree in ASCII.
#
# Keyword arguments:
# z -- the height of the christmas tree minus the base (height = z + 1)
# Returns a multiple line string containing the ASCII christmas tree.
def tree(z):
# This is evil!
return "\n".join("".join(l) \
for l in map((lambda y: list(("*", "-")[x < y or x > 2*z-y-2] \
for x in range(0, 2*z-1))), reversed([z-1]+list(range(0, z)))))
if __name__ == "__main__":
print(tree(int(input())))
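A straightforward equivalent of the one-liner, shown only to make the expected output easy to check; it is a sketch, not part of the submitted solution:
def tree_readable(z):
    rows = []
    for y in list(range(z - 1, -1, -1)) + [z - 1]:   # same row order as reversed([z-1] + range(0, z))
        rows.append(''.join('*' if y <= x <= 2 * z - y - 2 else '-'
                            for x in range(2 * z - 1)))
    return '\n'.join(rows)
print(tree_readable(3))   # prints, top to bottom: --*-- / -***- / ***** / --*--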
|
|
93cee311a2044f52e41596c0e581a7d30926f46c
|
sahara/tests/unit/utils/test_configs.py
|
sahara/tests/unit/utils/test_configs.py
|
# Copyright (c) 2015 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import testtools
from sahara.utils import configs
class ConfigsTestCase(testtools.TestCase):
def test_merge_configs(self):
a = {
'HDFS': {
'param1': 'value1',
'param2': 'value2'
}
}
b = {
'HDFS': {
'param1': 'value3',
'param3': 'value4'
},
'YARN': {
'param5': 'value5'
}
}
res = configs.merge_configs(a, b)
expected = {
'HDFS': {
'param1': 'value3',
'param2': 'value2',
'param3': 'value4'
},
'YARN': {
'param5': 'value5'
}
}
self.assertEqual(expected, res)
|
Add configs unit test case
|
Add configs unit test case
Change-Id: I9a9bb85226059cf6421282d72e4f6b8fd4df7d7f
|
Python
|
apache-2.0
|
egafford/sahara,crobby/sahara,openstack/sahara,crobby/sahara,openstack/sahara,crobby/sahara,tellesnobrega/sahara,zhangjunli177/sahara,egafford/sahara,ekasitk/sahara,ekasitk/sahara,tellesnobrega/sahara,zhangjunli177/sahara,ekasitk/sahara,zhangjunli177/sahara
|
Add configs unit test case
Change-Id: I9a9bb85226059cf6421282d72e4f6b8fd4df7d7f
|
# Copyright (c) 2015 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import testtools
from sahara.utils import configs
class ConfigsTestCase(testtools.TestCase):
def test_merge_configs(self):
a = {
'HDFS': {
'param1': 'value1',
'param2': 'value2'
}
}
b = {
'HDFS': {
'param1': 'value3',
'param3': 'value4'
},
'YARN': {
'param5': 'value5'
}
}
res = configs.merge_configs(a, b)
expected = {
'HDFS': {
'param1': 'value3',
'param2': 'value2',
'param3': 'value4'
},
'YARN': {
'param5': 'value5'
}
}
self.assertEqual(expected, res)
|
<commit_before><commit_msg>Add configs unit test case
Change-Id: I9a9bb85226059cf6421282d72e4f6b8fd4df7d7f<commit_after>
|
# Copyright (c) 2015 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import testtools
from sahara.utils import configs
class ConfigsTestCase(testtools.TestCase):
def test_merge_configs(self):
a = {
'HDFS': {
'param1': 'value1',
'param2': 'value2'
}
}
b = {
'HDFS': {
'param1': 'value3',
'param3': 'value4'
},
'YARN': {
'param5': 'value5'
}
}
res = configs.merge_configs(a, b)
expected = {
'HDFS': {
'param1': 'value3',
'param2': 'value2',
'param3': 'value4'
},
'YARN': {
'param5': 'value5'
}
}
self.assertEqual(expected, res)
|
Add configs unit test case
Change-Id: I9a9bb85226059cf6421282d72e4f6b8fd4df7d7f# Copyright (c) 2015 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import testtools
from sahara.utils import configs
class ConfigsTestCase(testtools.TestCase):
def test_merge_configs(self):
a = {
'HDFS': {
'param1': 'value1',
'param2': 'value2'
}
}
b = {
'HDFS': {
'param1': 'value3',
'param3': 'value4'
},
'YARN': {
'param5': 'value5'
}
}
res = configs.merge_configs(a, b)
expected = {
'HDFS': {
'param1': 'value3',
'param2': 'value2',
'param3': 'value4'
},
'YARN': {
'param5': 'value5'
}
}
self.assertEqual(expected, res)
|
<commit_before><commit_msg>Add configs unit test case
Change-Id: I9a9bb85226059cf6421282d72e4f6b8fd4df7d7f<commit_after># Copyright (c) 2015 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import testtools
from sahara.utils import configs
class ConfigsTestCase(testtools.TestCase):
def test_merge_configs(self):
a = {
'HDFS': {
'param1': 'value1',
'param2': 'value2'
}
}
b = {
'HDFS': {
'param1': 'value3',
'param3': 'value4'
},
'YARN': {
'param5': 'value5'
}
}
res = configs.merge_configs(a, b)
expected = {
'HDFS': {
'param1': 'value3',
'param2': 'value2',
'param3': 'value4'
},
'YARN': {
'param5': 'value5'
}
}
self.assertEqual(expected, res)
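A minimal sketch of the merge semantics this test pins down (sections from both dicts are kept and values from the second dict win); the real helper lives in sahara.utils.configs and may differ in detail:
def merge_configs_sketch(a, b):
    merged = {}
    for section in set(a) | set(b):
        merged[section] = dict(a.get(section, {}))
        merged[section].update(b.get(section, {}))   # b overrides a on key clashes
    return merged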
|
|
8c9cd95e473ded8f9716705a6ef27c420c8f7b8d
|
monitors/migrations/0003_certificatesubscription.py
|
monitors/migrations/0003_certificatesubscription.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('monitors', '0002_certificate_monitors_and_ulimited_alert_text'),
]
operations = [
migrations.CreateModel(
name='CertificateSubscription',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
],
),
migrations.AlterUniqueTogether(
name='certificatemonitor',
unique_together=set([]),
),
migrations.AddField(
model_name='certificatesubscription',
name='certificate',
field=models.ForeignKey(to='monitors.CertificateMonitor', on_delete=django.db.models.deletion.PROTECT),
),
migrations.AddField(
model_name='certificatesubscription',
name='owner',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=django.db.models.deletion.PROTECT),
),
]
|
Add migration file to generate certificatesubscription table
|
Add migration file to generate certificatesubscription table
|
Python
|
mit
|
gdit-cnd/RAPID,LindaTNguyen/RAPID,gdit-cnd/RAPID,gdit-cnd/RAPID,LindaTNguyen/RAPID,LindaTNguyen/RAPID,LindaTNguyen/RAPID,gdit-cnd/RAPID,LindaTNguyen/RAPID,gdit-cnd/RAPID
|
Add migration file to generate certificatesubscription table
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('monitors', '0002_certificate_monitors_and_ulimited_alert_text'),
]
operations = [
migrations.CreateModel(
name='CertificateSubscription',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
],
),
migrations.AlterUniqueTogether(
name='certificatemonitor',
unique_together=set([]),
),
migrations.AddField(
model_name='certificatesubscription',
name='certificate',
field=models.ForeignKey(to='monitors.CertificateMonitor', on_delete=django.db.models.deletion.PROTECT),
),
migrations.AddField(
model_name='certificatesubscription',
name='owner',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=django.db.models.deletion.PROTECT),
),
]
|
<commit_before><commit_msg>Add migration file to generate certificatesubscription table<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('monitors', '0002_certificate_monitors_and_ulimited_alert_text'),
]
operations = [
migrations.CreateModel(
name='CertificateSubscription',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
],
),
migrations.AlterUniqueTogether(
name='certificatemonitor',
unique_together=set([]),
),
migrations.AddField(
model_name='certificatesubscription',
name='certificate',
field=models.ForeignKey(to='monitors.CertificateMonitor', on_delete=django.db.models.deletion.PROTECT),
),
migrations.AddField(
model_name='certificatesubscription',
name='owner',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=django.db.models.deletion.PROTECT),
),
]
|
Add migration file to generate certificatesubscription table# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('monitors', '0002_certificate_monitors_and_ulimited_alert_text'),
]
operations = [
migrations.CreateModel(
name='CertificateSubscription',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
],
),
migrations.AlterUniqueTogether(
name='certificatemonitor',
unique_together=set([]),
),
migrations.AddField(
model_name='certificatesubscription',
name='certificate',
field=models.ForeignKey(to='monitors.CertificateMonitor', on_delete=django.db.models.deletion.PROTECT),
),
migrations.AddField(
model_name='certificatesubscription',
name='owner',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=django.db.models.deletion.PROTECT),
),
]
|
<commit_before><commit_msg>Add migration file to generate certificatesubscription table<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('monitors', '0002_certificate_monitors_and_ulimited_alert_text'),
]
operations = [
migrations.CreateModel(
name='CertificateSubscription',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
],
),
migrations.AlterUniqueTogether(
name='certificatemonitor',
unique_together=set([]),
),
migrations.AddField(
model_name='certificatesubscription',
name='certificate',
field=models.ForeignKey(to='monitors.CertificateMonitor', on_delete=django.db.models.deletion.PROTECT),
),
migrations.AddField(
model_name='certificatesubscription',
name='owner',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=django.db.models.deletion.PROTECT),
),
]
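The model shape this migration implies, sketched for readability; the actual class lives in monitors/models.py and may carry extra fields:
from django.conf import settings
from django.db import models
class CertificateSubscription(models.Model):
    # sketch inferred from the migration operations above, not copied from the app code
    certificate = models.ForeignKey('monitors.CertificateMonitor',
                                    on_delete=models.PROTECT)
    owner = models.ForeignKey(settings.AUTH_USER_MODEL,
                              on_delete=models.PROTECT)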
|
|
2f1eb9eda871cd2b7cd0f9300d15cdedab609248
|
scripts/generate_bus_ranking.py
|
scripts/generate_bus_ranking.py
|
from collections import defaultdict
import sys
import django
django.setup()
from busshaming.models import RouteDate, Feed
FEED_SLUG = 'nsw-buses'
MIN_TRIPS = 500
MIN_RT_ENTRIES = 0
def main(best, verylate):
feed = Feed.objects.get(slug=FEED_SLUG)
routedates = defaultdict(list)
for rd in RouteDate.objects.filter(route__feed=feed).prefetch_related('route').all():
routedates[rd.route_id].append(rd)
results = []
for route_id in routedates:
route = routedates[route_id][0].route
rds = routedates[route_id]
num_trips = sum([rd.num_scheduled_trips for rd in rds])
if num_trips == 0:
continue
total_ontime = sum([rd.scheduled_trip_ontime_count for rd in rds])
total_verylate = sum([rd.scheduled_trip_verylate_count for rd in rds])
if num_trips < MIN_TRIPS:
continue
result = [
route.id,
num_trips,
100 * total_ontime / num_trips,
100 * total_verylate / num_trips,
route.short_name,
route.long_name,
route.agency.name,
]
results.append(result)
if verylate:
results.sort(key=lambda x: x[3], reverse=not best)
else:
results.sort(key=lambda x: x[2], reverse=best)
for i in range(50):
res = results[i]
desc = '\t'.join(res[4:])
out = f'{i+1}\t{res[0]}\t{res[1]}\t{res[2]:.2f}\t{res[3]:.2f}\t' + desc
print(out)
if __name__ == '__main__':
if len(sys.argv) < 3:
print(f'Usage: {sys.argv[0]} <best|worst> <ontime|verylate>')
sys.exit(1)
best = sys.argv[1] == 'best'
verylate = sys.argv[2] == 'verylate'
main(best, verylate)
|
Add script to aggregate RouteDates and display ranked lists of routes.
|
Add script to aggregate RouteDates and display ranked lists of routes.
|
Python
|
mit
|
katharosada/bus-shaming,katharosada/bus-shaming,katharosada/bus-shaming,katharosada/bus-shaming,katharosada/bus-shaming
|
Add script to aggregate RouteDates and display ranked lists of routes.
|
from collections import defaultdict
import sys
import django
django.setup()
from busshaming.models import RouteDate, Feed
FEED_SLUG = 'nsw-buses'
MIN_TRIPS = 500
MIN_RT_ENTRIES = 0
def main(best, verylate):
feed = Feed.objects.get(slug=FEED_SLUG)
routedates = defaultdict(list)
for rd in RouteDate.objects.filter(route__feed=feed).prefetch_related('route').all():
routedates[rd.route_id].append(rd)
results = []
for route_id in routedates:
route = routedates[route_id][0].route
rds = routedates[route_id]
num_trips = sum([rd.num_scheduled_trips for rd in rds])
if num_trips == 0:
continue
total_ontime = sum([rd.scheduled_trip_ontime_count for rd in rds])
total_verylate = sum([rd.scheduled_trip_verylate_count for rd in rds])
if num_trips < MIN_TRIPS:
continue
result = [
route.id,
num_trips,
100 * total_ontime / num_trips,
100 * total_verylate / num_trips,
route.short_name,
route.long_name,
route.agency.name,
]
results.append(result)
if verylate:
results.sort(key=lambda x: x[3], reverse=not best)
else:
results.sort(key=lambda x: x[2], reverse=best)
for i in range(50):
res = results[i]
desc = '\t'.join(res[4:])
out = f'{i+1}\t{res[0]}\t{res[1]}\t{res[2]:.2f}\t{res[3]:.2f}\t' + desc
print(out)
if __name__ == '__main__':
if len(sys.argv) < 3:
print(f'Usage: {sys.argv[0]} <best|worst> <ontime|verylate>')
sys.exit(1)
best = sys.argv[1] == 'best'
verylate = sys.argv[2] == 'verylate'
main(best, verylate)
|
<commit_before><commit_msg>Add script to aggregate RouteDates and display ranked lists of routes.<commit_after>
|
from collections import defaultdict
import sys
import django
django.setup()
from busshaming.models import RouteDate, Feed
FEED_SLUG = 'nsw-buses'
MIN_TRIPS = 500
MIN_RT_ENTRIES = 0
def main(best, verylate):
feed = Feed.objects.get(slug=FEED_SLUG)
routedates = defaultdict(list)
for rd in RouteDate.objects.filter(route__feed=feed).prefetch_related('route').all():
routedates[rd.route_id].append(rd)
results = []
for route_id in routedates:
route = routedates[route_id][0].route
rds = routedates[route_id]
num_trips = sum([rd.num_scheduled_trips for rd in rds])
if num_trips == 0:
continue
total_ontime = sum([rd.scheduled_trip_ontime_count for rd in rds])
total_verylate = sum([rd.scheduled_trip_verylate_count for rd in rds])
if num_trips < MIN_TRIPS:
continue
result = [
route.id,
num_trips,
100 * total_ontime / num_trips,
100 * total_verylate / num_trips,
route.short_name,
route.long_name,
route.agency.name,
]
results.append(result)
if verylate:
results.sort(key=lambda x: x[3], reverse=not best)
else:
results.sort(key=lambda x: x[2], reverse=best)
for i in range(50):
res = results[i]
desc = '\t'.join(res[4:])
out = f'{i+1}\t{res[0]}\t{res[1]}\t{res[2]:.2f}\t{res[3]:.2f}\t' + desc
print(out)
if __name__ == '__main__':
if len(sys.argv) < 3:
print(f'Usage: {sys.argv[0]} <best|worst> <ontime|verylate>')
sys.exit(1)
best = sys.argv[1] == 'best'
verylate = sys.argv[2] == 'verylate'
main(best, verylate)
|
Add script to aggregate RouteDates and display ranked lists of routes.from collections import defaultdict
import sys
import django
django.setup()
from busshaming.models import RouteDate, Feed
FEED_SLUG = 'nsw-buses'
MIN_TRIPS = 500
MIN_RT_ENTRIES = 0
def main(best, verylate):
feed = Feed.objects.get(slug=FEED_SLUG)
routedates = defaultdict(list)
for rd in RouteDate.objects.filter(route__feed=feed).prefetch_related('route').all():
routedates[rd.route_id].append(rd)
results = []
for route_id in routedates:
route = routedates[route_id][0].route
rds = routedates[route_id]
num_trips = sum([rd.num_scheduled_trips for rd in rds])
if num_trips == 0:
continue
total_ontime = sum([rd.scheduled_trip_ontime_count for rd in rds])
total_verylate = sum([rd.scheduled_trip_verylate_count for rd in rds])
if num_trips < MIN_TRIPS:
continue
result = [
route.id,
num_trips,
100 * total_ontime / num_trips,
100 * total_verylate / num_trips,
route.short_name,
route.long_name,
route.agency.name,
]
results.append(result)
if verylate:
results.sort(key=lambda x: x[3], reverse=not best)
else:
results.sort(key=lambda x: x[2], reverse=best)
for i in range(50):
res = results[i]
desc = '\t'.join(res[4:])
out = f'{i+1}\t{res[0]}\t{res[1]}\t{res[2]:.2f}\t{res[3]:.2f}\t' + desc
print(out)
if __name__ == '__main__':
if len(sys.argv) < 3:
print(f'Usage: {sys.argv[0]} <best|worst> <ontime|verylate>')
sys.exit(1)
best = sys.argv[1] == 'best'
verylate = sys.argv[2] == 'verylate'
main(best, verylate)
|
<commit_before><commit_msg>Add script to aggregate RouteDates and display ranked lists of routes.<commit_after>from collections import defaultdict
import sys
import django
django.setup()
from busshaming.models import RouteDate, Feed
FEED_SLUG = 'nsw-buses'
MIN_TRIPS = 500
MIN_RT_ENTRIES = 0
def main(best, verylate):
feed = Feed.objects.get(slug=FEED_SLUG)
routedates = defaultdict(list)
for rd in RouteDate.objects.filter(route__feed=feed).prefetch_related('route').all():
routedates[rd.route_id].append(rd)
results = []
for route_id in routedates:
route = routedates[route_id][0].route
rds = routedates[route_id]
num_trips = sum([rd.num_scheduled_trips for rd in rds])
if num_trips == 0:
continue
total_ontime = sum([rd.scheduled_trip_ontime_count for rd in rds])
total_verylate = sum([rd.scheduled_trip_verylate_count for rd in rds])
if num_trips < MIN_TRIPS:
continue
result = [
route.id,
num_trips,
100 * total_ontime / num_trips,
100 * total_verylate / num_trips,
route.short_name,
route.long_name,
route.agency.name,
]
results.append(result)
if verylate:
results.sort(key=lambda x: x[3], reverse=not best)
else:
results.sort(key=lambda x: x[2], reverse=best)
for i in range(50):
res = results[i]
desc = '\t'.join(res[4:])
out = f'{i+1}\t{res[0]}\t{res[1]}\t{res[2]:.2f}\t{res[3]:.2f}\t' + desc
print(out)
if __name__ == '__main__':
if len(sys.argv) < 3:
print(f'Usage: {sys.argv[0]} <best|worst> <ontime|verylate>')
sys.exit(1)
best = sys.argv[1] == 'best'
verylate = sys.argv[2] == 'verylate'
main(best, verylate)
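A worked example of the per-route percentages computed above, with assumed counts; the script prints rank, route id, trip count, the two percentages and the route description as tab-separated columns:
num_trips = 1200          # assumed scheduled trips over the period
total_ontime = 1020       # assumed on-time count
total_verylate = 36       # assumed very-late count
pct_ontime = 100 * total_ontime / num_trips       # 85.0
pct_verylate = 100 * total_verylate / num_trips   # 3.0
print('{:.2f}\t{:.2f}'.format(pct_ontime, pct_verylate))   # 85.00  3.00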
|
|
67e0765e3bc98720fbda5febffdd5d4c3b9865ef
|
opps/article/urls.py
|
opps/article/urls.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from django.conf.urls.defaults import patterns, url
from django.conf.urls import include
from opps.article.views import OppsDetail
urlpatterns = patterns('',
url(r'^redactor/', include('redactor.urls')),
url(r'^(?P<channel__slug_name>[0-9A-Za-z-_.//]+)/(?P<slug>[\w-]+)$',
OppsDetail.as_view(), name='home'),
)
|
Create basic url on opps article
|
Create basic url on opps article
|
Python
|
mit
|
williamroot/opps,williamroot/opps,opps/opps,opps/opps,opps/opps,williamroot/opps,jeanmask/opps,williamroot/opps,jeanmask/opps,opps/opps,YACOWS/opps,jeanmask/opps,YACOWS/opps,YACOWS/opps,YACOWS/opps,jeanmask/opps
|
Create basic url on opps article
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from django.conf.urls.defaults import patterns, url
from django.conf.urls import include
from opps.article.views import OppsDetail
urlpatterns = patterns('',
url(r'^redactor/', include('redactor.urls')),
url(r'^(?P<channel__slug_name>[0-9A-Za-z-_.//]+)/(?P<slug>[\w-]+)$',
OppsDetail.as_view(), name='home'),
)
|
<commit_before><commit_msg>Create basic url on opps article<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from django.conf.urls.defaults import patterns, url
from django.conf.urls import include
from opps.article.views import OppsDetail
urlpatterns = patterns('',
url(r'^redactor/', include('redactor.urls')),
url(r'^(?P<channel__slug_name>[0-9A-Za-z-_.//]+)/(?P<slug>[\w-]+)$',
OppsDetail.as_view(), name='home'),
)
|
Create basic url on opps article#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from django.conf.urls.defaults import patterns, url
from django.conf.urls import include
from opps.article.views import OppsDetail
urlpatterns = patterns('',
url(r'^redactor/', include('redactor.urls')),
url(r'^(?P<channel__slug_name>[0-9A-Za-z-_.//]+)/(?P<slug>[\w-]+)$',
OppsDetail.as_view(), name='home'),
)
|
<commit_before><commit_msg>Create basic url on opps article<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from django.conf.urls.defaults import patterns, url
from django.conf.urls import include
from opps.article.views import OppsDetail
urlpatterns = patterns('',
url(r'^redactor/', include('redactor.urls')),
url(r'^(?P<channel__slug_name>[0-9A-Za-z-_.//]+)/(?P<slug>[\w-]+)$',
OppsDetail.as_view(), name='home'),
)
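An illustration of the kind of path the channel/slug pattern captures, using made-up values and plain re to show the named groups:
import re
pattern = re.compile(r'^(?P<channel__slug_name>[0-9A-Za-z-_.//]+)/(?P<slug>[\w-]+)$')
match = pattern.match('tecnologia/novo-artigo')   # made-up channel and article slugs
print(match.groupdict())   # {'channel__slug_name': 'tecnologia', 'slug': 'novo-artigo'}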
|
|
411e42feab4d5102cb7f7c591afaa3404fe76912
|
ironicclient/tests/functional/osc/v1/test_baremetal_node_create_negative.py
|
ironicclient/tests/functional/osc/v1/test_baremetal_node_create_negative.py
|
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import six
from tempest.lib import exceptions
from ironicclient.tests.functional.osc.v1 import base
@ddt.ddt
class BaremetalNodeCreateNegativeTests(base.TestCase):
"""Negative tests for node create command."""
def setUp(self):
super(BaremetalNodeCreateNegativeTests, self).setUp()
@staticmethod
def constuct_cmd(base_cmd, argument, value):
cmd = base_cmd
if argument:
cmd = '{0} {1} {2}'.format(cmd, argument, value)
return cmd
@ddt.data(
('--uuid', '', 'expected one argument'),
('--uuid', '!@#$^*&%^', 'Expected a UUID'),
('--uuid', '0000 0000', 'unrecognized arguments'),
('--driver-info', '', 'expected one argument'),
('--driver-info', 'some info', 'unrecognized arguments'),
('--property', '', 'expected one argument'),
('--property', 'some property', 'unrecognized arguments'),
('--extra', '', 'expected one argument'),
('--extra', 'some extra', 'unrecognized arguments'),
('--name', '', 'expected one argument'),
('--name', 'some name', 'unrecognized arguments'),
('--network-interface', '', 'expected one argument'),
('--resource-class', '', 'expected one argument'))
@ddt.unpack
def test_baremetal_node_create(self, argument, value, ex_text):
base_cmd = 'baremetal node create --driver fake'
command = self.constuct_cmd(base_cmd, argument, value)
six.assertRaisesRegex(self, exceptions.CommandFailed, ex_text,
self.openstack, command)
|
Add negative test-cases for openstack node create command
|
Add negative test-cases for openstack node create command
Change-Id: Icf7afa131f3cc07c19df88bd254b4aa823c6bef4
Partial-Bug: #1630288
|
Python
|
apache-2.0
|
openstack/python-ironicclient,openstack/python-ironicclient
|
Add negative test-cases for openstack node create command
Change-Id: Icf7afa131f3cc07c19df88bd254b4aa823c6bef4
Partial-Bug: #1630288
|
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import six
from tempest.lib import exceptions
from ironicclient.tests.functional.osc.v1 import base
@ddt.ddt
class BaremetalNodeCreateNegativeTests(base.TestCase):
"""Negative tests for node create command."""
def setUp(self):
super(BaremetalNodeCreateNegativeTests, self).setUp()
@staticmethod
def constuct_cmd(base_cmd, argument, value):
cmd = base_cmd
if argument:
cmd = '{0} {1} {2}'.format(cmd, argument, value)
return cmd
@ddt.data(
('--uuid', '', 'expected one argument'),
('--uuid', '!@#$^*&%^', 'Expected a UUID'),
('--uuid', '0000 0000', 'unrecognized arguments'),
('--driver-info', '', 'expected one argument'),
('--driver-info', 'some info', 'unrecognized arguments'),
('--property', '', 'expected one argument'),
('--property', 'some property', 'unrecognized arguments'),
('--extra', '', 'expected one argument'),
('--extra', 'some extra', 'unrecognized arguments'),
('--name', '', 'expected one argument'),
('--name', 'some name', 'unrecognized arguments'),
('--network-interface', '', 'expected one argument'),
('--resource-class', '', 'expected one argument'))
@ddt.unpack
def test_baremetal_node_create(self, argument, value, ex_text):
base_cmd = 'baremetal node create --driver fake'
command = self.constuct_cmd(base_cmd, argument, value)
six.assertRaisesRegex(self, exceptions.CommandFailed, ex_text,
self.openstack, command)
|
<commit_before><commit_msg>Add negative test-cases for openstack node create command
Change-Id: Icf7afa131f3cc07c19df88bd254b4aa823c6bef4
Partial-Bug: #1630288<commit_after>
|
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import six
from tempest.lib import exceptions
from ironicclient.tests.functional.osc.v1 import base
@ddt.ddt
class BaremetalNodeCreateNegativeTests(base.TestCase):
"""Negative tests for node create command."""
def setUp(self):
super(BaremetalNodeCreateNegativeTests, self).setUp()
@staticmethod
def constuct_cmd(base_cmd, argument, value):
cmd = base_cmd
if argument:
cmd = '{0} {1} {2}'.format(cmd, argument, value)
return cmd
@ddt.data(
('--uuid', '', 'expected one argument'),
('--uuid', '!@#$^*&%^', 'Expected a UUID'),
('--uuid', '0000 0000', 'unrecognized arguments'),
('--driver-info', '', 'expected one argument'),
('--driver-info', 'some info', 'unrecognized arguments'),
('--property', '', 'expected one argument'),
('--property', 'some property', 'unrecognized arguments'),
('--extra', '', 'expected one argument'),
('--extra', 'some extra', 'unrecognized arguments'),
('--name', '', 'expected one argument'),
('--name', 'some name', 'unrecognized arguments'),
('--network-interface', '', 'expected one argument'),
('--resource-class', '', 'expected one argument'))
@ddt.unpack
def test_baremetal_node_create(self, argument, value, ex_text):
base_cmd = 'baremetal node create --driver fake'
command = self.constuct_cmd(base_cmd, argument, value)
six.assertRaisesRegex(self, exceptions.CommandFailed, ex_text,
self.openstack, command)
|
Add negative test-cases for openstack node create command
Change-Id: Icf7afa131f3cc07c19df88bd254b4aa823c6bef4
Partial-Bug: #1630288# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import six
from tempest.lib import exceptions
from ironicclient.tests.functional.osc.v1 import base
@ddt.ddt
class BaremetalNodeCreateNegativeTests(base.TestCase):
"""Negative tests for node create command."""
def setUp(self):
super(BaremetalNodeCreateNegativeTests, self).setUp()
@staticmethod
def constuct_cmd(base_cmd, argument, value):
cmd = base_cmd
if argument:
cmd = '{0} {1} {2}'.format(cmd, argument, value)
return cmd
@ddt.data(
('--uuid', '', 'expected one argument'),
('--uuid', '!@#$^*&%^', 'Expected a UUID'),
('--uuid', '0000 0000', 'unrecognized arguments'),
('--driver-info', '', 'expected one argument'),
('--driver-info', 'some info', 'unrecognized arguments'),
('--property', '', 'expected one argument'),
('--property', 'some property', 'unrecognized arguments'),
('--extra', '', 'expected one argument'),
('--extra', 'some extra', 'unrecognized arguments'),
('--name', '', 'expected one argument'),
('--name', 'some name', 'unrecognized arguments'),
('--network-interface', '', 'expected one argument'),
('--resource-class', '', 'expected one argument'))
@ddt.unpack
def test_baremetal_node_create(self, argument, value, ex_text):
base_cmd = 'baremetal node create --driver fake'
command = self.constuct_cmd(base_cmd, argument, value)
six.assertRaisesRegex(self, exceptions.CommandFailed, ex_text,
self.openstack, command)
|
<commit_before><commit_msg>Add negative test-cases for openstack node create command
Change-Id: Icf7afa131f3cc07c19df88bd254b4aa823c6bef4
Partial-Bug: #1630288<commit_after># Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import six
from tempest.lib import exceptions
from ironicclient.tests.functional.osc.v1 import base
@ddt.ddt
class BaremetalNodeCreateNegativeTests(base.TestCase):
"""Negative tests for node create command."""
def setUp(self):
super(BaremetalNodeCreateNegativeTests, self).setUp()
@staticmethod
def constuct_cmd(base_cmd, argument, value):
cmd = base_cmd
if argument:
cmd = '{0} {1} {2}'.format(cmd, argument, value)
return cmd
@ddt.data(
('--uuid', '', 'expected one argument'),
('--uuid', '!@#$^*&%^', 'Expected a UUID'),
('--uuid', '0000 0000', 'unrecognized arguments'),
('--driver-info', '', 'expected one argument'),
('--driver-info', 'some info', 'unrecognized arguments'),
('--property', '', 'expected one argument'),
('--property', 'some property', 'unrecognized arguments'),
('--extra', '', 'expected one argument'),
('--extra', 'some extra', 'unrecognized arguments'),
('--name', '', 'expected one argument'),
('--name', 'some name', 'unrecognized arguments'),
('--network-interface', '', 'expected one argument'),
('--resource-class', '', 'expected one argument'))
@ddt.unpack
def test_baremetal_node_create(self, argument, value, ex_text):
base_cmd = 'baremetal node create --driver fake'
command = self.constuct_cmd(base_cmd, argument, value)
six.assertRaisesRegex(self, exceptions.CommandFailed, ex_text,
self.openstack, command)
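For readers unfamiliar with ddt, a small illustration of what one data tuple expands to; the command string below is reconstructed from the helper shown above, not taken from the commit.
# Illustration only: how constuct_cmd() assembles the failing command for the
# tuple ('--uuid', '0000 0000', 'unrecognized arguments').
base_cmd = 'baremetal node create --driver fake'
cmd = '{0} {1} {2}'.format(base_cmd, '--uuid', '0000 0000')
# cmd == 'baremetal node create --driver fake --uuid 0000 0000'
# The generated test then expects CommandFailed matching 'unrecognized arguments'.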
|
|
9701566d43905d858ab01d82aa414d5b5a93f8f6
|
test/test_text_api.py
|
test/test_text_api.py
|
"""
This module tests the text api module (pyqode.core.text)
"""
import os
from PyQt4 import QtGui
from PyQt4.QtTest import QTest
import sys
from pyqode.core.editor import QCodeEdit
from pyqode.core import client
from pyqode.core import text
from .helpers import cwd_at
app = None
editor = None
window = None
def process_events():
global app
app.processEvents()
@cwd_at('test')
def setup_module():
"""
Setup a QApplication and QCodeEdit which open the client module code
"""
global app, editor, window
app = QtGui.QApplication(sys.argv)
window = QtGui.QMainWindow()
editor = QCodeEdit(window)
window.setCentralWidget(editor)
editor.open_file(client.__file__)
window.show()
client.start_server(editor, os.path.join(os.getcwd(), 'server.py'))
while not client.connected(editor):
QTest.qWait(100)
def teardown():
"""
Close server and exit QApplication
"""
global editor, app
client.stop_server(editor)
app.exit(0)
QTest.qWait(1000)
del editor
del app
def test_goto_line():
global editor, window
QTest.qWaitForWindowShown(window)
assert editor.textCursor().blockNumber() == 0
assert editor.textCursor().columnNumber() == 0
cursor = text.goto_line(editor, 2, 0, move=False)
process_events()
assert editor.textCursor().blockNumber() != cursor.blockNumber()
assert editor.textCursor().columnNumber() == cursor.columnNumber()
cursor = text.goto_line(editor, 2, move=True)
process_events()
assert editor.textCursor().blockNumber() == cursor.blockNumber() == 1
assert editor.textCursor().columnNumber() == cursor.columnNumber()
|
Add a test for goto_line
|
Add a test for goto_line
|
Python
|
mit
|
pyQode/pyqode.core,pyQode/pyqode.core,zwadar/pyqode.core
|
Add a test for goto_line
|
"""
This module tests the text api module (pyqode.core.text)
"""
import os
from PyQt4 import QtGui
from PyQt4.QtTest import QTest
import sys
from pyqode.core.editor import QCodeEdit
from pyqode.core import client
from pyqode.core import text
from .helpers import cwd_at
app = None
editor = None
window = None
def process_events():
global app
app.processEvents()
@cwd_at('test')
def setup_module():
"""
Setup a QApplication and QCodeEdit which open the client module code
"""
global app, editor, window
app = QtGui.QApplication(sys.argv)
window = QtGui.QMainWindow()
editor = QCodeEdit(window)
window.setCentralWidget(editor)
editor.open_file(client.__file__)
window.show()
client.start_server(editor, os.path.join(os.getcwd(), 'server.py'))
while not client.connected(editor):
QTest.qWait(100)
def teardown():
"""
Close server and exit QApplication
"""
global editor, app
client.stop_server(editor)
app.exit(0)
QTest.qWait(1000)
del editor
del app
def test_goto_line():
global editor, window
QTest.qWaitForWindowShown(window)
assert editor.textCursor().blockNumber() == 0
assert editor.textCursor().columnNumber() == 0
cursor = text.goto_line(editor, 2, 0, move=False)
process_events()
assert editor.textCursor().blockNumber() != cursor.blockNumber()
assert editor.textCursor().columnNumber() == cursor.columnNumber()
cursor = text.goto_line(editor, 2, move=True)
process_events()
assert editor.textCursor().blockNumber() == cursor.blockNumber() == 1
assert editor.textCursor().columnNumber() == cursor.columnNumber()
|
<commit_before><commit_msg>Add a test for goto_line<commit_after>
|
"""
This module tests the text api module (pyqode.core.text)
"""
import os
from PyQt4 import QtGui
from PyQt4.QtTest import QTest
import sys
from pyqode.core.editor import QCodeEdit
from pyqode.core import client
from pyqode.core import text
from .helpers import cwd_at
app = None
editor = None
window = None
def process_events():
global app
app.processEvents()
@cwd_at('test')
def setup_module():
"""
Setup a QApplication and QCodeEdit which open the client module code
"""
global app, editor, window
app = QtGui.QApplication(sys.argv)
window = QtGui.QMainWindow()
editor = QCodeEdit(window)
window.setCentralWidget(editor)
editor.open_file(client.__file__)
window.show()
client.start_server(editor, os.path.join(os.getcwd(), 'server.py'))
while not client.connected(editor):
QTest.qWait(100)
def teardown():
"""
Close server and exit QApplication
"""
global editor, app
client.stop_server(editor)
app.exit(0)
QTest.qWait(1000)
del editor
del app
def test_goto_line():
global editor, window
QTest.qWaitForWindowShown(window)
assert editor.textCursor().blockNumber() == 0
assert editor.textCursor().columnNumber() == 0
cursor = text.goto_line(editor, 2, 0, move=False)
process_events()
assert editor.textCursor().blockNumber() != cursor.blockNumber()
assert editor.textCursor().columnNumber() == cursor.columnNumber()
cursor = text.goto_line(editor, 2, move=True)
process_events()
assert editor.textCursor().blockNumber() == cursor.blockNumber() == 1
assert editor.textCursor().columnNumber() == cursor.columnNumber()
|
Add a test for goto_line
"""
This module tests the text api module (pyqode.core.text)
"""
import os
from PyQt4 import QtGui
from PyQt4.QtTest import QTest
import sys
from pyqode.core.editor import QCodeEdit
from pyqode.core import client
from pyqode.core import text
from .helpers import cwd_at
app = None
editor = None
window = None
def process_events():
global app
app.processEvents()
@cwd_at('test')
def setup_module():
"""
Setup a QApplication and QCodeEdit which open the client module code
"""
global app, editor, window
app = QtGui.QApplication(sys.argv)
window = QtGui.QMainWindow()
editor = QCodeEdit(window)
window.setCentralWidget(editor)
editor.open_file(client.__file__)
window.show()
client.start_server(editor, os.path.join(os.getcwd(), 'server.py'))
while not client.connected(editor):
QTest.qWait(100)
def teardown():
"""
Close server and exit QApplication
"""
global editor, app
client.stop_server(editor)
app.exit(0)
QTest.qWait(1000)
del editor
del app
def test_goto_line():
global editor, window
QTest.qWaitForWindowShown(window)
assert editor.textCursor().blockNumber() == 0
assert editor.textCursor().columnNumber() == 0
cursor = text.goto_line(editor, 2, 0, move=False)
process_events()
assert editor.textCursor().blockNumber() != cursor.blockNumber()
assert editor.textCursor().columnNumber() == cursor.columnNumber()
cursor = text.goto_line(editor, 2, move=True)
process_events()
assert editor.textCursor().blockNumber() == cursor.blockNumber() == 1
assert editor.textCursor().columnNumber() == cursor.columnNumber()
|
<commit_before><commit_msg>Add a test for goto_line<commit_after>"""
This module tests the text api module (pyqode.core.text)
"""
import os
from PyQt4 import QtGui
from PyQt4.QtTest import QTest
import sys
from pyqode.core.editor import QCodeEdit
from pyqode.core import client
from pyqode.core import text
from .helpers import cwd_at
app = None
editor = None
window = None
def process_events():
global app
app.processEvents()
@cwd_at('test')
def setup_module():
"""
Setup a QApplication and QCodeEdit which open the client module code
"""
global app, editor, window
app = QtGui.QApplication(sys.argv)
window = QtGui.QMainWindow()
editor = QCodeEdit(window)
window.setCentralWidget(editor)
editor.open_file(client.__file__)
window.show()
client.start_server(editor, os.path.join(os.getcwd(), 'server.py'))
while not client.connected(editor):
QTest.qWait(100)
def teardown():
"""
Close server and exit QApplication
"""
global editor, app
client.stop_server(editor)
app.exit(0)
QTest.qWait(1000)
del editor
del app
def test_goto_line():
global editor, window
QTest.qWaitForWindowShown(window)
assert editor.textCursor().blockNumber() == 0
assert editor.textCursor().columnNumber() == 0
cursor = text.goto_line(editor, 2, 0, move=False)
process_events()
assert editor.textCursor().blockNumber() != cursor.blockNumber()
assert editor.textCursor().columnNumber() == cursor.columnNumber()
cursor = text.goto_line(editor, 2, move=True)
process_events()
assert editor.textCursor().blockNumber() == cursor.blockNumber() == 1
assert editor.textCursor().columnNumber() == cursor.columnNumber()
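A compact restatement of the behaviour these assertions pin down; the snippet is hypothetical and reuses only the calls already shown above.
# Sketch: goto_line returns a cursor for the requested 1-based line.
# With move=False the editor's own cursor is left untouched; with move=True
# the editor is repositioned too (line 2 maps to blockNumber() == 1).
c = text.goto_line(editor, 2, 0, move=False)
print(c.blockNumber(), editor.textCursor().blockNumber())   # 1, unchanged
c = text.goto_line(editor, 2, move=True)
print(c.blockNumber(), editor.textCursor().blockNumber())   # 1, 1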
|
|
501a52ae39a63f58e2de2f7f31c6eb82e49f2e0a
|
comics/comics/hagarthehorrible.py
|
comics/comics/hagarthehorrible.py
|
# encoding: utf-8
from comics.aggregator.crawler import ComicsKingdomCrawlerBase
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = 'Hägar the Horrible'
language = 'en'
url = 'https://www.comicskingdom.com/hagar-the-horrible'
rights = 'Chris Browne'
class Crawler(ComicsKingdomCrawlerBase):
history_capable_days = 6
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = 'US/Eastern'
def crawl(self, pub_date):
return self.crawl_helper('hagar-the-horrible', pub_date)
|
Add crawler for "Hägar the Horrible"
|
Add crawler for "Hägar the Horrible"
|
Python
|
agpl-3.0
|
datagutten/comics,jodal/comics,datagutten/comics,datagutten/comics,jodal/comics,datagutten/comics,jodal/comics,jodal/comics
|
Add crawler for "Hägar the Horrible"
|
# encoding: utf-8
from comics.aggregator.crawler import ComicsKingdomCrawlerBase
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = 'Hägar the Horrible'
language = 'en'
url = 'https://www.comicskingdom.com/hagar-the-horrible'
rights = 'Chris Browne'
class Crawler(ComicsKingdomCrawlerBase):
history_capable_days = 6
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = 'US/Eastern'
def crawl(self, pub_date):
return self.crawl_helper('hagar-the-horrible', pub_date)
|
<commit_before><commit_msg>Add crawler for "Hägar the Horrible"<commit_after>
|
# encoding: utf-8
from comics.aggregator.crawler import ComicsKingdomCrawlerBase
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = 'Hägar the Horrible'
language = 'en'
url = 'https://www.comicskingdom.com/hagar-the-horrible'
rights = 'Chris Browne'
class Crawler(ComicsKingdomCrawlerBase):
history_capable_days = 6
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = 'US/Eastern'
def crawl(self, pub_date):
return self.crawl_helper('hagar-the-horrible', pub_date)
|
Add crawler for "Hägar the Horrible"
# encoding: utf-8
from comics.aggregator.crawler import ComicsKingdomCrawlerBase
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = 'Hägar the Horrible'
language = 'en'
url = 'https://www.comicskingdom.com/hagar-the-horrible'
rights = 'Chris Browne'
class Crawler(ComicsKingdomCrawlerBase):
history_capable_days = 6
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = 'US/Eastern'
def crawl(self, pub_date):
return self.crawl_helper('hagar-the-horrible', pub_date)
|
<commit_before><commit_msg>Add crawler for "Hägar the Horrible"<commit_after># encoding: utf-8
from comics.aggregator.crawler import ComicsKingdomCrawlerBase
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = 'Hägar the Horrible'
language = 'en'
url = 'https://www.comicskingdom.com/hagar-the-horrible'
rights = 'Chris Browne'
class Crawler(ComicsKingdomCrawlerBase):
history_capable_days = 6
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = 'US/Eastern'
def crawl(self, pub_date):
return self.crawl_helper('hagar-the-horrible', pub_date)
|
|
11a471ae01b1ad80f090d952a744679aac6d5d15
|
polymorphic/utils.py
|
polymorphic/utils.py
|
from django.contrib.contenttypes.models import ContentType
def reset_polymorphic_ctype(*models, **filters):
"""
Set the polymorphic content-type ID field to the proper model
Sort the ``*models`` from base class to descending class,
to make sure the content types are properly assigned.
Add ``preserve_existing=True`` to skip models which already
have a polymorphic content type.
"""
preserve_existing = filters.pop('preserve_existing', False)
for new_model in models:
new_ct = ContentType.objects.get_for_model(new_model)
qs = new_model.objects.all()
if preserve_existing:
qs = qs.filter(polymorphic_ctype__isnull=True)
if filters:
qs = qs.filter(**filters)
qs.update(polymorphic_ctype=new_ct)
|
Add reset_polymorphic_ctype() function to assist with migration to polymorphic
|
Add reset_polymorphic_ctype() function to assist with migration to polymorphic
|
Python
|
bsd-3-clause
|
chrisglass/django_polymorphic,skirsdeda/django_polymorphic,chrisglass/django_polymorphic,skirsdeda/django_polymorphic,skirsdeda/django_polymorphic
|
Add reset_polymorphic_ctype() function to assist with migration to polymorphic
|
from django.contrib.contenttypes.models import ContentType
def reset_polymorphic_ctype(*models, **filters):
"""
Set the polymorphic content-type ID field to the proper model
Sort the ``*models`` from base class to descending class,
to make sure the content types are properly assigned.
Add ``preserve_existing=True`` to skip models which already
have a polymorphic content type.
"""
preserve_existing = filters.pop('preserve_existing', False)
for new_model in models:
new_ct = ContentType.objects.get_for_model(new_model)
qs = new_model.objects.all()
if preserve_existing:
qs = qs.filter(polymorphic_ctype__isnull=True)
if filters:
qs = qs.filter(**filters)
qs.update(polymorphic_ctype=new_ct)
|
<commit_before><commit_msg>Add reset_polymorphic_ctype() function to assist with migration to polymorphic<commit_after>
|
from django.contrib.contenttypes.models import ContentType
def reset_polymorphic_ctype(*models, **filters):
"""
Set the polymorphic content-type ID field to the proper model
Sort the ``*models`` from base class to descending class,
to make sure the content types are properly assigned.
Add ``preserve_existing=True`` to skip models which already
have a polymorphic content type.
"""
preserve_existing = filters.pop('preserve_existing', False)
for new_model in models:
new_ct = ContentType.objects.get_for_model(new_model)
qs = new_model.objects.all()
if preserve_existing:
qs = qs.filter(polymorphic_ctype__isnull=True)
if filters:
qs = qs.filter(**filters)
qs.update(polymorphic_ctype=new_ct)
|
Add reset_polymorphic_ctype() function to assist with migration to polymorphic
from django.contrib.contenttypes.models import ContentType
def reset_polymorphic_ctype(*models, **filters):
"""
Set the polymorphic content-type ID field to the proper model
Sort the ``*models`` from base class to descending class,
to make sure the content types are properly assigned.
Add ``preserve_existing=True`` to skip models which already
have a polymorphic content type.
"""
preserve_existing = filters.pop('preserve_existing', False)
for new_model in models:
new_ct = ContentType.objects.get_for_model(new_model)
qs = new_model.objects.all()
if preserve_existing:
qs = qs.filter(polymorphic_ctype__isnull=True)
if filters:
qs = qs.filter(**filters)
qs.update(polymorphic_ctype=new_ct)
|
<commit_before><commit_msg>Add reset_polymorphic_ctype() function to assist with migration to polymorphic<commit_after>from django.contrib.contenttypes.models import ContentType
def reset_polymorphic_ctype(*models, **filters):
"""
Set the polymorphic content-type ID field to the proper model
Sort the ``*models`` from base class to descending class,
to make sure the content types are properly assigned.
Add ``preserve_existing=True`` to skip models which already
have a polymorphic content type.
"""
preserve_existing = filters.pop('preserve_existing', False)
for new_model in models:
new_ct = ContentType.objects.get_for_model(new_model)
qs = new_model.objects.all()
if preserve_existing:
qs = qs.filter(polymorphic_ctype__isnull=True)
if filters:
qs = qs.filter(**filters)
qs.update(polymorphic_ctype=new_ct)
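A minimal usage sketch; the app and model names below are placeholders, not part of the commit. The helper is typically called once, from a RunPython data migration, after a model tree has been switched over to django-polymorphic.
# Hypothetical data migration using the new helper; Base/Child are assumed
# example models, ordered from base class to most-derived class.
from django.db import migrations
from polymorphic.utils import reset_polymorphic_ctype

def forwards(apps, schema_editor):
    from myapp.models import Base, Child  # assumed example models
    reset_polymorphic_ctype(Base, Child, preserve_existing=True)

class Migration(migrations.Migration):
    dependencies = [('myapp', '0002_switch_to_polymorphic')]
    operations = [migrations.RunPython(forwards, migrations.RunPython.noop)]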
|
|
368980d3286237baaaf811e17c2681496ec00f8a
|
tests/v3/test_auth.py
|
tests/v3/test_auth.py
|
import puresnmp.auth as auth
def test_password_to_key():
from hashlib import md5
hasher = auth.password_to_key(md5, 16)
result = hasher(b"foo", b"bar")
expected = b"x\xf4\xdf-#\x19\x95\xe0\x8f\xcd\x1f{\xa87\x99\x06"
assert result == expected
|
Add tests for v3 auth
|
Add tests for v3 auth
|
Python
|
mit
|
exhuma/puresnmp,exhuma/puresnmp
|
Add tests for v3 auth
|
import puresnmp.auth as auth
def test_password_to_key():
from hashlib import md5
hasher = auth.password_to_key(md5, 16)
result = hasher(b"foo", b"bar")
expected = b"x\xf4\xdf-#\x19\x95\xe0\x8f\xcd\x1f{\xa87\x99\x06"
assert result == expected
|
<commit_before><commit_msg>Add tests for v3 auth<commit_after>
|
import puresnmp.auth as auth
def test_password_to_key():
from hashlib import md5
hasher = auth.password_to_key(md5, 16)
result = hasher(b"foo", b"bar")
expected = b"x\xf4\xdf-#\x19\x95\xe0\x8f\xcd\x1f{\xa87\x99\x06"
assert result == expected
|
Add tests for v3 auth
import puresnmp.auth as auth
def test_password_to_key():
from hashlib import md5
hasher = auth.password_to_key(md5, 16)
result = hasher(b"foo", b"bar")
expected = b"x\xf4\xdf-#\x19\x95\xe0\x8f\xcd\x1f{\xa87\x99\x06"
assert result == expected
|
<commit_before><commit_msg>Add tests for v3 auth<commit_after>import puresnmp.auth as auth
def test_password_to_key():
from hashlib import md5
hasher = auth.password_to_key(md5, 16)
result = hasher(b"foo", b"bar")
expected = b"x\xf4\xdf-#\x19\x95\xe0\x8f\xcd\x1f{\xa87\x99\x06"
assert result == expected
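A hedged companion sketch, not part of the commit: the factory appears to take any hashlib constructor plus a key length, so an HMAC-SHA variant would presumably be requested the same way. No digest bytes are asserted here because they depend on puresnmp's key-localization code.
# Assumed reuse of the signature shown above: sha1 constructor, 20-byte key.
from hashlib import sha1
import puresnmp.auth as auth

sha_hasher = auth.password_to_key(sha1, 20)
localized = sha_hasher(b"maplesyrup", b"engine-id")  # expected: a 20-byte localized key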
|
|
69469033bf1e87230ec00850d8644f56f6ae66cb
|
opentreemap/otm1_migrator/migration_rules/sandiego.py
|
opentreemap/otm1_migrator/migration_rules/sandiego.py
|
from otm1_migrator.migration_rules.standard_otm1 import MIGRATION_RULES
udfs = {
'plot': {
'type': 'udf:Plot Type',
'powerline_conflict_potential': 'udf:Powerlines Overhead',
'sidewalk_damage': 'udf:Sidewalk Damage'
},
'tree': {
'condition': 'udf:Tree Condition',
'steward_user': 'udf:Tree Steward',
'sponsor': 'udf:Sponsor'
}
}
conversions = {
'plot': {
'powerline_conflict_potential': {'1': 'Yes',
'2': 'No',
'3': 'Unknown'},
'type': {'1': 'Well/Pit',
'2': 'Median/Island',
'3': 'Tree Lawn',
'4': 'Park',
'5': 'Planter',
'6': 'Other',
'7': 'Yard',
'8': 'Natural Area'},
'sidewalk_damage': {
'1': 'Minor or No Damage',
'2': 'Raised More Than 3/4 Inch'
}
},
'tree': {
'condition': {
'1': 'Dead',
'2': 'Critical',
'3': 'Poor',
'4': 'Fair',
'5': 'Good',
'6': 'Very Good',
'7': 'Excellent'
}
}
}
MIGRATION_RULES['plot']['removed_fields'] |= {'sunset_zone', 'district'}
del MIGRATION_RULES['species']['value_transformers']['native_status']
for model in {'plot', 'tree'}:
MIGRATION_RULES[model]['removed_fields'] -= set(udfs[model].keys())
for otm1name, otm2name in udfs[model].iteritems():
rules_for_model = MIGRATION_RULES[model]
udfs_fields = rules_for_model['renamed_fields']
udfs_fields[otm1name] = otm2name
if otm1name in conversions[model]:
if 'value_transformers' not in rules_for_model:
rules_for_model['value_transformers'] = {}
value_transf = rules_for_model['value_transformers']
value_transf[otm1name] = conversions[model][otm1name].get
|
Add rules for San Diego
|
Add rules for San Diego
|
Python
|
agpl-3.0
|
recklessromeo/otm-core,clever-crow-consulting/otm-core,recklessromeo/otm-core,maurizi/otm-core,RickMohr/otm-core,maurizi/otm-core,clever-crow-consulting/otm-core,RickMohr/otm-core,RickMohr/otm-core,clever-crow-consulting/otm-core,recklessromeo/otm-core,maurizi/otm-core,RickMohr/otm-core,maurizi/otm-core,recklessromeo/otm-core,clever-crow-consulting/otm-core
|
Add rules for San Diego
|
from otm1_migrator.migration_rules.standard_otm1 import MIGRATION_RULES
udfs = {
'plot': {
'type': 'udf:Plot Type',
'powerline_conflict_potential': 'udf:Powerlines Overhead',
'sidewalk_damage': 'udf:Sidewalk Damage'
},
'tree': {
'condition': 'udf:Tree Condition',
'steward_user': 'udf:Tree Steward',
'sponsor': 'udf:Sponsor'
}
}
conversions = {
'plot': {
'powerline_conflict_potential': {'1': 'Yes',
'2': 'No',
'3': 'Unknown'},
'type': {'1': 'Well/Pit',
'2': 'Median/Island',
'3': 'Tree Lawn',
'4': 'Park',
'5': 'Planter',
'6': 'Other',
'7': 'Yard',
'8': 'Natural Area'},
'sidewalk_damage': {
'1': 'Minor or No Damage',
'2': 'Raised More Than 3/4 Inch'
}
},
'tree': {
'condition': {
'1': 'Dead',
'2': 'Critical',
'3': 'Poor',
'4': 'Fair',
'5': 'Good',
'6': 'Very Good',
'7': 'Excellent'
}
}
}
MIGRATION_RULES['plot']['removed_fields'] |= {'sunset_zone', 'district'}
del MIGRATION_RULES['species']['value_transformers']['native_status']
for model in {'plot', 'tree'}:
MIGRATION_RULES[model]['removed_fields'] -= set(udfs[model].keys())
for otm1name, otm2name in udfs[model].iteritems():
rules_for_model = MIGRATION_RULES[model]
udfs_fields = rules_for_model['renamed_fields']
udfs_fields[otm1name] = otm2name
if otm1name in conversions[model]:
if 'value_transformers' not in rules_for_model:
rules_for_model['value_transformers'] = {}
value_transf = rules_for_model['value_transformers']
value_transf[otm1name] = conversions[model][otm1name].get
|
<commit_before><commit_msg>Add rules for San Diego<commit_after>
|
from otm1_migrator.migration_rules.standard_otm1 import MIGRATION_RULES
udfs = {
'plot': {
'type': 'udf:Plot Type',
'powerline_conflict_potential': 'udf:Powerlines Overhead',
'sidewalk_damage': 'udf:Sidewalk Damage'
},
'tree': {
'condition': 'udf:Tree Condition',
'steward_user': 'udf:Tree Steward',
'sponsor': 'udf:Sponsor'
}
}
conversions = {
'plot': {
'powerline_conflict_potential': {'1': 'Yes',
'2': 'No',
'3': 'Unknown'},
'type': {'1': 'Well/Pit',
'2': 'Median/Island',
'3': 'Tree Lawn',
'4': 'Park',
'5': 'Planter',
'6': 'Other',
'7': 'Yard',
'8': 'Natural Area'},
'sidewalk_damage': {
'1': 'Minor or No Damage',
'2': 'Raised More Than 3/4 Inch'
}
},
'tree': {
'condition': {
'1': 'Dead',
'2': 'Critical',
'3': 'Poor',
'4': 'Fair',
'5': 'Good',
'6': 'Very Good',
'7': 'Excellent'
}
}
}
MIGRATION_RULES['plot']['removed_fields'] |= {'sunset_zone', 'district'}
del MIGRATION_RULES['species']['value_transformers']['native_status']
for model in {'plot', 'tree'}:
MIGRATION_RULES[model]['removed_fields'] -= set(udfs[model].keys())
for otm1name, otm2name in udfs[model].iteritems():
rules_for_model = MIGRATION_RULES[model]
udfs_fields = rules_for_model['renamed_fields']
udfs_fields[otm1name] = otm2name
if otm1name in conversions[model]:
if 'value_transformers' not in rules_for_model:
rules_for_model['value_transformers'] = {}
value_transf = rules_for_model['value_transformers']
value_transf[otm1name] = conversions[model][otm1name].get
|
Add rules for San Diego
from otm1_migrator.migration_rules.standard_otm1 import MIGRATION_RULES
udfs = {
'plot': {
'type': 'udf:Plot Type',
'powerline_conflict_potential': 'udf:Powerlines Overhead',
'sidewalk_damage': 'udf:Sidewalk Damage'
},
'tree': {
'condition': 'udf:Tree Condition',
'steward_user': 'udf:Tree Steward',
'sponsor': 'udf:Sponsor'
}
}
conversions = {
'plot': {
'powerline_conflict_potential': {'1': 'Yes',
'2': 'No',
'3': 'Unknown'},
'type': {'1': 'Well/Pit',
'2': 'Median/Island',
'3': 'Tree Lawn',
'4': 'Park',
'5': 'Planter',
'6': 'Other',
'7': 'Yard',
'8': 'Natural Area'},
'sidewalk_damage': {
'1': 'Minor or No Damage',
'2': 'Raised More Than 3/4 Inch'
}
},
'tree': {
'condition': {
'1': 'Dead',
'2': 'Critical',
'3': 'Poor',
'4': 'Fair',
'5': 'Good',
'6': 'Very Good',
'7': 'Excellent'
}
}
}
MIGRATION_RULES['plot']['removed_fields'] |= {'sunset_zone', 'district'}
del MIGRATION_RULES['species']['value_transformers']['native_status']
for model in {'plot', 'tree'}:
MIGRATION_RULES[model]['removed_fields'] -= set(udfs[model].keys())
for otm1name, otm2name in udfs[model].iteritems():
rules_for_model = MIGRATION_RULES[model]
udfs_fields = rules_for_model['renamed_fields']
udfs_fields[otm1name] = otm2name
if otm1name in conversions[model]:
if 'value_transformers' not in rules_for_model:
rules_for_model['value_transformers'] = {}
value_transf = rules_for_model['value_transformers']
value_transf[otm1name] = conversions[model][otm1name].get
|
<commit_before><commit_msg>Add rules for San Diego<commit_after>from otm1_migrator.migration_rules.standard_otm1 import MIGRATION_RULES
udfs = {
'plot': {
'type': 'udf:Plot Type',
'powerline_conflict_potential': 'udf:Powerlines Overhead',
'sidewalk_damage': 'udf:Sidewalk Damage'
},
'tree': {
'condition': 'udf:Tree Condition',
'steward_user': 'udf:Tree Steward',
'sponsor': 'udf:Sponsor'
}
}
conversions = {
'plot': {
'powerline_conflict_potential': {'1': 'Yes',
'2': 'No',
'3': 'Unknown'},
'type': {'1': 'Well/Pit',
'2': 'Median/Island',
'3': 'Tree Lawn',
'4': 'Park',
'5': 'Planter',
'6': 'Other',
'7': 'Yard',
'8': 'Natural Area'},
'sidewalk_damage': {
'1': 'Minor or No Damage',
'2': 'Raised More Than 3/4 Inch'
}
},
'tree': {
'condition': {
'1': 'Dead',
'2': 'Critical',
'3': 'Poor',
'4': 'Fair',
'5': 'Good',
'6': 'Very Good',
'7': 'Excellent'
}
}
}
MIGRATION_RULES['plot']['removed_fields'] |= {'sunset_zone', 'district'}
del MIGRATION_RULES['species']['value_transformers']['native_status']
for model in {'plot', 'tree'}:
MIGRATION_RULES[model]['removed_fields'] -= set(udfs[model].keys())
for otm1name, otm2name in udfs[model].iteritems():
rules_for_model = MIGRATION_RULES[model]
udfs_fields = rules_for_model['renamed_fields']
udfs_fields[otm1name] = otm2name
if otm1name in conversions[model]:
if 'value_transformers' not in rules_for_model:
rules_for_model['value_transformers'] = {}
value_transf = rules_for_model['value_transformers']
value_transf[otm1name] = conversions[model][otm1name].get
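An illustration of what the installed value transformers do; this is a standalone snippet reusing the conversions dict defined above, not part of the migration rules themselves.
# Because each transformer is the bound .get of a conversion dict, an OTM1
# code maps to its OTM2 UDF choice and unmapped codes simply become None.
plot_type = conversions['plot']['type'].get   # exactly what the loop installs
assert plot_type('4') == 'Park'
assert plot_type('99') is None                # unknown legacy code falls through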
|
|
7101b30622f0b1586e6e8a7b6209b2689568fa0a
|
plyer/platforms/ios/storagepath.py
|
plyer/platforms/ios/storagepath.py
|
'''
iOS Storage Path
--------------------
'''
from plyer.facades import StoragePath
from pyobjus import autoclass
import os
NSFileManager = autoclass('NSFileManager')
# Directory constants (NSSearchPathDirectory enumeration)
NSApplicationDirectory = 1
NSDocumentDirectory = 9
NSDownloadsDirectory = 15
NSMoviesDirectory = 17
NSMusicDirectory = 18
NSPicturesDirectory = 19
class iOSStoragePath(StoragePath):
def __init__(self):
self.defaultManager = NSFileManager.defaultManager()
def _get_home_dir(self):
return os.path.expanduser('~/')
def _get_external_storage_dir(self):
return 'This feature is not implemented for this platform.'
def _get_root_dir(self):
return 'This feature is not implemented for this platform.'
def _get_documents_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSDocumentDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_downloads_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSDownloadsDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_videos_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSMoviesDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_music_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSMusicDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_pictures_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSPicturesDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_application_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSApplicationDirectory, 1).firstObject().absoluteString.UTF8String()
def instance():
return iOSStoragePath()
|
Add iOS api for storage path
|
Add iOS api for storage path
|
Python
|
mit
|
KeyWeeUsr/plyer,kivy/plyer,KeyWeeUsr/plyer,KeyWeeUsr/plyer,kivy/plyer,kivy/plyer
|
Add iOS api for storage path
|
'''
iOS Storage Path
--------------------
'''
from plyer.facades import StoragePath
from pyobjus import autoclass
import os
NSFileManager = autoclass('NSFileManager')
# Directory constants (NSSearchPathDirectory enumeration)
NSApplicationDirectory = 1
NSDocumentDirectory = 9
NSDownloadsDirectory = 15
NSMoviesDirectory = 17
NSMusicDirectory = 18
NSPicturesDirectory = 19
class iOSStoragePath(StoragePath):
def __init__(self):
self.defaultManager = NSFileManager.defaultManager()
def _get_home_dir(self):
return os.path.expanduser('~/')
def _get_external_storage_dir(self):
return 'This feature is not implemented for this platform.'
def _get_root_dir(self):
return 'This feature is not implemented for this platform.'
def _get_documents_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSDocumentDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_downloads_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSDownloadsDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_videos_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSMoviesDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_music_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSMusicDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_pictures_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSPicturesDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_application_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSApplicationDirectory, 1).firstObject().absoluteString.UTF8String()
def instance():
return iOSStoragePath()
|
<commit_before><commit_msg>Add iOS api for storage path<commit_after>
|
'''
iOS Storage Path
--------------------
'''
from plyer.facades import StoragePath
from pyobjus import autoclass
import os
NSFileManager = autoclass('NSFileManager')
# Directory constants (NSSearchPathDirectory enumeration)
NSApplicationDirectory = 1
NSDocumentDirectory = 9
NSDownloadsDirectory = 15
NSMoviesDirectory = 17
NSMusicDirectory = 18
NSPicturesDirectory = 19
class iOSStoragePath(StoragePath):
def __init__(self):
self.defaultManager = NSFileManager.defaultManager()
def _get_home_dir(self):
return os.path.expanduser('~/')
def _get_external_storage_dir(self):
return 'This feature is not implemented for this platform.'
def _get_root_dir(self):
return 'This feature is not implemented for this platform.'
def _get_documents_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSDocumentDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_downloads_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSDownloadsDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_videos_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSMoviesDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_music_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSMusicDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_pictures_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSPicturesDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_application_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSApplicationDirectory, 1).firstObject().absoluteString.UTF8String()
def instance():
return iOSStoragePath()
|
Add iOS api for storage path
'''
iOS Storage Path
--------------------
'''
from plyer.facades import StoragePath
from pyobjus import autoclass
import os
NSFileManager = autoclass('NSFileManager')
# Directory constants (NSSearchPathDirectory enumeration)
NSApplicationDirectory = 1
NSDocumentDirectory = 9
NSDownloadsDirectory = 15
NSMoviesDirectory = 17
NSMusicDirectory = 18
NSPicturesDirectory = 19
class iOSStoragePath(StoragePath):
def __init__(self):
self.defaultManager = NSFileManager.defaultManager()
def _get_home_dir(self):
return os.path.expanduser('~/')
def _get_external_storage_dir(self):
return 'This feature is not implemented for this platform.'
def _get_root_dir(self):
return 'This feature is not implemented for this platform.'
def _get_documents_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSDocumentDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_downloads_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSDownloadsDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_videos_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSMoviesDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_music_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSMusicDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_pictures_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSPicturesDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_application_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSApplicationDirectory, 1).firstObject().absoluteString.UTF8String()
def instance():
return iOSStoragePath()
|
<commit_before><commit_msg>Add iOS api for storage path<commit_after>'''
iOS Storage Path
--------------------
'''
from plyer.facades import StoragePath
from pyobjus import autoclass
import os
NSFileManager = autoclass('NSFileManager')
# Directory constants (NSSearchPathDirectory enumeration)
NSApplicationDirectory = 1
NSDocumentDirectory = 9
NSDownloadsDirectory = 15
NSMoviesDirectory = 17
NSMusicDirectory = 18
NSPicturesDirectory = 19
class iOSStoragePath(StoragePath):
def __init__(self):
self.defaultManager = NSFileManager.defaultManager()
def _get_home_dir(self):
return os.path.expanduser('~/')
def _get_external_storage_dir(self):
return 'This feature is not implemented for this platform.'
def _get_root_dir(self):
return 'This feature is not implemented for this platform.'
def _get_documents_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSDocumentDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_downloads_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSDownloadsDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_videos_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSMoviesDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_music_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSMusicDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_pictures_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSPicturesDirectory, 1).firstObject().absoluteString.UTF8String()
def _get_application_dir(self):
return self.defaultManager.URLsForDirectory_inDomains_(
NSApplicationDirectory, 1).firstObject().absoluteString.UTF8String()
def instance():
return iOSStoragePath()
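A hedged usage sketch, only meaningful inside an iOS build where pyobjus can load NSFileManager; the printed locations are illustrative.
# Sketch: instantiate the platform implementation and query a few locations
# using the getters defined above.
storage = instance()
print(storage._get_home_dir())               # app sandbox home via os.path.expanduser
print(storage._get_documents_dir())          # file:// URL string from NSFileManager
print(storage._get_external_storage_dir())   # placeholder message on this platform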
|
|
6a4fb5556a03df7863a8ccf5b91f9b0103c0d9bd
|
admin_extend/form_mixins.py
|
admin_extend/form_mixins.py
|
from django.forms import ModelForm
class BidirectionalM2MForm(ModelForm):
bi_m2m_fields = []
def _get_m2m_attr_name(self, m2m_field):
return '%s_set' % m2m_field
def __init__(self, *args, **kwargs):
super(BidirectionalM2MForm, self).__init__(*args, **kwargs)
if self.instance.pk is not None:
for m2m_field in self.bi_m2m_fields:
self.fields[m2m_field].initial = getattr(
self.instance, self._get_m2m_attr_name(m2m_field)).all()
def save(self, commit=True):
instance = super(BidirectionalM2MForm, self).save(commit=False)
force_save = self.instance.pk is None
if force_save:
instance.save()
for m2m_field in self.bi_m2m_fields:
attr_name = '%s_set' % m2m_field
setattr(self.instance, attr_name, self.cleaned_data[m2m_field])
if commit:
if not force_save:
instance.save()
self.save_m2m()
return instance
|
Add a mixin form that provides bidirectional m2m fields in the admin
|
Add a mixin form that provides bidirectional m2m fields in the admin
|
Python
|
mit
|
kux/django-admin-extend
|
Add a mixin form that provides bidirectional m2m fields in the admin
|
from django.forms import ModelForm
class BidirectionalM2MForm(ModelForm):
bi_m2m_fields = []
def _get_m2m_attr_name(self, m2m_field):
return '%s_set' % m2m_field
def __init__(self, *args, **kwargs):
super(BidirectionalM2MForm, self).__init__(*args, **kwargs)
if self.instance.pk is not None:
for m2m_field in self.bi_m2m_fields:
self.fields[m2m_field].initial = getattr(
self.instance, self._get_m2m_attr_name(m2m_field)).all()
def save(self, commit=True):
instance = super(BidirectionalM2MForm, self).save(commit=False)
force_save = self.instance.pk is None
if force_save:
instance.save()
for m2m_field in self.bi_m2m_fields:
attr_name = '%s_set' % m2m_field
setattr(self.instance, attr_name, self.cleaned_data[m2m_field])
if commit:
if not force_save:
instance.save()
self.save_m2m()
return instance
|
<commit_before><commit_msg>Add a mixin form that provides bidirectional m2m fields in the admin<commit_after>
|
from django.forms import ModelForm
class BidirectionalM2MForm(ModelForm):
bi_m2m_fields = []
def _get_m2m_attr_name(self, m2m_field):
return '%s_set' % m2m_field
def __init__(self, *args, **kwargs):
super(BidirectionalM2MForm, self).__init__(*args, **kwargs)
if self.instance.pk is not None:
for m2m_field in self.bi_m2m_fields:
self.fields[m2m_field].initial = getattr(
self.instance, self._get_m2m_attr_name(m2m_field)).all()
def save(self, commit=True):
instance = super(BidirectionalM2MForm, self).save(commit=False)
force_save = self.instance.pk is None
if force_save:
instance.save()
for m2m_field in self.bi_m2m_fields:
attr_name = '%s_set' % m2m_field
setattr(self.instance, attr_name, self.cleaned_data[m2m_field])
if commit:
if not force_save:
instance.save()
self.save_m2m()
return instance
|
Add a mixin form that provides bidirectional m2m fields in the admin
from django.forms import ModelForm
class BidirectionalM2MForm(ModelForm):
bi_m2m_fields = []
def _get_m2m_attr_name(self, m2m_field):
return '%s_set' % m2m_field
def __init__(self, *args, **kwargs):
super(BidirectionalM2MForm, self).__init__(*args, **kwargs)
if self.instance.pk is not None:
for m2m_field in self.bi_m2m_fields:
self.fields[m2m_field].initial = getattr(
self.instance, self._get_m2m_attr_name(m2m_field)).all()
def save(self, commit=True):
instance = super(BidirectionalM2MForm, self).save(commit=False)
force_save = self.instance.pk is None
if force_save:
instance.save()
for m2m_field in self.bi_m2m_fields:
attr_name = '%s_set' % m2m_field
setattr(self.instance, attr_name, self.cleaned_data[m2m_field])
if commit:
if not force_save:
instance.save()
self.save_m2m()
return instance
|
<commit_before><commit_msg>Add a mixin form that provides bidirectional m2m fields in the admin<commit_after>from django.forms import ModelForm
class BidirectionalM2MForm(ModelForm):
bi_m2m_fields = []
def _get_m2m_attr_name(self, m2m_field):
return '%s_set' % m2m_field
def __init__(self, *args, **kwargs):
super(BidirectionalM2MForm, self).__init__(*args, **kwargs)
if self.instance.pk is not None:
for m2m_field in self.bi_m2m_fields:
self.fields[m2m_field].initial = getattr(
self.instance, self._get_m2m_attr_name(m2m_field)).all()
def save(self, commit=True):
instance = super(BidirectionalM2MForm, self).save(commit=False)
force_save = self.instance.pk is None
if force_save:
instance.save()
for m2m_field in self.bi_m2m_fields:
attr_name = '%s_set' % m2m_field
setattr(self.instance, attr_name, self.cleaned_data[m2m_field])
if commit:
if not force_save:
instance.save()
self.save_m2m()
return instance
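A hypothetical subclass showing the intended wiring; Author, Book, and the field name are assumptions for illustration and are not part of the commit.
# Sketch: expose the reverse side of Book.authors (author.book_set) on the
# Author change form by listing it in bi_m2m_fields and declaring the field.
from django import forms

class AuthorForm(BidirectionalM2MForm):
    bi_m2m_fields = ['book']          # reverse accessor is author.book_set
    book = forms.ModelMultipleChoiceField(queryset=Book.objects.all(),  # assumed model
                                          required=False)

    class Meta:
        model = Author                # assumed model with a reverse M2M from Book
        fields = '__all__'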
|
|
2ac066511fb7febe0a0dd2f54845e945c639f810
|
py/maximum-width-of-binary-tree.py
|
py/maximum-width-of-binary-tree.py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def widthOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: int
"""
start, end = 0, 0
q = [(root, 0, 0)]
cd = -1
m = 0
for v, d, c in q:
if d != cd:
start, end = c, c
cd = d
else:
end = c
m = max(m, end - start + 1)
if v.left:
q.append((v.left, d + 1, c << 1))
if v.right:
q.append((v.right, d + 1, (c << 1) | 1))
return m
|
Add py solution for 662. Maximum Width of Binary Tree
|
Add py solution for 662. Maximum Width of Binary Tree
662. Maximum Width of Binary Tree: https://leetcode.com/problems/maximum-width-of-binary-tree/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 662. Maximum Width of Binary Tree
662. Maximum Width of Binary Tree: https://leetcode.com/problems/maximum-width-of-binary-tree/
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def widthOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: int
"""
start, end = 0, 0
q = [(root, 0, 0)]
cd = -1
m = 0
for v, d, c in q:
if d != cd:
start, end = c, c
cd = d
else:
end = c
m = max(m, end - start + 1)
if v.left:
q.append((v.left, d + 1, c << 1))
if v.right:
q.append((v.right, d + 1, (c << 1) | 1))
return m
|
<commit_before><commit_msg>Add py solution for 662. Maximum Width of Binary Tree
662. Maximum Width of Binary Tree: https://leetcode.com/problems/maximum-width-of-binary-tree/<commit_after>
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def widthOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: int
"""
start, end = 0, 0
q = [(root, 0, 0)]
cd = -1
m = 0
for v, d, c in q:
if d != cd:
start, end = c, c
cd = d
else:
end = c
m = max(m, end - start + 1)
if v.left:
q.append((v.left, d + 1, c << 1))
if v.right:
q.append((v.right, d + 1, (c << 1) | 1))
return m
|
Add py solution for 662. Maximum Width of Binary Tree
662. Maximum Width of Binary Tree: https://leetcode.com/problems/maximum-width-of-binary-tree/
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def widthOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: int
"""
start, end = 0, 0
q = [(root, 0, 0)]
cd = -1
m = 0
for v, d, c in q:
if d != cd:
start, end = c, c
cd = d
else:
end = c
m = max(m, end - start + 1)
if v.left:
q.append((v.left, d + 1, c << 1))
if v.right:
q.append((v.right, d + 1, (c << 1) | 1))
return m
|
<commit_before><commit_msg>Add py solution for 662. Maximum Width of Binary Tree
662. Maximum Width of Binary Tree: https://leetcode.com/problems/maximum-width-of-binary-tree/<commit_after># Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def widthOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: int
"""
start, end = 0, 0
q = [(root, 0, 0)]
cd = -1
m = 0
for v, d, c in q:
if d != cd:
start, end = c, c
cd = d
else:
end = c
m = max(m, end - start + 1)
if v.left:
q.append((v.left, d + 1, c << 1))
if v.right:
q.append((v.right, d + 1, (c << 1) | 1))
return m
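A quick sanity check; the TreeNode class is restated from the commented stub so the snippet runs standalone, and the tree itself is just an example.
# Build a small tree; the widest level is the last one (width 4, because
# positions 0 and 3 are occupied and the gaps at 1-2 still count).
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(1)
root.left, root.right = TreeNode(3), TreeNode(2)
root.left.left, root.left.right = TreeNode(5), TreeNode(3)
root.right.right = TreeNode(9)
assert Solution().widthOfBinaryTree(root) == 4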
|
|
725ada91ff4b15aa97784a21e6cebd02fa2dd55c
|
split_dataset.py
|
split_dataset.py
|
import os
import numpy as np
data_dir = "data/dataset/"
jpg_filenames = list(filter(lambda x: x[-3:] == "jpg", os.listdir(data_dir)))
# Randomly select the test dataset
test_percentage = 0.1
n_test = round(len(jpg_filenames) * test_percentage)
if n_test == 0: n_test = 1
# Randomly select the images for testing
test_indexes = np.random.choice(len(jpg_filenames), n_test, replace=False)
jpg_filenames_copy = jpg_filenames[:]
with open("test.txt", "w") as f:
for index in test_indexes:
# Write filename
f.write(data_dir + jpg_filenames[index] + "\n")
# Remove from copy list
jpg_filenames_copy.pop(index)
# Write from the copy list
with open("train.txt", "w") as f:
for filename in jpg_filenames_copy:
f.write(data_dir + filename + "\n")
|
Add script to split the dataset
|
Add script to split the dataset
|
Python
|
mit
|
SetaSouto/license-plate-detection
|
Add script to split the dataset
|
import os
import numpy as np
data_dir = "data/dataset/"
jpg_filenames = list(filter(lambda x: x[-3:] == "jpg", os.listdir(data_dir)))
# Randomly select the test dataset
test_percentage = 0.1
n_test = round(len(jpg_filenames) * test_percentage)
if n_test == 0: n_test = 1
# Randomly select the images for testing
test_indexes = np.random.choice(len(jpg_filenames), n_test, replace=False)
jpg_filenames_copy = jpg_filenames[:]
with open("test.txt", "w") as f:
for index in test_indexes:
# Write filename
f.write(data_dir + jpg_filenames[index] + "\n")
# Remove from copy list
jpg_filenames_copy.pop(index)
# Write from the copy list
with open("train.txt", "w") as f:
for filename in jpg_filenames_copy:
f.write(data_dir + filename + "\n")
|
<commit_before><commit_msg>Add script to split the dataset<commit_after>
|
import os
import numpy as np
data_dir = "data/dataset/"
jpg_filenames = list(filter(lambda x: x[-3:] == "jpg", os.listdir(data_dir)))
# Randomly select the test dataset
test_percentage = 0.1
n_test = round(len(jpg_filenames) * test_percentage)
if n_test == 0: n_test = 1
# Randomly select the images for testing
test_indexes = np.random.choice(len(jpg_filenames), n_test, replace=False)
jpg_filenames_copy = jpg_filenames[:]
with open("test.txt", "w") as f:
for index in test_indexes:
# Write filename
f.write(data_dir + jpg_filenames[index] + "\n")
# Remove from copy list
jpg_filenames_copy.pop(index)
# Write from the copy list
with open("train.txt", "w") as f:
for filename in jpg_filenames_copy:
f.write(data_dir + filename + "\n")
|
Add script to split the dataset
import os
import numpy as np
data_dir = "data/dataset/"
jpg_filenames = list(filter(lambda x: x[-3:] == "jpg", os.listdir(data_dir)))
# Randomly select the test dataset
test_percentage = 0.1
n_test = round(len(jpg_filenames) * test_percentage)
if n_test == 0: n_test = 1
# Randomly select the images for testing
test_indexes = np.random.choice(len(jpg_filenames), n_test, replace=False)
jpg_filenames_copy = jpg_filenames[:]
with open("test.txt", "w") as f:
for index in test_indexes:
# Write filename
f.write(data_dir + jpg_filenames[index] + "\n")
# Remove from copy list
jpg_filenames_copy.pop(index)
# Write from the copy list
with open("train.txt", "w") as f:
for filename in jpg_filenames_copy:
f.write(data_dir + filename + "\n")
|
<commit_before><commit_msg>Add script to split the dataset<commit_after>import os
import numpy as np
data_dir = "data/dataset/"
jpg_filenames = list(filter(lambda x: x[-3:] == "jpg", os.listdir(data_dir)))
# Randomly select the test dataset
test_percentage = 0.1
n_test = round(len(jpg_filenames) * test_percentage)
if n_test == 0: n_test = 1
# Randomly select the images for testing
test_indexes = np.random.choice(len(jpg_filenames), n_test, replace=False)
jpg_filenames_copy = jpg_filenames[:]
with open("test.txt", "w") as f:
for index in test_indexes:
# Write filename
f.write(data_dir + jpg_filenames[index] + "\n")
# Remove from copy list
jpg_filenames_copy.pop(index)
# Write from the copy list
with open("train.txt", "w") as f:
for filename in jpg_filenames_copy:
f.write(data_dir + filename + "\n")
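One caveat worth flagging, offered as a hedged alternative rather than a change to the script above: popping jpg_filenames_copy by indexes chosen against the original list shifts later positions after every pop, so some selected test images can end up back in train.txt. A shift-safe variant could look like this.
# Sketch: filter by index membership instead of popping, so positions never
# move while the test images are being excluded from the training list.
test_index_set = set(int(i) for i in test_indexes)
with open("train.txt", "w") as f:
    for i, filename in enumerate(jpg_filenames):
        if i not in test_index_set:
            f.write(data_dir + filename + "\n")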
|
|
76d1330c2ebe47e3818139e7eaa2bd1dd906c8f5
|
papermill/tests/test_parameterize.py
|
papermill/tests/test_parameterize.py
|
import unittest
from ..api import read_notebook
from ..execute import _parameterize_notebook
from . import get_notebook_path
class TestNotebookHelpers(unittest.TestCase):
def test_preserving_tags(self):
# test that other tags on the parameter cell are preserved
test_nb = read_notebook(get_notebook_path("simple_execute.ipynb"))
test_nb.node.cells[0]['metadata']['tags'].append('some tag')
_parameterize_notebook(test_nb.node, 'python3', {'msg': 'Hello'})
cell_zero = test_nb.node.cells[0]
self.assertTrue('some tag' in cell_zero.get('metadata').get('tags'))
cell_one = test_nb.node.cells[1]
self.assertTrue('some tag' in cell_one.get('metadata').get('tags'))
self.assertTrue('parameters' in cell_one.get('metadata').get('tags'))
def test_default_parameters_tag(self):
test_nb = read_notebook(get_notebook_path("simple_execute.ipynb"))
test_nb.node.cells[0]['metadata']['tags'].append('some tag')
_parameterize_notebook(test_nb.node, 'python3', {'msg': 'Hello'})
cell_zero = test_nb.node.cells[0]
self.assertTrue('default parameters'
in cell_zero.get('metadata').get('tags'))
self.assertTrue('parameters'
not in cell_zero.get('metadata').get('tags'))
|
Add tests for tagging of parameter cells
|
Add tests for tagging of parameter cells
|
Python
|
bsd-3-clause
|
nteract/papermill,nteract/papermill
|
Add tests for tagging of parameter cells
|
import unittest
from ..api import read_notebook
from ..execute import _parameterize_notebook
from . import get_notebook_path
class TestNotebookHelpers(unittest.TestCase):
def test_preserving_tags(self):
# test that other tags on the parameter cell are preserved
test_nb = read_notebook(get_notebook_path("simple_execute.ipynb"))
test_nb.node.cells[0]['metadata']['tags'].append('some tag')
_parameterize_notebook(test_nb.node, 'python3', {'msg': 'Hello'})
cell_zero = test_nb.node.cells[0]
self.assertTrue('some tag' in cell_zero.get('metadata').get('tags'))
cell_one = test_nb.node.cells[1]
self.assertTrue('some tag' in cell_one.get('metadata').get('tags'))
self.assertTrue('parameters' in cell_one.get('metadata').get('tags'))
def test_default_parameters_tag(self):
test_nb = read_notebook(get_notebook_path("simple_execute.ipynb"))
test_nb.node.cells[0]['metadata']['tags'].append('some tag')
_parameterize_notebook(test_nb.node, 'python3', {'msg': 'Hello'})
cell_zero = test_nb.node.cells[0]
self.assertTrue('default parameters'
in cell_zero.get('metadata').get('tags'))
self.assertTrue('parameters'
not in cell_zero.get('metadata').get('tags'))
|
<commit_before><commit_msg>Add tests for tagging of parameter cells<commit_after>
|
import unittest
from ..api import read_notebook
from ..execute import _parameterize_notebook
from . import get_notebook_path
class TestNotebookHelpers(unittest.TestCase):
def test_preserving_tags(self):
# test that other tags on the parameter cell are preserved
test_nb = read_notebook(get_notebook_path("simple_execute.ipynb"))
test_nb.node.cells[0]['metadata']['tags'].append('some tag')
_parameterize_notebook(test_nb.node, 'python3', {'msg': 'Hello'})
cell_zero = test_nb.node.cells[0]
self.assertTrue('some tag' in cell_zero.get('metadata').get('tags'))
cell_one = test_nb.node.cells[1]
self.assertTrue('some tag' in cell_one.get('metadata').get('tags'))
self.assertTrue('parameters' in cell_one.get('metadata').get('tags'))
def test_default_parameters_tag(self):
test_nb = read_notebook(get_notebook_path("simple_execute.ipynb"))
test_nb.node.cells[0]['metadata']['tags'].append('some tag')
_parameterize_notebook(test_nb.node, 'python3', {'msg': 'Hello'})
cell_zero = test_nb.node.cells[0]
self.assertTrue('default parameters'
in cell_zero.get('metadata').get('tags'))
self.assertTrue('parameters'
not in cell_zero.get('metadata').get('tags'))
|
Add tests for tagging of parameter cells
import unittest
from ..api import read_notebook
from ..execute import _parameterize_notebook
from . import get_notebook_path
class TestNotebookHelpers(unittest.TestCase):
def test_preserving_tags(self):
# test that other tags on the parameter cell are preserved
test_nb = read_notebook(get_notebook_path("simple_execute.ipynb"))
test_nb.node.cells[0]['metadata']['tags'].append('some tag')
_parameterize_notebook(test_nb.node, 'python3', {'msg': 'Hello'})
cell_zero = test_nb.node.cells[0]
self.assertTrue('some tag' in cell_zero.get('metadata').get('tags'))
cell_one = test_nb.node.cells[1]
self.assertTrue('some tag' in cell_one.get('metadata').get('tags'))
self.assertTrue('parameters' in cell_one.get('metadata').get('tags'))
def test_default_parameters_tag(self):
test_nb = read_notebook(get_notebook_path("simple_execute.ipynb"))
test_nb.node.cells[0]['metadata']['tags'].append('some tag')
_parameterize_notebook(test_nb.node, 'python3', {'msg': 'Hello'})
cell_zero = test_nb.node.cells[0]
self.assertTrue('default parameters'
in cell_zero.get('metadata').get('tags'))
self.assertTrue('parameters'
not in cell_zero.get('metadata').get('tags'))
|
<commit_before><commit_msg>Add tests for tagging of parameter cells<commit_after>import unittest
from ..api import read_notebook
from ..execute import _parameterize_notebook
from . import get_notebook_path
class TestNotebookHelpers(unittest.TestCase):
def test_preserving_tags(self):
# test that other tags on the parameter cell are preserved
test_nb = read_notebook(get_notebook_path("simple_execute.ipynb"))
test_nb.node.cells[0]['metadata']['tags'].append('some tag')
_parameterize_notebook(test_nb.node, 'python3', {'msg': 'Hello'})
cell_zero = test_nb.node.cells[0]
self.assertTrue('some tag' in cell_zero.get('metadata').get('tags'))
cell_one = test_nb.node.cells[1]
self.assertTrue('some tag' in cell_one.get('metadata').get('tags'))
self.assertTrue('parameters' in cell_one.get('metadata').get('tags'))
def test_default_parameters_tag(self):
test_nb = read_notebook(get_notebook_path("simple_execute.ipynb"))
test_nb.node.cells[0]['metadata']['tags'].append('some tag')
_parameterize_notebook(test_nb.node, 'python3', {'msg': 'Hello'})
cell_zero = test_nb.node.cells[0]
self.assertTrue('default parameters'
in cell_zero.get('metadata').get('tags'))
self.assertTrue('parameters'
not in cell_zero.get('metadata').get('tags'))
|
|
102846c941f2faa229db841ffe5031fa779923f3
|
indico/migrations/versions/201704191202_2963fba57558_fix_future_paper_revisions.py
|
indico/migrations/versions/201704191202_2963fba57558_fix_future_paper_revisions.py
|
"""Fix future paper revisions
Revision ID: 2963fba57558
Revises: 098311458f37
Create Date: 2017-04-19 12:02:16.187401
"""
from collections import Counter
from datetime import timedelta
from alembic import context, op
# revision identifiers, used by Alembic.
revision = '2963fba57558'
down_revision = '098311458f37'
branch_labels = None
depends_on = None
def upgrade():
if context.is_offline_mode():
raise Exception('This upgrade is only possible in online mode')
conn = op.get_bind()
conn.execute('''
UPDATE event_paper_reviewing.revisions
SET submitted_dt = judgment_dt
WHERE judgment_dt < submitted_dt;
UPDATE event_paper_reviewing.revisions
SET submitted_dt = now()
WHERE submitted_dt > now();
''')
res = conn.execute("""
SELECT contribution_id
FROM event_paper_reviewing.revisions r
JOIN events.contributions c ON (c.id = r.contribution_id)
GROUP BY contribution_id, submitted_dt
HAVING COUNT(*) > 1;
""")
ids = {x.contribution_id for x in res}
stmt = """
SELECT id, submitted_dt
FROM event_paper_reviewing.revisions
WHERE contribution_id = %s
ORDER BY submitted_dt ASC, id ASC
"""
for contrib_id in ids:
res = conn.execute(stmt, (contrib_id,))
times = Counter()
for row in res:
if times[row.submitted_dt] > 0:
dt = row.submitted_dt + timedelta(seconds=times[row.submitted_dt])
conn.execute("UPDATE event_paper_reviewing.revisions SET submitted_dt = %s WHERE id = %s", (dt, row.id))
times[row.submitted_dt] += 1
conn.execute('''
UPDATE event_paper_reviewing.revisions
SET submitted_dt = judgment_dt
WHERE judgment_dt < submitted_dt;
''')
def downgrade():
pass
|
Fix paper revisions with same submitted dt
|
Fix paper revisions with same submitted dt
|
Python
|
mit
|
pferreir/indico,DirkHoffmann/indico,OmeGak/indico,indico/indico,indico/indico,indico/indico,DirkHoffmann/indico,ThiefMaster/indico,ThiefMaster/indico,mvidalgarcia/indico,mic4ael/indico,mic4ael/indico,mvidalgarcia/indico,mvidalgarcia/indico,ThiefMaster/indico,pferreir/indico,mic4ael/indico,pferreir/indico,mvidalgarcia/indico,pferreir/indico,DirkHoffmann/indico,indico/indico,mic4ael/indico,ThiefMaster/indico,OmeGak/indico,OmeGak/indico,DirkHoffmann/indico,OmeGak/indico
|
Fix paper revisions with same submitted dt
|
"""Fix future paper revisions
Revision ID: 2963fba57558
Revises: 098311458f37
Create Date: 2017-04-19 12:02:16.187401
"""
from collections import Counter
from datetime import timedelta
from alembic import context, op
# revision identifiers, used by Alembic.
revision = '2963fba57558'
down_revision = '098311458f37'
branch_labels = None
depends_on = None
def upgrade():
if context.is_offline_mode():
raise Exception('This upgrade is only possible in online mode')
conn = op.get_bind()
conn.execute('''
UPDATE event_paper_reviewing.revisions
SET submitted_dt = judgment_dt
WHERE judgment_dt < submitted_dt;
UPDATE event_paper_reviewing.revisions
SET submitted_dt = now()
WHERE submitted_dt > now();
''')
res = conn.execute("""
SELECT contribution_id
FROM event_paper_reviewing.revisions r
JOIN events.contributions c ON (c.id = r.contribution_id)
GROUP BY contribution_id, submitted_dt
HAVING COUNT(*) > 1;
""")
ids = {x.contribution_id for x in res}
stmt = """
SELECT id, submitted_dt
FROM event_paper_reviewing.revisions
WHERE contribution_id = %s
ORDER BY submitted_dt ASC, id ASC
"""
for contrib_id in ids:
res = conn.execute(stmt, (contrib_id,))
times = Counter()
for row in res:
if times[row.submitted_dt] > 0:
dt = row.submitted_dt + timedelta(seconds=times[row.submitted_dt])
conn.execute("UPDATE event_paper_reviewing.revisions SET submitted_dt = %s WHERE id = %s", (dt, row.id))
times[row.submitted_dt] += 1
conn.execute('''
UPDATE event_paper_reviewing.revisions
SET submitted_dt = judgment_dt
WHERE judgment_dt < submitted_dt;
''')
def downgrade():
pass
|
<commit_before><commit_msg>Fix paper revisions with same submitted dt<commit_after>
|
"""Fix future paper revisions
Revision ID: 2963fba57558
Revises: 098311458f37
Create Date: 2017-04-19 12:02:16.187401
"""
from collections import Counter
from datetime import timedelta
from alembic import context, op
# revision identifiers, used by Alembic.
revision = '2963fba57558'
down_revision = '098311458f37'
branch_labels = None
depends_on = None
def upgrade():
if context.is_offline_mode():
raise Exception('This upgrade is only possible in online mode')
conn = op.get_bind()
conn.execute('''
UPDATE event_paper_reviewing.revisions
SET submitted_dt = judgment_dt
WHERE judgment_dt < submitted_dt;
UPDATE event_paper_reviewing.revisions
SET submitted_dt = now()
WHERE submitted_dt > now();
''')
res = conn.execute("""
SELECT contribution_id
FROM event_paper_reviewing.revisions r
JOIN events.contributions c ON (c.id = r.contribution_id)
GROUP BY contribution_id, submitted_dt
HAVING COUNT(*) > 1;
""")
ids = {x.contribution_id for x in res}
stmt = """
SELECT id, submitted_dt
FROM event_paper_reviewing.revisions
WHERE contribution_id = %s
ORDER BY submitted_dt ASC, id ASC
"""
for contrib_id in ids:
res = conn.execute(stmt, (contrib_id,))
times = Counter()
for row in res:
if times[row.submitted_dt] > 0:
dt = row.submitted_dt + timedelta(seconds=times[row.submitted_dt])
conn.execute("UPDATE event_paper_reviewing.revisions SET submitted_dt = %s WHERE id = %s", (dt, row.id))
times[row.submitted_dt] += 1
conn.execute('''
UPDATE event_paper_reviewing.revisions
SET submitted_dt = judgment_dt
WHERE judgment_dt < submitted_dt;
''')
def downgrade():
pass
|
Fix paper revisions with same submitted dt"""Fix future paper revisions
Revision ID: 2963fba57558
Revises: 098311458f37
Create Date: 2017-04-19 12:02:16.187401
"""
from collections import Counter
from datetime import timedelta
from alembic import context, op
# revision identifiers, used by Alembic.
revision = '2963fba57558'
down_revision = '098311458f37'
branch_labels = None
depends_on = None
def upgrade():
if context.is_offline_mode():
raise Exception('This upgrade is only possible in online mode')
conn = op.get_bind()
conn.execute('''
UPDATE event_paper_reviewing.revisions
SET submitted_dt = judgment_dt
WHERE judgment_dt < submitted_dt;
UPDATE event_paper_reviewing.revisions
SET submitted_dt = now()
WHERE submitted_dt > now();
''')
res = conn.execute("""
SELECT contribution_id
FROM event_paper_reviewing.revisions r
JOIN events.contributions c ON (c.id = r.contribution_id)
GROUP BY contribution_id, submitted_dt
HAVING COUNT(*) > 1;
""")
ids = {x.contribution_id for x in res}
stmt = """
SELECT id, submitted_dt
FROM event_paper_reviewing.revisions
WHERE contribution_id = %s
ORDER BY submitted_dt ASC, id ASC
"""
for contrib_id in ids:
res = conn.execute(stmt, (contrib_id,))
times = Counter()
for row in res:
if times[row.submitted_dt] > 0:
dt = row.submitted_dt + timedelta(seconds=times[row.submitted_dt])
conn.execute("UPDATE event_paper_reviewing.revisions SET submitted_dt = %s WHERE id = %s", (dt, row.id))
times[row.submitted_dt] += 1
conn.execute('''
UPDATE event_paper_reviewing.revisions
SET submitted_dt = judgment_dt
WHERE judgment_dt < submitted_dt;
''')
def downgrade():
pass
|
<commit_before><commit_msg>Fix paper revisions with same submitted dt<commit_after>"""Fix future paper revisions
Revision ID: 2963fba57558
Revises: 098311458f37
Create Date: 2017-04-19 12:02:16.187401
"""
from collections import Counter
from datetime import timedelta
from alembic import context, op
# revision identifiers, used by Alembic.
revision = '2963fba57558'
down_revision = '098311458f37'
branch_labels = None
depends_on = None
def upgrade():
if context.is_offline_mode():
raise Exception('This upgrade is only possible in online mode')
conn = op.get_bind()
conn.execute('''
UPDATE event_paper_reviewing.revisions
SET submitted_dt = judgment_dt
WHERE judgment_dt < submitted_dt;
UPDATE event_paper_reviewing.revisions
SET submitted_dt = now()
WHERE submitted_dt > now();
''')
res = conn.execute("""
SELECT contribution_id
FROM event_paper_reviewing.revisions r
JOIN events.contributions c ON (c.id = r.contribution_id)
GROUP BY contribution_id, submitted_dt
HAVING COUNT(*) > 1;
""")
ids = {x.contribution_id for x in res}
stmt = """
SELECT id, submitted_dt
FROM event_paper_reviewing.revisions
WHERE contribution_id = %s
ORDER BY submitted_dt ASC, id ASC
"""
for contrib_id in ids:
res = conn.execute(stmt, (contrib_id,))
times = Counter()
for row in res:
if times[row.submitted_dt] > 0:
dt = row.submitted_dt + timedelta(seconds=times[row.submitted_dt])
conn.execute("UPDATE event_paper_reviewing.revisions SET submitted_dt = %s WHERE id = %s", (dt, row.id))
times[row.submitted_dt] += 1
conn.execute('''
UPDATE event_paper_reviewing.revisions
SET submitted_dt = judgment_dt
WHERE judgment_dt < submitted_dt;
''')
def downgrade():
pass
|
|
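A minimal standalone sketch of the Counter-based timestamp de-duplication used in the migration above (illustrative only, not part of the dataset record; the rows and deduped names and the datetimes are made up):
from collections import Counter
from datetime import datetime, timedelta

# Hypothetical pre-sorted timestamps, mirroring the migration's ORDER BY submitted_dt ASC, id ASC.
rows = [datetime(2017, 4, 19, 12, 0)] * 3 + [datetime(2017, 4, 19, 12, 5)]
times = Counter()
deduped = []
for dt in rows:
    if times[dt] > 0:
        # Later duplicates are pushed forward by 1s, 2s, ... like the per-row UPDATE in the loop above.
        deduped.append(dt + timedelta(seconds=times[dt]))
    else:
        deduped.append(dt)
    times[dt] += 1
print(deduped)  # 12:00:00, 12:00:01, 12:00:02, 12:05:00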
bb543c6f53983d59edbc6a522ca10d64efd9c42e
|
aids/sorting_and_searching/union_sorted_arrays.py
|
aids/sorting_and_searching/union_sorted_arrays.py
|
'''
In this module, we implement a function which gets the union
of two sorted arrays.
'''
def union_sorted_arrays(arr_1, arr_2):
'''
Return the union of two sorted arrays
'''
result = []
i,j = 0,0
while i < len(arr_1) and j < len(arr_2):
if arr_1[i] == arr_2[j]:
result.append(arr_1[i])
i += 1
j+=1
elif arr_1[i] < arr_2[j]:
result.append(arr_1[i])
i += 1
else:
result.append(arr_2[j])
j += 1
result.extend(arr_1[i:])
result.extend(arr_2[j:])
return result
|
Return union of two sorted arrays
|
Return union of two sorted arrays
|
Python
|
mit
|
ueg1990/aids
|
Return union of two sorted arrays
|
'''
In this module, we implement a function which gets the union
of two sorted arrays.
'''
def union_sorted_arrays(arr_1, arr_2):
'''
Return the union of two sorted arrays
'''
result = []
i,j = 0,0
while i < len(arr_1) and j < len(arr_2):
if arr_1[i] == arr_2[j]:
result.append(arr_1[i])
i += 1
j+=1
elif arr_1[i] < arr_2[j]:
result.append(arr_1[i])
i += 1
else:
result.append(arr_2[j])
j += 1
result.extend(arr_1[i:])
result.extend(arr_2[j:])
return result
|
<commit_before><commit_msg>Return union of two sorted arrays<commit_after>
|
'''
In this module, we implement a function which gets the union
of two sorted arrays.
'''
def union_sorted_arrays(arr_1, arr_2):
'''
Return the union of two sorted arrays
'''
result = []
i,j = 0,0
while i < len(arr_1) and j < len(arr_2):
if arr_1[i] == arr_2[j]:
result.append(arr_1[i])
i += 1
j+=1
elif arr_1[i] < arr_2[j]:
result.append(arr_1[i])
i += 1
else:
result.append(arr_2[j])
j += 1
result.extend(arr_1[i:])
result.extend(arr_2[j:])
return result
|
Return union of two sorted arrays'''
In this module, we implement a function which gets the union
of two sorted arrays.
'''
def union_sorted_arrays(arr_1, arr_2):
'''
Return the union of two sorted arrays
'''
result = []
i,j = 0,0
while i < len(arr_1) and j < len(arr_2):
if arr_1[i] == arr_2[j]:
result.append(arr_1[i])
i += 1
j+=1
elif arr_1[i] < arr_2[j]:
result.append(arr_1[i])
i += 1
else:
result.append(arr_2[j])
j += 1
result.extend(arr_1[i:])
result.extend(arr_2[j:])
return result
|
<commit_before><commit_msg>Return union of two sorted arrays<commit_after>'''
In this module, we implement a function which gets the union
of two sorted arrays.
'''
def union_sorted_arrays(arr_1, arr_2):
'''
Return the union of two sorted arrays
'''
result = []
i,j = 0,0
while i < len(arr_1) and j < len(arr_2):
if arr_1[i] == arr_2[j]:
result.append(arr_1[i])
i += 1
j+=1
elif arr_1[i] < arr_2[j]:
result.append(arr_1[i])
i += 1
else:
result.append(arr_2[j])
j += 1
result.extend(arr_1[i:])
result.extend(arr_2[j:])
return result
|
|
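A quick usage sketch of the merge-style union shown above (illustrative only, not part of the dataset record; it assumes the module path given in the record is importable):
from aids.sorting_and_searching.union_sorted_arrays import union_sorted_arrays

print(union_sorted_arrays([1, 3, 5], [1, 2, 5, 7]))  # [1, 2, 3, 5, 7] - equal heads collapse to one entry
print(union_sorted_arrays([], [4, 4]))               # [4, 4] - duplicates within a single input are kept as-is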
fdf1954e677b6a7b1a76ba7dabe1eb2c79e54744
|
run_analyses_bootstrap.py
|
run_analyses_bootstrap.py
|
"""Run bootstrap analyses"""
from working_functions import *
import multiprocessing
# Datasets not included: Cocoli, Sherman, Luquillo, Shiramaki
dat_list = ['FERP', 'ACA', 'WesternGhats', 'BCI', 'BVSF', 'Lahei',
'LaSelva', 'NC', 'Oosting', 'Serimbu']
# The following analyses assume that analyses in run_analyses.py
# has already been conducted for corresponding datasets.
# 1. Appendix D: Simulate null communities to validate R^2
for dat_name in dat_list:
montecarlo_uniform_SAD_ISD(dat_name, Niter = 100)
# Figure D1
sad_mc = import_bootstrap_file('SAD_mc_rsquare.txt')
isd_mc = import_bootstrap_file('ISD_mc_rsquare.txt')
create_Fig_D1(sad_mc, isd_mc)
# 2. Bootstrap analysis
# Bootstrap SAD
pool = multiprocessing.Pool(8) # Assuming that there are 8 cores
pool.map(bootstrap_SAD, dat_list)
pool.close()
pool.join()
# Bootstrap ISD - Note: Can be extremely time-consuming
for dat_name in dat_list:
bootstrap_ISD(dat_name)
# Bootstrap SDR and iISD
for dat_name in dat_list:
bootstrap_SDR_iISD(dat_name)
# Figures E1 & E2
create_Fig_E1()
create_Fig_E2()
|
Add script to run Monte Carlo and bootstrap analyses
|
Add script to run Monte Carlo and bootstrap analyses
|
Python
|
mit
|
weecology/mete-energy,weecology/mete-energy
|
Add script to run Monte Carlo and bootstrap analyses
|
"""Run bootstrap analyses"""
from working_functions import *
import multiprocessing
# Datasets not included: Cocoli, Sherman, Luquillo, Shiramaki
dat_list = ['FERP', 'ACA', 'WesternGhats', 'BCI', 'BVSF', 'Lahei',
'LaSelva', 'NC', 'Oosting', 'Serimbu']
# The following analyses assume that analyses in run_analyses.py
# has already been conducted for corresponding datasets.
# 1. Appendix D: Simulate null communities to validate R^2
for dat_name in dat_list:
montecarlo_uniform_SAD_ISD(dat_name, Niter = 100)
# Figure D1
sad_mc = import_bootstrap_file('SAD_mc_rsquare.txt')
isd_mc = import_bootstrap_file('ISD_mc_rsquare.txt')
create_Fig_D1(sad_mc, isd_mc)
# 2. Bootstrap analysis
# Bootstrap SAD
pool = multiprocessing.Pool(8) # Assuming that there are 8 cores
pool.map(bootstrap_SAD, dat_list)
pool.close()
pool.join()
# Bootstrap ISD - Note: Can be extremely time-consuming
for dat_name in dat_list:
bootstrap_ISD(dat_name)
# Bootstrap SDR and iISD
for dat_name in dat_list:
bootstrap_SDR_iISD(dat_name)
# Figures E1 & E2
create_Fig_E1()
create_Fig_E2()
|
<commit_before><commit_msg>Add script to run Monte Carlo and bootstrap analyses<commit_after>
|
"""Run bootstrap analyses"""
from working_functions import *
import multiprocessing
# Datasets not included: Cocoli, Sherman, Luquillo, Shiramaki
dat_list = ['FERP', 'ACA', 'WesternGhats', 'BCI', 'BVSF', 'Lahei',
'LaSelva', 'NC', 'Oosting', 'Serimbu']
# The following analyses assume that analyses in run_analyses.py
# has already been conducted for corresponding datasets.
# 1. Appendix D: Simulate null communities to validate R^2
for dat_name in dat_list:
montecarlo_uniform_SAD_ISD(dat_name, Niter = 100)
# Figure D1
sad_mc = import_bootstrap_file('SAD_mc_rsquare.txt')
isd_mc = import_bootstrap_file('ISD_mc_rsquare.txt')
create_Fig_D1(sad_mc, isd_mc)
# 2. Bootstrap analysis
# Bootstrap SAD
pool = multiprocessing.Pool(8) # Assuming that there are 8 cores
pool.map(bootstrap_SAD, dat_list)
pool.close()
pool.join()
# Bootstrap ISD - Note: Can be extremely time-consuming
for dat_name in dat_list:
bootstrap_ISD(dat_name)
# Bootstrap SDR and iISD
for dat_name in dat_list:
bootstrap_SDR_iISD(dat_name)
# Figures E1 & E2
create_Fig_E1()
create_Fig_E2()
|
Add script to run Monte Carlo and bootstrap analyses"""Run bootstrap analyses"""
from working_functions import *
import multiprocessing
# Datasets not included: Cocoli, Sherman, Luquillo, Shiramaki
dat_list = ['FERP', 'ACA', 'WesternGhats', 'BCI', 'BVSF', 'Lahei',
'LaSelva', 'NC', 'Oosting', 'Serimbu']
# The following analyses assume that analyses in run_analyses.py
# has already been conducted for corresponding datasets.
# 1. Appendix D: Simulate null communities to validate R^2
for dat_name in dat_list:
montecarlo_uniform_SAD_ISD(dat_name, Niter = 100)
# Figure D1
sad_mc = import_bootstrap_file('SAD_mc_rsquare.txt')
isd_mc = import_bootstrap_file('ISD_mc_rsquare.txt')
create_Fig_D1(sad_mc, isd_mc)
# 2. Bootstrap analysis
# Bootstrap SAD
pool = multiprocessing.Pool(8) # Assuming that there are 8 cores
pool.map(bootstrap_SAD, dat_list)
pool.close()
pool.join()
# Bootstrap ISD - Note: Can be extremely time-consuming
for dat_name in dat_list:
bootstrap_ISD(dat_name)
# Bootstrap SDR and iISD
for dat_name in dat_list:
bootstrap_SDR_iISD(dat_name)
# Figures E1 & E2
create_Fig_E1()
create_Fig_E2()
|
<commit_before><commit_msg>Add script to run Monte Carlo and bootstrap analyses<commit_after>"""Run bootstrap analyses"""
from working_functions import *
import multiprocessing
# Datasets not included: Cocoli, Sherman, Luquillo, Shiramaki
dat_list = ['FERP', 'ACA', 'WesternGhats', 'BCI', 'BVSF', 'Lahei',
'LaSelva', 'NC', 'Oosting', 'Serimbu']
# The following analyses assume that analyses in run_analyses.py
# has already been conducted for corresponding datasets.
# 1. Appendix D: Simulate null communities to validate R^2
for dat_name in dat_list:
montecarlo_uniform_SAD_ISD(dat_name, Niter = 100)
# Figure D1
sad_mc = import_bootstrap_file('SAD_mc_rsquare.txt')
isd_mc = import_bootstrap_file('ISD_mc_rsquare.txt')
create_Fig_D1(sad_mc, isd_mc)
# 2. Bootstrap analysis
# Bootstrap SAD
pool = multiprocessing.Pool(8) # Assuming that there are 8 cores
pool.map(bootstrap_SAD, dat_list)
pool.close()
pool.join()
# Bootstrap ISD - Note: Can be extremely time-consuming
for dat_name in dat_list:
bootstrap_ISD(dat_name)
# Bootstrap SDR and iISD
for dat_name in dat_list:
bootstrap_SDR_iISD(dat_name)
# Figures E1 & E2
create_Fig_E1()
create_Fig_E2()
|
|
f576fc39cd3083cd0a4d441e48dd2942aead9b1b
|
salt/states/salt_proxy.py
|
salt/states/salt_proxy.py
|
# -*- coding: utf-8 -*-
import os
import logging
log = logging.getLogger(__name__)
def _proxy_conf_file(proxyfile):
changes = []
success = True
    if not os.path.exists(proxyfile):
        try:
            __salt__['salt_proxy.write_proxy_conf'](path=proxyfile)
            msg = 'Salt Proxy: Wrote proxy conf {0}'.format(proxyfile)
        except (OSError, IOError) as err:
            success = False
            msg = 'Salt Proxy: Error writing proxy file {0}'.format(str(err))
            log.error(msg)
        changes.append(msg)
        log.debug(msg)
else:
msg = 'Salt Proxy: {0} already exists, skipping'.format(proxyfile)
changes.append(msg)
log.debug(msg)
return success, changes
def configure_proxy(proxyname='p8000', start=True, **kwargs):
changes = []
status = True
# write the proxy file if necessary
proxyfile = '/etc/salt/proxy'
status, msg = _proxy_conf_file(proxyfile)
changes.extend(msg)
# start the proxy process
if start:
__salt__['cmd.run'](
'salt-proxy --proxyid={0} -l info -d'.format(proxyname),
timeout=5)
msg = 'Started salt proxy for {0}'.format(proxyname)
changes.append(msg)
log.info(msg)
return {'result': status, 'changes': '\n'.join(changes), 'name': proxyname,
'comment': 'Proxy config messages'}
|
Add state for salt proxy
|
Add state for salt proxy
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add state for salt proxy
|
# -*- coding: utf-8 -*-
import os
import logging
log = logging.getLogger(__name__)
def _proxy_conf_file(proxyfile):
changes = []
success = True
    if not os.path.exists(proxyfile):
        try:
            __salt__['salt_proxy.write_proxy_conf'](path=proxyfile)
            msg = 'Salt Proxy: Wrote proxy conf {0}'.format(proxyfile)
        except (OSError, IOError) as err:
            success = False
            msg = 'Salt Proxy: Error writing proxy file {0}'.format(str(err))
            log.error(msg)
        changes.append(msg)
        log.debug(msg)
else:
msg = 'Salt Proxy: {0} already exists, skipping'.format(proxyfile)
changes.append(msg)
log.debug(msg)
return success, changes
def configure_proxy(proxyname='p8000', start=True, **kwargs):
changes = []
status = True
# write the proxy file if necessary
proxyfile = '/etc/salt/proxy'
status, msg = _proxy_conf_file(proxyfile)
changes.extend(msg)
# start the proxy process
if start:
__salt__['cmd.run'](
'salt-proxy --proxyid={0} -l info -d'.format(proxyname),
timeout=5)
msg = 'Started salt proxy for {0}'.format(proxyname)
changes.append(msg)
log.info(msg)
return {'result': status, 'changes': '\n'.join(changes), 'name': proxyname,
'comment': 'Proxy config messages'}
|
<commit_before><commit_msg>Add state for salt proxy<commit_after>
|
# -*- coding: utf-8 -*-
import os
import logging
log = logging.getLogger(__name__)
def _proxy_conf_file(proxyfile):
changes = []
success = True
    if not os.path.exists(proxyfile):
        try:
            __salt__['salt_proxy.write_proxy_conf'](path=proxyfile)
            msg = 'Salt Proxy: Wrote proxy conf {0}'.format(proxyfile)
        except (OSError, IOError) as err:
            success = False
            msg = 'Salt Proxy: Error writing proxy file {0}'.format(str(err))
            log.error(msg)
        changes.append(msg)
        log.debug(msg)
else:
msg = 'Salt Proxy: {0} already exists, skipping'.format(proxyfile)
changes.append(msg)
log.debug(msg)
return success, changes
def configure_proxy(proxyname='p8000', start=True, **kwargs):
changes = []
status = True
# write the proxy file if necessary
proxyfile = '/etc/salt/proxy'
status, msg = _proxy_conf_file(proxyfile)
changes.extend(msg)
# start the proxy process
if start:
__salt__['cmd.run'](
'salt-proxy --proxyid={0} -l info -d'.format(proxyname),
timeout=5)
msg = 'Started salt proxy for {0}'.format(proxyname)
changes.append(msg)
log.info(msg)
return {'result': status, 'changes': '\n'.join(changes), 'name': proxyname,
'comment': 'Proxy config messages'}
|
Add state for salt proxy# -*- coding: utf-8 -*-
import os
import logging
log = logging.getLogger(__name__)
def _proxy_conf_file(proxyfile):
changes = []
success = True
    if not os.path.exists(proxyfile):
        try:
            __salt__['salt_proxy.write_proxy_conf'](path=proxyfile)
            msg = 'Salt Proxy: Wrote proxy conf {0}'.format(proxyfile)
        except (OSError, IOError) as err:
            success = False
            msg = 'Salt Proxy: Error writing proxy file {0}'.format(str(err))
            log.error(msg)
        changes.append(msg)
        log.debug(msg)
else:
msg = 'Salt Proxy: {0} already exists, skipping'.format(proxyfile)
changes.append(msg)
log.debug(msg)
return success, changes
def configure_proxy(proxyname='p8000', start=True, **kwargs):
changes = []
status = True
# write the proxy file if necessary
proxyfile = '/etc/salt/proxy'
status, msg = _proxy_conf_file(proxyfile)
changes.extend(msg)
# start the proxy process
if start:
__salt__['cmd.run'](
'salt-proxy --proxyid={0} -l info -d'.format(proxyname),
timeout=5)
msg = 'Started salt proxy for {0}'.format(proxyname)
changes.append(msg)
log.info(msg)
return {'result': status, 'changes': '\n'.join(changes), 'name': proxyname,
'comment': 'Proxy config messages'}
|
<commit_before><commit_msg>Add state for salt proxy<commit_after># -*- coding: utf-8 -*-
import os
import logging
log = logging.getLogger(__name__)
def _proxy_conf_file(proxyfile):
changes = []
success = True
    if not os.path.exists(proxyfile):
        try:
            __salt__['salt_proxy.write_proxy_conf'](path=proxyfile)
            msg = 'Salt Proxy: Wrote proxy conf {0}'.format(proxyfile)
        except (OSError, IOError) as err:
            success = False
            msg = 'Salt Proxy: Error writing proxy file {0}'.format(str(err))
            log.error(msg)
        changes.append(msg)
        log.debug(msg)
else:
msg = 'Salt Proxy: {0} already exists, skipping'.format(proxyfile)
changes.append(msg)
log.debug(msg)
return success, changes
def configure_proxy(proxyname='p8000', start=True, **kwargs):
changes = []
status = True
# write the proxy file if necessary
proxyfile = '/etc/salt/proxy'
status, msg = _proxy_conf_file(proxyfile)
changes.extend(msg)
# start the proxy process
if start:
__salt__['cmd.run'](
'salt-proxy --proxyid={0} -l info -d'.format(proxyname),
timeout=5)
msg = 'Started salt proxy for {0}'.format(proxyname)
changes.append(msg)
log.info(msg)
return {'result': status, 'changes': '\n'.join(changes), 'name': proxyname,
'comment': 'Proxy config messages'}
|
|
095a04c051f966c74909c4e2d3a5cd4aea2e124f
|
docs/source/examples/test_no_depends_fails.py
|
docs/source/examples/test_no_depends_fails.py
|
from __future__ import print_function
from pych.extern import Chapel
@Chapel(sfile="users.onlyonce.chpl")
def useTwoModules(x=int, y=int):
return int
if __name__ == "__main__":
print(useTwoModules(2, 4))
import testcase
# contains the general testing method, which allows us to gather output
import os.path
def test_using_multiple_modules():
out = testcase.runpy(os.path.realpath(__file__))
# Ensure that when a used module is nowhere near the exported function, we
# get an error message to that effect.
assert "error: Cannot find module or enum \'M1\'" in out
|
from __future__ import print_function
from pych.extern import Chapel
@Chapel(sfile="users.onlyonce.chpl")
def useTwoModules(x=int, y=int):
return int
if __name__ == "__main__":
print(useTwoModules(2, 4))
import testcase
# contains the general testing method, which allows us to gather output
import os.path
def test_using_multiple_modules():
out = testcase.runpy(os.path.realpath(__file__))
# Ensure that when a used module is nowhere near the exported function, we
# get an error message to that effect.
assert "error: Cannot find module or enum" in out
|
Update expected error message for this test
|
Update expected error message for this test
PR 6370 on the chapel repository caused a change to the error message output
when a Chapel module or enum can't be found in a use statement, resulting in
the failure of this test to match expected output. Update accordingly.
|
Python
|
apache-2.0
|
russel/pychapel,russel/pychapel,chapel-lang/pychapel,chapel-lang/pychapel,russel/pychapel,chapel-lang/pychapel
|
from __future__ import print_function
from pych.extern import Chapel
@Chapel(sfile="users.onlyonce.chpl")
def useTwoModules(x=int, y=int):
return int
if __name__ == "__main__":
print(useTwoModules(2, 4))
import testcase
# contains the general testing method, which allows us to gather output
import os.path
def test_using_multiple_modules():
out = testcase.runpy(os.path.realpath(__file__))
# Ensure that when a used module is nowhere near the exported function, we
# get an error message to that effect.
assert "error: Cannot find module or enum \'M1\'" in out
Update expected error message for this test
PR 6370 on the chapel repository caused a change to the error message output
when a Chapel module or enum can't be found in a use statement, resulting in
the failure of this test to match expected output. Update accordingly.
|
from __future__ import print_function
from pych.extern import Chapel
@Chapel(sfile="users.onlyonce.chpl")
def useTwoModules(x=int, y=int):
return int
if __name__ == "__main__":
print(useTwoModules(2, 4))
import testcase
# contains the general testing method, which allows us to gather output
import os.path
def test_using_multiple_modules():
out = testcase.runpy(os.path.realpath(__file__))
# Ensure that when a used module is nowhere near the exported function, we
# get an error message to that effect.
assert "error: Cannot find module or enum" in out
|
<commit_before>from __future__ import print_function
from pych.extern import Chapel
@Chapel(sfile="users.onlyonce.chpl")
def useTwoModules(x=int, y=int):
return int
if __name__ == "__main__":
print(useTwoModules(2, 4))
import testcase
# contains the general testing method, which allows us to gather output
import os.path
def test_using_multiple_modules():
out = testcase.runpy(os.path.realpath(__file__))
# Ensure that when a used module is nowhere near the exported function, we
# get an error message to that effect.
assert "error: Cannot find module or enum \'M1\'" in out
<commit_msg>Update expected error message for this test
PR 6370 on the chapel repository caused a change to the error message output
when a Chapel module or enum can't be found in a use statement, resulting in
the failure of this test to match expected output. Update accordingly.<commit_after>
|
from __future__ import print_function
from pych.extern import Chapel
@Chapel(sfile="users.onlyonce.chpl")
def useTwoModules(x=int, y=int):
return int
if __name__ == "__main__":
print(useTwoModules(2, 4))
import testcase
# contains the general testing method, which allows us to gather output
import os.path
def test_using_multiple_modules():
out = testcase.runpy(os.path.realpath(__file__))
# Ensure that when a used module is nowhere near the exported function, we
# get an error message to that effect.
assert "error: Cannot find module or enum" in out
|
from __future__ import print_function
from pych.extern import Chapel
@Chapel(sfile="users.onlyonce.chpl")
def useTwoModules(x=int, y=int):
return int
if __name__ == "__main__":
print(useTwoModules(2, 4))
import testcase
# contains the general testing method, which allows us to gather output
import os.path
def test_using_multiple_modules():
out = testcase.runpy(os.path.realpath(__file__))
# Ensure that when a used module is nowhere near the exported function, we
# get an error message to that effect.
assert "error: Cannot find module or enum \'M1\'" in out
Update expected error message for this test
PR 6370 on the chapel repository caused a change to the error message output
when a Chapel module or enum can't be found in a use statement, resulting in
the failure of this test to match expected output. Update accordingly.from __future__ import print_function
from pych.extern import Chapel
@Chapel(sfile="users.onlyonce.chpl")
def useTwoModules(x=int, y=int):
return int
if __name__ == "__main__":
print(useTwoModules(2, 4))
import testcase
# contains the general testing method, which allows us to gather output
import os.path
def test_using_multiple_modules():
out = testcase.runpy(os.path.realpath(__file__))
# Ensure that when a used module is nowhere near the exported function, we
# get an error message to that effect.
assert "error: Cannot find module or enum" in out
|
<commit_before>from __future__ import print_function
from pych.extern import Chapel
@Chapel(sfile="users.onlyonce.chpl")
def useTwoModules(x=int, y=int):
return int
if __name__ == "__main__":
print(useTwoModules(2, 4))
import testcase
# contains the general testing method, which allows us to gather output
import os.path
def test_using_multiple_modules():
out = testcase.runpy(os.path.realpath(__file__))
# Ensure that when a used module is nowhere near the exported function, we
# get an error message to that effect.
assert "error: Cannot find module or enum \'M1\'" in out
<commit_msg>Update expected error message for this test
PR 6370 on the chapel repository caused a change to the error message output
when a Chapel module or enum can't be found in a use statement, resulting in
the failure of this test to match expected output. Update accordingly.<commit_after>from __future__ import print_function
from pych.extern import Chapel
@Chapel(sfile="users.onlyonce.chpl")
def useTwoModules(x=int, y=int):
return int
if __name__ == "__main__":
print(useTwoModules(2, 4))
import testcase
# contains the general testing method, which allows us to gather output
import os.path
def test_using_multiple_modules():
out = testcase.runpy(os.path.realpath(__file__))
# Ensure that when a used module is nowhere near the exported function, we
# get an error message to that effect.
assert "error: Cannot find module or enum" in out
|
81778bb866ca54f8a138c1f8493d7f98799f3dad
|
9_Palindrome_Number.py
|
9_Palindrome_Number.py
|
import math
class Solution(object):
def isPalindrome(self, x):
"""
:type x: int
:rtype: bool
"""
flag = 0
if x < 0:
return False
elif 0 <= x <= 9:
return True
digits = int(math.log10(abs(x)))+1
for i in range(1,digits/2+1):
# print i
# print (x/pow(10,digits-i))%10, (x%pow(10,i))/pow(10,i-1)
if (x/pow(10,digits-i))%10 != (x%pow(10,i))/pow(10,i-1):
flag = 1
break
if flag == 1:
return False
else:
return True
|
Add prob9, night in 9.18
|
Add prob9, night in 9.18
|
Python
|
apache-2.0
|
lzhbrian/Leetcode-Python
|
Add prob9, night in 9.18
|
import math
class Solution(object):
def isPalindrome(self, x):
"""
:type x: int
:rtype: bool
"""
flag = 0
if x < 0:
return False
elif 0 <= x <= 9:
return True
digits = int(math.log10(abs(x)))+1
for i in range(1,digits/2+1):
# print i
# print (x/pow(10,digits-i))%10, (x%pow(10,i))/pow(10,i-1)
if (x/pow(10,digits-i))%10 != (x%pow(10,i))/pow(10,i-1):
flag = 1
break
if flag == 1:
return False
else:
return True
|
<commit_before><commit_msg>Add prob9, night in 9.18<commit_after>
|
import math
class Solution(object):
def isPalindrome(self, x):
"""
:type x: int
:rtype: bool
"""
flag = 0
if x < 0:
return False
elif 0 <= x <= 9:
return True
digits = int(math.log10(abs(x)))+1
for i in range(1,digits/2+1):
# print i
# print (x/pow(10,digits-i))%10, (x%pow(10,i))/pow(10,i-1)
if (x/pow(10,digits-i))%10 != (x%pow(10,i))/pow(10,i-1):
flag = 1
break
if flag == 1:
return False
else:
return True
|
Add prob9, night in 9.18import math
class Solution(object):
def isPalindrome(self, x):
"""
:type x: int
:rtype: bool
"""
flag = 0
if x < 0:
return False
elif 0 <= x <= 9:
return True
digits = int(math.log10(abs(x)))+1
for i in range(1,digits/2+1):
# print i
# print (x/pow(10,digits-i))%10, (x%pow(10,i))/pow(10,i-1)
if (x/pow(10,digits-i))%10 != (x%pow(10,i))/pow(10,i-1):
flag = 1
break
if flag == 1:
return False
else:
return True
|
<commit_before><commit_msg>Add prob9, night in 9.18<commit_after>import math
class Solution(object):
def isPalindrome(self, x):
"""
:type x: int
:rtype: bool
"""
flag = 0
if x < 0:
return False
elif 0 <= x <= 9:
return True
digits = int(math.log10(abs(x)))+1
for i in range(1,digits/2+1):
# print i
# print (x/pow(10,digits-i))%10, (x%pow(10,i))/pow(10,i-1)
if (x/pow(10,digits-i))%10 != (x%pow(10,i))/pow(10,i-1):
flag = 1
break
if flag == 1:
return False
else:
return True
|
|
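A worked trace of the digit-pairing check in the record above (illustrative only, not part of the dataset record; // replaces the record's Python 2 / so the trace also runs on Python 3):
import math

x = 12321
digits = int(math.log10(abs(x))) + 1            # 5 digits
for i in range(1, digits // 2 + 1):             # i = 1, 2
    left = (x // pow(10, digits - i)) % 10      # 1, then 2
    right = (x % pow(10, i)) // pow(10, i - 1)  # 1, then 2
    print(i, left, right)                       # each pair matches, so 12321 is a palindrome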
f64ce86d1dcf402b68f55c2d6f54a00dbba8a1f5
|
examples/colorbars.py
|
examples/colorbars.py
|
"""
This example demonstrates how to make colorful bars.
"""
from rich.block_bar import BlockBar
from rich.console import Console
from rich.table import Table
table = Table()
table.add_column("Score")
table.add_row(BlockBar(size=100, begin=0, end=5, width=30, color="bright_red"))
table.add_row(BlockBar(size=100, begin=0, end=35, width=30, color="bright_yellow"))
table.add_row(BlockBar(size=100, begin=0, end=87, width=30, color="bright_green"))
console = Console()
console.print(table, justify="center")
|
Add example for block bar
|
Add example for block bar
|
Python
|
mit
|
willmcgugan/rich
|
Add example for block bar
|
"""
This example demonstrates how to make colorful bars.
"""
from rich.block_bar import BlockBar
from rich.console import Console
from rich.table import Table
table = Table()
table.add_column("Score")
table.add_row(BlockBar(size=100, begin=0, end=5, width=30, color="bright_red"))
table.add_row(BlockBar(size=100, begin=0, end=35, width=30, color="bright_yellow"))
table.add_row(BlockBar(size=100, begin=0, end=87, width=30, color="bright_green"))
console = Console()
console.print(table, justify="center")
|
<commit_before><commit_msg>Add example for block bar<commit_after>
|
"""
This example demonstrates how to make colorful bars.
"""
from rich.block_bar import BlockBar
from rich.console import Console
from rich.table import Table
table = Table()
table.add_column("Score")
table.add_row(BlockBar(size=100, begin=0, end=5, width=30, color="bright_red"))
table.add_row(BlockBar(size=100, begin=0, end=35, width=30, color="bright_yellow"))
table.add_row(BlockBar(size=100, begin=0, end=87, width=30, color="bright_green"))
console = Console()
console.print(table, justify="center")
|
Add example for block bar"""
This example demonstrates how to make colorful bars.
"""
from rich.block_bar import BlockBar
from rich.console import Console
from rich.table import Table
table = Table()
table.add_column("Score")
table.add_row(BlockBar(size=100, begin=0, end=5, width=30, color="bright_red"))
table.add_row(BlockBar(size=100, begin=0, end=35, width=30, color="bright_yellow"))
table.add_row(BlockBar(size=100, begin=0, end=87, width=30, color="bright_green"))
console = Console()
console.print(table, justify="center")
|
<commit_before><commit_msg>Add example for block bar<commit_after>"""
This example demonstrates how to make colorful bars.
"""
from rich.block_bar import BlockBar
from rich.console import Console
from rich.table import Table
table = Table()
table.add_column("Score")
table.add_row(BlockBar(size=100, begin=0, end=5, width=30, color="bright_red"))
table.add_row(BlockBar(size=100, begin=0, end=35, width=30, color="bright_yellow"))
table.add_row(BlockBar(size=100, begin=0, end=87, width=30, color="bright_green"))
console = Console()
console.print(table, justify="center")
|
|
445d8e2065389990962146acd48955d2f28fd712
|
clowder_server/migrations/0007_alert_expire_at.py
|
clowder_server/migrations/0007_alert_expire_at.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-08-09 19:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clowder_server', '0006_auto_20170504_1834'),
]
operations = [
migrations.AddField(
model_name='alert',
name='expire_at',
field=models.DateTimeField(blank=True, null=True),
),
]
|
Add migration for expire at
|
CLOWDER: Add migration for expire at
|
Python
|
agpl-3.0
|
keithhackbarth/clowder_server,keithhackbarth/clowder_server,keithhackbarth/clowder_server,keithhackbarth/clowder_server
|
CLOWDER: Add migration for expire at
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-08-09 19:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clowder_server', '0006_auto_20170504_1834'),
]
operations = [
migrations.AddField(
model_name='alert',
name='expire_at',
field=models.DateTimeField(blank=True, null=True),
),
]
|
<commit_before><commit_msg>CLOWDER: Add migration for expire at<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-08-09 19:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clowder_server', '0006_auto_20170504_1834'),
]
operations = [
migrations.AddField(
model_name='alert',
name='expire_at',
field=models.DateTimeField(blank=True, null=True),
),
]
|
CLOWDER: Add migration for expire at# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-08-09 19:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clowder_server', '0006_auto_20170504_1834'),
]
operations = [
migrations.AddField(
model_name='alert',
name='expire_at',
field=models.DateTimeField(blank=True, null=True),
),
]
|
<commit_before><commit_msg>CLOWDER: Add migration for expire at<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-08-09 19:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clowder_server', '0006_auto_20170504_1834'),
]
operations = [
migrations.AddField(
model_name='alert',
name='expire_at',
field=models.DateTimeField(blank=True, null=True),
),
]
|
|
a7e96f68ad2c222a360ad51d9826268ba2620c9b
|
morse_trainer/test_audio.py
|
morse_trainer/test_audio.py
|
import math
import numpy
import pyaudio
def sine(frequency, length, rate):
length = int(length * rate)
factor = float(frequency) * (math.pi * 2) / rate
return numpy.sin(numpy.arange(length) * factor)
def play_tone(stream, frequency=440, length=1, rate=44100):
chunks = []
chunks.append(sine(frequency, length, rate))
chunk = numpy.concatenate(chunks) * 0.25
stream.write(chunk.astype(numpy.float32).tostring())
if __name__ == '__main__':
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32,
channels=1, rate=44100, output=1)
play_tone(stream, frequency=750, length=10)
stream.close()
p.terminate()
|
Test code to get sound duration right
|
Test code to get sound duration right
|
Python
|
mit
|
rzzzwilson/morse,rzzzwilson/morse
|
Test code to get sound duration right
|
import math
import numpy
import pyaudio
def sine(frequency, length, rate):
length = int(length * rate)
factor = float(frequency) * (math.pi * 2) / rate
return numpy.sin(numpy.arange(length) * factor)
def play_tone(stream, frequency=440, length=1, rate=44100):
chunks = []
chunks.append(sine(frequency, length, rate))
chunk = numpy.concatenate(chunks) * 0.25
stream.write(chunk.astype(numpy.float32).tostring())
if __name__ == '__main__':
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32,
channels=1, rate=44100, output=1)
play_tone(stream, frequency=750, length=10)
stream.close()
p.terminate()
|
<commit_before><commit_msg>Test code to get sound duration right<commit_after>
|
import math
import numpy
import pyaudio
def sine(frequency, length, rate):
length = int(length * rate)
factor = float(frequency) * (math.pi * 2) / rate
return numpy.sin(numpy.arange(length) * factor)
def play_tone(stream, frequency=440, length=1, rate=44100):
chunks = []
chunks.append(sine(frequency, length, rate))
chunk = numpy.concatenate(chunks) * 0.25
stream.write(chunk.astype(numpy.float32).tostring())
if __name__ == '__main__':
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32,
channels=1, rate=44100, output=1)
play_tone(stream, frequency=750, length=10)
stream.close()
p.terminate()
|
Test code to get sound duration rightimport math
import numpy
import pyaudio
def sine(frequency, length, rate):
length = int(length * rate)
factor = float(frequency) * (math.pi * 2) / rate
return numpy.sin(numpy.arange(length) * factor)
def play_tone(stream, frequency=440, length=1, rate=44100):
chunks = []
chunks.append(sine(frequency, length, rate))
chunk = numpy.concatenate(chunks) * 0.25
stream.write(chunk.astype(numpy.float32).tostring())
if __name__ == '__main__':
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32,
channels=1, rate=44100, output=1)
play_tone(stream, frequency=750, length=10)
stream.close()
p.terminate()
|
<commit_before><commit_msg>Test code to get sound duration right<commit_after>import math
import numpy
import pyaudio
def sine(frequency, length, rate):
length = int(length * rate)
factor = float(frequency) * (math.pi * 2) / rate
return numpy.sin(numpy.arange(length) * factor)
def play_tone(stream, frequency=440, length=1, rate=44100):
chunks = []
chunks.append(sine(frequency, length, rate))
chunk = numpy.concatenate(chunks) * 0.25
stream.write(chunk.astype(numpy.float32).tostring())
if __name__ == '__main__':
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32,
channels=1, rate=44100, output=1)
play_tone(stream, frequency=750, length=10)
stream.close()
p.terminate()
|
|
adb2b9d8457756bb4484d06b36b8f5b620d2a34c
|
indra/tools/mechlinker_queries.py
|
indra/tools/mechlinker_queries.py
|
import pickle
from indra.tools.incremental_model import IncrementalModel
from indra.mechlinker import MechLinker
from indra.assemblers import EnglishAssembler
def print_linked_stmt(stmt):
source_txts = []
for source_stmt in stmt.source_stmts:
source_txt = EnglishAssembler([source_stmt]).make_model()
source_txts.append(source_txt)
query_txt = EnglishAssembler([stmt.inferred_stmt]).make_model()
final_txt = 'I know that '
for i, t in enumerate(source_txts):
final_txt += '(%d) %s ' % (i+1, t)
if i < len(source_txts) -1:
final_txt = final_txt[:-2] + ', and '
final_txt += 'Is it therefore true that ' + query_txt[:-1] + '?'
print final_txt
if __name__ == '__main__':
fname = 'models/rasmachine/rem/model.pkl'
model = IncrementalModel(fname)
model.preassemble()
stmts = model.toplevel_stmts
ml = MechLinker(stmts)
linked_stmts = ml.link_statements()
#linked_stmts = pickle.load(open('rasmachine_linked.pkl', 'rb'))
for stmt in linked_stmts:
print_linked_stmt(stmt)
|
Add English assembled mechanism linking questions
|
Add English assembled mechanism linking questions
|
Python
|
bsd-2-clause
|
jmuhlich/indra,bgyori/indra,pvtodorov/indra,johnbachman/belpy,johnbachman/belpy,sorgerlab/belpy,johnbachman/indra,bgyori/indra,bgyori/indra,jmuhlich/indra,sorgerlab/indra,sorgerlab/belpy,johnbachman/indra,pvtodorov/indra,pvtodorov/indra,pvtodorov/indra,sorgerlab/belpy,jmuhlich/indra,sorgerlab/indra,johnbachman/indra,sorgerlab/indra,johnbachman/belpy
|
Add English assembled mechanism linking questions
|
import pickle
from indra.tools.incremental_model import IncrementalModel
from indra.mechlinker import MechLinker
from indra.assemblers import EnglishAssembler
def print_linked_stmt(stmt):
source_txts = []
for source_stmt in stmt.source_stmts:
source_txt = EnglishAssembler([source_stmt]).make_model()
source_txts.append(source_txt)
query_txt = EnglishAssembler([stmt.inferred_stmt]).make_model()
final_txt = 'I know that '
for i, t in enumerate(source_txts):
final_txt += '(%d) %s ' % (i+1, t)
if i < len(source_txts) -1:
final_txt = final_txt[:-2] + ', and '
final_txt += 'Is it therefore true that ' + query_txt[:-1] + '?'
print final_txt
if __name__ == '__main__':
fname = 'models/rasmachine/rem/model.pkl'
model = IncrementalModel(fname)
model.preassemble()
stmts = model.toplevel_stmts
ml = MechLinker(stmts)
linked_stmts = ml.link_statements()
#linked_stmts = pickle.load(open('rasmachine_linked.pkl', 'rb'))
for stmt in linked_stmts:
print_linked_stmt(stmt)
|
<commit_before><commit_msg>Add English assembled mechanism linking questions<commit_after>
|
import pickle
from indra.tools.incremental_model import IncrementalModel
from indra.mechlinker import MechLinker
from indra.assemblers import EnglishAssembler
def print_linked_stmt(stmt):
source_txts = []
for source_stmt in stmt.source_stmts:
source_txt = EnglishAssembler([source_stmt]).make_model()
source_txts.append(source_txt)
query_txt = EnglishAssembler([stmt.inferred_stmt]).make_model()
final_txt = 'I know that '
for i, t in enumerate(source_txts):
final_txt += '(%d) %s ' % (i+1, t)
if i < len(source_txts) -1:
final_txt = final_txt[:-2] + ', and '
final_txt += 'Is it therefore true that ' + query_txt[:-1] + '?'
print final_txt
if __name__ == '__main__':
fname = 'models/rasmachine/rem/model.pkl'
model = IncrementalModel(fname)
model.preassemble()
stmts = model.toplevel_stmts
ml = MechLinker(stmts)
linked_stmts = ml.link_statements()
#linked_stmts = pickle.load(open('rasmachine_linked.pkl', 'rb'))
for stmt in linked_stmts:
print_linked_stmt(stmt)
|
Add English assembled mechanism linking questionsimport pickle
from indra.tools.incremental_model import IncrementalModel
from indra.mechlinker import MechLinker
from indra.assemblers import EnglishAssembler
def print_linked_stmt(stmt):
source_txts = []
for source_stmt in stmt.source_stmts:
source_txt = EnglishAssembler([source_stmt]).make_model()
source_txts.append(source_txt)
query_txt = EnglishAssembler([stmt.inferred_stmt]).make_model()
final_txt = 'I know that '
for i, t in enumerate(source_txts):
final_txt += '(%d) %s ' % (i+1, t)
if i < len(source_txts) -1:
final_txt = final_txt[:-2] + ', and '
final_txt += 'Is it therefore true that ' + query_txt[:-1] + '?'
print final_txt
if __name__ == '__main__':
fname = 'models/rasmachine/rem/model.pkl'
model = IncrementalModel(fname)
model.preassemble()
stmts = model.toplevel_stmts
ml = MechLinker(stmts)
linked_stmts = ml.link_statements()
#linked_stmts = pickle.load(open('rasmachine_linked.pkl', 'rb'))
for stmt in linked_stmts:
print_linked_stmt(stmt)
|
<commit_before><commit_msg>Add English assembled mechanism linking questions<commit_after>import pickle
from indra.tools.incremental_model import IncrementalModel
from indra.mechlinker import MechLinker
from indra.assemblers import EnglishAssembler
def print_linked_stmt(stmt):
source_txts = []
for source_stmt in stmt.source_stmts:
source_txt = EnglishAssembler([source_stmt]).make_model()
source_txts.append(source_txt)
query_txt = EnglishAssembler([stmt.inferred_stmt]).make_model()
final_txt = 'I know that '
for i, t in enumerate(source_txts):
final_txt += '(%d) %s ' % (i+1, t)
if i < len(source_txts) -1:
final_txt = final_txt[:-2] + ', and '
final_txt += 'Is it therefore true that ' + query_txt[:-1] + '?'
print final_txt
if __name__ == '__main__':
fname = 'models/rasmachine/rem/model.pkl'
model = IncrementalModel(fname)
model.preassemble()
stmts = model.toplevel_stmts
ml = MechLinker(stmts)
linked_stmts = ml.link_statements()
#linked_stmts = pickle.load(open('rasmachine_linked.pkl', 'rb'))
for stmt in linked_stmts:
print_linked_stmt(stmt)
|
|
a5503cc64a1ef44048c8aa10e36344508076201c
|
src/python/grpcio_tests/tests_aio/unit/channel_ready_test.py
|
src/python/grpcio_tests/tests_aio/unit/channel_ready_test.py
|
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing the channel_ready function."""
import asyncio
import gc
import logging
import time
import unittest
import grpc
from grpc.experimental import aio
from tests.unit.framework.common import get_socket, test_constants
from tests_aio.unit import _common
from tests_aio.unit._test_base import AioTestBase
from tests_aio.unit._test_server import start_test_server
class TestChannelReady(AioTestBase):
async def setUp(self):
address, self._port, self._socket = get_socket(listen=False)
self._channel = aio.insecure_channel(f"{address}:{self._port}")
self._socket.close()
async def tearDown(self):
await self._channel.close()
async def test_channel_ready_success(self):
# Start `channel_ready` as another Task
channel_ready_task = self.loop.create_task(
aio.channel_ready(self._channel))
# Wait for TRANSIENT_FAILURE
await _common.block_until_certain_state(
self._channel, grpc.ChannelConnectivity.TRANSIENT_FAILURE)
try:
# Start the server
_, server = await start_test_server(port=self._port)
# The RPC should recover itself
await channel_ready_task
finally:
if server is not None:
await server.stop(None)
async def test_channel_ready_blocked(self):
with self.assertRaises(asyncio.TimeoutError):
await asyncio.wait_for(aio.channel_ready(self._channel),
test_constants.SHORT_TIMEOUT)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)
|
Add unit test for channel ready
|
Add unit test for channel ready
|
Python
|
apache-2.0
|
jtattermusch/grpc,ctiller/grpc,grpc/grpc,jboeuf/grpc,firebase/grpc,ejona86/grpc,firebase/grpc,jboeuf/grpc,vjpai/grpc,jtattermusch/grpc,jtattermusch/grpc,jboeuf/grpc,firebase/grpc,nicolasnoble/grpc,firebase/grpc,nicolasnoble/grpc,vjpai/grpc,firebase/grpc,vjpai/grpc,ejona86/grpc,grpc/grpc,vjpai/grpc,vjpai/grpc,jtattermusch/grpc,donnadionne/grpc,donnadionne/grpc,jtattermusch/grpc,stanley-cheung/grpc,nicolasnoble/grpc,ctiller/grpc,firebase/grpc,stanley-cheung/grpc,vjpai/grpc,vjpai/grpc,ejona86/grpc,grpc/grpc,ejona86/grpc,ctiller/grpc,ctiller/grpc,donnadionne/grpc,grpc/grpc,jboeuf/grpc,donnadionne/grpc,jtattermusch/grpc,grpc/grpc,nicolasnoble/grpc,nicolasnoble/grpc,donnadionne/grpc,donnadionne/grpc,grpc/grpc,donnadionne/grpc,ejona86/grpc,donnadionne/grpc,stanley-cheung/grpc,nicolasnoble/grpc,nicolasnoble/grpc,nicolasnoble/grpc,ejona86/grpc,stanley-cheung/grpc,jtattermusch/grpc,ctiller/grpc,firebase/grpc,vjpai/grpc,jtattermusch/grpc,grpc/grpc,ctiller/grpc,donnadionne/grpc,ejona86/grpc,grpc/grpc,jtattermusch/grpc,grpc/grpc,stanley-cheung/grpc,stanley-cheung/grpc,stanley-cheung/grpc,jboeuf/grpc,firebase/grpc,nicolasnoble/grpc,jtattermusch/grpc,donnadionne/grpc,vjpai/grpc,firebase/grpc,nicolasnoble/grpc,stanley-cheung/grpc,vjpai/grpc,vjpai/grpc,grpc/grpc,ctiller/grpc,grpc/grpc,firebase/grpc,ctiller/grpc,stanley-cheung/grpc,nicolasnoble/grpc,ctiller/grpc,jboeuf/grpc,nicolasnoble/grpc,ejona86/grpc,ctiller/grpc,jboeuf/grpc,jboeuf/grpc,firebase/grpc,ejona86/grpc,donnadionne/grpc,stanley-cheung/grpc,donnadionne/grpc,grpc/grpc,jtattermusch/grpc,jboeuf/grpc,jboeuf/grpc,ctiller/grpc,jboeuf/grpc,firebase/grpc,ctiller/grpc,jboeuf/grpc,ejona86/grpc,ejona86/grpc,jtattermusch/grpc,ejona86/grpc,stanley-cheung/grpc,vjpai/grpc,stanley-cheung/grpc
|
Add unit test for channel ready
|
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing the channel_ready function."""
import asyncio
import gc
import logging
import time
import unittest
import grpc
from grpc.experimental import aio
from tests.unit.framework.common import get_socket, test_constants
from tests_aio.unit import _common
from tests_aio.unit._test_base import AioTestBase
from tests_aio.unit._test_server import start_test_server
class TestChannelReady(AioTestBase):
async def setUp(self):
address, self._port, self._socket = get_socket(listen=False)
self._channel = aio.insecure_channel(f"{address}:{self._port}")
self._socket.close()
async def tearDown(self):
await self._channel.close()
async def test_channel_ready_success(self):
# Start `channel_ready` as another Task
channel_ready_task = self.loop.create_task(
aio.channel_ready(self._channel))
# Wait for TRANSIENT_FAILURE
await _common.block_until_certain_state(
self._channel, grpc.ChannelConnectivity.TRANSIENT_FAILURE)
try:
# Start the server
_, server = await start_test_server(port=self._port)
# The RPC should recover itself
await channel_ready_task
finally:
if server is not None:
await server.stop(None)
async def test_channel_ready_blocked(self):
with self.assertRaises(asyncio.TimeoutError):
await asyncio.wait_for(aio.channel_ready(self._channel),
test_constants.SHORT_TIMEOUT)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add unit test for channel ready<commit_after>
|
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing the channel_ready function."""
import asyncio
import gc
import logging
import time
import unittest
import grpc
from grpc.experimental import aio
from tests.unit.framework.common import get_socket, test_constants
from tests_aio.unit import _common
from tests_aio.unit._test_base import AioTestBase
from tests_aio.unit._test_server import start_test_server
class TestChannelReady(AioTestBase):
async def setUp(self):
address, self._port, self._socket = get_socket(listen=False)
self._channel = aio.insecure_channel(f"{address}:{self._port}")
self._socket.close()
async def tearDown(self):
await self._channel.close()
async def test_channel_ready_success(self):
# Start `channel_ready` as another Task
channel_ready_task = self.loop.create_task(
aio.channel_ready(self._channel))
# Wait for TRANSIENT_FAILURE
await _common.block_until_certain_state(
self._channel, grpc.ChannelConnectivity.TRANSIENT_FAILURE)
try:
# Start the server
_, server = await start_test_server(port=self._port)
# The RPC should recover itself
await channel_ready_task
finally:
if server is not None:
await server.stop(None)
async def test_channel_ready_blocked(self):
with self.assertRaises(asyncio.TimeoutError):
await asyncio.wait_for(aio.channel_ready(self._channel),
test_constants.SHORT_TIMEOUT)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)
|
Add unit test for channel ready# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing the channel_ready function."""
import asyncio
import gc
import logging
import time
import unittest
import grpc
from grpc.experimental import aio
from tests.unit.framework.common import get_socket, test_constants
from tests_aio.unit import _common
from tests_aio.unit._test_base import AioTestBase
from tests_aio.unit._test_server import start_test_server
class TestChannelReady(AioTestBase):
async def setUp(self):
address, self._port, self._socket = get_socket(listen=False)
self._channel = aio.insecure_channel(f"{address}:{self._port}")
self._socket.close()
async def tearDown(self):
await self._channel.close()
async def test_channel_ready_success(self):
# Start `channel_ready` as another Task
channel_ready_task = self.loop.create_task(
aio.channel_ready(self._channel))
# Wait for TRANSIENT_FAILURE
await _common.block_until_certain_state(
self._channel, grpc.ChannelConnectivity.TRANSIENT_FAILURE)
try:
# Start the server
_, server = await start_test_server(port=self._port)
# The RPC should recover itself
await channel_ready_task
finally:
if server is not None:
await server.stop(None)
async def test_channel_ready_blocked(self):
with self.assertRaises(asyncio.TimeoutError):
await asyncio.wait_for(aio.channel_ready(self._channel),
test_constants.SHORT_TIMEOUT)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add unit test for channel ready<commit_after># Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing the channel_ready function."""
import asyncio
import gc
import logging
import time
import unittest
import grpc
from grpc.experimental import aio
from tests.unit.framework.common import get_socket, test_constants
from tests_aio.unit import _common
from tests_aio.unit._test_base import AioTestBase
from tests_aio.unit._test_server import start_test_server
class TestChannelReady(AioTestBase):
async def setUp(self):
address, self._port, self._socket = get_socket(listen=False)
self._channel = aio.insecure_channel(f"{address}:{self._port}")
self._socket.close()
async def tearDown(self):
await self._channel.close()
async def test_channel_ready_success(self):
# Start `channel_ready` as another Task
channel_ready_task = self.loop.create_task(
aio.channel_ready(self._channel))
# Wait for TRANSIENT_FAILURE
await _common.block_until_certain_state(
self._channel, grpc.ChannelConnectivity.TRANSIENT_FAILURE)
try:
# Start the server
_, server = await start_test_server(port=self._port)
# The RPC should recover itself
await channel_ready_task
finally:
if server is not None:
await server.stop(None)
async def test_channel_ready_blocked(self):
with self.assertRaises(asyncio.TimeoutError):
await asyncio.wait_for(aio.channel_ready(self._channel),
test_constants.SHORT_TIMEOUT)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)
|
|
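The record above exercises grpc's experimental aio channel-readiness helper. As a rough usage sketch of the same calls (not taken from the record; the target address and timeout are placeholder values):

import asyncio

from grpc.experimental import aio


async def wait_for_server(target, timeout=5.0):
    # Open an insecure channel and wait until it reaches READY, mirroring
    # the aio.channel_ready() call exercised by the test above.
    channel = aio.insecure_channel(target)
    try:
        await asyncio.wait_for(aio.channel_ready(channel), timeout)
        return channel
    except asyncio.TimeoutError:
        # Close the channel before propagating, as the test's tearDown does.
        await channel.close()
        raise


if __name__ == "__main__":
    asyncio.run(wait_for_server("localhost:50051"))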
98ae7ef4cdb51252733535d2314664333957eda3
|
tests/test_features.py
|
tests/test_features.py
|
import numpy as np
import pytest
from microscopium import features
@pytest.fixture(scope="module", params=[np.uint8, np.float])
def haralick_image(request):
haralick_image = np.array([[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 2, 2, 2],
[2, 2, 3, 3]]).astype(request.param)
return haralick_image
def test_haralick_features_8bit(haralick_image):
fs, names = features.haralick_features(haralick_image,
distances=[5],
angles=[0])
expected_names = [
'haralick-contrast-distance5-angle0',
'haralick-dissimilarity-distance5-angle0',
'haralick-homogeneity-distance5-angle0',
'haralick-ASM-distance5-angle0',
'haralick-energy-distance5-angle0',
'haralick-correlation-distance5-angle0']
expected_features = np.array([0., 0., 0., 0., 0., 1.])
assert np.allclose(fs, expected_features)
assert names == expected_names
|
Add unit test for haralick_features()
|
Add unit test for haralick_features()
|
Python
|
bsd-3-clause
|
jni/microscopium,jni/microscopium,microscopium/microscopium,microscopium/microscopium
|
Add unit test for haralick_features()
|
import numpy as np
import pytest
from microscopium import features
@pytest.fixture(scope="module", params=[np.uint8, np.float])
def haralick_image(request):
haralick_image = np.array([[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 2, 2, 2],
[2, 2, 3, 3]]).astype(request.param)
return haralick_image
def test_haralick_features_8bit(haralick_image):
fs, names = features.haralick_features(haralick_image,
distances=[5],
angles=[0])
expected_names = [
'haralick-contrast-distance5-angle0',
'haralick-dissimilarity-distance5-angle0',
'haralick-homogeneity-distance5-angle0',
'haralick-ASM-distance5-angle0',
'haralick-energy-distance5-angle0',
'haralick-correlation-distance5-angle0']
expected_features = np.array([0., 0., 0., 0., 0., 1.])
assert np.allclose(fs, expected_features)
assert names == expected_names
|
<commit_before><commit_msg>Add unit test for haralick_features()<commit_after>
|
import numpy as np
import pytest
from microscopium import features
@pytest.fixture(scope="module", params=[np.uint8, np.float])
def haralick_image(request):
haralick_image = np.array([[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 2, 2, 2],
[2, 2, 3, 3]]).astype(request.param)
return haralick_image
def test_haralick_features_8bit(haralick_image):
fs, names = features.haralick_features(haralick_image,
distances=[5],
angles=[0])
expected_names = [
'haralick-contrast-distance5-angle0',
'haralick-dissimilarity-distance5-angle0',
'haralick-homogeneity-distance5-angle0',
'haralick-ASM-distance5-angle0',
'haralick-energy-distance5-angle0',
'haralick-correlation-distance5-angle0']
expected_features = np.array([0., 0., 0., 0., 0., 1.])
assert np.allclose(fs, expected_features)
assert names == expected_names
|
Add unit test for haralick_features()import numpy as np
import pytest
from microscopium import features
@pytest.fixture(scope="module", params=[np.uint8, np.float])
def haralick_image(request):
haralick_image = np.array([[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 2, 2, 2],
[2, 2, 3, 3]]).astype(request.param)
return haralick_image
def test_haralick_features_8bit(haralick_image):
fs, names = features.haralick_features(haralick_image,
distances=[5],
angles=[0])
expected_names = [
'haralick-contrast-distance5-angle0',
'haralick-dissimilarity-distance5-angle0',
'haralick-homogeneity-distance5-angle0',
'haralick-ASM-distance5-angle0',
'haralick-energy-distance5-angle0',
'haralick-correlation-distance5-angle0']
expected_features = np.array([0., 0., 0., 0., 0., 1.])
assert np.allclose(fs, expected_features)
assert names == expected_names
|
<commit_before><commit_msg>Add unit test for haralick_features()<commit_after>import numpy as np
import pytest
from microscopium import features
@pytest.fixture(scope="module", params=[np.uint8, np.float])
def haralick_image(request):
haralick_image = np.array([[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 2, 2, 2],
[2, 2, 3, 3]]).astype(request.param)
return haralick_image
def test_haralick_features_8bit(haralick_image):
fs, names = features.haralick_features(haralick_image,
distances=[5],
angles=[0])
expected_names = [
'haralick-contrast-distance5-angle0',
'haralick-dissimilarity-distance5-angle0',
'haralick-homogeneity-distance5-angle0',
'haralick-ASM-distance5-angle0',
'haralick-energy-distance5-angle0',
'haralick-correlation-distance5-angle0']
expected_features = np.array([0., 0., 0., 0., 0., 1.])
assert np.allclose(fs, expected_features)
assert names == expected_names
|
|
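The test above relies on a pytest fixture parametrized over dtypes. A minimal, self-contained illustration of that mechanism (the fixture and test names here are made up, not from microscopium):

import numpy as np
import pytest


@pytest.fixture(params=[np.uint8, np.float64])
def small_image(request):
    # request.param takes each listed dtype in turn, so every test using
    # this fixture runs once per dtype.
    return np.arange(16).reshape(4, 4).astype(request.param)


def test_shape_is_preserved(small_image):
    assert small_image.shape == (4, 4)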
b8500132def1f7df0daa9b8abb070ae9d49d1a4e
|
tests/test_int_case.py
|
tests/test_int_case.py
|
from test_util import *
from funkyyak import grad
def test_int_case():
check_equivalent((lambda x:x*x)(2.0), 4.0)
check_equivalent((lambda x:x*x)(2) + 0.0, 4.0)
check_equivalent(grad(lambda x:x*x)(2.0), 4.0)
check_equivalent(grad(lambda x:x*x)(2) + 0.0, 4.0)
|
Test that an int arg to grad is okay
|
Test that an int arg to grad is okay
|
Python
|
mit
|
barak/autograd
|
Test that an int arg to grad is okay
|
from test_util import *
from funkyyak import grad
def test_int_case():
check_equivalent((lambda x:x*x)(2.0), 4.0)
check_equivalent((lambda x:x*x)(2) + 0.0, 4.0)
check_equivalent(grad(lambda x:x*x)(2.0), 4.0)
check_equivalent(grad(lambda x:x*x)(2) + 0.0, 4.0)
|
<commit_before><commit_msg>Test that an int arg to grad is okay<commit_after>
|
from test_util import *
from funkyyak import grad
def test_int_case():
check_equivalent((lambda x:x*x)(2.0), 4.0)
check_equivalent((lambda x:x*x)(2) + 0.0, 4.0)
check_equivalent(grad(lambda x:x*x)(2.0), 4.0)
check_equivalent(grad(lambda x:x*x)(2) + 0.0, 4.0)
|
Test that an int arg to grad is okayfrom test_util import *
from funkyyak import grad
def test_int_case():
check_equivalent((lambda x:x*x)(2.0), 4.0)
check_equivalent((lambda x:x*x)(2) + 0.0, 4.0)
check_equivalent(grad(lambda x:x*x)(2.0), 4.0)
check_equivalent(grad(lambda x:x*x)(2) + 0.0, 4.0)
|
<commit_before><commit_msg>Test that an int arg to grad is okay<commit_after>from test_util import *
from funkyyak import grad
def test_int_case():
check_equivalent((lambda x:x*x)(2.0), 4.0)
check_equivalent((lambda x:x*x)(2) + 0.0, 4.0)
check_equivalent(grad(lambda x:x*x)(2.0), 4.0)
check_equivalent(grad(lambda x:x*x)(2) + 0.0, 4.0)
|
|
e641a19f16b99425aa1b15bd8524f2612b0d6bab
|
tests/test_registry.py
|
tests/test_registry.py
|
import pytest
from web_test_base import *
class TestIATIRegistry(WebTestBase):
urls_to_get = [
"http://iatiregistry.org/"
, "http://www.iatiregistry.org/"
, "https://iatiregistry.org/"
, "https://www.iatiregistry.org/"
]
def test_contains_links(self, loaded_request):
"""
Test that each page contains links to the defined URLs.
"""
result = self._get_links_from_page(loaded_request)
assert "http://www.aidtransparency.net/" in result
assert "http://www.iatistandard.org/" in result
|
Add tests for the IATI Registry. This adds a 200 response and link checks for the IATI Registry
|
Add tests for the IATI Registry
This adds a 200 response and link checks for the IATI Registry
|
Python
|
mit
|
IATI/IATI-Website-Tests
|
Add tests for the IATI Registry
This adds a 200 response and link checks for the IATI Registry
|
import pytest
from web_test_base import *
class TestIATIRegistry(WebTestBase):
urls_to_get = [
"http://iatiregistry.org/"
, "http://www.iatiregistry.org/"
, "https://iatiregistry.org/"
, "https://www.iatiregistry.org/"
]
def test_contains_links(self, loaded_request):
"""
Test that each page contains links to the defined URLs.
"""
result = self._get_links_from_page(loaded_request)
assert "http://www.aidtransparency.net/" in result
assert "http://www.iatistandard.org/" in result
|
<commit_before><commit_msg>Add tests for the IATI Registry
This adds a 200 response and link checks for the IATI Registry<commit_after>
|
import pytest
from web_test_base import *
class TestIATIRegistry(WebTestBase):
urls_to_get = [
"http://iatiregistry.org/"
, "http://www.iatiregistry.org/"
, "https://iatiregistry.org/"
, "https://www.iatiregistry.org/"
]
def test_contains_links(self, loaded_request):
"""
Test that each page contains links to the defined URLs.
"""
result = self._get_links_from_page(loaded_request)
assert "http://www.aidtransparency.net/" in result
assert "http://www.iatistandard.org/" in result
|
Add tests for the IATI Registry
This adds a 200 response and link checks for the IATI Registryimport pytest
from web_test_base import *
class TestIATIRegistry(WebTestBase):
urls_to_get = [
"http://iatiregistry.org/"
, "http://www.iatiregistry.org/"
, "https://iatiregistry.org/"
, "https://www.iatiregistry.org/"
]
def test_contains_links(self, loaded_request):
"""
Test that each page contains links to the defined URLs.
"""
result = self._get_links_from_page(loaded_request)
assert "http://www.aidtransparency.net/" in result
assert "http://www.iatistandard.org/" in result
|
<commit_before><commit_msg>Add tests for the IATI Registry
This adds a 200 response and link checks for the IATI Registry<commit_after>import pytest
from web_test_base import *
class TestIATIRegistry(WebTestBase):
urls_to_get = [
"http://iatiregistry.org/"
, "http://www.iatiregistry.org/"
, "https://iatiregistry.org/"
, "https://www.iatiregistry.org/"
]
def test_contains_links(self, loaded_request):
"""
Test that each page contains links to the defined URLs.
"""
result = self._get_links_from_page(loaded_request)
assert "http://www.aidtransparency.net/" in result
assert "http://www.iatistandard.org/" in result
|
|
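The WebTestBase helpers used above are specific to IATI-Website-Tests. A rough standalone sketch of the same 200-response and link checks, written here with requests and a naive href regex rather than the project's own base class:

import re

import requests

URLS = [
    "http://iatiregistry.org/",
    "https://iatiregistry.org/",
]


def links_on_page(url):
    # Fetch the page, insist on a 200 response, and pull out href targets.
    response = requests.get(url, timeout=30)
    assert response.status_code == 200
    return re.findall(r'href="([^"]+)"', response.text)


def test_registry_links_to_iati_sites():
    for url in URLS:
        links = links_on_page(url)
        assert "http://www.iatistandard.org/" in links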
da9e0365369e647c6bd386e53fff11715c6d957a
|
libnamebench/config_test.py
|
libnamebench/config_test.py
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the config module."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import unittest
import config
class ConfigTest(unittest.TestCase):
def testParseFullLine(self):
line = '129.250.35.251=NTT (2) # y.ns.gin.ntt.net,39.569,-104.8582 (Englewood/CO/US)'
expected = {'name': 'NTT (2)', 'service': 'NTT', 'ip': '129.250.35.251',
'lon': '-104.8582', 'instance': '2', 'country_code': 'US',
'lat': '39.569'}
self.assertEquals(config._ParseServerLine(line), expected)
def testOpenDNSLine(self):
line = '208.67.220.220=OpenDNS # resolver2.opendns.com'
expected = {'name': 'OpenDNS', 'service': 'OpenDNS', 'ip': '208.67.220.220',
'lon': None, 'instance': None, 'country_code': None,
'lat': None}
self.assertEquals(config._ParseServerLine(line), expected)
def testLineWithNoRegion(self):
line = '4.2.2.2=Level/GTEI-2 (3) # vnsc-bak.sys.gtei.net,38.0,-97.0 (US) '
expected = {'name': 'Level/GTEI-2 (3)', 'service': 'Level/GTEI-2',
'ip': '4.2.2.2', 'lon': '-97.0', 'instance': '3',
'country_code': 'US', 'lat': '38.0'}
self.assertEquals(config._ParseServerLine(line), expected)
if __name__ == '__main__':
unittest.main()
|
Add some tests for dns config parsing
|
Add some tests for dns config parsing
|
Python
|
apache-2.0
|
HerlonNascimento/namebench,benklaasen/namebench,21winner/namebench,souzainf3/namebench,lukasfenix/namebench,kiseok7/namebench,dimazalfrianz/namebench,PyroShark/namebench,RomanHargrave/namebench,jackjshin/namebench,mspringett/namebench,ItsAGeekThing/namebench,ZuluPro/namebench,MarnuLombard/namebench,pombreda/namebench,snailbob/namebench,tectronics/namebench,accomac/namebench,dsjr2006/namebench,felipsmartins/namebench,llaera/namebench,tushevorg/namebench,cvanwie/namebench,repomain/namebench,phy0/namebench,Jeff-Lewis/namebench,Forgen/namebench,iamang/namebench,siripuramrk/namebench,feardax/namebench,edumatos/namebench,melissaihrig/namebench,ajitsonlion/namebench,Jasoning/namebench,stefrobb/namebench,woozzoom/namebench,sund/namebench,jaechankim/namebench,MANICX100/namebench,jaded44/namebench,GLMeece/namebench,jlobaton/namebench,omerhasan/namebench,chosen1/namebench,BeZazz/lamebench,antar2801/namebench,AViisiion/namebench,skuarch/namebench,shannonjlove/namebench,perrytm/namebench,yiyuandao/namebench,petabytekr/namebench,TheNite/namebench,TorpedoXL/namebench,arjun372/namebench,FatBumbleee/namebench,teknix/namebench,sushifant/namebench,CookiesandCake/namebench,ericmckean/namebench,when30/namebench,Ritvik1512/namebench,deepak5/namebench,watchamakulit02/namebench,santoshsahoo/namebench,fbidu/namebench,Bandito43/namebench,jtrag/namebench,nadeemat/namebench,richardgroves/namebench,el-lumbergato/namebench,antsant/namebench,nt1st/namebench,corruptnova/namebench,cloudcache/namebench,hitrust/namebench,vishnunuk/namebench,Spindletop16/namebench,chamakov/namebench,uwevil/namebench,aman-tugnawat/namebench,thiagomagero/namebench,bluemask2001/namebench,imranrony/namebench,webhost/namebench,jjoaonunes/namebench,pyshcoder/namebench,hwuiwon/namebench,techsd/namebench,illAdvised/namebench,ajs124/namebench,xubayer786/namebench,renatogames2/namebench,pacav69/namebench,beermix/namebench,palimadra/namebench,gdbdzgd/namebench,Hazer/namebench,Trinitaria/namebench,Kudeshido/namebench,alexlovelltroy/namebench,DanielAttia/namebench,isoriss123/namebench,rbenjamin/namebench,bgammill/namebench,nishad/namebench,cartersgenes/namebench,KingPsychopath/namebench,AdamHull/namebench,Xeleste/namebench,KibaAmor/namebench,doadin/namebench,doantranhoang/namebench,thatchristoph/namebench,asolfre/namebench,evelynmitchell/namebench,ulaskaraoren/namebench,sbalun/namebench,jimb0616/namebench,xeoron/namebench,kevinxw/namebench,MicroWorldwide/namebench,wa111/namebench,xxhank/namebench,cah0211/namebench,alebcay/namebench,RichardWilliamPearse/namebench,fevangelou/namebench,unreal666/namebench,edesiocs/namebench,wluizguedes/namebench,rubasben/namebench,LavyshAlexander/namebench,rosemead/namebench,renanrodm/namebench,gavinfaux/namebench,erasilva/namebench,razrichter/namebench,kristi29091988/namebench,LegitSavage/namebench,ronzohan/namebench,ran0101/namebench,takuya/namebench,eladelad/namebench,thanhuwng/namebench,manaure/namebench,movermeyer/namebench,donavoncade/namebench,etxc/namebench,jakeylube95/namebench,Arrowofdarkness/namebench,hypnotika/namebench,CrazeeIvan/namebench,leeoo/namebench,cyranodb/namebench,trulow/namebench,crocleco/namebench,jevgen/namebench,danieljl/namebench,tcffisher/namebench,seshin/namebench,deeb230/namebench,mystique1029/namebench,qbektrix/namebench,Max-Vader/namebench,hashem78/namebench,edmilson19/namebench,AgentN/namebench,michaeldavidcarr/namebench
|
Add some tests for dns config parsing
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the config module."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import unittest
import config
class ConfigTest(unittest.TestCase):
def testParseFullLine(self):
line = '129.250.35.251=NTT (2) # y.ns.gin.ntt.net,39.569,-104.8582 (Englewood/CO/US)'
expected = {'name': 'NTT (2)', 'service': 'NTT', 'ip': '129.250.35.251',
'lon': '-104.8582', 'instance': '2', 'country_code': 'US',
'lat': '39.569'}
self.assertEquals(config._ParseServerLine(line), expected)
def testOpenDNSLine(self):
line = '208.67.220.220=OpenDNS # resolver2.opendns.com'
expected = {'name': 'OpenDNS', 'service': 'OpenDNS', 'ip': '208.67.220.220',
'lon': None, 'instance': None, 'country_code': None,
'lat': None}
self.assertEquals(config._ParseServerLine(line), expected)
def testLineWithNoRegion(self):
line = '4.2.2.2=Level/GTEI-2 (3) # vnsc-bak.sys.gtei.net,38.0,-97.0 (US) '
expected = {'name': 'Level/GTEI-2 (3)', 'service': 'Level/GTEI-2',
'ip': '4.2.2.2', 'lon': '-97.0', 'instance': '3',
'country_code': 'US', 'lat': '38.0'}
self.assertEquals(config._ParseServerLine(line), expected)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add some tests for dns config parsing<commit_after>
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the config module."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import unittest
import config
class ConfigTest(unittest.TestCase):
def testParseFullLine(self):
line = '129.250.35.251=NTT (2) # y.ns.gin.ntt.net,39.569,-104.8582 (Englewood/CO/US)'
expected = {'name': 'NTT (2)', 'service': 'NTT', 'ip': '129.250.35.251',
'lon': '-104.8582', 'instance': '2', 'country_code': 'US',
'lat': '39.569'}
self.assertEquals(config._ParseServerLine(line), expected)
def testOpenDNSLine(self):
line = '208.67.220.220=OpenDNS # resolver2.opendns.com'
expected = {'name': 'OpenDNS', 'service': 'OpenDNS', 'ip': '208.67.220.220',
'lon': None, 'instance': None, 'country_code': None,
'lat': None}
self.assertEquals(config._ParseServerLine(line), expected)
def testLineWithNoRegion(self):
line = '4.2.2.2=Level/GTEI-2 (3) # vnsc-bak.sys.gtei.net,38.0,-97.0 (US) '
expected = {'name': 'Level/GTEI-2 (3)', 'service': 'Level/GTEI-2',
'ip': '4.2.2.2', 'lon': '-97.0', 'instance': '3',
'country_code': 'US', 'lat': '38.0'}
self.assertEquals(config._ParseServerLine(line), expected)
if __name__ == '__main__':
unittest.main()
|
Add some tests for dns config parsing#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the config module."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import unittest
import config
class ConfigTest(unittest.TestCase):
def testParseFullLine(self):
line = '129.250.35.251=NTT (2) # y.ns.gin.ntt.net,39.569,-104.8582 (Englewood/CO/US)'
expected = {'name': 'NTT (2)', 'service': 'NTT', 'ip': '129.250.35.251',
'lon': '-104.8582', 'instance': '2', 'country_code': 'US',
'lat': '39.569'}
self.assertEquals(config._ParseServerLine(line), expected)
def testOpenDNSLine(self):
line = '208.67.220.220=OpenDNS # resolver2.opendns.com'
expected = {'name': 'OpenDNS', 'service': 'OpenDNS', 'ip': '208.67.220.220',
'lon': None, 'instance': None, 'country_code': None,
'lat': None}
self.assertEquals(config._ParseServerLine(line), expected)
def testLineWithNoRegion(self):
line = '4.2.2.2=Level/GTEI-2 (3) # vnsc-bak.sys.gtei.net,38.0,-97.0 (US) '
expected = {'name': 'Level/GTEI-2 (3)', 'service': 'Level/GTEI-2',
'ip': '4.2.2.2', 'lon': '-97.0', 'instance': '3',
'country_code': 'US', 'lat': '38.0'}
self.assertEquals(config._ParseServerLine(line), expected)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add some tests for dns config parsing<commit_after>#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the config module."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import unittest
import config
class ConfigTest(unittest.TestCase):
def testParseFullLine(self):
line = '129.250.35.251=NTT (2) # y.ns.gin.ntt.net,39.569,-104.8582 (Englewood/CO/US)'
expected = {'name': 'NTT (2)', 'service': 'NTT', 'ip': '129.250.35.251',
'lon': '-104.8582', 'instance': '2', 'country_code': 'US',
'lat': '39.569'}
self.assertEquals(config._ParseServerLine(line), expected)
def testOpenDNSLine(self):
line = '208.67.220.220=OpenDNS # resolver2.opendns.com'
expected = {'name': 'OpenDNS', 'service': 'OpenDNS', 'ip': '208.67.220.220',
'lon': None, 'instance': None, 'country_code': None,
'lat': None}
self.assertEquals(config._ParseServerLine(line), expected)
def testLineWithNoRegion(self):
line = '4.2.2.2=Level/GTEI-2 (3) # vnsc-bak.sys.gtei.net,38.0,-97.0 (US) '
expected = {'name': 'Level/GTEI-2 (3)', 'service': 'Level/GTEI-2',
'ip': '4.2.2.2', 'lon': '-97.0', 'instance': '3',
'country_code': 'US', 'lat': '38.0'}
self.assertEquals(config._ParseServerLine(line), expected)
if __name__ == '__main__':
unittest.main()
|
|
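The tests above pin down the behaviour of config._ParseServerLine without showing it. A from-scratch sketch of the kind of parsing they describe, written to handle the three sample lines above (illustrative only, not namebench's actual implementation):

import re


def parse_server_line(line):
    # Expected shape: "IP=Name (instance) # hostname,lat,lon (City/Region/CC)"
    ip, rest = line.split('=', 1)
    name, _, comment = rest.partition('#')
    name = name.strip()
    # "NTT (2)" -> service "NTT", instance "2"; plain "OpenDNS" has neither.
    m = re.match(r'(?P<service>.+?)(?: \((?P<instance>\d+)\))?$', name)
    service, instance = m.group('service'), m.group('instance')
    lat = lon = country_code = None
    coords = re.search(r',(-?[\d.]+),(-?[\d.]+)', comment)
    if coords:
        lat, lon = coords.group(1), coords.group(2)
    region = re.search(r'\(([^)]*)\)\s*$', comment.strip())
    if region:
        country_code = region.group(1).split('/')[-1]
    return {'name': name, 'service': service, 'ip': ip.strip(),
            'lon': lon, 'instance': instance, 'country_code': country_code,
            'lat': lat}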
9da74729bd2d48d2bf4e78f342c6eb04bc4f9c69
|
ideascube/conf/idc.py
|
ideascube/conf/idc.py
|
from .base import * # pragma: no flakes
from tzlocal import get_localzone
TIME_ZONE = get_localzone().zone
BACKUP_FORMAT = 'gztar'
STAFF_HOME_CARDS = [c for c in STAFF_HOME_CARDS # pragma: no flakes
if c['url'] in ['user_list', 'server:settings']]
BUILTIN_APP_CARDS = ['dropcube', 'blog', 'mediacenter']
EXTRA_APP_CARDS = ['kolibri']
|
Add configuration for ideascube hw
|
Add configuration for ideascube hw
|
Python
|
agpl-3.0
|
ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube
|
Add configuration for ideascube hw
|
from .base import * # pragma: no flakes
from tzlocal import get_localzone
TIME_ZONE = get_localzone().zone
BACKUP_FORMAT = 'gztar'
STAFF_HOME_CARDS = [c for c in STAFF_HOME_CARDS # pragma: no flakes
if c['url'] in ['user_list', 'server:settings']]
BUILTIN_APP_CARDS = ['dropcube', 'blog', 'mediacenter']
EXTRA_APP_CARDS = ['kolibri']
|
<commit_before><commit_msg>Add configuration for ideascube hw<commit_after>
|
from .base import * # pragma: no flakes
from tzlocal import get_localzone
TIME_ZONE = get_localzone().zone
BACKUP_FORMAT = 'gztar'
STAFF_HOME_CARDS = [c for c in STAFF_HOME_CARDS # pragma: no flakes
if c['url'] in ['user_list', 'server:settings']]
BUILTIN_APP_CARDS = ['dropcube', 'blog', 'mediacenter']
EXTRA_APP_CARDS = ['kolibri']
|
Add configuration for ideascube hwfrom .base import * # pragma: no flakes
from tzlocal import get_localzone
TIME_ZONE = get_localzone().zone
BACKUP_FORMAT = 'gztar'
STAFF_HOME_CARDS = [c for c in STAFF_HOME_CARDS # pragma: no flakes
if c['url'] in ['user_list', 'server:settings']]
BUILTIN_APP_CARDS = ['dropcube', 'blog', 'mediacenter']
EXTRA_APP_CARDS = ['kolibri']
|
<commit_before><commit_msg>Add configuration for ideascube hw<commit_after>from .base import * # pragma: no flakes
from tzlocal import get_localzone
TIME_ZONE = get_localzone().zone
BACKUP_FORMAT = 'gztar'
STAFF_HOME_CARDS = [c for c in STAFF_HOME_CARDS # pragma: no flakes
if c['url'] in ['user_list', 'server:settings']]
BUILTIN_APP_CARDS = ['dropcube', 'blog', 'mediacenter']
EXTRA_APP_CARDS = ['kolibri']
|
|
5d4e8bb02dd78f8ba6eb386478f25a6e60c80240
|
atoman/slowtests/test_filtering.py
|
atoman/slowtests/test_filtering.py
|
"""
Slow tests for filtering systems
"""
import os
import tempfile
import shutil
import numpy as np
from . import base
from ..gui import mainWindow
from ..system.lattice import Lattice
def path_to_file(path):
return os.path.join(os.path.dirname(__file__), "..", "..", "testing", path)
class TestFilteringKennyLattice(base.UsesQApplication):
"""
Test filtering a system
"""
def setUp(self):
"""
Set up the test
"""
super(TestFilteringKennyLattice, self).setUp()
# tmp dir
self.tmpLocation = tempfile.mkdtemp(prefix="atomanTest")
# main window
self.mw = mainWindow.MainWindow(None, testing=True)
self.mw.preferences.renderingForm.maxAtomsAutoRun = 0
self.mw.show()
# copy a lattice to tmpLocation
self.fn = os.path.join(self.tmpLocation, "testLattice.dat")
shutil.copy(path_to_file("kenny_lattice.dat"), self.fn)
# load Lattice
try:
self.mw.systemsDialog.load_system_form.readerForm.openFile(self.fn)
state = self.mw.mainToolbar.pipelineList[0].inputState
err = False
if not isinstance(state, Lattice):
err = True
elif state.NAtoms != 1140:
err = True
if err:
self.fail("Loading Lattice failed")
except:
self.fail("Loading Lattice failed")
def tearDown(self):
"""
Tidy up
"""
super(TestFilteringKennyLattice, self).tearDown()
# remove refs
self.fn = None
self.mw.close()
self.mw = None
# remove tmp dir
shutil.rmtree(self.tmpLocation)
def test_filterAtomID(self):
"""
GUI: filter atom ID
"""
# add the atom ID filter
pp = self.mw.mainToolbar.pipelineList[0]
flist = pp.filterLists[0]
flist.addFilter(filterName="Atom ID")
item = flist.listItems.item(0)
item.filterSettings.lineEdit.setText("104,1,4-7")
item.filterSettings.lineEdit.editingFinished.emit()
# run the filter
pp.runAllFilterLists()
# check the result
flt = flist.filterer
atomids = (104, 1, 4, 5, 6, 7)
self.assertEqual(len(flt.visibleAtoms), 6)
for i in xrange(6):
self.assertTrue(pp.inputState.atomID[flt.visibleAtoms[i]] in atomids)
|
Add file for running gui filtering tests.
|
Add file for running gui filtering tests.
|
Python
|
mit
|
chrisdjscott/Atoman,chrisdjscott/Atoman,chrisdjscott/Atoman,chrisdjscott/Atoman,chrisdjscott/Atoman
|
Add file for running gui filtering tests.
|
"""
Slow tests for filtering systems
"""
import os
import tempfile
import shutil
import numpy as np
from . import base
from ..gui import mainWindow
from ..system.lattice import Lattice
def path_to_file(path):
return os.path.join(os.path.dirname(__file__), "..", "..", "testing", path)
class TestFilteringKennyLattice(base.UsesQApplication):
"""
Test filtering a system
"""
def setUp(self):
"""
Set up the test
"""
super(TestFilteringKennyLattice, self).setUp()
# tmp dir
self.tmpLocation = tempfile.mkdtemp(prefix="atomanTest")
# main window
self.mw = mainWindow.MainWindow(None, testing=True)
self.mw.preferences.renderingForm.maxAtomsAutoRun = 0
self.mw.show()
# copy a lattice to tmpLocation
self.fn = os.path.join(self.tmpLocation, "testLattice.dat")
shutil.copy(path_to_file("kenny_lattice.dat"), self.fn)
# load Lattice
try:
self.mw.systemsDialog.load_system_form.readerForm.openFile(self.fn)
state = self.mw.mainToolbar.pipelineList[0].inputState
err = False
if not isinstance(state, Lattice):
err = True
elif state.NAtoms != 1140:
err = True
if err:
self.fail("Loading Lattice failed")
except:
self.fail("Loading Lattice failed")
def tearDown(self):
"""
Tidy up
"""
super(TestFilteringKennyLattice, self).tearDown()
# remove refs
self.fn = None
self.mw.close()
self.mw = None
# remove tmp dir
shutil.rmtree(self.tmpLocation)
def test_filterAtomID(self):
"""
GUI: filter atom ID
"""
# add the atom ID filter
pp = self.mw.mainToolbar.pipelineList[0]
flist = pp.filterLists[0]
flist.addFilter(filterName="Atom ID")
item = flist.listItems.item(0)
item.filterSettings.lineEdit.setText("104,1,4-7")
item.filterSettings.lineEdit.editingFinished.emit()
# run the filter
pp.runAllFilterLists()
# check the result
flt = flist.filterer
atomids = (104, 1, 4, 5, 6, 7)
self.assertEqual(len(flt.visibleAtoms), 6)
for i in xrange(6):
self.assertTrue(pp.inputState.atomID[flt.visibleAtoms[i]] in atomids)
|
<commit_before><commit_msg>Add file for running gui filtering tests.<commit_after>
|
"""
Slow tests for filtering systems
"""
import os
import tempfile
import shutil
import numpy as np
from . import base
from ..gui import mainWindow
from ..system.lattice import Lattice
def path_to_file(path):
return os.path.join(os.path.dirname(__file__), "..", "..", "testing", path)
class TestFilteringKennyLattice(base.UsesQApplication):
"""
Test filtering a system
"""
def setUp(self):
"""
Set up the test
"""
super(TestFilteringKennyLattice, self).setUp()
# tmp dir
self.tmpLocation = tempfile.mkdtemp(prefix="atomanTest")
# main window
self.mw = mainWindow.MainWindow(None, testing=True)
self.mw.preferences.renderingForm.maxAtomsAutoRun = 0
self.mw.show()
# copy a lattice to tmpLocation
self.fn = os.path.join(self.tmpLocation, "testLattice.dat")
shutil.copy(path_to_file("kenny_lattice.dat"), self.fn)
# load Lattice
try:
self.mw.systemsDialog.load_system_form.readerForm.openFile(self.fn)
state = self.mw.mainToolbar.pipelineList[0].inputState
err = False
if not isinstance(state, Lattice):
err = True
elif state.NAtoms != 1140:
err = True
if err:
self.fail("Loading Lattice failed")
except:
self.fail("Loading Lattice failed")
def tearDown(self):
"""
Tidy up
"""
super(TestFilteringKennyLattice, self).tearDown()
# remove refs
self.fn = None
self.mw.close()
self.mw = None
# remove tmp dir
shutil.rmtree(self.tmpLocation)
def test_filterAtomID(self):
"""
GUI: filter atom ID
"""
# add the atom ID filter
pp = self.mw.mainToolbar.pipelineList[0]
flist = pp.filterLists[0]
flist.addFilter(filterName="Atom ID")
item = flist.listItems.item(0)
item.filterSettings.lineEdit.setText("104,1,4-7")
item.filterSettings.lineEdit.editingFinished.emit()
# run the filter
pp.runAllFilterLists()
# check the result
flt = flist.filterer
atomids = (104, 1, 4, 5, 6, 7)
self.assertEqual(len(flt.visibleAtoms), 6)
for i in xrange(6):
self.assertTrue(pp.inputState.atomID[flt.visibleAtoms[i]] in atomids)
|
Add file for running gui filtering tests.
"""
Slow tests for filtering systems
"""
import os
import tempfile
import shutil
import numpy as np
from . import base
from ..gui import mainWindow
from ..system.lattice import Lattice
def path_to_file(path):
return os.path.join(os.path.dirname(__file__), "..", "..", "testing", path)
class TestFilteringKennyLattice(base.UsesQApplication):
"""
Test filtering a system
"""
def setUp(self):
"""
Set up the test
"""
super(TestFilteringKennyLattice, self).setUp()
# tmp dir
self.tmpLocation = tempfile.mkdtemp(prefix="atomanTest")
# main window
self.mw = mainWindow.MainWindow(None, testing=True)
self.mw.preferences.renderingForm.maxAtomsAutoRun = 0
self.mw.show()
# copy a lattice to tmpLocation
self.fn = os.path.join(self.tmpLocation, "testLattice.dat")
shutil.copy(path_to_file("kenny_lattice.dat"), self.fn)
# load Lattice
try:
self.mw.systemsDialog.load_system_form.readerForm.openFile(self.fn)
state = self.mw.mainToolbar.pipelineList[0].inputState
err = False
if not isinstance(state, Lattice):
err = True
elif state.NAtoms != 1140:
err = True
if err:
self.fail("Loading Lattice failed")
except:
self.fail("Loading Lattice failed")
def tearDown(self):
"""
Tidy up
"""
super(TestFilteringKennyLattice, self).tearDown()
# remove refs
self.fn = None
self.mw.close()
self.mw = None
# remove tmp dir
shutil.rmtree(self.tmpLocation)
def test_filterAtomID(self):
"""
GUI: filter atom ID
"""
# add the atom ID filter
pp = self.mw.mainToolbar.pipelineList[0]
flist = pp.filterLists[0]
flist.addFilter(filterName="Atom ID")
item = flist.listItems.item(0)
item.filterSettings.lineEdit.setText("104,1,4-7")
item.filterSettings.lineEdit.editingFinished.emit()
# run the filter
pp.runAllFilterLists()
# check the result
flt = flist.filterer
atomids = (104, 1, 4, 5, 6, 7)
self.assertEqual(len(flt.visibleAtoms), 6)
for i in xrange(6):
self.assertTrue(pp.inputState.atomID[flt.visibleAtoms[i]] in atomids)
|
<commit_before><commit_msg>Add file for running gui filtering tests.<commit_after>
"""
Slow tests for filtering systems
"""
import os
import tempfile
import shutil
import numpy as np
from . import base
from ..gui import mainWindow
from ..system.lattice import Lattice
def path_to_file(path):
return os.path.join(os.path.dirname(__file__), "..", "..", "testing", path)
class TestFilteringKennyLattice(base.UsesQApplication):
"""
Test filtering a system
"""
def setUp(self):
"""
Set up the test
"""
super(TestFilteringKennyLattice, self).setUp()
# tmp dir
self.tmpLocation = tempfile.mkdtemp(prefix="atomanTest")
# main window
self.mw = mainWindow.MainWindow(None, testing=True)
self.mw.preferences.renderingForm.maxAtomsAutoRun = 0
self.mw.show()
# copy a lattice to tmpLocation
self.fn = os.path.join(self.tmpLocation, "testLattice.dat")
shutil.copy(path_to_file("kenny_lattice.dat"), self.fn)
# load Lattice
try:
self.mw.systemsDialog.load_system_form.readerForm.openFile(self.fn)
state = self.mw.mainToolbar.pipelineList[0].inputState
err = False
if not isinstance(state, Lattice):
err = True
elif state.NAtoms != 1140:
err = True
if err:
self.fail("Loading Lattice failed")
except:
self.fail("Loading Lattice failed")
def tearDown(self):
"""
Tidy up
"""
super(TestFilteringKennyLattice, self).tearDown()
# remove refs
self.fn = None
self.mw.close()
self.mw = None
# remove tmp dir
shutil.rmtree(self.tmpLocation)
def test_filterAtomID(self):
"""
GUI: filter atom ID
"""
# add the atom ID filter
pp = self.mw.mainToolbar.pipelineList[0]
flist = pp.filterLists[0]
flist.addFilter(filterName="Atom ID")
item = flist.listItems.item(0)
item.filterSettings.lineEdit.setText("104,1,4-7")
item.filterSettings.lineEdit.editingFinished.emit()
# run the filter
pp.runAllFilterLists()
# check the result
flt = flist.filterer
atomids = (104, 1, 4, 5, 6, 7)
self.assertEqual(len(flt.visibleAtoms), 6)
for i in xrange(6):
self.assertTrue(pp.inputState.atomID[flt.visibleAtoms[i]] in atomids)
|
|
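The GUI test above types the specifier "104,1,4-7" into the Atom ID filter and expects exactly those six atoms to stay visible. A small illustrative helper for parsing that kind of specifier (not atoman's own code):

def parse_id_spec(spec):
    # Turn "104,1,4-7" into {1, 4, 5, 6, 7, 104}: comma-separated entries,
    # each either a single ID or an inclusive lo-hi range.
    ids = set()
    for part in spec.split(","):
        part = part.strip()
        if "-" in part:
            lo, hi = (int(x) for x in part.split("-", 1))
            ids.update(range(lo, hi + 1))
        else:
            ids.add(int(part))
    return ids


assert parse_id_spec("104,1,4-7") == {1, 4, 5, 6, 7, 104}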
c44eb565d167f3c91350da825005067784428440
|
bin/debug/extract_timeline_for_day_range_and_user.py
|
bin/debug/extract_timeline_for_day_range_and_user.py
|
# Exports all data for the particular user for the particular day
# Used for debugging issues with trip and section generation
import sys
import logging
logging.basicConfig(level=logging.DEBUG)
import uuid
import datetime as pydt
import json
import bson.json_util as bju
import arrow
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.timeseries.timequery as estt
def export_timeline(user_id_str, start_day_str, end_day_str, file_name):
logging.info("Extracting timeline for user %s day %s -> %s and saving to file %s" %
(user_id_str, start_day_str, end_day_str, file_name))
# day_dt = pydt.datetime.strptime(day_str, "%Y-%m-%d").date()
start_day_ts = arrow.get(start_day_str).timestamp
end_day_ts = arrow.get(end_day_str).timestamp
logging.debug("start_day_ts = %s (%s), end_day_ts = %s (%s)" %
(start_day_ts, arrow.get(start_day_ts),
end_day_ts, arrow.get(end_day_ts)))
ts = esta.TimeSeries.get_time_series(uuid.UUID(user_id_str))
loc_time_query = estt.TimeQuery("data.ts", start_day_ts, end_day_ts)
loc_entry_list = list(ts.find_entries(key_list=None, time_query=loc_time_query))
trip_time_query = estt.TimeQuery("data.start_ts", start_day_ts, end_day_ts)
trip_entry_list = list(ts.find_entries(key_list=None, time_query=trip_time_query))
place_time_query = estt.TimeQuery("data.enter_ts", start_day_ts, end_day_ts)
place_entry_list = list(ts.find_entries(key_list=None, time_query=place_time_query))
logging.info("Found %d loc entries, %d trip-like entries, %d place-like entries" %
(len(loc_entry_list), len(trip_entry_list), len(place_entry_list)))
json.dump(loc_entry_list + trip_entry_list + place_entry_list,
open(file_name, "w"), default=bju.default, allow_nan=False, indent=4)
if __name__ == '__main__':
if len(sys.argv) != 5:
print "Usage: %s <user> <start_day> <end_day> <file>" % (sys.argv[0])
else:
export_timeline(user_id_str=sys.argv[1], start_day_str=sys.argv[2], end_day_str=sys.argv[3], file_name=sys.argv[4])
|
Add a new script that downloads data for a date range
|
Add a new script that downloads data for a date range
Because this is a date range, we can no longer use localdate (which is a
filtering technique).
Instead we convert to a timestamp and use timestamps instead.
We also query using the timeseries, so we get both raw and analysed data.
We need to include multiple time queries to read location data using `data.ts`,
trip-like data using `data.start_ts` and place-like data using `data.enter_ts`.
|
Python
|
bsd-3-clause
|
shankari/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server,sunil07t/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server,sunil07t/e-mission-server,e-mission/e-mission-server,sunil07t/e-mission-server,sunil07t/e-mission-server
|
Add a new script that downloads data for a date range
Because this is a date range, we can no longer use localdate (which is a
filtering technique).
Instead we convert to a timestamp and use timestamps instead.
We also query using the timeseries, so we get both raw and analysed data.
We need to include multiple time queries to read location data using `data.ts`,
trip-like data using `data.start_ts` and place-like data using `data.enter_ts`.
|
# Exports all data for the particular user for the particular day
# Used for debugging issues with trip and section generation
import sys
import logging
logging.basicConfig(level=logging.DEBUG)
import uuid
import datetime as pydt
import json
import bson.json_util as bju
import arrow
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.timeseries.timequery as estt
def export_timeline(user_id_str, start_day_str, end_day_str, file_name):
logging.info("Extracting timeline for user %s day %s -> %s and saving to file %s" %
(user_id_str, start_day_str, end_day_str, file_name))
# day_dt = pydt.datetime.strptime(day_str, "%Y-%m-%d").date()
start_day_ts = arrow.get(start_day_str).timestamp
end_day_ts = arrow.get(end_day_str).timestamp
logging.debug("start_day_ts = %s (%s), end_day_ts = %s (%s)" %
(start_day_ts, arrow.get(start_day_ts),
end_day_ts, arrow.get(end_day_ts)))
ts = esta.TimeSeries.get_time_series(uuid.UUID(user_id_str))
loc_time_query = estt.TimeQuery("data.ts", start_day_ts, end_day_ts)
loc_entry_list = list(ts.find_entries(key_list=None, time_query=loc_time_query))
trip_time_query = estt.TimeQuery("data.start_ts", start_day_ts, end_day_ts)
trip_entry_list = list(ts.find_entries(key_list=None, time_query=trip_time_query))
place_time_query = estt.TimeQuery("data.enter_ts", start_day_ts, end_day_ts)
place_entry_list = list(ts.find_entries(key_list=None, time_query=place_time_query))
logging.info("Found %d loc entries, %d trip-like entries, %d place-like entries" %
(len(loc_entry_list), len(trip_entry_list), len(place_entry_list)))
json.dump(loc_entry_list + trip_entry_list + place_entry_list,
open(file_name, "w"), default=bju.default, allow_nan=False, indent=4)
if __name__ == '__main__':
if len(sys.argv) != 5:
print "Usage: %s <user> <start_day> <end_day> <file>" % (sys.argv[0])
else:
export_timeline(user_id_str=sys.argv[1], start_day_str=sys.argv[2], end_day_str=sys.argv[3], file_name=sys.argv[4])
|
<commit_before><commit_msg>Add a new script that downloads data for a date range
Because this is a date range, we can no longer use localdate (which is a
filtering technique).
Instead we convert to a timestamp and use timestamps instead.
We also query using the timeseries, so we get both raw and analysed data.
We need to include multiple time queries to read location data using `data.ts`,
trip-like data using `data.start_ts` and place-like data using `data.enter_ts`.<commit_after>
|
# Exports all data for the particular user for the particular day
# Used for debugging issues with trip and section generation
import sys
import logging
logging.basicConfig(level=logging.DEBUG)
import uuid
import datetime as pydt
import json
import bson.json_util as bju
import arrow
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.timeseries.timequery as estt
def export_timeline(user_id_str, start_day_str, end_day_str, file_name):
logging.info("Extracting timeline for user %s day %s -> %s and saving to file %s" %
(user_id_str, start_day_str, end_day_str, file_name))
# day_dt = pydt.datetime.strptime(day_str, "%Y-%m-%d").date()
start_day_ts = arrow.get(start_day_str).timestamp
end_day_ts = arrow.get(end_day_str).timestamp
logging.debug("start_day_ts = %s (%s), end_day_ts = %s (%s)" %
(start_day_ts, arrow.get(start_day_ts),
end_day_ts, arrow.get(end_day_ts)))
ts = esta.TimeSeries.get_time_series(uuid.UUID(user_id_str))
loc_time_query = estt.TimeQuery("data.ts", start_day_ts, end_day_ts)
loc_entry_list = list(ts.find_entries(key_list=None, time_query=loc_time_query))
trip_time_query = estt.TimeQuery("data.start_ts", start_day_ts, end_day_ts)
trip_entry_list = list(ts.find_entries(key_list=None, time_query=trip_time_query))
place_time_query = estt.TimeQuery("data.enter_ts", start_day_ts, end_day_ts)
place_entry_list = list(ts.find_entries(key_list=None, time_query=place_time_query))
logging.info("Found %d loc entries, %d trip-like entries, %d place-like entries" %
(len(loc_entry_list), len(trip_entry_list), len(place_entry_list)))
json.dump(loc_entry_list + trip_entry_list + place_entry_list,
open(file_name, "w"), default=bju.default, allow_nan=False, indent=4)
if __name__ == '__main__':
if len(sys.argv) != 5:
print "Usage: %s <user> <start_day> <end_day> <file>" % (sys.argv[0])
else:
export_timeline(user_id_str=sys.argv[1], start_day_str=sys.argv[2], end_day_str=sys.argv[3], file_name=sys.argv[4])
|
Add a new script that downloads data for a date range
Because this is a date range, we can no longer use localdate (which is a
filtering technique).
Instead we convert to a timestamp and use timestamps instead.
We also query using the timeseries, so we get both raw and analysed data.
We need to include multiple time queries to read location data using `data.ts`,
trip-like data using `data.start_ts` and place-like data using `data.enter_ts`.# Exports all data for the particular user for the particular day
# Used for debugging issues with trip and section generation
import sys
import logging
logging.basicConfig(level=logging.DEBUG)
import uuid
import datetime as pydt
import json
import bson.json_util as bju
import arrow
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.timeseries.timequery as estt
def export_timeline(user_id_str, start_day_str, end_day_str, file_name):
logging.info("Extracting timeline for user %s day %s -> %s and saving to file %s" %
(user_id_str, start_day_str, end_day_str, file_name))
# day_dt = pydt.datetime.strptime(day_str, "%Y-%m-%d").date()
start_day_ts = arrow.get(start_day_str).timestamp
end_day_ts = arrow.get(end_day_str).timestamp
logging.debug("start_day_ts = %s (%s), end_day_ts = %s (%s)" %
(start_day_ts, arrow.get(start_day_ts),
end_day_ts, arrow.get(end_day_ts)))
ts = esta.TimeSeries.get_time_series(uuid.UUID(user_id_str))
loc_time_query = estt.TimeQuery("data.ts", start_day_ts, end_day_ts)
loc_entry_list = list(ts.find_entries(key_list=None, time_query=loc_time_query))
trip_time_query = estt.TimeQuery("data.start_ts", start_day_ts, end_day_ts)
trip_entry_list = list(ts.find_entries(key_list=None, time_query=trip_time_query))
place_time_query = estt.TimeQuery("data.enter_ts", start_day_ts, end_day_ts)
place_entry_list = list(ts.find_entries(key_list=None, time_query=place_time_query))
logging.info("Found %d loc entries, %d trip-like entries, %d place-like entries" %
(len(loc_entry_list), len(trip_entry_list), len(place_entry_list)))
json.dump(loc_entry_list + trip_entry_list + place_entry_list,
open(file_name, "w"), default=bju.default, allow_nan=False, indent=4)
if __name__ == '__main__':
if len(sys.argv) != 5:
print "Usage: %s <user> <start_day> <end_day> <file>" % (sys.argv[0])
else:
export_timeline(user_id_str=sys.argv[1], start_day_str=sys.argv[2], end_day_str=sys.argv[3], file_name=sys.argv[4])
|
<commit_before><commit_msg>Add a new script that downloads data for a date range
Because this is a date range, we can no longer use localdate (which is a
filtering technique).
Instead we convert to a timestamp and use timestamps instead.
We also query using the timeseries, so we get both raw and analysed data.
We need to include multiple time queries to read location data using `data.ts`,
trip-like data using `data.start_ts` and place-like data using `data.enter_ts`.<commit_after># Exports all data for the particular user for the particular day
# Used for debugging issues with trip and section generation
import sys
import logging
logging.basicConfig(level=logging.DEBUG)
import uuid
import datetime as pydt
import json
import bson.json_util as bju
import arrow
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.timeseries.timequery as estt
def export_timeline(user_id_str, start_day_str, end_day_str, file_name):
logging.info("Extracting timeline for user %s day %s -> %s and saving to file %s" %
(user_id_str, start_day_str, end_day_str, file_name))
# day_dt = pydt.datetime.strptime(day_str, "%Y-%m-%d").date()
start_day_ts = arrow.get(start_day_str).timestamp
end_day_ts = arrow.get(end_day_str).timestamp
logging.debug("start_day_ts = %s (%s), end_day_ts = %s (%s)" %
(start_day_ts, arrow.get(start_day_ts),
end_day_ts, arrow.get(end_day_ts)))
ts = esta.TimeSeries.get_time_series(uuid.UUID(user_id_str))
loc_time_query = estt.TimeQuery("data.ts", start_day_ts, end_day_ts)
loc_entry_list = list(ts.find_entries(key_list=None, time_query=loc_time_query))
trip_time_query = estt.TimeQuery("data.start_ts", start_day_ts, end_day_ts)
trip_entry_list = list(ts.find_entries(key_list=None, time_query=trip_time_query))
place_time_query = estt.TimeQuery("data.enter_ts", start_day_ts, end_day_ts)
place_entry_list = list(ts.find_entries(key_list=None, time_query=place_time_query))
logging.info("Found %d loc entries, %d trip-like entries, %d place-like entries" %
(len(loc_entry_list), len(trip_entry_list), len(place_entry_list)))
json.dump(loc_entry_list + trip_entry_list + place_entry_list,
open(file_name, "w"), default=bju.default, allow_nan=False, indent=4)
if __name__ == '__main__':
if len(sys.argv) != 5:
print "Usage: %s <user> <start_day> <end_day> <file>" % (sys.argv[0])
else:
export_timeline(user_id_str=sys.argv[1], start_day_str=sys.argv[2], end_day_str=sys.argv[3], file_name=sys.argv[4])
|
|
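The commit message above explains the switch from localdate filtering to timestamp ranges. A short sketch of that conversion, mirroring the script's arrow usage (property-style .timestamp, as in older arrow releases); the dates and the loop below are illustrative:

import arrow

start_ts = arrow.get("2016-01-01").timestamp  # epoch seconds for the start day
end_ts = arrow.get("2016-01-08").timestamp    # epoch seconds for the end day

# Locations, trip-like entries and place-like entries each keep their time
# under a different key, hence the three separate TimeQuery objects in the
# script above.
for field in ("data.ts", "data.start_ts", "data.enter_ts"):
    print("would query %s between %s and %s" % (field, start_ts, end_ts))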
6cc0dba8901f97a4cc5103c57fbeba9d46a7a514
|
trac/api.py
|
trac/api.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2016 Edgewall Software
# Copyright (C) 2003-2016 Jonas Borgström <jonas@edgewall.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from trac.core import Interface
class ISystemInfoProvider(Interface):
"""Provider of system information, displayed in the "About Trac"
page and in internal error reports.
"""
def get_system_info():
"""Yield a sequence of `(name, version)` tuples describing the
name and version information of external packages used by a
component.
"""
class IEnvironmentSetupParticipant(Interface):
"""Extension point interface for components that need to participate in
the creation and upgrading of Trac environments, for example to create
additional database tables.
Please note that `IEnvironmentSetupParticipant` instances are called in
arbitrary order. If your upgrades must be ordered consistently, please
implement the ordering in a single `IEnvironmentSetupParticipant`. See
the database upgrade infrastructure in Trac core for an example.
"""
def environment_created():
"""Called when a new Trac environment is created."""
def environment_needs_upgrade():
"""Called when Trac checks whether the environment needs to be
upgraded.
Should return `True` if this participant needs an upgrade to
be performed, `False` otherwise.
"""
def upgrade_environment():
"""Actually perform an environment upgrade.
Implementations of this method don't need to commit any
database transactions. This is done implicitly for each
participant if the upgrade succeeds without an error being
raised.
However, if the `upgrade_environment` consists of small,
restartable, steps of upgrade, it can decide to commit on its
own after each successful step.
"""
|
Add missing file from r15148
|
1.3.1dev: Add missing file from r15148
Refs #12496.
git-svn-id: 0d96b0c1a6983ccc08b3732614f4d6bfcf9cbb42@15149 af82e41b-90c4-0310-8c96-b1721e28e2e2
|
Python
|
bsd-3-clause
|
rbaumg/trac,rbaumg/trac,rbaumg/trac,rbaumg/trac
|
1.3.1dev: Add missing file from r15148
Refs #12496.
git-svn-id: 0d96b0c1a6983ccc08b3732614f4d6bfcf9cbb42@15149 af82e41b-90c4-0310-8c96-b1721e28e2e2
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2016 Edgewall Software
# Copyright (C) 2003-2016 Jonas Borgström <jonas@edgewall.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from trac.core import Interface
class ISystemInfoProvider(Interface):
"""Provider of system information, displayed in the "About Trac"
page and in internal error reports.
"""
def get_system_info():
"""Yield a sequence of `(name, version)` tuples describing the
name and version information of external packages used by a
component.
"""
class IEnvironmentSetupParticipant(Interface):
"""Extension point interface for components that need to participate in
the creation and upgrading of Trac environments, for example to create
additional database tables.
Please note that `IEnvironmentSetupParticipant` instances are called in
arbitrary order. If your upgrades must be ordered consistently, please
implement the ordering in a single `IEnvironmentSetupParticipant`. See
the database upgrade infrastructure in Trac core for an example.
"""
def environment_created():
"""Called when a new Trac environment is created."""
def environment_needs_upgrade():
"""Called when Trac checks whether the environment needs to be
upgraded.
Should return `True` if this participant needs an upgrade to
be performed, `False` otherwise.
"""
def upgrade_environment():
"""Actually perform an environment upgrade.
Implementations of this method don't need to commit any
database transactions. This is done implicitly for each
participant if the upgrade succeeds without an error being
raised.
However, if the `upgrade_environment` consists of small,
restartable, steps of upgrade, it can decide to commit on its
own after each successful step.
"""
|
<commit_before><commit_msg>1.3.1dev: Add missing file from r15148
Refs #12496.
git-svn-id: 0d96b0c1a6983ccc08b3732614f4d6bfcf9cbb42@15149 af82e41b-90c4-0310-8c96-b1721e28e2e2<commit_after>
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2016 Edgewall Software
# Copyright (C) 2003-2016 Jonas Borgström <jonas@edgewall.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from trac.core import Interface
class ISystemInfoProvider(Interface):
"""Provider of system information, displayed in the "About Trac"
page and in internal error reports.
"""
def get_system_info():
"""Yield a sequence of `(name, version)` tuples describing the
name and version information of external packages used by a
component.
"""
class IEnvironmentSetupParticipant(Interface):
"""Extension point interface for components that need to participate in
the creation and upgrading of Trac environments, for example to create
additional database tables.
Please note that `IEnvironmentSetupParticipant` instances are called in
arbitrary order. If your upgrades must be ordered consistently, please
implement the ordering in a single `IEnvironmentSetupParticipant`. See
the database upgrade infrastructure in Trac core for an example.
"""
def environment_created():
"""Called when a new Trac environment is created."""
def environment_needs_upgrade():
"""Called when Trac checks whether the environment needs to be
upgraded.
Should return `True` if this participant needs an upgrade to
be performed, `False` otherwise.
"""
def upgrade_environment():
"""Actually perform an environment upgrade.
Implementations of this method don't need to commit any
database transactions. This is done implicitly for each
participant if the upgrade succeeds without an error being
raised.
However, if the `upgrade_environment` consists of small,
restartable, steps of upgrade, it can decide to commit on its
own after each successful step.
"""
|
1.3.1dev: Add missing file from r15148
Refs #12496.
git-svn-id: 0d96b0c1a6983ccc08b3732614f4d6bfcf9cbb42@15149 af82e41b-90c4-0310-8c96-b1721e28e2e2# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2016 Edgewall Software
# Copyright (C) 2003-2016 Jonas Borgström <jonas@edgewall.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from trac.core import Interface
class ISystemInfoProvider(Interface):
"""Provider of system information, displayed in the "About Trac"
page and in internal error reports.
"""
def get_system_info():
"""Yield a sequence of `(name, version)` tuples describing the
name and version information of external packages used by a
component.
"""
class IEnvironmentSetupParticipant(Interface):
"""Extension point interface for components that need to participate in
the creation and upgrading of Trac environments, for example to create
additional database tables.
Please note that `IEnvironmentSetupParticipant` instances are called in
arbitrary order. If your upgrades must be ordered consistently, please
implement the ordering in a single `IEnvironmentSetupParticipant`. See
the database upgrade infrastructure in Trac core for an example.
"""
def environment_created():
"""Called when a new Trac environment is created."""
def environment_needs_upgrade():
"""Called when Trac checks whether the environment needs to be
upgraded.
Should return `True` if this participant needs an upgrade to
be performed, `False` otherwise.
"""
def upgrade_environment():
"""Actually perform an environment upgrade.
Implementations of this method don't need to commit any
database transactions. This is done implicitly for each
participant if the upgrade succeeds without an error being
raised.
However, if the `upgrade_environment` consists of small,
restartable, steps of upgrade, it can decide to commit on its
own after each successful step.
"""
|
<commit_before><commit_msg>1.3.1dev: Add missing file from r15148
Refs #12496.
git-svn-id: 0d96b0c1a6983ccc08b3732614f4d6bfcf9cbb42@15149 af82e41b-90c4-0310-8c96-b1721e28e2e2<commit_after># -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2016 Edgewall Software
# Copyright (C) 2003-2016 Jonas Borgström <jonas@edgewall.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from trac.core import Interface
class ISystemInfoProvider(Interface):
"""Provider of system information, displayed in the "About Trac"
page and in internal error reports.
"""
def get_system_info():
"""Yield a sequence of `(name, version)` tuples describing the
name and version information of external packages used by a
component.
"""
class IEnvironmentSetupParticipant(Interface):
"""Extension point interface for components that need to participate in
the creation and upgrading of Trac environments, for example to create
additional database tables.
Please note that `IEnvironmentSetupParticipant` instances are called in
arbitrary order. If your upgrades must be ordered consistently, please
implement the ordering in a single `IEnvironmentSetupParticipant`. See
the database upgrade infrastructure in Trac core for an example.
"""
def environment_created():
"""Called when a new Trac environment is created."""
def environment_needs_upgrade():
"""Called when Trac checks whether the environment needs to be
upgraded.
Should return `True` if this participant needs an upgrade to
be performed, `False` otherwise.
"""
def upgrade_environment():
"""Actually perform an environment upgrade.
Implementations of this method don't need to commit any
database transactions. This is done implicitly for each
participant if the upgrade succeeds without an error being
raised.
However, if the `upgrade_environment` consists of small,
restartable, steps of upgrade, it can decide to commit on its
own after each successful step.
"""
|
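The `IEnvironmentSetupParticipant` interface in the record above only declares the three hooks; as a rough, hypothetical sketch (component, table and version names are assumptions, not Trac code), a plugin that manages its own schema might implement it along these lines:

from trac.core import Component, implements
from trac.env import IEnvironmentSetupParticipant

DB_VERSION = 2  # hypothetical schema version owned by this plugin

class MyPluginSetup(Component):
    implements(IEnvironmentSetupParticipant)

    def environment_created(self):
        # A fresh environment can be brought straight to the current schema.
        self.upgrade_environment()

    def environment_needs_upgrade(self):
        # Compare the version stored in Trac's 'system' table with the code's.
        with self.env.db_query as db:
            rows = db("SELECT value FROM system WHERE name='myplugin_version'")
        return (not rows) or int(rows[0][0]) < DB_VERSION

    def upgrade_environment(self):
        # No explicit commit needed: Trac commits if no error is raised.
        with self.env.db_transaction as db:
            db("""CREATE TABLE IF NOT EXISTS myplugin_data (
                      id INTEGER PRIMARY KEY, payload TEXT)""")
            db("DELETE FROM system WHERE name='myplugin_version'")
            db("INSERT INTO system (name, value) VALUES ('myplugin_version', %s)",
               (str(DB_VERSION),))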
|
c2d4c53b384a4f621ac69a813cf5d1090d6e61bd
|
members/crm/migrations/0028_auto_20200114_0921.py
|
members/crm/migrations/0028_auto_20200114_0921.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2020-01-14 09:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crm', '0027_auto_20190920_0824'),
]
operations = [
migrations.AlterField(
model_name='membershipapplication',
name='institution_type',
field=models.CharField(blank=True, choices=[(b'secondary-ed', b'Primary and secondary (K-12)'), (b'college', b'Community, technical, or vocational college'), (b'higher-ed', b'University'), (b'non-accredited', b'Informal non-accredited education'), (b'lifelong', b'Lifelong learning'), (b'consortium', b'Consortia'), (b'initiative', b'Open initiatives or special project'), (b'commercial', b'Corporate enterprise'), (b'npo', b'Non-profits, NGO\xe2\x80\x99s, IGO'), (b'cultural', b'Cultural organization'), (b'gov', b'Government')], default=b'', max_length=25, verbose_name=b'Institution Category'),
),
migrations.AlterField(
model_name='organization',
name='institution_type',
field=models.CharField(blank=True, choices=[(b'secondary-ed', b'Primary and secondary (K-12)'), (b'college', b'Community, technical, or vocational college'), (b'higher-ed', b'University'), (b'non-accredited', b'Informal non-accredited education'), (b'lifelong', b'Lifelong learning'), (b'consortium', b'Consortia'), (b'initiative', b'Open initiatives or special project'), (b'commercial', b'Corporate enterprise'), (b'npo', b'Non-profits, NGO\xe2\x80\x99s, IGO'), (b'cultural', b'Cultural organization'), (b'gov', b'Government')], default=b'', max_length=25),
),
]
|
Add Consortia Org as new membership type
|
Add Consortia Org as new membership type
|
Python
|
mit
|
ocwc/ocwc-members,ocwc/ocwc-members,ocwc/ocwc-members,ocwc/ocwc-members
|
Add Consortia Org as new membership type
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2020-01-14 09:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crm', '0027_auto_20190920_0824'),
]
operations = [
migrations.AlterField(
model_name='membershipapplication',
name='institution_type',
field=models.CharField(blank=True, choices=[(b'secondary-ed', b'Primary and secondary (K-12)'), (b'college', b'Community, technical, or vocational college'), (b'higher-ed', b'University'), (b'non-accredited', b'Informal non-accredited education'), (b'lifelong', b'Lifelong learning'), (b'consortium', b'Consortia'), (b'initiative', b'Open initiatives or special project'), (b'commercial', b'Corporate enterprise'), (b'npo', b'Non-profits, NGO\xe2\x80\x99s, IGO'), (b'cultural', b'Cultural organization'), (b'gov', b'Government')], default=b'', max_length=25, verbose_name=b'Institution Category'),
),
migrations.AlterField(
model_name='organization',
name='institution_type',
field=models.CharField(blank=True, choices=[(b'secondary-ed', b'Primary and secondary (K-12)'), (b'college', b'Community, technical, or vocational college'), (b'higher-ed', b'University'), (b'non-accredited', b'Informal non-accredited education'), (b'lifelong', b'Lifelong learning'), (b'consortium', b'Consortia'), (b'initiative', b'Open initiatives or special project'), (b'commercial', b'Corporate enterprise'), (b'npo', b'Non-profits, NGO\xe2\x80\x99s, IGO'), (b'cultural', b'Cultural organization'), (b'gov', b'Government')], default=b'', max_length=25),
),
]
|
<commit_before><commit_msg>Add Consortia Org as new membership type<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2020-01-14 09:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crm', '0027_auto_20190920_0824'),
]
operations = [
migrations.AlterField(
model_name='membershipapplication',
name='institution_type',
field=models.CharField(blank=True, choices=[(b'secondary-ed', b'Primary and secondary (K-12)'), (b'college', b'Community, technical, or vocational college'), (b'higher-ed', b'University'), (b'non-accredited', b'Informal non-accredited education'), (b'lifelong', b'Lifelong learning'), (b'consortium', b'Consortia'), (b'initiative', b'Open initiatives or special project'), (b'commercial', b'Corporate enterprise'), (b'npo', b'Non-profits, NGO\xe2\x80\x99s, IGO'), (b'cultural', b'Cultural organization'), (b'gov', b'Government')], default=b'', max_length=25, verbose_name=b'Institution Category'),
),
migrations.AlterField(
model_name='organization',
name='institution_type',
field=models.CharField(blank=True, choices=[(b'secondary-ed', b'Primary and secondary (K-12)'), (b'college', b'Community, technical, or vocational college'), (b'higher-ed', b'University'), (b'non-accredited', b'Informal non-accredited education'), (b'lifelong', b'Lifelong learning'), (b'consortium', b'Consortia'), (b'initiative', b'Open initiatives or special project'), (b'commercial', b'Corporate enterprise'), (b'npo', b'Non-profits, NGO\xe2\x80\x99s, IGO'), (b'cultural', b'Cultural organization'), (b'gov', b'Government')], default=b'', max_length=25),
),
]
|
Add Consortia Org as new membership type# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2020-01-14 09:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crm', '0027_auto_20190920_0824'),
]
operations = [
migrations.AlterField(
model_name='membershipapplication',
name='institution_type',
field=models.CharField(blank=True, choices=[(b'secondary-ed', b'Primary and secondary (K-12)'), (b'college', b'Community, technical, or vocational college'), (b'higher-ed', b'University'), (b'non-accredited', b'Informal non-accredited education'), (b'lifelong', b'Lifelong learning'), (b'consortium', b'Consortia'), (b'initiative', b'Open initiatives or special project'), (b'commercial', b'Corporate enterprise'), (b'npo', b'Non-profits, NGO\xe2\x80\x99s, IGO'), (b'cultural', b'Cultural organization'), (b'gov', b'Government')], default=b'', max_length=25, verbose_name=b'Institution Category'),
),
migrations.AlterField(
model_name='organization',
name='institution_type',
field=models.CharField(blank=True, choices=[(b'secondary-ed', b'Primary and secondary (K-12)'), (b'college', b'Community, technical, or vocational college'), (b'higher-ed', b'University'), (b'non-accredited', b'Informal non-accredited education'), (b'lifelong', b'Lifelong learning'), (b'consortium', b'Consortia'), (b'initiative', b'Open initiatives or special project'), (b'commercial', b'Corporate enterprise'), (b'npo', b'Non-profits, NGO\xe2\x80\x99s, IGO'), (b'cultural', b'Cultural organization'), (b'gov', b'Government')], default=b'', max_length=25),
),
]
|
<commit_before><commit_msg>Add Consortia Org as new membership type<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2020-01-14 09:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crm', '0027_auto_20190920_0824'),
]
operations = [
migrations.AlterField(
model_name='membershipapplication',
name='institution_type',
field=models.CharField(blank=True, choices=[(b'secondary-ed', b'Primary and secondary (K-12)'), (b'college', b'Community, technical, or vocational college'), (b'higher-ed', b'University'), (b'non-accredited', b'Informal non-accredited education'), (b'lifelong', b'Lifelong learning'), (b'consortium', b'Consortia'), (b'initiative', b'Open initiatives or special project'), (b'commercial', b'Corporate enterprise'), (b'npo', b'Non-profits, NGO\xe2\x80\x99s, IGO'), (b'cultural', b'Cultural organization'), (b'gov', b'Government')], default=b'', max_length=25, verbose_name=b'Institution Category'),
),
migrations.AlterField(
model_name='organization',
name='institution_type',
field=models.CharField(blank=True, choices=[(b'secondary-ed', b'Primary and secondary (K-12)'), (b'college', b'Community, technical, or vocational college'), (b'higher-ed', b'University'), (b'non-accredited', b'Informal non-accredited education'), (b'lifelong', b'Lifelong learning'), (b'consortium', b'Consortia'), (b'initiative', b'Open initiatives or special project'), (b'commercial', b'Corporate enterprise'), (b'npo', b'Non-profits, NGO\xe2\x80\x99s, IGO'), (b'cultural', b'Cultural organization'), (b'gov', b'Government')], default=b'', max_length=25),
),
]
|
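The migration in this record only widens the stored choices; a hypothetical sketch of the model-side definition it mirrors (field and constant names are assumptions, only the 'consortium' entry is new) would look roughly like:

from django.db import models

INSTITUTION_TYPE_CHOICES = (
    ('secondary-ed', 'Primary and secondary (K-12)'),
    ('college', 'Community, technical, or vocational college'),
    ('higher-ed', 'University'),
    ('non-accredited', 'Informal non-accredited education'),
    ('lifelong', 'Lifelong learning'),
    ('consortium', 'Consortia'),  # the newly added membership type
    ('initiative', 'Open initiatives or special project'),
    ('commercial', 'Corporate enterprise'),
    ('npo', "Non-profits, NGO's, IGO"),
    ('cultural', 'Cultural organization'),
    ('gov', 'Government'),
)

class Organization(models.Model):
    institution_type = models.CharField(
        max_length=25, blank=True, default='',
        choices=INSTITUTION_TYPE_CHOICES)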
|
b9609a25c80d3f0c28c173bf31fb7968c0474fbe
|
pelops/datasets/featuredataset.py
|
pelops/datasets/featuredataset.py
|
import abc
import json
import datetime
import h5py
import numpy as np
from pelops.datasets.chip import ChipDataset
class FeatureDataset(ChipDataset):
def __init__(self, filename):
# TODO: Call super
self.chips, self.feats = self.load(filename)
@staticmethod
def load(filename):
with h5py.File(filename) as fIn:
feats = np.array(fIn['feats'])
num_items = fIn['feats'].shape[0]
# Hack to deal with performance of extracting single items
local_hdf5 = {}
local_hdf5['filepath'] = np.array(fIn['filepath'])
local_hdf5['car_id'] = np.array(fIn['car_id'])
local_hdf5['cam_id'] = np.array(fIn['cam_id'])
local_hdf5['time'] = np.array(fIn['time'])
local_hdf5['misc'] = np.array(fIn['misc'])
chips = {}
for i in range(num_items):
filepath = local_hdf5['filepath'][i].decode('utf-8')
car_id = local_hdf5['car_id'][i]
cam_id = local_hdf5['cam_id'][i]
time = datetime.datetime.fromtimestamp(local_hdf5['time'][i]/1000.0)
misc = json.loads(local_hdf5['misc'][i].decode('utf-8'))
chips[i] = Chip(filepath, car_id, cam_id, time, misc)
return chips, feats
@staticmethod
def save(self, filename, chips, features):
with h5py.File(filename, 'w') as fOut:
fOut.create_dataset('feats', data=features)
for field in chips[0]._fields:
if isinstance(getattr(chips[0], field), datetime.datetime):
times = np.array([getattr(chip, field).timestamp() for chip in chips])
times = times * 1000.0 # Convert to ms since epoch
fOut.create_dataset(field, data=times, dtype=np.int64)
elif isinstance(getattr(chips[0], field), str):
fOut.create_dataset(field,
data=[getattr(chip, field).encode('ascii', 'ignore') for chip in chips],
dtype=h5py.special_dtype(vlen=bytes))
elif isinstance(getattr(chips[0], field), dict):
data = [json.dumps(getattr(chip, field)).encode('ascii', 'ignore') for chip in chips]
fOut.create_dataset(field,
data=data,
dtype=h5py.special_dtype(vlen=bytes))
else:
fOut.create_dataset(field, data=[getattr(chip, field) for chip in chips])
|
Create new FeatureDataset class to be used by experiment
|
Create new FeatureDataset class to be used by experiment
|
Python
|
apache-2.0
|
dave-lab41/pelops,Lab41/pelops,d-grossman/pelops,dave-lab41/pelops,d-grossman/pelops,Lab41/pelops
|
Create new FeatureDataset class to be used by experiment
|
import abc
import json
import datetime
import h5py
import numpy as np
from pelops.datasets.chip import ChipDataset
class FeatureDataset(ChipDataset):
def __init__(self, filename):
# TODO: Call super
self.chips, self.feats = self.load(filename)
@staticmethod
def load(filename):
with h5py.File(filename) as fIn:
feats = np.array(fIn['feats'])
num_items = fIn['feats'].shape[0]
# Hack to deal with performance of extracting single items
local_hdf5 = {}
local_hdf5['filepath'] = np.array(fIn['filepath'])
local_hdf5['car_id'] = np.array(fIn['car_id'])
local_hdf5['cam_id'] = np.array(fIn['cam_id'])
local_hdf5['time'] = np.array(fIn['time'])
local_hdf5['misc'] = np.array(fIn['misc'])
chips = {}
for i in range(num_items):
filepath = local_hdf5['filepath'][i].decode('utf-8')
car_id = local_hdf5['car_id'][i]
cam_id = local_hdf5['cam_id'][i]
time = datetime.datetime.fromtimestamp(local_hdf5['time'][i]/1000.0)
misc = json.loads(local_hdf5['misc'][i].decode('utf-8'))
chips[i] = Chip(filepath, car_id, cam_id, time, misc)
return chips, feats
@staticmethod
def save(self, filename, chips, features):
with h5py.File(filename, 'w') as fOut:
fOut.create_dataset('feats', data=features)
for field in chips[0]._fields:
if isinstance(getattr(chips[0], field), datetime.datetime):
times = np.array([getattr(chip, field).timestamp() for chip in chips])
times = times * 1000.0 # Convert to ms since epoch
fOut.create_dataset(field, data=times, dtype=np.int64)
elif isinstance(getattr(chips[0], field), str):
fOut.create_dataset(field,
data=[getattr(chip, field).encode('ascii', 'ignore') for chip in chips],
dtype=h5py.special_dtype(vlen=bytes))
elif isinstance(getattr(chips[0], field), dict):
data = [json.dumps(getattr(chip, field)).encode('ascii', 'ignore') for chip in chips]
fOut.create_dataset(field,
data=data,
dtype=h5py.special_dtype(vlen=bytes))
else:
fOut.create_dataset(field, data=[getattr(chip, field) for chip in chips])
|
<commit_before><commit_msg>Create new FeatureDataset class to be used by experiment<commit_after>
|
import abc
import json
import datetime
import h5py
import numpy as np
from pelops.datasets.chip import ChipDataset
class FeatureDataset(ChipDataset):
def __init__(self, filename):
# TODO: Call super
self.chips, self.feats = self.load(filename)
@staticmethod
def load(filename):
with h5py.File(filename) as fIn:
feats = np.array(fIn['feats'])
num_items = fIn['feats'].shape[0]
# Hack to deal with performance of extracting single items
local_hdf5 = {}
local_hdf5['filepath'] = np.array(fIn['filepath'])
local_hdf5['car_id'] = np.array(fIn['car_id'])
local_hdf5['cam_id'] = np.array(fIn['cam_id'])
local_hdf5['time'] = np.array(fIn['time'])
local_hdf5['misc'] = np.array(fIn['misc'])
chips = {}
for i in range(num_items):
filepath = local_hdf5['filepath'][i].decode('utf-8')
car_id = local_hdf5['car_id'][i]
cam_id = local_hdf5['cam_id'][i]
time = datetime.datetime.fromtimestamp(local_hdf5['time'][i]/1000.0)
misc = json.loads(local_hdf5['misc'][i].decode('utf-8'))
chips[i] = Chip(filepath, car_id, cam_id, time, misc)
return chips, feats
@staticmethod
def save(self, filename, chips, features):
with h5py.File(filename, 'w') as fOut:
fOut.create_dataset('feats', data=features)
for field in chips[0]._fields:
if isinstance(getattr(chips[0], field), datetime.datetime):
times = np.array([getattr(chip, field).timestamp() for chip in chips])
times = times * 1000.0 # Convert to ms since epoch
fOut.create_dataset(field, data=times, dtype=np.int64)
elif isinstance(getattr(chips[0], field), str):
fOut.create_dataset(field,
data=[getattr(chip, field).encode('ascii', 'ignore') for chip in chips],
dtype=h5py.special_dtype(vlen=bytes))
elif isinstance(getattr(chips[0], field), dict):
data = [json.dumps(getattr(chip, field)).encode('ascii', 'ignore') for chip in chips]
fOut.create_dataset(field,
data=data,
dtype=h5py.special_dtype(vlen=bytes))
else:
fOut.create_dataset(field, data=[getattr(chip, field) for chip in chips])
|
Create new FeatureDataset class to be used by experimentimport abc
import json
import datetime
import h5py
import numpy as np
from pelops.datasets.chip import ChipDataset
class FeatureDataset(ChipDataset):
def __init__(self, filename):
# TODO: Call super
self.chips, self.feats = self.load(filename)
@staticmethod
def load(filename):
with h5py.File(filename) as fIn:
feats = np.array(fIn['feats'])
num_items = fIn['feats'].shape[0]
# Hack to deal with performance of extracting single items
local_hdf5 = {}
local_hdf5['filepath'] = np.array(fIn['filepath'])
local_hdf5['car_id'] = np.array(fIn['car_id'])
local_hdf5['cam_id'] = np.array(fIn['cam_id'])
local_hdf5['time'] = np.array(fIn['time'])
local_hdf5['misc'] = np.array(fIn['misc'])
chips = {}
for i in range(num_items):
filepath = local_hdf5['filepath'][i].decode('utf-8')
car_id = local_hdf5['car_id'][i]
cam_id = local_hdf5['cam_id'][i]
time = datetime.datetime.fromtimestamp(local_hdf5['time'][i]/1000.0)
misc = json.loads(local_hdf5['misc'][i].decode('utf-8'))
chips[i] = Chip(filepath, car_id, cam_id, time, misc)
return chips, feats
@staticmethod
def save(self, filename, chips, features):
with h5py.File(filename, 'w') as fOut:
fOut.create_dataset('feats', data=features)
for field in chips[0]._fields:
if isinstance(getattr(chips[0], field), datetime.datetime):
times = np.array([getattr(chip, field).timestamp() for chip in chips])
times = times * 1000.0 # Convert to ms since epoch
fOut.create_dataset(field, data=times, dtype=np.int64)
elif isinstance(getattr(chips[0], field), str):
fOut.create_dataset(field,
data=[getattr(chip, field).encode('ascii', 'ignore') for chip in chips],
dtype=h5py.special_dtype(vlen=bytes))
elif isinstance(getattr(chips[0], field), dict):
data = [json.dumps(getattr(chip, field)).encode('ascii', 'ignore') for chip in chips]
fOut.create_dataset(field,
data=data,
dtype=h5py.special_dtype(vlen=bytes))
else:
fOut.create_dataset(field, data=[getattr(chip, field) for chip in chips])
|
<commit_before><commit_msg>Create new FeatureDataset class to be used by experiment<commit_after>import abc
import json
import datetime
import h5py
import numpy as np
from pelops.datasets.chip import ChipDataset
class FeatureDataset(ChipDataset):
def __init__(self, filename):
# TODO: Call super
self.chips, self.feats = self.load(filename)
@staticmethod
def load(filename):
with h5py.File(filename) as fIn:
feats = np.array(fIn['feats'])
num_items = fIn['feats'].shape[0]
# Hack to deal with performance of extracting single items
local_hdf5 = {}
local_hdf5['filepath'] = np.array(fIn['filepath'])
local_hdf5['car_id'] = np.array(fIn['car_id'])
local_hdf5['cam_id'] = np.array(fIn['cam_id'])
local_hdf5['time'] = np.array(fIn['time'])
local_hdf5['misc'] = np.array(fIn['misc'])
chips = {}
for i in range(num_items):
filepath = local_hdf5['filepath'][i].decode('utf-8')
car_id = local_hdf5['car_id'][i]
cam_id = local_hdf5['cam_id'][i]
time = datetime.datetime.fromtimestamp(local_hdf5['time'][i]/1000.0)
misc = json.loads(local_hdf5['misc'][i].decode('utf-8'))
chips[i] = Chip(filepath, car_id, cam_id, time, misc)
return chips, feats
@staticmethod
def save(self, filename, chips, features):
with h5py.File(filename, 'w') as fOut:
fOut.create_dataset('feats', data=features)
for field in chips[0]._fields:
if isinstance(getattr(chips[0], field), datetime.datetime):
times = np.array([getattr(chip, field).timestamp() for chip in chips])
times = times * 1000.0 # Convert to ms since epoch
fOut.create_dataset(field, data=times, dtype=np.int64)
elif isinstance(getattr(chips[0], field), str):
fOut.create_dataset(field,
data=[getattr(chip, field).encode('ascii', 'ignore') for chip in chips],
dtype=h5py.special_dtype(vlen=bytes))
elif isinstance(getattr(chips[0], field), dict):
data = [json.dumps(getattr(chip, field)).encode('ascii', 'ignore') for chip in chips]
fOut.create_dataset(field,
data=data,
dtype=h5py.special_dtype(vlen=bytes))
else:
fOut.create_dataset(field, data=[getattr(chip, field) for chip in chips])
|
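A minimal usage sketch for the FeatureDataset class above, assuming a feature file written by its own save helper (the file name and shapes are hypothetical). Note that the loader references a `Chip` tuple that is presumably meant to come from `pelops.datasets.chip` alongside `ChipDataset`, and that `save` is declared a `@staticmethod` while still taking `self`, so the sketch only exercises loading:

from pelops.datasets.featuredataset import FeatureDataset

dataset = FeatureDataset('features.hdf5')  # hypothetical HDF5 produced by save()
print(len(dataset.chips))                  # number of image chips
print(dataset.feats.shape)                 # e.g. (num_chips, feature_dim)

# Chips and feature rows share the same index.
chip = dataset.chips[0]
vector = dataset.feats[0]
print(chip.car_id, chip.cam_id, vector[:5])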
|
2d626519e669563c655b885783f9b02946640d21
|
refstack/db/migrations/alembic/versions/434be17a6ec3_fix_openids_with_space.py
|
refstack/db/migrations/alembic/versions/434be17a6ec3_fix_openids_with_space.py
|
"""Fix openids with spaces.
A change in the openstackid naming made it so IDs with spaces
are trimmed, so %20 is no longer in the openid url. This migration
will replace any '%20' with a '.' in each openid.
Revision ID: 434be17a6ec3
Revises: 59df512e82f
Create Date: 2017-03-23 12:20:08.219294
"""
# revision identifiers, used by Alembic.
revision = '434be17a6ec3'
down_revision = '59df512e82f'
MYSQL_CHARSET = 'utf8'
from alembic import op
def upgrade():
"""Upgrade DB."""
conn = op.get_bind()
# Need to disable FOREIGN_KEY_CHECKS as a lot of tables reference the
# openid in the user table.
conn.execute("SET FOREIGN_KEY_CHECKS=0")
res = conn.execute("select * from user where openid LIKE '%%\%%20%%'")
results = res.fetchall()
for user in results:
old_openid = user[5]
new_openid = user[5].replace('%20', '.')
# Remove instances of the new openid so the old one can take
# its place.
query = "delete from user where openid='%s'" % (new_openid)
conn.execute(query.replace('%', '%%'))
# Update the openid.
query = ("update user set openid='%s' where openid='%s'" %
(new_openid, old_openid))
conn.execute(query.replace('%', '%%'))
# Update all usage of %20 in all openid references using MySQL Replace.
conn.execute("update meta set value = "
"REPLACE (value, '%%20', '.')")
conn.execute("update pubkeys set openid = "
"REPLACE (openid, '%%20', '.')")
conn.execute("update organization set created_by_user = "
"REPLACE (created_by_user, '%%20', '.')")
conn.execute("update product set created_by_user = "
"REPLACE (created_by_user, '%%20', '.')")
conn.execute("update product_version set created_by_user = "
"REPLACE (created_by_user, '%%20', '.')")
conn.execute("update user_to_group set created_by_user = "
"REPLACE (created_by_user, '%%20', '.')")
conn.execute("update user_to_group set user_openid = "
"REPLACE (user_openid, '%%20', '.')")
conn.execute("SET FOREIGN_KEY_CHECKS=1")
def downgrade():
"""Downgrade DB."""
pass
|
Add openid fix migration script
|
Add openid fix migration script
Users who previously had a '%20' in their openstackids currently aren't able
to access their data on RefStack. This is because a '.' has taken the place of
'%20' in a recent openstack id update. This migration will reflect this change
in the database.
Reference: https://review.openstack.org/#/c/427889/
Change-Id: I58c595ea3467ddd94cd26a8ec7e284561a0dc02d
|
Python
|
apache-2.0
|
openstack/refstack,openstack/refstack,stackforge/refstack,stackforge/refstack,stackforge/refstack,openstack/refstack,openstack/refstack,stackforge/refstack
|
Add openid fix migration script
Users who previously had a '%20' in their openstackids currently aren't able
to access their data on RefStack. This is because a '.' has taken the place of
'%20' in a recent openstack id update. This migration will reflect this change
in the database.
Reference: https://review.openstack.org/#/c/427889/
Change-Id: I58c595ea3467ddd94cd26a8ec7e284561a0dc02d
|
"""Fix openids with spaces.
A change in the openstackid naming made it so IDs with spaces
are trimmed, so %20 is no longer in the openid url. This migration
will replace any '%20' with a '.' in each openid.
Revision ID: 434be17a6ec3
Revises: 59df512e82f
Create Date: 2017-03-23 12:20:08.219294
"""
# revision identifiers, used by Alembic.
revision = '434be17a6ec3'
down_revision = '59df512e82f'
MYSQL_CHARSET = 'utf8'
from alembic import op
def upgrade():
"""Upgrade DB."""
conn = op.get_bind()
# Need to disable FOREIGN_KEY_CHECKS as a lot of tables reference the
# openid in the user table.
conn.execute("SET FOREIGN_KEY_CHECKS=0")
res = conn.execute("select * from user where openid LIKE '%%\%%20%%'")
results = res.fetchall()
for user in results:
old_openid = user[5]
new_openid = user[5].replace('%20', '.')
# Remove instances of the new openid so the old one can take
# its place.
query = "delete from user where openid='%s'" % (new_openid)
conn.execute(query.replace('%', '%%'))
# Update the openid.
query = ("update user set openid='%s' where openid='%s'" %
(new_openid, old_openid))
conn.execute(query.replace('%', '%%'))
# Update all usage of %20 in all openid references using MySQL Replace.
conn.execute("update meta set value = "
"REPLACE (value, '%%20', '.')")
conn.execute("update pubkeys set openid = "
"REPLACE (openid, '%%20', '.')")
conn.execute("update organization set created_by_user = "
"REPLACE (created_by_user, '%%20', '.')")
conn.execute("update product set created_by_user = "
"REPLACE (created_by_user, '%%20', '.')")
conn.execute("update product_version set created_by_user = "
"REPLACE (created_by_user, '%%20', '.')")
conn.execute("update user_to_group set created_by_user = "
"REPLACE (created_by_user, '%%20', '.')")
conn.execute("update user_to_group set user_openid = "
"REPLACE (user_openid, '%%20', '.')")
conn.execute("SET FOREIGN_KEY_CHECKS=1")
def downgrade():
"""Downgrade DB."""
pass
|
<commit_before><commit_msg>Add openid fix migration script
Users who previously had a '%20' in their openstackids currently aren't able
to access their data on RefStack. This is because a '.' has taken the place of
'%20' in a recent openstack id update. This migration will reflect this change
in the database.
Reference: https://review.openstack.org/#/c/427889/
Change-Id: I58c595ea3467ddd94cd26a8ec7e284561a0dc02d<commit_after>
|
"""Fix openids with spaces.
A change in the openstackid naming made it so IDs with spaces
are trimmed, so %20 is no longer in the openid url. This migration
will replace any '%20' with a '.' in each openid.
Revision ID: 434be17a6ec3
Revises: 59df512e82f
Create Date: 2017-03-23 12:20:08.219294
"""
# revision identifiers, used by Alembic.
revision = '434be17a6ec3'
down_revision = '59df512e82f'
MYSQL_CHARSET = 'utf8'
from alembic import op
def upgrade():
"""Upgrade DB."""
conn = op.get_bind()
# Need to disable FOREIGN_KEY_CHECKS as a lot of tables reference the
# openid in the user table.
conn.execute("SET FOREIGN_KEY_CHECKS=0")
res = conn.execute("select * from user where openid LIKE '%%\%%20%%'")
results = res.fetchall()
for user in results:
old_openid = user[5]
new_openid = user[5].replace('%20', '.')
# Remove instances of the new openid so the old one can take
# its place.
query = "delete from user where openid='%s'" % (new_openid)
conn.execute(query.replace('%', '%%'))
# Update the openid.
query = ("update user set openid='%s' where openid='%s'" %
(new_openid, old_openid))
conn.execute(query.replace('%', '%%'))
# Update all usage of %20 in all openid references using MySQL Replace.
conn.execute("update meta set value = "
"REPLACE (value, '%%20', '.')")
conn.execute("update pubkeys set openid = "
"REPLACE (openid, '%%20', '.')")
conn.execute("update organization set created_by_user = "
"REPLACE (created_by_user, '%%20', '.')")
conn.execute("update product set created_by_user = "
"REPLACE (created_by_user, '%%20', '.')")
conn.execute("update product_version set created_by_user = "
"REPLACE (created_by_user, '%%20', '.')")
conn.execute("update user_to_group set created_by_user = "
"REPLACE (created_by_user, '%%20', '.')")
conn.execute("update user_to_group set user_openid = "
"REPLACE (user_openid, '%%20', '.')")
conn.execute("SET FOREIGN_KEY_CHECKS=1")
def downgrade():
"""Downgrade DB."""
pass
|
Add openid fix migration script
Users who previously had a '%20' in their openstackids currently aren't able
to access their data on RefStack. This is because a '.' has taken the place of
'%20' in a recent openstack id update. This migration will reflect this change
in the database.
Reference: https://review.openstack.org/#/c/427889/
Change-Id: I58c595ea3467ddd94cd26a8ec7e284561a0dc02d"""Fix openids with spaces.
A change in the openstackid naming made it so IDs with spaces
are trimmed, so %20 is no longer in the openid url. This migration
will replace any '%20' with a '.' in each openid.
Revision ID: 434be17a6ec3
Revises: 59df512e82f
Create Date: 2017-03-23 12:20:08.219294
"""
# revision identifiers, used by Alembic.
revision = '434be17a6ec3'
down_revision = '59df512e82f'
MYSQL_CHARSET = 'utf8'
from alembic import op
def upgrade():
"""Upgrade DB."""
conn = op.get_bind()
# Need to disable FOREIGN_KEY_CHECKS as a lot of tables reference the
# openid in the user table.
conn.execute("SET FOREIGN_KEY_CHECKS=0")
res = conn.execute("select * from user where openid LIKE '%%\%%20%%'")
results = res.fetchall()
for user in results:
old_openid = user[5]
new_openid = user[5].replace('%20', '.')
# Remove instances of the new openid so the old one can take
# its place.
query = "delete from user where openid='%s'" % (new_openid)
conn.execute(query.replace('%', '%%'))
# Update the openid.
query = ("update user set openid='%s' where openid='%s'" %
(new_openid, old_openid))
conn.execute(query.replace('%', '%%'))
# Update all usage of %20 in all openid references using MySQL Replace.
conn.execute("update meta set value = "
"REPLACE (value, '%%20', '.')")
conn.execute("update pubkeys set openid = "
"REPLACE (openid, '%%20', '.')")
conn.execute("update organization set created_by_user = "
"REPLACE (created_by_user, '%%20', '.')")
conn.execute("update product set created_by_user = "
"REPLACE (created_by_user, '%%20', '.')")
conn.execute("update product_version set created_by_user = "
"REPLACE (created_by_user, '%%20', '.')")
conn.execute("update user_to_group set created_by_user = "
"REPLACE (created_by_user, '%%20', '.')")
conn.execute("update user_to_group set user_openid = "
"REPLACE (user_openid, '%%20', '.')")
conn.execute("SET FOREIGN_KEY_CHECKS=1")
def downgrade():
"""Downgrade DB."""
pass
|
<commit_before><commit_msg>Add openid fix migration script
Users who previously had a '%20' in their openstackids currently aren't able
to access their data on RefStack. This is because a '.' has taken the place of
'%20' in a recent openstack id update. This migration will reflect this change
in the database.
Reference: https://review.openstack.org/#/c/427889/
Change-Id: I58c595ea3467ddd94cd26a8ec7e284561a0dc02d<commit_after>"""Fix openids with spaces.
A change in the openstackid naming made it so IDs with spaces
are trimmed, so %20 is no longer in the openid url. This migration
will replace any '%20' with a '.' in each openid.
Revision ID: 434be17a6ec3
Revises: 59df512e82f
Create Date: 2017-03-23 12:20:08.219294
"""
# revision identifiers, used by Alembic.
revision = '434be17a6ec3'
down_revision = '59df512e82f'
MYSQL_CHARSET = 'utf8'
from alembic import op
def upgrade():
"""Upgrade DB."""
conn = op.get_bind()
# Need to disable FOREIGN_KEY_CHECKS as a lot of tables reference the
# openid in the user table.
conn.execute("SET FOREIGN_KEY_CHECKS=0")
res = conn.execute("select * from user where openid LIKE '%%\%%20%%'")
results = res.fetchall()
for user in results:
old_openid = user[5]
new_openid = user[5].replace('%20', '.')
# Remove instances of the new openid so the old one can take
# its place.
query = "delete from user where openid='%s'" % (new_openid)
conn.execute(query.replace('%', '%%'))
# Update the openid.
query = ("update user set openid='%s' where openid='%s'" %
(new_openid, old_openid))
conn.execute(query.replace('%', '%%'))
# Update all usage of %20 in all openid references using MySQL Replace.
conn.execute("update meta set value = "
"REPLACE (value, '%%20', '.')")
conn.execute("update pubkeys set openid = "
"REPLACE (openid, '%%20', '.')")
conn.execute("update organization set created_by_user = "
"REPLACE (created_by_user, '%%20', '.')")
conn.execute("update product set created_by_user = "
"REPLACE (created_by_user, '%%20', '.')")
conn.execute("update product_version set created_by_user = "
"REPLACE (created_by_user, '%%20', '.')")
conn.execute("update user_to_group set created_by_user = "
"REPLACE (created_by_user, '%%20', '.')")
conn.execute("update user_to_group set user_openid = "
"REPLACE (user_openid, '%%20', '.')")
conn.execute("SET FOREIGN_KEY_CHECKS=1")
def downgrade():
"""Downgrade DB."""
pass
|
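At its core the migration in this record performs a plain string substitution; a standalone sketch with a hypothetical openid value:

# Hypothetical example of the rewrite applied to every stored openid.
old_openid = 'https://openstackid.org/foo%20bar'
new_openid = old_openid.replace('%20', '.')
assert new_openid == 'https://openstackid.org/foo.bar'

The doubled '%%' in the raw SQL above exists because the underlying DB-API driver treats a lone '%' as the start of a parameter marker, so literal percent signs have to be escaped before execution.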
|
b2b40ee243b24a39cf9958736d0b2d18dd78417a
|
python/stepper.py
|
python/stepper.py
|
#!/usr/bin/env python
# http://www.youtube.com/watch?v=Dc16mKFA7Fo
import time, sys, exceptions
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
ControlPin = [18,22,24,26]
for pin in ControlPin:
GPIO.setup(pin,GPIO.OUT)
GPIO.output(pin, 0)
seq = [ [1,0,0,0],
[1,1,0,0],
[0,1,0,0],
[0,1,1,0],
[0,0,1,0],
[0,0,1,1],
[0,0,0,1],
[1,0,0,1] ]
# 512 steps is a full rotation
def anticlockwise(steps):
for i in range(steps):
for halfstep in reversed(range(8)):
for pin in range(4):
GPIO.output(ControlPin[pin], seq[halfstep][pin])
time.sleep(0.001)
def clockwise(steps):
for i in range(steps):
for halfstep in range(8):
for pin in range(4):
GPIO.output(ControlPin[pin], seq[halfstep][pin])
time.sleep(0.001)
if len (sys.argv) != 2 :
print "Usage: sudo python blindfold.py steps"
sys.exit (1)
try:
steps = int(sys.argv[1])
except exceptions.ValueError:
print "'steps' must be an integer, 512 is a full rotation"
sys.exit (1)
if steps > 0:
clockwise(steps)
elif steps < 0:
anticlockwise(steps * -1)
else:
print "Huh?"
GPIO.cleanup()
|
Call the python script which is MUCH faster than php gpio
|
Call the python script which is MUCH faster than php gpio
|
Python
|
mit
|
theapi/CctvBlindfoldBundle,theapi/CctvBlindfoldBundle,theapi/CctvBlindfoldBundle,theapi/CctvBlindfoldBundle
|
Call the python script which is MUCH faster than php gpio
|
#!/usr/bin/env python
# http://www.youtube.com/watch?v=Dc16mKFA7Fo
import time, sys, exceptions
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
ControlPin = [18,22,24,26]
for pin in ControlPin:
GPIO.setup(pin,GPIO.OUT)
GPIO.output(pin, 0)
seq = [ [1,0,0,0],
[1,1,0,0],
[0,1,0,0],
[0,1,1,0],
[0,0,1,0],
[0,0,1,1],
[0,0,0,1],
[1,0,0,1] ]
# 512 steps is a full rotation
def anticlockwise(steps):
for i in range(steps):
for halfstep in reversed(range(8)):
for pin in range(4):
GPIO.output(ControlPin[pin], seq[halfstep][pin])
time.sleep(0.001)
def clockwise(steps):
for i in range(steps):
for halfstep in range(8):
for pin in range(4):
GPIO.output(ControlPin[pin], seq[halfstep][pin])
time.sleep(0.001)
if len (sys.argv) != 2 :
print "Usage: sudo python blindfold.py steps"
sys.exit (1)
try:
steps = int(sys.argv[1])
except exceptions.ValueError:
print "'steps' must be an integer, 512 is a full rotation"
sys.exit (1)
if steps > 0:
clockwise(steps)
elif steps < 0:
anticlockwise(steps * -1)
else:
print "Huh?"
GPIO.cleanup()
|
<commit_before><commit_msg>Call the python script which is MUCH faster than php gpio<commit_after>
|
#!/usr/bin/env python
# http://www.youtube.com/watch?v=Dc16mKFA7Fo
import time, sys, exceptions
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
ControlPin = [18,22,24,26]
for pin in ControlPin:
GPIO.setup(pin,GPIO.OUT)
GPIO.output(pin, 0)
seq = [ [1,0,0,0],
[1,1,0,0],
[0,1,0,0],
[0,1,1,0],
[0,0,1,0],
[0,0,1,1],
[0,0,0,1],
[1,0,0,1] ]
# 512 steps is a full rotation
def anticlockwise(steps):
for i in range(steps):
for halfstep in reversed(range(8)):
for pin in range(4):
GPIO.output(ControlPin[pin], seq[halfstep][pin])
time.sleep(0.001)
def clockwise(steps):
for i in range(steps):
for halfstep in range(8):
for pin in range(4):
GPIO.output(ControlPin[pin], seq[halfstep][pin])
time.sleep(0.001)
if len (sys.argv) != 2 :
print "Usage: sudo python blindfold.py steps"
sys.exit (1)
try:
steps = int(sys.argv[1])
except exceptions.ValueError:
print "'steps' must be an integer, 512 is a full rotation"
sys.exit (1)
if steps > 0:
clockwise(steps)
elif steps < 0:
anticlockwise(steps * -1)
else:
print "Huh?"
GPIO.cleanup()
|
Call the python script which is MUCH faster than php gpio#!/usr/bin/env python
# http://www.youtube.com/watch?v=Dc16mKFA7Fo
import time, sys, exceptions
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
ControlPin = [18,22,24,26]
for pin in ControlPin:
GPIO.setup(pin,GPIO.OUT)
GPIO.output(pin, 0)
seq = [ [1,0,0,0],
[1,1,0,0],
[0,1,0,0],
[0,1,1,0],
[0,0,1,0],
[0,0,1,1],
[0,0,0,1],
[1,0,0,1] ]
# 512 steps is a full rotation
def anticlockwise(steps):
for i in range(steps):
for halfstep in reversed(range(8)):
for pin in range(4):
GPIO.output(ControlPin[pin], seq[halfstep][pin])
time.sleep(0.001)
def clockwise(steps):
for i in range(steps):
for halfstep in range(8):
for pin in range(4):
GPIO.output(ControlPin[pin], seq[halfstep][pin])
time.sleep(0.001)
if len (sys.argv) != 2 :
print "Usage: sudo python blindfold.py steps"
sys.exit (1)
try:
steps = int(sys.argv[1])
except exceptions.ValueError:
print "'steps' must be an integer, 512 is a full rotation"
sys.exit (1)
if steps > 0:
clockwise(steps)
elif steps < 0:
anticlockwise(steps * -1)
else:
print "Huh?"
GPIO.cleanup()
|
<commit_before><commit_msg>Call the python script which is MUCH faster than php gpio<commit_after>#!/usr/bin/env python
# http://www.youtube.com/watch?v=Dc16mKFA7Fo
import time, sys, exceptions
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
ControlPin = [18,22,24,26]
for pin in ControlPin:
GPIO.setup(pin,GPIO.OUT)
GPIO.output(pin, 0)
seq = [ [1,0,0,0],
[1,1,0,0],
[0,1,0,0],
[0,1,1,0],
[0,0,1,0],
[0,0,1,1],
[0,0,0,1],
[1,0,0,1] ]
# 512 steps is a full rotation
def anticlockwise(steps):
for i in range(steps):
for halfstep in reversed(range(8)):
for pin in range(4):
GPIO.output(ControlPin[pin], seq[halfstep][pin])
time.sleep(0.001)
def clockwise(steps):
for i in range(steps):
for halfstep in range(8):
for pin in range(4):
GPIO.output(ControlPin[pin], seq[halfstep][pin])
time.sleep(0.001)
if len (sys.argv) != 2 :
print "Usage: sudo python blindfold.py steps"
sys.exit (1)
try:
steps = int(sys.argv[1])
except exceptions.ValueError:
print "'steps' must be an integer, 512 is a full rotation"
sys.exit (1)
if steps > 0:
clockwise(steps)
elif steps < 0:
anticlockwise(steps * -1)
else:
print "Huh?"
GPIO.cleanup()
|
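For reference, the stepper script above is driven with a signed step count on the command line (e.g. sudo python stepper.py 512 for one full clockwise turn, a negative count for the opposite direction); a hypothetical in-process sketch reusing the same helpers:

clockwise(512)       # one full rotation: 512 steps x 8 half-steps each
anticlockwise(256)   # half a rotation back the other way
GPIO.cleanup()       # release the GPIO pins when finished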
|
7991e4bdcc31137ff4634144acd1469dbe09eb07
|
POST_TEST.py
|
POST_TEST.py
|
from posts.nc import *
import posts.iso
output('POST_TEST.txt')
program_begin(123, 'Test program')
absolute()
metric()
set_plane(0)
feedrate(420)
rapid(100,120)
rapid(z=50)
feed(z=0)
rapid(z=50)
rapid_home()
program_end()
|
Test for ISO NC creator
|
Test for ISO NC creator
|
Python
|
bsd-3-clause
|
JamieFBousfield/heekscnc,sangit/heekscnc,dnevels/heekscnc,hackendless/heekscnc,pyrotron/heekscnc,pyrotron/heekscnc,gaziel/heekscnc,JamieFBousfield/heekscnc,AlanZheng/heekscnc,FluffyMortain/heekscnc,BbiKkuMi/heekscnc,Nurb432/heekscnc,gaziel/heekscnc,singwina/heekscnc,blakelyc/heekscnc,tbinias/heekscnc,ant-t/heekscnc,ant-t/heekscnc,masika/heekscnc,AlanZheng/heekscnc,singwina/heekscnc,hackendless/heekscnc,blakelyc/heekscnc,blakelyc/heekscnc,elliotf/heekscnc,blakelyc/heekscnc,tbinias/heekscnc,gaziel/heekscnc,singwina/heekscnc,hackendless/heekscnc,AlanZheng/heekscnc,pyrotron/heekscnc,elliotf/heekscnc,masika/heekscnc,sangit/heekscnc,FluffyMortain/heekscnc,hackendless/heekscnc,FluffyMortain/heekscnc,elliotf/heekscnc,BbiKkuMi/heekscnc,AlanZheng/heekscnc,elliotf/heekscnc,dnevels/heekscnc,gaziel/heekscnc,JamieFBousfield/heekscnc,ant-t/heekscnc,pyrotron/heekscnc,singwina/heekscnc,masika/heekscnc,dnevels/heekscnc,ant-t/heekscnc,Nurb432/heekscnc,masika/heekscnc,tbinias/heekscnc,JamieFBousfield/heekscnc,FluffyMortain/heekscnc,BbiKkuMi/heekscnc,Nurb432/heekscnc,sangit/heekscnc,tbinias/heekscnc,dnevels/heekscnc,sangit/heekscnc,BbiKkuMi/heekscnc,Nurb432/heekscnc
|
Test for ISO NC creator
|
from posts.nc import *
import posts.iso
output('POST_TEST.txt')
program_begin(123, 'Test program')
absolute()
metric()
set_plane(0)
feedrate(420)
rapid(100,120)
rapid(z=50)
feed(z=0)
rapid(z=50)
rapid_home()
program_end()
|
<commit_before><commit_msg>Test for ISO NC creator<commit_after>
|
from posts.nc import *
import posts.iso
output('POST_TEST.txt')
program_begin(123, 'Test program')
absolute()
metric()
set_plane(0)
feedrate(420)
rapid(100,120)
rapid(z=50)
feed(z=0)
rapid(z=50)
rapid_home()
program_end()
|
Test for ISO NC creatorfrom posts.nc import *
import posts.iso
output('POST_TEST.txt')
program_begin(123, 'Test program')
absolute()
metric()
set_plane(0)
feedrate(420)
rapid(100,120)
rapid(z=50)
feed(z=0)
rapid(z=50)
rapid_home()
program_end()
|
<commit_before><commit_msg>Test for ISO NC creator<commit_after>from posts.nc import *
import posts.iso
output('POST_TEST.txt')
program_begin(123, 'Test program')
absolute()
metric()
set_plane(0)
feedrate(420)
rapid(100,120)
rapid(z=50)
feed(z=0)
rapid(z=50)
rapid_home()
program_end()
|
|
fbdf7e53dba142cf45270e63623aea59a4ffecf6
|
test_add_group.py
|
test_add_group.py
|
# -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("https://localhost/addressbook/")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("ddggjhjkol")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("fdfghghjjk")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("gfhhj;l")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
Test scenario written by SeleniumBuilder
|
Test scenario written by SeleniumBuilder
|
Python
|
apache-2.0
|
figharo54/python_training
|
Test scenario written by SeleniumBuilder
|
# -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("https://localhost/addressbook/")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("ddggjhjkol")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("fdfghghjjk")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("gfhhj;l")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test scenario written by SeleniumBuilder<commit_after>
|
# -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("https://localhost/addressbook/")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("ddggjhjkol")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("fdfghghjjk")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("gfhhj;l")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
Test scenario written by SeleniumBuilder# -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("https://localhost/addressbook/")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("ddggjhjkol")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("fdfghghjjk")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("gfhhj;l")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test scenario written by SeleniumBuilder<commit_after># -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("https://localhost/addressbook/")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("ddggjhjkol")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("fdfghghjjk")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("gfhhj;l")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
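The recorded Selenium scenario above repeats the click/clear/send_keys sequence for every field; a small hypothetical helper using the same legacy WebDriver API could factor that out:

def fill_field(wd, name, value):
    # Click, clear and type into a form field located by its name attribute.
    field = wd.find_element_by_name(name)
    field.click()
    field.clear()
    field.send_keys(value)

# e.g. inside the test body:
# fill_field(wd, "group_name", "ddggjhjkol")
# fill_field(wd, "group_header", "fdfghghjjk")
# fill_field(wd, "group_footer", "gfhhj;l")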
|
1b7f846d096d1b8c36213666b49f2075d3f5fac1
|
tools/abandon_gerrit_cls.py
|
tools/abandon_gerrit_cls.py
|
#!/usr/bin/env python
#
# Copyright 2020 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Bulk abandon Gerrit CLs."""
import argparse
import os
import re
import subprocess
import sys
from infra import git
from infra import go
def run_abandon_cls(args):
"""Bulk abandon Gerrit CLs."""
go.mod_download()
go.install(go.INFRA_GO+'/scripts/abandon_gerrit_cls')
subprocess.check_call([
'abandon_gerrit_cls',
'--gerrit_instance', args.gerrit_instance,
'--abandon_reason', args.abandon_reason,
'--last_modified_before_days', str(args.last_modified_before_days),
])
def main():
# TODO(rmistry): Instead of attempting to keep these args in sync, defer to
# abandon_gerrit_cls for argument parsing.
d = 'Helper script for bulk abandoning gerrit CLs'
parser = argparse.ArgumentParser(description=d)
parser.add_argument(
'--gerrit-instance', '-g', default='https://skia-review.googlesource.com',
help='Name of the gerrit instance.')
parser.add_argument(
'--abandon-reason', '-a', default='',
help='Will be used as reason for abandoning.')
parser.add_argument(
'--last-modified-before-days', '-l', default=0,
help='If 3 is specified then all CLs that were modified after 3 days ago '
'will be returned.')
args = parser.parse_args()
go.check()
run_abandon_cls(args)
if __name__ == '__main__':
main()
|
Add wrapper script to call tool to bulk abandon Gerrit CLs
|
Add wrapper script to call tool to bulk abandon Gerrit CLs
Makes it easy to invoke https://skia-review.googlesource.com/c/buildbot/+/275096
from the Skia repo
Change-Id: If94d506d86a2b4319c7e0a7c830d5dab27916c10
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/275693
Commit-Queue: Ravi Mistry <9fa2e7438b8cb730f96b74865492597170561628@google.com>
Reviewed-by: Eric Boren <0e499112533c8544f0505ea0d08394fb5ad7d8fa@google.com>
|
Python
|
bsd-3-clause
|
google/skia,google/skia,aosp-mirror/platform_external_skia,google/skia,aosp-mirror/platform_external_skia,aosp-mirror/platform_external_skia,google/skia,aosp-mirror/platform_external_skia,google/skia,google/skia,aosp-mirror/platform_external_skia,google/skia,aosp-mirror/platform_external_skia,google/skia,aosp-mirror/platform_external_skia,aosp-mirror/platform_external_skia,aosp-mirror/platform_external_skia,google/skia,google/skia,aosp-mirror/platform_external_skia
|
Add wrapper script to call tool to bulk abandon Gerrit CLs
Makes it easy to invoke https://skia-review.googlesource.com/c/buildbot/+/275096
from the Skia repo
Change-Id: If94d506d86a2b4319c7e0a7c830d5dab27916c10
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/275693
Commit-Queue: Ravi Mistry <9fa2e7438b8cb730f96b74865492597170561628@google.com>
Reviewed-by: Eric Boren <0e499112533c8544f0505ea0d08394fb5ad7d8fa@google.com>
|
#!/usr/bin/env python
#
# Copyright 2020 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Bulk abandon Gerrit CLs."""
import argparse
import os
import re
import subprocess
import sys
from infra import git
from infra import go
def run_abandon_cls(args):
"""Bulk abandon Gerrit CLs."""
go.mod_download()
go.install(go.INFRA_GO+'/scripts/abandon_gerrit_cls')
subprocess.check_call([
'abandon_gerrit_cls',
'--gerrit_instance', args.gerrit_instance,
'--abandon_reason', args.abandon_reason,
'--last_modified_before_days', str(args.last_modified_before_days),
])
def main():
# TODO(rmistry): Instead of attempting to keep these args in sync, defer to
# abandon_gerrit_cls for argument parsing.
d = 'Helper script for bulk abandoning gerrit CLs'
parser = argparse.ArgumentParser(description=d)
parser.add_argument(
'--gerrit-instance', '-g', default='https://skia-review.googlesource.com',
help='Name of the gerrit instance.')
parser.add_argument(
'--abandon-reason', '-a', default='',
help='Will be used as reason for abandoning.')
parser.add_argument(
'--last-modified-before-days', '-l', default=0,
help='If 3 is specified then all CLs that were modified after 3 days ago '
'will be returned.')
args = parser.parse_args()
go.check()
run_abandon_cls(args)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add wrapper script to call tool to bulk abandon Gerrit CLs
Makes it easy to invoke https://skia-review.googlesource.com/c/buildbot/+/275096
from the Skia repo
Change-Id: If94d506d86a2b4319c7e0a7c830d5dab27916c10
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/275693
Commit-Queue: Ravi Mistry <9fa2e7438b8cb730f96b74865492597170561628@google.com>
Reviewed-by: Eric Boren <0e499112533c8544f0505ea0d08394fb5ad7d8fa@google.com><commit_after>
|
#!/usr/bin/env python
#
# Copyright 2020 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Bulk abandon Gerrit CLs."""
import argparse
import os
import re
import subprocess
import sys
from infra import git
from infra import go
def run_abandon_cls(args):
"""Bulk abandon Gerrit CLs."""
go.mod_download()
go.install(go.INFRA_GO+'/scripts/abandon_gerrit_cls')
subprocess.check_call([
'abandon_gerrit_cls',
'--gerrit_instance', args.gerrit_instance,
'--abandon_reason', args.abandon_reason,
'--last_modified_before_days', str(args.last_modified_before_days),
])
def main():
# TODO(rmistry): Instead of attempting to keep these args in sync, defer to
# abandon_gerrit_cls for argument parsing.
d = 'Helper script for bulk abandoning gerrit CLs'
parser = argparse.ArgumentParser(description=d)
parser.add_argument(
'--gerrit-instance', '-g', default='https://skia-review.googlesource.com',
help='Name of the gerrit instance.')
parser.add_argument(
'--abandon-reason', '-a', default='',
help='Will be used as reason for abandoning.')
parser.add_argument(
'--last-modified-before-days', '-l', default=0,
help='If 3 is specified then all CLs that were modified after 3 days ago '
'will be returned.')
args = parser.parse_args()
go.check()
run_abandon_cls(args)
if __name__ == '__main__':
main()
|
Add wrapper script to call tool to bulk abandon Gerrit CLs
Makes it easy to invoke https://skia-review.googlesource.com/c/buildbot/+/275096
from the Skia repo
Change-Id: If94d506d86a2b4319c7e0a7c830d5dab27916c10
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/275693
Commit-Queue: Ravi Mistry <9fa2e7438b8cb730f96b74865492597170561628@google.com>
Reviewed-by: Eric Boren <0e499112533c8544f0505ea0d08394fb5ad7d8fa@google.com>#!/usr/bin/env python
#
# Copyright 2020 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Bulk abandon Gerrit CLs."""
import argparse
import os
import re
import subprocess
import sys
from infra import git
from infra import go
def run_abandon_cls(args):
"""Bulk abandon Gerrit CLs."""
go.mod_download()
go.install(go.INFRA_GO+'/scripts/abandon_gerrit_cls')
subprocess.check_call([
'abandon_gerrit_cls',
'--gerrit_instance', args.gerrit_instance,
'--abandon_reason', args.abandon_reason,
'--last_modified_before_days', str(args.last_modified_before_days),
])
def main():
# TODO(rmistry): Instead of attempting to keep these args in sync, defer to
# abandon_gerrit_cls for argument parsing.
d = 'Helper script for bulk abandoning gerrit CLs'
parser = argparse.ArgumentParser(description=d)
parser.add_argument(
'--gerrit-instance', '-g', default='https://skia-review.googlesource.com',
help='Name of the gerrit instance.')
parser.add_argument(
'--abandon-reason', '-a', default='',
help='Will be used as reason for abandoning.')
parser.add_argument(
'--last-modified-before-days', '-l', default=0,
help='If 3 is specified then all CLs that were modified after 3 days ago '
'will be returned.')
args = parser.parse_args()
go.check()
run_abandon_cls(args)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add wrapper script to call tool to bulk abandon Gerrit CLs
Makes it easy to invoke https://skia-review.googlesource.com/c/buildbot/+/275096
from the Skia repo
Change-Id: If94d506d86a2b4319c7e0a7c830d5dab27916c10
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/275693
Commit-Queue: Ravi Mistry <9fa2e7438b8cb730f96b74865492597170561628@google.com>
Reviewed-by: Eric Boren <0e499112533c8544f0505ea0d08394fb5ad7d8fa@google.com><commit_after>#!/usr/bin/env python
#
# Copyright 2020 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Bulk abandon Gerrit CLs."""
import argparse
import os
import re
import subprocess
import sys
from infra import git
from infra import go
def run_abandon_cls(args):
"""Bulk abandon Gerrit CLs."""
go.mod_download()
go.install(go.INFRA_GO+'/scripts/abandon_gerrit_cls')
subprocess.check_call([
'abandon_gerrit_cls',
'--gerrit_instance', args.gerrit_instance,
'--abandon_reason', args.abandon_reason,
'--last_modified_before_days', str(args.last_modified_before_days),
])
def main():
# TODO(rmistry): Instead of attempting to keep these args in sync, defer to
# abandon_gerrit_cls for argument parsing.
d = 'Helper script for bulk abandoning gerrit CLs'
parser = argparse.ArgumentParser(description=d)
parser.add_argument(
'--gerrit-instance', '-g', default='https://skia-review.googlesource.com',
help='Name of the gerrit instance.')
parser.add_argument(
'--abandon-reason', '-a', default='',
help='Will be used as reason for abandoning.')
parser.add_argument(
'--last-modified-before-days', '-l', default=0,
help='If 3 is specified then all CLs that were modified after 3 days ago '
'will be returned.')
args = parser.parse_args()
go.check()
run_abandon_cls(args)
if __name__ == '__main__':
main()
|
|
620db37e867b25af25e6e93b90c78c1bd7ce19c3
|
server/auvsi_suas/migrations/0006_target_blank.py
|
server/auvsi_suas/migrations/0006_target_blank.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [('auvsi_suas', '0005_target'), ]
operations = [
migrations.AlterField(
model_name='target',
name='alphanumeric',
field=models.TextField(default=b'',
blank=True), ),
migrations.AlterField(
model_name='target',
name='alphanumeric_color',
field=models.IntegerField(
blank=True,
null=True,
choices=[(1, b'white'), (2, b'black'), (3, b'gray'), (
4, b'red'), (5, b'blue'), (6, b'green'), (7, b'yellow'),
(8, b'purple'), (9, b'brown'), (10, b'orange')]), ),
migrations.AlterField(
model_name='target',
name='background_color',
field=models.IntegerField(
blank=True,
null=True,
choices=[(1, b'white'), (2, b'black'), (3, b'gray'), (
4, b'red'), (5, b'blue'), (6, b'green'), (7, b'yellow'),
(8, b'purple'), (9, b'brown'), (10, b'orange')]), ),
migrations.AlterField(
model_name='target',
name='description',
field=models.TextField(default=b'',
blank=True), ),
migrations.AlterField(
model_name='target',
name='location',
field=models.ForeignKey(blank=True,
to='auvsi_suas.GpsPosition',
null=True), ),
migrations.AlterField(
model_name='target',
name='orientation',
field=models.IntegerField(
blank=True,
null=True,
choices=[(1, b'n'), (2, b'ne'), (3, b'e'), (4, b'se'),
(5, b's'), (6, b'sw'), (7, b'w'), (8, b'nw')]), ),
migrations.AlterField(
model_name='target',
name='shape',
field=models.IntegerField(
blank=True,
null=True,
choices=[(1, b'circle'), (2, b'semicircle'), (
3, b'quarter_circle'), (4, b'triangle'), (5, b'square'), (
6, b'rectangle'), (7, b'trapezoid'), (8, b'pentagon'),
(9, b'hexagon'), (10, b'heptagon'), (11, b'octagon'),
(12, b'star'), (13, b'cross')]), ),
]
|
Add missing blank field migration
|
Add missing blank field migration
I forgot to create this migration when adding blank attributes to Target
fields.
|
Python
|
apache-2.0
|
transformation/utatuav-interop,justineaster/interop,transformation/utatuav-interop,auvsi-suas/interop,justineaster/interop,transformation/utatuav-interop,auvsi-suas/interop,justineaster/interop,transformation/utatuav-interop,justineaster/interop,auvsi-suas/interop,auvsi-suas/interop,transformation/utatuav-interop,transformation/utatuav-interop,justineaster/interop
|
Add missing blank field migration
I forgot to create this migration when adding blank attributes to Target
fields.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [('auvsi_suas', '0005_target'), ]
operations = [
migrations.AlterField(
model_name='target',
name='alphanumeric',
field=models.TextField(default=b'',
blank=True), ),
migrations.AlterField(
model_name='target',
name='alphanumeric_color',
field=models.IntegerField(
blank=True,
null=True,
choices=[(1, b'white'), (2, b'black'), (3, b'gray'), (
4, b'red'), (5, b'blue'), (6, b'green'), (7, b'yellow'),
(8, b'purple'), (9, b'brown'), (10, b'orange')]), ),
migrations.AlterField(
model_name='target',
name='background_color',
field=models.IntegerField(
blank=True,
null=True,
choices=[(1, b'white'), (2, b'black'), (3, b'gray'), (
4, b'red'), (5, b'blue'), (6, b'green'), (7, b'yellow'),
(8, b'purple'), (9, b'brown'), (10, b'orange')]), ),
migrations.AlterField(
model_name='target',
name='description',
field=models.TextField(default=b'',
blank=True), ),
migrations.AlterField(
model_name='target',
name='location',
field=models.ForeignKey(blank=True,
to='auvsi_suas.GpsPosition',
null=True), ),
migrations.AlterField(
model_name='target',
name='orientation',
field=models.IntegerField(
blank=True,
null=True,
choices=[(1, b'n'), (2, b'ne'), (3, b'e'), (4, b'se'),
(5, b's'), (6, b'sw'), (7, b'w'), (8, b'nw')]), ),
migrations.AlterField(
model_name='target',
name='shape',
field=models.IntegerField(
blank=True,
null=True,
choices=[(1, b'circle'), (2, b'semicircle'), (
3, b'quarter_circle'), (4, b'triangle'), (5, b'square'), (
6, b'rectangle'), (7, b'trapezoid'), (8, b'pentagon'),
(9, b'hexagon'), (10, b'heptagon'), (11, b'octagon'),
(12, b'star'), (13, b'cross')]), ),
]
|
<commit_before><commit_msg>Add missing blank field migration
I forgot to create this migration when adding blank attributes to Target
fields.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [('auvsi_suas', '0005_target'), ]
operations = [
migrations.AlterField(
model_name='target',
name='alphanumeric',
field=models.TextField(default=b'',
blank=True), ),
migrations.AlterField(
model_name='target',
name='alphanumeric_color',
field=models.IntegerField(
blank=True,
null=True,
choices=[(1, b'white'), (2, b'black'), (3, b'gray'), (
4, b'red'), (5, b'blue'), (6, b'green'), (7, b'yellow'),
(8, b'purple'), (9, b'brown'), (10, b'orange')]), ),
migrations.AlterField(
model_name='target',
name='background_color',
field=models.IntegerField(
blank=True,
null=True,
choices=[(1, b'white'), (2, b'black'), (3, b'gray'), (
4, b'red'), (5, b'blue'), (6, b'green'), (7, b'yellow'),
(8, b'purple'), (9, b'brown'), (10, b'orange')]), ),
migrations.AlterField(
model_name='target',
name='description',
field=models.TextField(default=b'',
blank=True), ),
migrations.AlterField(
model_name='target',
name='location',
field=models.ForeignKey(blank=True,
to='auvsi_suas.GpsPosition',
null=True), ),
migrations.AlterField(
model_name='target',
name='orientation',
field=models.IntegerField(
blank=True,
null=True,
choices=[(1, b'n'), (2, b'ne'), (3, b'e'), (4, b'se'),
(5, b's'), (6, b'sw'), (7, b'w'), (8, b'nw')]), ),
migrations.AlterField(
model_name='target',
name='shape',
field=models.IntegerField(
blank=True,
null=True,
choices=[(1, b'circle'), (2, b'semicircle'), (
3, b'quarter_circle'), (4, b'triangle'), (5, b'square'), (
6, b'rectangle'), (7, b'trapezoid'), (8, b'pentagon'),
(9, b'hexagon'), (10, b'heptagon'), (11, b'octagon'),
(12, b'star'), (13, b'cross')]), ),
]
|
Add missing blank field migration
I forgot to create this migration when adding blank attributes to Target
fields.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [('auvsi_suas', '0005_target'), ]
operations = [
migrations.AlterField(
model_name='target',
name='alphanumeric',
field=models.TextField(default=b'',
blank=True), ),
migrations.AlterField(
model_name='target',
name='alphanumeric_color',
field=models.IntegerField(
blank=True,
null=True,
choices=[(1, b'white'), (2, b'black'), (3, b'gray'), (
4, b'red'), (5, b'blue'), (6, b'green'), (7, b'yellow'),
(8, b'purple'), (9, b'brown'), (10, b'orange')]), ),
migrations.AlterField(
model_name='target',
name='background_color',
field=models.IntegerField(
blank=True,
null=True,
choices=[(1, b'white'), (2, b'black'), (3, b'gray'), (
4, b'red'), (5, b'blue'), (6, b'green'), (7, b'yellow'),
(8, b'purple'), (9, b'brown'), (10, b'orange')]), ),
migrations.AlterField(
model_name='target',
name='description',
field=models.TextField(default=b'',
blank=True), ),
migrations.AlterField(
model_name='target',
name='location',
field=models.ForeignKey(blank=True,
to='auvsi_suas.GpsPosition',
null=True), ),
migrations.AlterField(
model_name='target',
name='orientation',
field=models.IntegerField(
blank=True,
null=True,
choices=[(1, b'n'), (2, b'ne'), (3, b'e'), (4, b'se'),
(5, b's'), (6, b'sw'), (7, b'w'), (8, b'nw')]), ),
migrations.AlterField(
model_name='target',
name='shape',
field=models.IntegerField(
blank=True,
null=True,
choices=[(1, b'circle'), (2, b'semicircle'), (
3, b'quarter_circle'), (4, b'triangle'), (5, b'square'), (
6, b'rectangle'), (7, b'trapezoid'), (8, b'pentagon'),
(9, b'hexagon'), (10, b'heptagon'), (11, b'octagon'),
(12, b'star'), (13, b'cross')]), ),
]
|
<commit_before><commit_msg>Add missing blank field migration
I forgot to create this migration when adding blank attributes to Target
fields.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [('auvsi_suas', '0005_target'), ]
operations = [
migrations.AlterField(
model_name='target',
name='alphanumeric',
field=models.TextField(default=b'',
blank=True), ),
migrations.AlterField(
model_name='target',
name='alphanumeric_color',
field=models.IntegerField(
blank=True,
null=True,
choices=[(1, b'white'), (2, b'black'), (3, b'gray'), (
4, b'red'), (5, b'blue'), (6, b'green'), (7, b'yellow'),
(8, b'purple'), (9, b'brown'), (10, b'orange')]), ),
migrations.AlterField(
model_name='target',
name='background_color',
field=models.IntegerField(
blank=True,
null=True,
choices=[(1, b'white'), (2, b'black'), (3, b'gray'), (
4, b'red'), (5, b'blue'), (6, b'green'), (7, b'yellow'),
(8, b'purple'), (9, b'brown'), (10, b'orange')]), ),
migrations.AlterField(
model_name='target',
name='description',
field=models.TextField(default=b'',
blank=True), ),
migrations.AlterField(
model_name='target',
name='location',
field=models.ForeignKey(blank=True,
to='auvsi_suas.GpsPosition',
null=True), ),
migrations.AlterField(
model_name='target',
name='orientation',
field=models.IntegerField(
blank=True,
null=True,
choices=[(1, b'n'), (2, b'ne'), (3, b'e'), (4, b'se'),
(5, b's'), (6, b'sw'), (7, b'w'), (8, b'nw')]), ),
migrations.AlterField(
model_name='target',
name='shape',
field=models.IntegerField(
blank=True,
null=True,
choices=[(1, b'circle'), (2, b'semicircle'), (
3, b'quarter_circle'), (4, b'triangle'), (5, b'square'), (
6, b'rectangle'), (7, b'trapezoid'), (8, b'pentagon'),
(9, b'hexagon'), (10, b'heptagon'), (11, b'octagon'),
(12, b'star'), (13, b'cross')]), ),
]
|
|
aca86345382a3b8d2fd8822e25d44cde4859cb17
|
IPython/core/tests/test_hooks.py
|
IPython/core/tests/test_hooks.py
|
# -*- coding: utf-8 -*-
"""Tests for CommandChainDispatcher."""
from __future__ import absolute_import
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import nose.tools as nt
from IPython.core.error import TryNext
from IPython.core.hooks import CommandChainDispatcher
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
# Define two classes, one which succeeds and one which raises TryNext. Each
# sets the attribute `called` to True when it is called.
class Okay(object):
def __init__(self, message):
self.message = message
self.called = False
def __call__(self):
self.called = True
return self.message
class Fail(object):
def __init__(self, message):
self.message = message
self.called = False
def __call__(self):
self.called = True
raise TryNext(self.message)
#-----------------------------------------------------------------------------
# Test functions
#-----------------------------------------------------------------------------
def test_command_chain_dispatcher_ff():
"""Test two failing hooks"""
fail1 = Fail(u'fail1')
fail2 = Fail(u'fail2')
dp = CommandChainDispatcher([(0, fail1),
(10, fail2)])
try:
dp()
except TryNext as e:
nt.assert_equal(str(e), u'fail2')
else:
assert False, "Expected exception was not raised."
nt.assert_true(fail1.called)
nt.assert_true(fail2.called)
def test_command_chain_dispatcher_fofo():
"""Test a mixture of failing and succeeding hooks."""
fail1 = Fail(u'fail1')
fail2 = Fail(u'fail2')
okay1 = Okay(u'okay1')
okay2 = Okay(u'okay2')
dp = CommandChainDispatcher([(0, fail1),
# (5, okay1), # add this later
(10, fail2),
(15, okay2)])
dp.add(okay1, 5)
nt.assert_equal(dp(), u'okay1')
nt.assert_true(fail1.called)
nt.assert_true(okay1.called)
nt.assert_false(fail2.called)
nt.assert_false(okay2.called)
|
Add basic tests for CommandChainDispatcher
|
Add basic tests for CommandChainDispatcher
|
Python
|
bsd-3-clause
|
ipython/ipython,ipython/ipython
|
Add basic tests for CommandChainDispatcher
|
# -*- coding: utf-8 -*-
"""Tests for CommandChainDispatcher."""
from __future__ import absolute_import
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import nose.tools as nt
from IPython.core.error import TryNext
from IPython.core.hooks import CommandChainDispatcher
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
# Define two classes, one which succeeds and one which raises TryNext. Each
# sets the attribute `called` to True when it is called.
class Okay(object):
def __init__(self, message):
self.message = message
self.called = False
def __call__(self):
self.called = True
return self.message
class Fail(object):
def __init__(self, message):
self.message = message
self.called = False
def __call__(self):
self.called = True
raise TryNext(self.message)
#-----------------------------------------------------------------------------
# Test functions
#-----------------------------------------------------------------------------
def test_command_chain_dispatcher_ff():
"""Test two failing hooks"""
fail1 = Fail(u'fail1')
fail2 = Fail(u'fail2')
dp = CommandChainDispatcher([(0, fail1),
(10, fail2)])
try:
dp()
except TryNext as e:
nt.assert_equal(str(e), u'fail2')
else:
assert False, "Expected exception was not raised."
nt.assert_true(fail1.called)
nt.assert_true(fail2.called)
def test_command_chain_dispatcher_fofo():
"""Test a mixture of failing and succeeding hooks."""
fail1 = Fail(u'fail1')
fail2 = Fail(u'fail2')
okay1 = Okay(u'okay1')
okay2 = Okay(u'okay2')
dp = CommandChainDispatcher([(0, fail1),
# (5, okay1), # add this later
(10, fail2),
(15, okay2)])
dp.add(okay1, 5)
nt.assert_equal(dp(), u'okay1')
nt.assert_true(fail1.called)
nt.assert_true(okay1.called)
nt.assert_false(fail2.called)
nt.assert_false(okay2.called)
|
<commit_before><commit_msg>Add basic tests for CommandChainDispatcher<commit_after>
|
# -*- coding: utf-8 -*-
"""Tests for CommandChainDispatcher."""
from __future__ import absolute_import
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import nose.tools as nt
from IPython.core.error import TryNext
from IPython.core.hooks import CommandChainDispatcher
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
# Define two classes, one which succeeds and one which raises TryNext. Each
# sets the attribute `called` to True when it is called.
class Okay(object):
def __init__(self, message):
self.message = message
self.called = False
def __call__(self):
self.called = True
return self.message
class Fail(object):
def __init__(self, message):
self.message = message
self.called = False
def __call__(self):
self.called = True
raise TryNext(self.message)
#-----------------------------------------------------------------------------
# Test functions
#-----------------------------------------------------------------------------
def test_command_chain_dispatcher_ff():
"""Test two failing hooks"""
fail1 = Fail(u'fail1')
fail2 = Fail(u'fail2')
dp = CommandChainDispatcher([(0, fail1),
(10, fail2)])
try:
dp()
except TryNext as e:
nt.assert_equal(str(e), u'fail2')
else:
assert False, "Expected exception was not raised."
nt.assert_true(fail1.called)
nt.assert_true(fail2.called)
def test_command_chain_dispatcher_fofo():
"""Test a mixture of failing and succeeding hooks."""
fail1 = Fail(u'fail1')
fail2 = Fail(u'fail2')
okay1 = Okay(u'okay1')
okay2 = Okay(u'okay2')
dp = CommandChainDispatcher([(0, fail1),
# (5, okay1), # add this later
(10, fail2),
(15, okay2)])
dp.add(okay1, 5)
nt.assert_equal(dp(), u'okay1')
nt.assert_true(fail1.called)
nt.assert_true(okay1.called)
nt.assert_false(fail2.called)
nt.assert_false(okay2.called)
|
Add basic tests for CommandChainDispatcher# -*- coding: utf-8 -*-
"""Tests for CommandChainDispatcher."""
from __future__ import absolute_import
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import nose.tools as nt
from IPython.core.error import TryNext
from IPython.core.hooks import CommandChainDispatcher
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
# Define two classes, one which succeeds and one which raises TryNext. Each
# sets the attribute `called` to True when it is called.
class Okay(object):
def __init__(self, message):
self.message = message
self.called = False
def __call__(self):
self.called = True
return self.message
class Fail(object):
def __init__(self, message):
self.message = message
self.called = False
def __call__(self):
self.called = True
raise TryNext(self.message)
#-----------------------------------------------------------------------------
# Test functions
#-----------------------------------------------------------------------------
def test_command_chain_dispatcher_ff():
"""Test two failing hooks"""
fail1 = Fail(u'fail1')
fail2 = Fail(u'fail2')
dp = CommandChainDispatcher([(0, fail1),
(10, fail2)])
try:
dp()
except TryNext as e:
nt.assert_equal(str(e), u'fail2')
else:
assert False, "Expected exception was not raised."
nt.assert_true(fail1.called)
nt.assert_true(fail2.called)
def test_command_chain_dispatcher_fofo():
"""Test a mixture of failing and succeeding hooks."""
fail1 = Fail(u'fail1')
fail2 = Fail(u'fail2')
okay1 = Okay(u'okay1')
okay2 = Okay(u'okay2')
dp = CommandChainDispatcher([(0, fail1),
# (5, okay1), # add this later
(10, fail2),
(15, okay2)])
dp.add(okay1, 5)
nt.assert_equal(dp(), u'okay1')
nt.assert_true(fail1.called)
nt.assert_true(okay1.called)
nt.assert_false(fail2.called)
nt.assert_false(okay2.called)
|
<commit_before><commit_msg>Add basic tests for CommandChainDispatcher<commit_after># -*- coding: utf-8 -*-
"""Tests for CommandChainDispatcher."""
from __future__ import absolute_import
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import nose.tools as nt
from IPython.core.error import TryNext
from IPython.core.hooks import CommandChainDispatcher
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
# Define two classes, one which succeeds and one which raises TryNext. Each
# sets the attribute `called` to True when it is called.
class Okay(object):
def __init__(self, message):
self.message = message
self.called = False
def __call__(self):
self.called = True
return self.message
class Fail(object):
def __init__(self, message):
self.message = message
self.called = False
def __call__(self):
self.called = True
raise TryNext(self.message)
#-----------------------------------------------------------------------------
# Test functions
#-----------------------------------------------------------------------------
def test_command_chain_dispatcher_ff():
"""Test two failing hooks"""
fail1 = Fail(u'fail1')
fail2 = Fail(u'fail2')
dp = CommandChainDispatcher([(0, fail1),
(10, fail2)])
try:
dp()
except TryNext as e:
nt.assert_equal(str(e), u'fail2')
else:
assert False, "Expected exception was not raised."
nt.assert_true(fail1.called)
nt.assert_true(fail2.called)
def test_command_chain_dispatcher_fofo():
"""Test a mixture of failing and succeeding hooks."""
fail1 = Fail(u'fail1')
fail2 = Fail(u'fail2')
okay1 = Okay(u'okay1')
okay2 = Okay(u'okay2')
dp = CommandChainDispatcher([(0, fail1),
# (5, okay1), # add this later
(10, fail2),
(15, okay2)])
dp.add(okay1, 5)
nt.assert_equal(dp(), u'okay1')
nt.assert_true(fail1.called)
nt.assert_true(okay1.called)
nt.assert_false(fail2.called)
nt.assert_false(okay2.called)
|
|
caa85643122dfbc0337d1f0cae414707d93ebd90
|
comics/crawlers/supereffective.py
|
comics/crawlers/supereffective.py
|
import datetime as dt
from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Super Effective'
language = 'en'
url = 'http://www.vgcats.com/super/'
start_date = '2008-04-23'
history_capable_date = '2008-04-23'
time_zone = -5
rights = 'Scott Ramsoomair'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.url = 'http://www.vgcats.com/super/images/%(date)s.gif' % {
'date': self.pub_date.strftime('%y%m%d'),
}
|
Add crawler for 'Super Effective'
|
Add crawler for 'Super Effective'
|
Python
|
agpl-3.0
|
klette/comics,jodal/comics,datagutten/comics,jodal/comics,jodal/comics,datagutten/comics,klette/comics,datagutten/comics,klette/comics,datagutten/comics,jodal/comics
|
Add crawler for 'Super Effective'
|
import datetime as dt
from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Super Effective'
language = 'en'
url = 'http://www.vgcats.com/super/'
start_date = '2008-04-23'
history_capable_date = '2008-04-23'
time_zone = -5
rights = 'Scott Ramsoomair'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.url = 'http://www.vgcats.com/super/images/%(date)s.gif' % {
'date': self.pub_date.strftime('%y%m%d'),
}
|
<commit_before><commit_msg>Add crawler for 'Super Effective'<commit_after>
|
import datetime as dt
from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Super Effective'
language = 'en'
url = 'http://www.vgcats.com/super/'
start_date = '2008-04-23'
history_capable_date = '2008-04-23'
time_zone = -5
rights = 'Scott Ramsoomair'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.url = 'http://www.vgcats.com/super/images/%(date)s.gif' % {
'date': self.pub_date.strftime('%y%m%d'),
}
|
Add crawler for 'Super Effective'import datetime as dt
from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Super Effective'
language = 'en'
url = 'http://www.vgcats.com/super/'
start_date = '2008-04-23'
history_capable_date = '2008-04-23'
time_zone = -5
rights = 'Scott Ramsoomair'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.url = 'http://www.vgcats.com/super/images/%(date)s.gif' % {
'date': self.pub_date.strftime('%y%m%d'),
}
|
<commit_before><commit_msg>Add crawler for 'Super Effective'<commit_after>import datetime as dt
from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Super Effective'
language = 'en'
url = 'http://www.vgcats.com/super/'
start_date = '2008-04-23'
history_capable_date = '2008-04-23'
time_zone = -5
rights = 'Scott Ramsoomair'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.url = 'http://www.vgcats.com/super/images/%(date)s.gif' % {
'date': self.pub_date.strftime('%y%m%d'),
}
|
|
82fa78c397d428742883ec8e1cdc94bcd3491ad3
|
salt/auth/rest.py
|
salt/auth/rest.py
|
# -*- coding: utf-8 -*-
'''
Provide authentication using a REST call
Django auth can be defined like any other eauth module:
.. code-block:: yaml
external_auth:
rest:
^url: https://url/for/rest/call
fred:
- .*
- '@runner'
If there are entries underneath the ^url entry then they are merged with any responses
from the REST call. In the above example, assuming the REST call does not return
any additional ACLs, this will authenticate Fred via a REST call and allow him to
run any execution module and all runners.
The REST call should return a JSON object that maps to a regular eauth YAML structure
as above.
'''
# Import python libs
from __future__ import absolute_import
import logging
# Import salt libs
import salt.utils.http
log = logging.getLogger(__name__)
__virtualname__ = 'django'
def __virtual__():
return __virtualname__
def rest_auth_setup():
# Versions 1.7 and later of Django don't pull models until
# they are needed. When using framework facilities outside the
# web application container we need to run django.setup() to
# get the model definitions cached.
if '^url' in __opts__['external_auth']['rest']:
return __opts__['external_auth']['rest']['^url']
else:
return False
def auth(username, password):
'''
REST authentication
'''
url = rest_auth_setup()
data = { 'username': username, 'password': password }
# Post to the API endpoint. If 200 is returned then the result will be the ACLs
# for this user
result = salt.utils.http.query(url, method='POST', data=data)
if result['status'] == 200:
log.debug('eauth REST call returned 200: {0}'.format(result))
__opts__['external_auth']['rest'][username] = result['dict']
return True
else:
log.debug('eauth REST call failed: {0}'.format(result))
return False
|
Add simple eauth via REST
|
Add simple eauth via REST
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add simple eauth via REST
|
# -*- coding: utf-8 -*-
'''
Provide authentication using a REST call
Django auth can be defined like any other eauth module:
.. code-block:: yaml
external_auth:
rest:
^url: https://url/for/rest/call
fred:
- .*
- '@runner'
If there are entries underneath the ^url entry then they are merged with any responses
from the REST call. In the above example, assuming the REST call does not return
any additional ACLs, this will authenticate Fred via a REST call and allow him to
run any execution module and all runners.
The REST call should return a JSON object that maps to a regular eauth YAML structure
as above.
'''
# Import python libs
from __future__ import absolute_import
import logging
# Import salt libs
import salt.utils.http
log = logging.getLogger(__name__)
__virtualname__ = 'django'
def __virtual__():
return __virtualname__
def rest_auth_setup():
# Versions 1.7 and later of Django don't pull models until
# they are needed. When using framework facilities outside the
# web application container we need to run django.setup() to
# get the model definitions cached.
if '^url' in __opts__['external_auth']['rest']:
return __opts__['external_auth']['rest']['^url']
else:
return False
def auth(username, password):
'''
REST authentication
'''
url = rest_auth_setup()
data = { 'username': username, 'password': password }
# Post to the API endpoint. If 200 is returned then the result will be the ACLs
# for this user
result = salt.utils.http.query(url, method='POST', data=data)
if result['status'] == 200:
log.debug('eauth REST call returned 200: {0}'.format(result))
__opts__['external_auth']['rest'][username] = result['dict']
return True
else:
log.debug('eauth REST call failed: {0}'.format(result))
return False
|
<commit_before><commit_msg>Add simple eauth via REST<commit_after>
|
# -*- coding: utf-8 -*-
'''
Provide authentication using a REST call
Django auth can be defined like any other eauth module:
.. code-block:: yaml
external_auth:
rest:
^url: https://url/for/rest/call
fred:
- .*
- '@runner'
If there are entries underneath the ^url entry then they are merged with any responses
from the REST call. In the above example, assuming the REST call does not return
any additional ACLs, this will authenticate Fred via a REST call and allow him to
run any execution module and all runners.
The REST call should return a JSON object that maps to a regular eauth YAML structure
as above.
'''
# Import python libs
from __future__ import absolute_import
import logging
# Import salt libs
import salt.utils.http
log = logging.getLogger(__name__)
__virtualname__ = 'django'
def __virtual__():
return __virtualname__
def rest_auth_setup():
# Versions 1.7 and later of Django don't pull models until
# they are needed. When using framework facilities outside the
# web application container we need to run django.setup() to
# get the model definitions cached.
if '^url' in __opts__['external_auth']['rest']:
return __opts__['external_auth']['rest']['^url']
else:
return False
def auth(username, password):
'''
REST authentication
'''
url = rest_auth_setup()
data = { 'username': username, 'password': password }
# Post to the API endpoint. If 200 is returned then the result will be the ACLs
# for this user
result = salt.utils.http.query(url, method='POST', data=data)
if result['status'] == 200:
log.debug('eauth REST call returned 200: {0}'.format(result))
__opts__['external_auth']['rest'][username] = result['dict']
return True
else:
log.debug('eauth REST call failed: {0}'.format(result))
return False
|
Add simple eauth via REST# -*- coding: utf-8 -*-
'''
Provide authentication using a REST call
Django auth can be defined like any other eauth module:
.. code-block:: yaml
external_auth:
rest:
^url: https://url/for/rest/call
fred:
- .*
- '@runner'
If there are entries underneath the ^url entry then they are merged with any responses
from the REST call. In the above example, assuming the REST call does not return
any additional ACLs, this will authenticate Fred via a REST call and allow him to
run any execution module and all runners.
The REST call should return a JSON object that maps to a regular eauth YAML structure
as above.
'''
# Import python libs
from __future__ import absolute_import
import logging
# Import salt libs
import salt.utils.http
log = logging.getLogger(__name__)
__virtualname__ = 'django'
def __virtual__():
return __virtualname__
def rest_auth_setup():
# Versions 1.7 and later of Django don't pull models until
# they are needed. When using framework facilities outside the
# web application container we need to run django.setup() to
# get the model definitions cached.
if '^url' in __opts__['external_auth']['rest']:
return __opts__['external_auth']['rest']['^url']
else:
return False
def auth(username, password):
'''
REST authentication
'''
url = rest_auth_setup()
data = { 'username': username, 'password': password }
# Post to the API endpoint. If 200 is returned then the result will be the ACLs
# for this user
result = salt.utils.http.query(url, method='POST', data=data)
if result['status'] == 200:
log.debug('eauth REST call returned 200: {0}'.format(result))
__opts__['external_auth']['rest'][username] = result['dict']
return True
else:
log.debug('eauth REST call failed: {0}'.format(result))
return False
|
<commit_before><commit_msg>Add simple eauth via REST<commit_after># -*- coding: utf-8 -*-
'''
Provide authentication using a REST call
Django auth can be defined like any other eauth module:
.. code-block:: yaml
external_auth:
rest:
^url: https://url/for/rest/call
fred:
- .*
- '@runner'
If there are entries underneath the ^url entry then they are merged with any responses
from the REST call. In the above example, assuming the REST call does not return
any additional ACLs, this will authenticate Fred via a REST call and allow him to
run any execution module and all runners.
The REST call should return a JSON object that maps to a regular eauth YAML structure
as above.
'''
# Import python libs
from __future__ import absolute_import
import logging
# Import salt libs
import salt.utils.http
log = logging.getLogger(__name__)
__virtualname__ = 'django'
def __virtual__():
return __virtualname__
def rest_auth_setup():
# Versions 1.7 and later of Django don't pull models until
# they are needed. When using framework facilities outside the
# web application container we need to run django.setup() to
# get the model definitions cached.
if '^url' in __opts__['external_auth']['rest']:
return __opts__['external_auth']['rest']['^url']
else:
return False
def auth(username, password):
'''
REST authentication
'''
url = rest_auth_setup()
data = { 'username': username, 'password': password }
# Post to the API endpoint. If 200 is returned then the result will be the ACLs
# for this user
result = salt.utils.http.query(url, method='POST', data=data)
if result['status'] == 200:
log.debug('eauth REST call returned 200: {0}'.format(result))
__opts__['external_auth']['rest'][username] = result['dict']
return True
else:
log.debug('eauth REST call failed: {0}'.format(result))
return False
|
|
5364966528139d4f0b4d51db9c72f168547a19c5
|
construct/tests/test_repeaters.py
|
construct/tests/test_repeaters.py
|
import unittest
from construct import OptionalGreedyRepeater, UBInt8
class TestOptionalGreedyRepeater(unittest.TestCase):
def setUp(self):
self.c = OptionalGreedyRepeater(UBInt8("foo"))
def test_trivial(self):
pass
def test_empty_parse(self):
self.assertEqual(self.c.parse(""), [])
def test_parse(self):
self.assertEqual(self.c.parse("\x01\x02"), [1, 2])
def test_empty_build(self):
self.assertEqual(self.c.build([]), "")
def test_build(self):
self.assertEqual(self.c.build([1, 2]), "\x01\x02")
|
Add tests for example from OGR docs.
|
tests: Add tests for example from OGR docs.
|
Python
|
mit
|
MostAwesomeDude/construct,gkonstantyno/construct,0000-bigtree/construct,mosquito/construct,gkonstantyno/construct,riggs/construct,mosquito/construct,MostAwesomeDude/construct,riggs/construct,0000-bigtree/construct
|
tests: Add tests for example from OGR docs.
|
import unittest
from construct import OptionalGreedyRepeater, UBInt8
class TestOptionalGreedyRepeater(unittest.TestCase):
def setUp(self):
self.c = OptionalGreedyRepeater(UBInt8("foo"))
def test_trivial(self):
pass
def test_empty_parse(self):
self.assertEqual(self.c.parse(""), [])
def test_parse(self):
self.assertEqual(self.c.parse("\x01\x02"), [1, 2])
def test_empty_build(self):
self.assertEqual(self.c.build([]), "")
def test_build(self):
self.assertEqual(self.c.build([1, 2]), "\x01\x02")
|
<commit_before><commit_msg>tests: Add tests for example from OGR docs.<commit_after>
|
import unittest
from construct import OptionalGreedyRepeater, UBInt8
class TestOptionalGreedyRepeater(unittest.TestCase):
def setUp(self):
self.c = OptionalGreedyRepeater(UBInt8("foo"))
def test_trivial(self):
pass
def test_empty_parse(self):
self.assertEqual(self.c.parse(""), [])
def test_parse(self):
self.assertEqual(self.c.parse("\x01\x02"), [1, 2])
def test_empty_build(self):
self.assertEqual(self.c.build([]), "")
def test_build(self):
self.assertEqual(self.c.build([1, 2]), "\x01\x02")
|
tests: Add tests for example from OGR docs.import unittest
from construct import OptionalGreedyRepeater, UBInt8
class TestOptionalGreedyRepeater(unittest.TestCase):
def setUp(self):
self.c = OptionalGreedyRepeater(UBInt8("foo"))
def test_trivial(self):
pass
def test_empty_parse(self):
self.assertEqual(self.c.parse(""), [])
def test_parse(self):
self.assertEqual(self.c.parse("\x01\x02"), [1, 2])
def test_empty_build(self):
self.assertEqual(self.c.build([]), "")
def test_build(self):
self.assertEqual(self.c.build([1, 2]), "\x01\x02")
|
<commit_before><commit_msg>tests: Add tests for example from OGR docs.<commit_after>import unittest
from construct import OptionalGreedyRepeater, UBInt8
class TestOptionalGreedyRepeater(unittest.TestCase):
def setUp(self):
self.c = OptionalGreedyRepeater(UBInt8("foo"))
def test_trivial(self):
pass
def test_empty_parse(self):
self.assertEqual(self.c.parse(""), [])
def test_parse(self):
self.assertEqual(self.c.parse("\x01\x02"), [1, 2])
def test_empty_build(self):
self.assertEqual(self.c.build([]), "")
def test_build(self):
self.assertEqual(self.c.build([1, 2]), "\x01\x02")
|
|
b3258a47ef9fa657be12687d6e8892360c27ade1
|
test/test_usage.py
|
test/test_usage.py
|
from ccs.icd9 import ICD9
from clinvoc.icd9 import ICD9CM, ICD9PCS
from nose.tools import assert_equals
def test_icd9():
codesets = ICD9()
dx_vocab = ICD9CM()
px_vocab = ICD9PCS()
for k, v in codesets.dx_single_level_codes:
assert isinstance(k, basestring)
assert isinstance(v, set) # FIXME: Currently fails because these are lists instead of sets
for code in v:
assert_equals(code, dx_vocab.standardize(code))
# TODO: Do these type and code formatting checks for all the other components
if __name__ == '__main__':
import sys
import nose
# This code will run the test in this file.'
module_name = sys.modules[__name__].__file__
result = nose.run(argv=[sys.argv[0],
module_name,
'-s', '-v'])
|
Add usage test. Currently failing because expects sets and gets lists.
|
Add usage test. Currently failing because expects sets and gets lists.
|
Python
|
mit
|
mattlewissf/ccs
|
Add usage test. Currently failing because expects sets and gets lists.
|
from ccs.icd9 import ICD9
from clinvoc.icd9 import ICD9CM, ICD9PCS
from nose.tools import assert_equals
def test_icd9():
codesets = ICD9()
dx_vocab = ICD9CM()
px_vocab = ICD9PCS()
for k, v in codesets.dx_single_level_codes:
assert isinstance(k, basestring)
assert isinstance(v, set) # FIXME: Currently fails because these are lists instead of sets
for code in v:
assert_equals(code, dx_vocab.standardize(code))
# TODO: Do these type and code formatting checks for all the other components
if __name__ == '__main__':
import sys
import nose
# This code will run the test in this file.'
module_name = sys.modules[__name__].__file__
result = nose.run(argv=[sys.argv[0],
module_name,
'-s', '-v'])
|
<commit_before><commit_msg>Add usage test. Currently failing because expects sets and gets lists.<commit_after>
|
from ccs.icd9 import ICD9
from clinvoc.icd9 import ICD9CM, ICD9PCS
from nose.tools import assert_equals
def test_icd9():
codesets = ICD9()
dx_vocab = ICD9CM()
px_vocab = ICD9PCS()
for k, v in codesets.dx_single_level_codes:
assert isinstance(k, basestring)
assert isinstance(v, set) # FIXME: Currently fails because these are lists instead of sets
for code in v:
assert_equals(code, dx_vocab.standardize(code))
# TODO: Do these type and code formatting checks for all the other components
if __name__ == '__main__':
import sys
import nose
# This code will run the test in this file.'
module_name = sys.modules[__name__].__file__
result = nose.run(argv=[sys.argv[0],
module_name,
'-s', '-v'])
|
Add usage test. Currently failing because expects sets and gets lists.from ccs.icd9 import ICD9
from clinvoc.icd9 import ICD9CM, ICD9PCS
from nose.tools import assert_equals
def test_icd9():
codesets = ICD9()
dx_vocab = ICD9CM()
px_vocab = ICD9PCS()
for k, v in codesets.dx_single_level_codes:
assert isinstance(k, basestring)
assert isinstance(v, set) # FIXME: Currently fails because these are lists instead of sets
for code in v:
assert_equals(code, dx_vocab.standardize(code))
# TODO: Do these type and code formatting checks for all the other components
if __name__ == '__main__':
import sys
import nose
# This code will run the test in this file.'
module_name = sys.modules[__name__].__file__
result = nose.run(argv=[sys.argv[0],
module_name,
'-s', '-v'])
|
<commit_before><commit_msg>Add usage test. Currently failing because expects sets and gets lists.<commit_after>from ccs.icd9 import ICD9
from clinvoc.icd9 import ICD9CM, ICD9PCS
from nose.tools import assert_equals
def test_icd9():
codesets = ICD9()
dx_vocab = ICD9CM()
px_vocab = ICD9PCS()
for k, v in codesets.dx_single_level_codes:
assert isinstance(k, basestring)
assert isinstance(v, set) # FIXME: Currently fails because these are lists instead of sets
for code in v:
assert_equals(code, dx_vocab.standardize(code))
# TODO: Do these type and code formatting checks for all the other components
if __name__ == '__main__':
import sys
import nose
# This code will run the test in this file.'
module_name = sys.modules[__name__].__file__
result = nose.run(argv=[sys.argv[0],
module_name,
'-s', '-v'])
|
|
f75ece646685ee5f4e876d3dce962dca4859b33a
|
tests/integration/services/authentication/test_authenticate.py
|
tests/integration/services/authentication/test_authenticate.py
|
"""
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from pytest import raises
from byceps.services.authentication.exceptions import AuthenticationFailed
from byceps.services.authentication import service as authn_service
CORRECT_PASSWORD = 'opensesame'
WRONG_PASSWORD = '123456'
def test_uninitialized_user_is_rejected(make_user):
user = create_user(make_user, initialized=False)
with raises(AuthenticationFailed):
authn_service.authenticate(user.screen_name, CORRECT_PASSWORD)
def test_suspended_user_is_rejected(make_user):
user = create_user(make_user, suspended=True)
with raises(AuthenticationFailed):
authn_service.authenticate(user.screen_name, CORRECT_PASSWORD)
def test_deleted_user_is_rejected(make_user):
user = create_user(make_user, deleted=True)
with raises(AuthenticationFailed):
authn_service.authenticate(user.screen_name, CORRECT_PASSWORD)
def test_with_wrong_password_is_rejected(make_user):
user = create_user(make_user)
with raises(AuthenticationFailed):
authn_service.authenticate(user.screen_name, WRONG_PASSWORD)
def test_active_user_with_screen_name_and_correct_password_is_accepted(
make_user,
):
user = create_user(make_user)
authenticated_user = authn_service.authenticate(
user.screen_name, CORRECT_PASSWORD
)
assert authenticated_user is not None
def test_active_user_with_email_address_and_correct_password_is_accepted(
make_user,
):
user = create_user(make_user, email_address='ehrenmann@mail.test')
authenticated_user = authn_service.authenticate(
'ehrenmann@mail.test', CORRECT_PASSWORD
)
assert authenticated_user is not None
def create_user(make_user, **kwargs):
return make_user(password=CORRECT_PASSWORD, **kwargs)
|
Test user authentication against service
|
Test user authentication against service
|
Python
|
bsd-3-clause
|
homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps
|
Test user authentication against service
|
"""
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from pytest import raises
from byceps.services.authentication.exceptions import AuthenticationFailed
from byceps.services.authentication import service as authn_service
CORRECT_PASSWORD = 'opensesame'
WRONG_PASSWORD = '123456'
def test_uninitialized_user_is_rejected(make_user):
user = create_user(make_user, initialized=False)
with raises(AuthenticationFailed):
authn_service.authenticate(user.screen_name, CORRECT_PASSWORD)
def test_suspended_user_is_rejected(make_user):
user = create_user(make_user, suspended=True)
with raises(AuthenticationFailed):
authn_service.authenticate(user.screen_name, CORRECT_PASSWORD)
def test_deleted_user_is_rejected(make_user):
user = create_user(make_user, deleted=True)
with raises(AuthenticationFailed):
authn_service.authenticate(user.screen_name, CORRECT_PASSWORD)
def test_with_wrong_password_is_rejected(make_user):
user = create_user(make_user)
with raises(AuthenticationFailed):
authn_service.authenticate(user.screen_name, WRONG_PASSWORD)
def test_active_user_with_screen_name_and_correct_password_is_accepted(
make_user,
):
user = create_user(make_user)
authenticated_user = authn_service.authenticate(
user.screen_name, CORRECT_PASSWORD
)
assert authenticated_user is not None
def test_active_user_with_email_address_and_correct_password_is_accepted(
make_user,
):
user = create_user(make_user, email_address='ehrenmann@mail.test')
authenticated_user = authn_service.authenticate(
'ehrenmann@mail.test', CORRECT_PASSWORD
)
assert authenticated_user is not None
def create_user(make_user, **kwargs):
return make_user(password=CORRECT_PASSWORD, **kwargs)
|
<commit_before><commit_msg>Test user authentication against service<commit_after>
|
"""
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from pytest import raises
from byceps.services.authentication.exceptions import AuthenticationFailed
from byceps.services.authentication import service as authn_service
CORRECT_PASSWORD = 'opensesame'
WRONG_PASSWORD = '123456'
def test_uninitialized_user_is_rejected(make_user):
user = create_user(make_user, initialized=False)
with raises(AuthenticationFailed):
authn_service.authenticate(user.screen_name, CORRECT_PASSWORD)
def test_suspended_user_is_rejected(make_user):
user = create_user(make_user, suspended=True)
with raises(AuthenticationFailed):
authn_service.authenticate(user.screen_name, CORRECT_PASSWORD)
def test_deleted_user_is_rejected(make_user):
user = create_user(make_user, deleted=True)
with raises(AuthenticationFailed):
authn_service.authenticate(user.screen_name, CORRECT_PASSWORD)
def test_with_wrong_password_is_rejected(make_user):
user = create_user(make_user)
with raises(AuthenticationFailed):
authn_service.authenticate(user.screen_name, WRONG_PASSWORD)
def test_active_user_with_screen_name_and_correct_password_is_accepted(
make_user,
):
user = create_user(make_user)
authenticated_user = authn_service.authenticate(
user.screen_name, CORRECT_PASSWORD
)
assert authenticated_user is not None
def test_active_user_with_email_address_and_correct_password_is_accepted(
make_user,
):
user = create_user(make_user, email_address='ehrenmann@mail.test')
authenticated_user = authn_service.authenticate(
'ehrenmann@mail.test', CORRECT_PASSWORD
)
assert authenticated_user is not None
def create_user(make_user, **kwargs):
return make_user(password=CORRECT_PASSWORD, **kwargs)
|
Test user authentication against service"""
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from pytest import raises
from byceps.services.authentication.exceptions import AuthenticationFailed
from byceps.services.authentication import service as authn_service
CORRECT_PASSWORD = 'opensesame'
WRONG_PASSWORD = '123456'
def test_uninitialized_user_is_rejected(make_user):
user = create_user(make_user, initialized=False)
with raises(AuthenticationFailed):
authn_service.authenticate(user.screen_name, CORRECT_PASSWORD)
def test_suspended_user_is_rejected(make_user):
user = create_user(make_user, suspended=True)
with raises(AuthenticationFailed):
authn_service.authenticate(user.screen_name, CORRECT_PASSWORD)
def test_deleted_user_is_rejected(make_user):
user = create_user(make_user, deleted=True)
with raises(AuthenticationFailed):
authn_service.authenticate(user.screen_name, CORRECT_PASSWORD)
def test_with_wrong_password_is_rejected(make_user):
user = create_user(make_user)
with raises(AuthenticationFailed):
authn_service.authenticate(user.screen_name, WRONG_PASSWORD)
def test_active_user_with_screen_name_and_correct_password_is_accepted(
make_user,
):
user = create_user(make_user)
authenticated_user = authn_service.authenticate(
user.screen_name, CORRECT_PASSWORD
)
assert authenticated_user is not None
def test_active_user_with_email_address_and_correct_password_is_accepted(
make_user,
):
user = create_user(make_user, email_address='ehrenmann@mail.test')
authenticated_user = authn_service.authenticate(
'ehrenmann@mail.test', CORRECT_PASSWORD
)
assert authenticated_user is not None
def create_user(make_user, **kwargs):
return make_user(password=CORRECT_PASSWORD, **kwargs)
|
<commit_before><commit_msg>Test user authentication against service<commit_after>"""
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from pytest import raises
from byceps.services.authentication.exceptions import AuthenticationFailed
from byceps.services.authentication import service as authn_service
CORRECT_PASSWORD = 'opensesame'
WRONG_PASSWORD = '123456'
def test_uninitialized_user_is_rejected(make_user):
user = create_user(make_user, initialized=False)
with raises(AuthenticationFailed):
authn_service.authenticate(user.screen_name, CORRECT_PASSWORD)
def test_suspended_user_is_rejected(make_user):
user = create_user(make_user, suspended=True)
with raises(AuthenticationFailed):
authn_service.authenticate(user.screen_name, CORRECT_PASSWORD)
def test_deleted_user_is_rejected(make_user):
user = create_user(make_user, deleted=True)
with raises(AuthenticationFailed):
authn_service.authenticate(user.screen_name, CORRECT_PASSWORD)
def test_with_wrong_password_is_rejected(make_user):
user = create_user(make_user)
with raises(AuthenticationFailed):
authn_service.authenticate(user.screen_name, WRONG_PASSWORD)
def test_active_user_with_screen_name_and_correct_password_is_accepted(
make_user,
):
user = create_user(make_user)
authenticated_user = authn_service.authenticate(
user.screen_name, CORRECT_PASSWORD
)
assert authenticated_user is not None
def test_active_user_with_email_address_and_correct_password_is_accepted(
make_user,
):
user = create_user(make_user, email_address='ehrenmann@mail.test')
authenticated_user = authn_service.authenticate(
'ehrenmann@mail.test', CORRECT_PASSWORD
)
assert authenticated_user is not None
def create_user(make_user, **kwargs):
return make_user(password=CORRECT_PASSWORD, **kwargs)
|
|
92049964c334f9367016ed55bdb94d844fd2425d
|
python/switch_test.py
|
python/switch_test.py
|
#!/usr/bin/env python
"""
Test buttons by switching LED colors
"""
import time
from octo import Octo
from listener import ButtonListener
def main():
octo = Octo('/dev/ttyACM0')
octo.reset()
handler = ButtonHandler(octo)
listener = ButtonListener(octo, handler)
print "Press any of the Octo buttons. Press Ctrl + C to exit."
listener.run()
# This class defines methods for handling button events
#
# Method names follow this format: "button_<number>_<event>
# where number is in range 0..7 and event is either "press" or "release"
class ButtonHandler:
def __init__(self, octo):
self.octo = octo
def button_0_press(self):
self.octo.led0(255, 0, 0)
time.sleep(Octo.DELAY)
self.octo.led1(0, 255, 0)
def button_1_press(self):
self.octo.led0(0, 255, 0)
time.sleep(Octo.DELAY)
self.octo.led1(0, 0, 255)
def button_2_press(self):
self.octo.led0(0, 0, 255)
time.sleep(Octo.DELAY)
self.octo.led1(255, 255, 0)
def button_3_press(self):
self.octo.led0(255, 255, 0)
time.sleep(Octo.DELAY)
self.octo.led1(255, 0, 255)
def button_4_press(self):
self.octo.led0(255, 0, 255)
time.sleep(Octo.DELAY)
self.octo.led1(0, 255, 255)
def button_5_press(self):
self.octo.led0(0, 255, 255)
time.sleep(Octo.DELAY)
self.octo.led1(255, 255, 255)
def button_6_press(self):
self.octo.led0(255, 255, 255)
time.sleep(Octo.DELAY)
self.octo.led1(0, 0, 0)
def button_7_press(self):
self.octo.led0(0, 0, 0)
time.sleep(Octo.DELAY)
self.octo.led1(255, 0, 0)
if __name__ == '__main__':
main()
|
Add test script for switching LEDs with Teensy buttons
|
Add test script for switching LEDs with Teensy buttons
|
Python
|
mit
|
anroots/teensy-moonica
|
Add test script for switching LEDs with Teensy buttons
|
#!/usr/bin/env python
"""
Test buttons by switching LED colors
"""
import time
from octo import Octo
from listener import ButtonListener
def main():
octo = Octo('/dev/ttyACM0')
octo.reset()
handler = ButtonHandler(octo)
listener = ButtonListener(octo, handler)
print "Press any of the Octo buttons. Press Ctrl + C to exit."
listener.run()
# This class defines methods for handling button events
#
# Method names follow this format: "button_<number>_<event>
# where number is in range 0..7 and event is either "press" or "release"
class ButtonHandler:
def __init__(self, octo):
self.octo = octo
def button_0_press(self):
self.octo.led0(255, 0, 0)
time.sleep(Octo.DELAY)
self.octo.led1(0, 255, 0)
def button_1_press(self):
self.octo.led0(0, 255, 0)
time.sleep(Octo.DELAY)
self.octo.led1(0, 0, 255)
def button_2_press(self):
self.octo.led0(0, 0, 255)
time.sleep(Octo.DELAY)
self.octo.led1(255, 255, 0)
def button_3_press(self):
self.octo.led0(255, 255, 0)
time.sleep(Octo.DELAY)
self.octo.led1(255, 0, 255)
def button_4_press(self):
self.octo.led0(255, 0, 255)
time.sleep(Octo.DELAY)
self.octo.led1(0, 255, 255)
def button_5_press(self):
self.octo.led0(0, 255, 255)
time.sleep(Octo.DELAY)
self.octo.led1(255, 255, 255)
def button_6_press(self):
self.octo.led0(255, 255, 255)
time.sleep(Octo.DELAY)
self.octo.led1(0, 0, 0)
def button_7_press(self):
self.octo.led0(0, 0, 0)
time.sleep(Octo.DELAY)
self.octo.led1(255, 0, 0)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add test script for switching LEDs with Teensy buttons<commit_after>
|
#!/usr/bin/env python
"""
Test buttons by switching LED colors
"""
import time
from octo import Octo
from listener import ButtonListener
def main():
octo = Octo('/dev/ttyACM0')
octo.reset()
handler = ButtonHandler(octo)
listener = ButtonListener(octo, handler)
print "Press any of the Octo buttons. Press Ctrl + C to exit."
listener.run()
# This class defines methods for handling button events
#
# Method names follow this format: "button_<number>_<event>"
# where number is in range 0..7 and event is either "press" or "release"
class ButtonHandler:
def __init__(self, octo):
self.octo = octo
def button_0_press(self):
self.octo.led0(255, 0, 0)
time.sleep(Octo.DELAY)
self.octo.led1(0, 255, 0)
def button_1_press(self):
self.octo.led0(0, 255, 0)
time.sleep(Octo.DELAY)
self.octo.led1(0, 0, 255)
def button_2_press(self):
self.octo.led0(0, 0, 255)
time.sleep(Octo.DELAY)
self.octo.led1(255, 255, 0)
def button_3_press(self):
self.octo.led0(255, 255, 0)
time.sleep(Octo.DELAY)
self.octo.led1(255, 0, 255)
def button_4_press(self):
self.octo.led0(255, 0, 255)
time.sleep(Octo.DELAY)
self.octo.led1(0, 255, 255)
def button_5_press(self):
self.octo.led0(0, 255, 255)
time.sleep(Octo.DELAY)
self.octo.led1(255, 255, 255)
def button_6_press(self):
self.octo.led0(255, 255, 255)
time.sleep(Octo.DELAY)
self.octo.led1(0, 0, 0)
def button_7_press(self):
self.octo.led0(0, 0, 0)
time.sleep(Octo.DELAY)
self.octo.led1(255, 0, 0)
if __name__ == '__main__':
main()
|
Add test script for switching LEDs with Teensy buttons#!/usr/bin/env python
"""
Test buttons by switching LED colors
"""
import time
from octo import Octo
from listener import ButtonListener
def main():
octo = Octo('/dev/ttyACM0')
octo.reset()
handler = ButtonHandler(octo)
listener = ButtonListener(octo, handler)
print "Press any of the Octo buttons. Press Ctrl + C to exit."
listener.run()
# This class defines methods for handling button events
#
# Method names follow this format: "button_<number>_<event>"
# where number is in range 0..7 and event is either "press" or "release"
class ButtonHandler:
def __init__(self, octo):
self.octo = octo
def button_0_press(self):
self.octo.led0(255, 0, 0)
time.sleep(Octo.DELAY)
self.octo.led1(0, 255, 0)
def button_1_press(self):
self.octo.led0(0, 255, 0)
time.sleep(Octo.DELAY)
self.octo.led1(0, 0, 255)
def button_2_press(self):
self.octo.led0(0, 0, 255)
time.sleep(Octo.DELAY)
self.octo.led1(255, 255, 0)
def button_3_press(self):
self.octo.led0(255, 255, 0)
time.sleep(Octo.DELAY)
self.octo.led1(255, 0, 255)
def button_4_press(self):
self.octo.led0(255, 0, 255)
time.sleep(Octo.DELAY)
self.octo.led1(0, 255, 255)
def button_5_press(self):
self.octo.led0(0, 255, 255)
time.sleep(Octo.DELAY)
self.octo.led1(255, 255, 255)
def button_6_press(self):
self.octo.led0(255, 255, 255)
time.sleep(Octo.DELAY)
self.octo.led1(0, 0, 0)
def button_7_press(self):
self.octo.led0(0, 0, 0)
time.sleep(Octo.DELAY)
self.octo.led1(255, 0, 0)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add test script for switching LEDs with Teensy buttons<commit_after>#!/usr/bin/env python
"""
Test buttons by switching LED colors
"""
import time
from octo import Octo
from listener import ButtonListener
def main():
octo = Octo('/dev/ttyACM0')
octo.reset()
handler = ButtonHandler(octo)
listener = ButtonListener(octo, handler)
print "Press any of the Octo buttons. Press Ctrl + C to exit."
listener.run()
# This class defines methods for handling button events
#
# Method names follow this format: "button_<number>_<event>"
# where number is in range 0..7 and event is either "press" or "release"
class ButtonHandler:
def __init__(self, octo):
self.octo = octo
def button_0_press(self):
self.octo.led0(255, 0, 0)
time.sleep(Octo.DELAY)
self.octo.led1(0, 255, 0)
def button_1_press(self):
self.octo.led0(0, 255, 0)
time.sleep(Octo.DELAY)
self.octo.led1(0, 0, 255)
def button_2_press(self):
self.octo.led0(0, 0, 255)
time.sleep(Octo.DELAY)
self.octo.led1(255, 255, 0)
def button_3_press(self):
self.octo.led0(255, 255, 0)
time.sleep(Octo.DELAY)
self.octo.led1(255, 0, 255)
def button_4_press(self):
self.octo.led0(255, 0, 255)
time.sleep(Octo.DELAY)
self.octo.led1(0, 255, 255)
def button_5_press(self):
self.octo.led0(0, 255, 255)
time.sleep(Octo.DELAY)
self.octo.led1(255, 255, 255)
def button_6_press(self):
self.octo.led0(255, 255, 255)
time.sleep(Octo.DELAY)
self.octo.led1(0, 0, 0)
def button_7_press(self):
self.octo.led0(0, 0, 0)
time.sleep(Octo.DELAY)
self.octo.led1(255, 0, 0)
if __name__ == '__main__':
main()
|
|
24456b31fc68bf7d8b90c424951eb092ecb72d66
|
client.py
|
client.py
|
"""
This module defines the main interface for running dlex experiments.
"""
import db # pylint: disable=unused-import
class Client(object):
"""An object for executing CLI commands."""
def __init__(self, db_name='test.db'):
self.ddb = db.DLEXDB(db_name)
def close(self):
# type: () -> ()
"""Close the CLI object."""
self.ddb.close()
def add(self, def_name, def_path):
# type: (str, str) -> bool
"""Creates a new experiment definition
This command tries to create a new experiment. It does so by trying
to load the module at `def_path`.
Args:
def_name: name of the experiment to create
def_path: filesystem path containing the experiment (i.e. a class
inheriting from the Experiment class)
Returns:
True on success, False on failure
"""
def_id = self.ddb.insert_definition(def_name, def_path)
if def_id is None:
return False
return True
def remove(self, def_name):
# type: (str) -> bool
"""Removes an experiment definition
Args:
def_name: name of the experiment definition to remove
Returns:
True on success, False on failure
"""
return self.ddb.delete_definition(def_name)
def list(self):
# type: () -> List[Union[None, Dict[str, Union[int, str]]]]
"""Lists all experiment difinitions
Returns:
A list of all experiment definitions
"""
return self.ddb.get_definitions()
def create(self, def_name, hyperparams):
# type: (str, Dict[str, Union[int, str]]) -> int
"""Creates a new experiment from a definition
Args:
def_name: (str) name of the experiment definition
hyperparams: (dict) a dict of the hyperparams for the experiment
Returns:
The ID of the experiment
"""
return self.ddb.create_experiment(def_name, hyperparams)
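A minimal usage sketch of this interface (the database file name, experiment name, module path, and hyperparameters below are made-up examples):
# Hypothetical workflow; assumes db.DLEXDB is importable and the path points
# at a module defining an Experiment subclass.
client = Client(db_name='experiments.db')
if client.add('mnist_baseline', '/path/to/mnist_experiment.py'):
    print(client.list())  # all known experiment definitions
    exp_id = client.create('mnist_baseline', {'lr': 0.01, 'epochs': 10})
    print('created experiment', exp_id)
client.close()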
|
Add class for running CLI commands
|
Add class for running CLI commands
This commit adds a module which contains a class for running commands
which will be invoked by the command-line interface.
|
Python
|
apache-2.0
|
sagelywizard/dlex
|
Add class for running CLI commands
This commit adds a module which contains a class for running commands
which will be invoked by the command-line interface.
|
"""
This module defines the main interface for running dlex experiments.
"""
import db # pylint: disable=unused-import
class Client(object):
"""An object for executing CLI commands."""
def __init__(self, db_name='test.db'):
self.ddb = db.DLEXDB(db_name)
def close(self):
# type: () -> ()
"""Close the CLI object."""
self.ddb.close()
def add(self, def_name, def_path):
# type: (str, str) -> bool
"""Creates a new experiment definition
This command tries to create a new experiment. It does so by trying
to load the module at `def_path`.
Args:
def_name: name of the experiment to create
def_path: filesystem path containing the experiment (i.e. a class
inheriting from the Experiment class)
Returns:
True on success, False on failure
"""
def_id = self.ddb.insert_definition(def_name, def_path)
if def_id is None:
return False
return True
def remove(self, def_name):
# type: (str) -> bool
"""Removes an experiment definition
Args:
def_name: name of the experiment definition to remove
Returns:
True on success, False on failure
"""
return self.ddb.delete_definition(def_name)
def list(self):
# type: () -> List[Union[None, Dict[str, Union[int, str]]]]
"""Lists all experiment difinitions
Returns:
A list of all experiment definitions
"""
return self.ddb.get_definitions()
def create(self, def_name, hyperparams):
# type: (str, Dict[str, Union[int, str]]) -> int
"""Creates a new experiment from a definition
Args:
def_name: (str) name of the experiment definition
hyperparams: (dict) a dict of the hyperparams for the experiment
Returns:
The ID of the experiment
"""
return self.ddb.create_experiment(def_name, hyperparams)
|
<commit_before><commit_msg>Add class for running CLI commands
This commit adds a module which contains a class for running commands
which will be invoked by the command-line interface.<commit_after>
|
"""
This module defines the main interface for running dlex experiments.
"""
import db # pylint: disable=unused-import
class Client(object):
"""An object for executing CLI commands."""
def __init__(self, db_name='test.db'):
self.ddb = db.DLEXDB(db_name)
def close(self):
# type: () -> ()
"""Close the CLI object."""
self.ddb.close()
def add(self, def_name, def_path):
# type: (str, str) -> bool
"""Creates a new experiment definition
This command tries to create a new experiment. It does so by trying
to load the module at `def_path`.
Args:
def_name: name of the experiment to create
def_path: filesystem path containing the experiment (i.e. a class
inheriting from the Experiment class)
Returns:
True on success, False on failure
"""
def_id = self.ddb.insert_definition(def_name, def_path)
if def_id is None:
return False
return True
def remove(self, def_name):
# type: (str) -> bool
"""Removes an experiment definition
Args:
def_name: name of the experiment definition to remove
Returns:
True on success, False on failure
"""
return self.ddb.delete_definition(def_name)
def list(self):
# type: () -> List[Union[None, Dict[str, Union[int, str]]]]
"""Lists all experiment difinitions
Returns:
A list of all experiment definitions
"""
return self.ddb.get_definitions()
def create(self, def_name, hyperparams):
# type: (str, Dict[str, Union[int, str]]) -> int
"""Creates a new experiment from a definition
Args:
def_name: (str) name of the experiment definition
hyperparams: (dict) a dict of the hyperparams for the experiment
Returns:
The ID of the experiment
"""
return self.ddb.create_experiment(def_name, hyperparams)
|
Add class for running CLI commands
This commit adds a module which contains a class for running commands
which will be invoked by the command-line interface."""
This module defines the main interface for running dlex experiments.
"""
import db # pylint: disable=unused-import
class Client(object):
"""An object for executing CLI commands."""
def __init__(self, db_name='test.db'):
self.ddb = db.DLEXDB(db_name)
def close(self):
# type: () -> ()
"""Close the CLI object."""
self.ddb.close()
def add(self, def_name, def_path):
# type: (str, str) -> bool
"""Creates a new experiment definition
This command tries to create a new experiment. It does so by trying
to load the module at `def_path`.
Args:
def_name: name of the experiment to create
def_path: filesystem path containing the experiment (i.e. a class
inheriting from the Experiment class)
Returns:
True on success, False on failure
"""
def_id = self.ddb.insert_definition(def_name, def_path)
if def_id is None:
return False
return True
def remove(self, def_name):
# type: (str) -> bool
"""Removes an experiment definition
Args:
def_name: name of the experiment definition to remove
Returns:
True on success, False on failure
"""
return self.ddb.delete_definition(def_name)
def list(self):
# type: () -> List[Union[None, Dict[str, Union[int, str]]]]
"""Lists all experiment difinitions
Returns:
A list of all experiment definitions
"""
return self.ddb.get_definitions()
def create(self, def_name, hyperparams):
# type: (str, Dict[str, Union[int, str]]) -> int
"""Creates a new experiment from a definition
Args:
def_name: (str) name of the experiment definition
hyperparams: (dict) a dict of the hyperparams for the experiment
Returns:
The ID of the experiment
"""
return self.ddb.create_experiment(def_name, hyperparams)
|
<commit_before><commit_msg>Add class for running CLI commands
This commit adds a module which contains a class for running commands
which will be invoked by the command-line interface.<commit_after>"""
This module defines the main interface for running dlex experiments.
"""
import db # pylint: disable=unused-import
class Client(object):
"""An object for executing CLI commands."""
def __init__(self, db_name='test.db'):
self.ddb = db.DLEXDB(db_name)
def close(self):
# type: () -> ()
"""Close the CLI object."""
self.ddb.close()
def add(self, def_name, def_path):
# type: (str, str) -> bool
"""Creates a new experiment definition
This command tries to create a new experiment. It does so by trying
to load the module at `def_path`.
Args:
def_name: name of the experiment to create
def_path: filesystem path containing the experiment (i.e. a class
inheriting from the Experiment class)
Returns:
True on success, False on failure
"""
def_id = self.ddb.insert_definition(def_name, def_path)
if def_id is None:
return False
return True
def remove(self, def_name):
# type: (str) -> bool
"""Removes an experiment definition
Args:
def_name: name of the experiment definition to remove
Returns:
True on success, False on failure
"""
return self.ddb.delete_definition(def_name)
def list(self):
# type: () -> List[Union[None, Dict[str, Union[int, str]]]]
"""Lists all experiment difinitions
Returns:
A list of all experiment definitions
"""
return self.ddb.get_definitions()
def create(self, def_name, hyperparams):
# type: (str, Dict[str, Union[int, str]]) -> int
"""Creates a new experiment from a definition
Args:
def_name: (str) name of the experiment definition
hyperparams: (dict) a dict of the hyperparams for the experiment
Returns:
The ID of the experiment
"""
return self.ddb.create_experiment(def_name, hyperparams)
|
|
cf2db79a4f1d93ea6015931cdcf58c9e09cc8bfb
|
imap_cli/tests/test_search.py
|
imap_cli/tests/test_search.py
|
# -*- coding: utf-8 -*-
"""Test helpers"""
import imaplib
import unittest
from imap_cli import config
from imap_cli import list_mail
from imap_cli import tests
class HelpersTest(unittest.TestCase):
def setUp(self):
self.ctx = config.new_context_from_file('~/.config/imap-cli')
imaplib.IMAP4_SSL = tests.ImapConnectionMock()
def test_basic_search(self):
self.ctx.mail_account = imaplib.IMAP4_SSL()
self.ctx.mail_account.login()
for mail_info in list_mail.list_mail(self.ctx, directory='INBOX'):
assert mail_info == {
'date': 'Tue, 03 Jan 1989 09:42:34 +0200',
'flags': ['\\Seen', 'NonJunk'],
'mail_id': '1',
'mail_from': 'exampleFrom <example@from.org>',
'subject': u'Mocking IMAP Protocols',
'to': 'exampleTo <example@to.org>',
}
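The ImapConnectionMock used above is not reproduced here; the idea is an object that can stand in for imaplib.IMAP4_SSL so that constructing and logging in to a "connection" needs no server. A rough sketch of such a stand-in (illustrative only, not the project's actual mock):
class FakeImapConnectionMock(object):
    def __call__(self, *args, **kwargs):
        # imaplib.IMAP4_SSL(...) now returns this same object.
        return self
    def login(self, *args, **kwargs):
        # Pretend authentication always succeeds.
        return 'OK', ['Logged in']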
|
Add test for basic search
|
Add test for basic search
|
Python
|
mit
|
Gentux/imap-cli,Gentux/imap-cli
|
Add test for basic search
|
# -*- coding: utf-8 -*-
"""Test helpers"""
import imaplib
import unittest
from imap_cli import config
from imap_cli import list_mail
from imap_cli import tests
class HelpersTest(unittest.TestCase):
def setUp(self):
self.ctx = config.new_context_from_file('~/.config/imap-cli')
imaplib.IMAP4_SSL = tests.ImapConnectionMock()
def test_basic_search(self):
self.ctx.mail_account = imaplib.IMAP4_SSL()
self.ctx.mail_account.login()
for mail_info in list_mail.list_mail(self.ctx, directory='INBOX'):
assert mail_info == {
'date': 'Tue, 03 Jan 1989 09:42:34 +0200',
'flags': ['\\Seen', 'NonJunk'],
'mail_id': '1',
'mail_from': 'exampleFrom <example@from.org>',
'subject': u'Mocking IMAP Protocols',
'to': 'exampleTo <example@to.org>',
}
|
<commit_before><commit_msg>Add test for basic search<commit_after>
|
# -*- coding: utf-8 -*-
"""Test helpers"""
import imaplib
import unittest
from imap_cli import config
from imap_cli import list_mail
from imap_cli import tests
class HelpersTest(unittest.TestCase):
def setUp(self):
self.ctx = config.new_context_from_file('~/.config/imap-cli')
imaplib.IMAP4_SSL = tests.ImapConnectionMock()
def test_basic_search(self):
self.ctx.mail_account = imaplib.IMAP4_SSL()
self.ctx.mail_account.login()
for mail_info in list_mail.list_mail(self.ctx, directory='INBOX'):
assert mail_info == {
'date': 'Tue, 03 Jan 1989 09:42:34 +0200',
'flags': ['\\Seen', 'NonJunk'],
'mail_id': '1',
'mail_from': 'exampleFrom <example@from.org>',
'subject': u'Mocking IMAP Protocols',
'to': 'exampleTo <example@to.org>',
}
|
Add test for basic search# -*- coding: utf-8 -*-
"""Test helpers"""
import imaplib
import unittest
from imap_cli import config
from imap_cli import list_mail
from imap_cli import tests
class HelpersTest(unittest.TestCase):
def setUp(self):
self.ctx = config.new_context_from_file('~/.config/imap-cli')
imaplib.IMAP4_SSL = tests.ImapConnectionMock()
def test_basic_search(self):
self.ctx.mail_account = imaplib.IMAP4_SSL()
self.ctx.mail_account.login()
for mail_info in list_mail.list_mail(self.ctx, directory='INBOX'):
assert mail_info == {
'date': 'Tue, 03 Jan 1989 09:42:34 +0200',
'flags': ['\\Seen', 'NonJunk'],
'mail_id': '1',
'mail_from': 'exampleFrom <example@from.org>',
'subject': u'Mocking IMAP Protocols',
'to': 'exampleTo <example@to.org>',
}
|
<commit_before><commit_msg>Add test for basic search<commit_after># -*- coding: utf-8 -*-
"""Test helpers"""
import imaplib
import unittest
from imap_cli import config
from imap_cli import list_mail
from imap_cli import tests
class HelpersTest(unittest.TestCase):
def setUp(self):
self.ctx = config.new_context_from_file('~/.config/imap-cli')
imaplib.IMAP4_SSL = tests.ImapConnectionMock()
def test_basic_search(self):
self.ctx.mail_account = imaplib.IMAP4_SSL()
self.ctx.mail_account.login()
for mail_info in list_mail.list_mail(self.ctx, directory='INBOX'):
assert mail_info == {
'date': 'Tue, 03 Jan 1989 09:42:34 +0200',
'flags': ['\\Seen', 'NonJunk'],
'mail_id': '1',
'mail_from': 'exampleFrom <example@from.org>',
'subject': u'Mocking IMAP Protocols',
'to': 'exampleTo <example@to.org>',
}
|
|
1a67e1a5b1e6830df0667689a832dea0b9460656
|
CodeFights/evenDigitsOnly.py
|
CodeFights/evenDigitsOnly.py
|
#!/usr/local/bin/python
# Code Fights Even Digits Only Problem
def evenDigitsOnly(n):
return sum(map(lambda d: int(d) % 2, str(n))) == 0
# Alternative solution
# return all([int(i) % 2 == 0 for i in str(n)])
def main():
tests = [
[248622, True],
[642386, False],
[248842, True],
[1, False],
[8, True],
[2462487, False],
[468402800, True],
[2468428, True],
[5468428, False],
[7468428, False]
]
for t in tests:
res = evenDigitsOnly(t[0])
if t[1] == res:
print("PASSED: evenDigitsOnly({}) returned {}"
.format(t[0], res))
else:
print("FAILED: evenDigitsOnly({}) returned {}, answer: {}"
.format(t[0], res, t[1]))
if __name__ == '__main__':
main()
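The same check also works with plain integer arithmetic, avoiding the string conversion; a sketch of that equivalent variant:
def even_digits_only_arithmetic(n):
    # Peel digits off the right end and fail on the first odd one.
    while n > 0:
        n, digit = divmod(n, 10)
        if digit % 2 != 0:
            return False
    return True
assert even_digits_only_arithmetic(248622) is True
assert even_digits_only_arithmetic(642386) is False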
|
Solve Code Fights even digits only problem
|
Solve Code Fights even digits only problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights even digits only problem
|
#!/usr/local/bin/python
# Code Fights Even Digits Only Problem
def evenDigitsOnly(n):
return sum(map(lambda d: int(d) % 2, str(n))) == 0
# Alternative solution
# return all([int(i) % 2 == 0 for i in str(n)])
def main():
tests = [
[248622, True],
[642386, False],
[248842, True],
[1, False],
[8, True],
[2462487, False],
[468402800, True],
[2468428, True],
[5468428, False],
[7468428, False]
]
for t in tests:
res = evenDigitsOnly(t[0])
if t[1] == res:
print("PASSED: evenDigitsOnly({}) returned {}"
.format(t[0], res))
else:
print("FAILED: evenDigitsOnly({}) returned {}, answer: {}"
.format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights even digits only problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Even Digits Only Problem
def evenDigitsOnly(n):
return sum(map(lambda d: int(d) % 2, str(n))) == 0
# Alternative solution
# return all([int(i) % 2 == 0 for i in str(n)])
def main():
tests = [
[248622, True],
[642386, False],
[248842, True],
[1, False],
[8, True],
[2462487, False],
[468402800, True],
[2468428, True],
[5468428, False],
[7468428, False]
]
for t in tests:
res = evenDigitsOnly(t[0])
if t[1] == res:
print("PASSED: evenDigitsOnly({}) returned {}"
.format(t[0], res))
else:
print("FAILED: evenDigitsOnly({}) returned {}, answer: {}"
.format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
Solve Code Fights even digits only problem#!/usr/local/bin/python
# Code Fights Even Digits Only Problem
def evenDigitsOnly(n):
return sum(map(lambda d: int(d) % 2, str(n))) == 0
# Alternative solution
# return all([int(i) % 2 == 0 for i in str(n)])
def main():
tests = [
[248622, True],
[642386, False],
[248842, True],
[1, False],
[8, True],
[2462487, False],
[468402800, True],
[2468428, True],
[5468428, False],
[7468428, False]
]
for t in tests:
res = evenDigitsOnly(t[0])
if t[1] == res:
print("PASSED: evenDigitsOnly({}) returned {}"
.format(t[0], res))
else:
print("FAILED: evenDigitsOnly({}) returned {}, answer: {}"
.format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights even digits only problem<commit_after>#!/usr/local/bin/python
# Code Fights Even Digits Only Problem
def evenDigitsOnly(n):
return sum(map(lambda d: int(d) % 2, str(n))) == 0
# Alternative solution
# return all([int(i) % 2 == 0 for i in str(n)])
def main():
tests = [
[248622, True],
[642386, False],
[248842, True],
[1, False],
[8, True],
[2462487, False],
[468402800, True],
[2468428, True],
[5468428, False],
[7468428, False]
]
for t in tests:
res = evenDigitsOnly(t[0])
if t[1] == res:
print("PASSED: evenDigitsOnly({}) returned {}"
.format(t[0], res))
else:
print("FAILED: evenDigitsOnly({}) returned {}, answer: {}"
.format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
|
da66d477986b2ddc6c80f7fb0e59c93b2182c576
|
visualize_trajectories.py
|
visualize_trajectories.py
|
import argparse
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from gui.grid_image_visualizer import GridImageVisualizer
from gui.arrow_plotter import ArrowPlotter
import utils
def main():
parser = argparse.ArgumentParser()
parser.add_argument('data_fname', type=str, help='file name of data container')
parser.add_argument('--steps', '-t', type=int, default=10)
args = parser.parse_args()
container = utils.container.ImageDataContainer(args.data_fname)
environment_config = container.get_info('environment_config')
action_space = utils.config.from_config(environment_config['action_space'])
fig = plt.figure(figsize=(2*args.steps, 2), frameon=False, tight_layout=True)
gs = gridspec.GridSpec(2, args.steps)
image_visualizer = GridImageVisualizer(fig, gs[0, :], args.steps, rows=1)
labels = ['pan', 'tilt']
limits = [action_space.low, action_space.high]
arrow_plotters = [ArrowPlotter(fig, gs[1, i], labels, limits) for i in range(args.steps)]
plt.show(block=False)
num_trajs, num_steps = container.get_data_shape('action')
assert container.get_data_shape('state') == (num_trajs, num_steps + 1)
assert num_steps % args.steps == 0
for traj_iter in range(num_trajs):
images = []
actions = []
for step_iter in range(num_steps):
image, action = container.get_datum(traj_iter, step_iter, ['image', 'action'])
images.append(image.copy())
actions.append(action.copy())
if len(images) == args.steps:
image_visualizer.update(images)
for action, arrow_plotter in zip(actions, arrow_plotters):
arrow_plotter.update(action)
import IPython as ipy; ipy.embed()
images = []
actions = []
container.close()
if __name__ == "__main__":
main()
|
Add script to visualize trajectories images and actions
|
Add script to visualize trajectories images and actions
|
Python
|
mit
|
alexlee-gk/visual_dynamics
|
Add script to visualize trajectories images and actions
|
import argparse
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from gui.grid_image_visualizer import GridImageVisualizer
from gui.arrow_plotter import ArrowPlotter
import utils
def main():
parser = argparse.ArgumentParser()
parser.add_argument('data_fname', type=str, help='file name of data container')
parser.add_argument('--steps', '-t', type=int, default=10)
args = parser.parse_args()
container = utils.container.ImageDataContainer(args.data_fname)
environment_config = container.get_info('environment_config')
action_space = utils.config.from_config(environment_config['action_space'])
fig = plt.figure(figsize=(2*args.steps, 2), frameon=False, tight_layout=True)
gs = gridspec.GridSpec(2, args.steps)
image_visualizer = GridImageVisualizer(fig, gs[0, :], args.steps, rows=1)
labels = ['pan', 'tilt']
limits = [action_space.low, action_space.high]
arrow_plotters = [ArrowPlotter(fig, gs[1, i], labels, limits) for i in range(args.steps)]
plt.show(block=False)
num_trajs, num_steps = container.get_data_shape('action')
assert container.get_data_shape('state') == (num_trajs, num_steps + 1)
assert num_steps % args.steps == 0
for traj_iter in range(num_trajs):
images = []
actions = []
for step_iter in range(num_steps):
image, action = container.get_datum(traj_iter, step_iter, ['image', 'action'])
images.append(image.copy())
actions.append(action.copy())
if len(images) == args.steps:
image_visualizer.update(images)
for action, arrow_plotter in zip(actions, arrow_plotters):
arrow_plotter.update(action)
import IPython as ipy; ipy.embed()
images = []
actions = []
container.close()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to visualize trajectories images and actions<commit_after>
|
import argparse
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from gui.grid_image_visualizer import GridImageVisualizer
from gui.arrow_plotter import ArrowPlotter
import utils
def main():
parser = argparse.ArgumentParser()
parser.add_argument('data_fname', type=str, help='file name of data container')
parser.add_argument('--steps', '-t', type=int, default=10)
args = parser.parse_args()
container = utils.container.ImageDataContainer(args.data_fname)
environment_config = container.get_info('environment_config')
action_space = utils.config.from_config(environment_config['action_space'])
fig = plt.figure(figsize=(2*args.steps, 2), frameon=False, tight_layout=True)
gs = gridspec.GridSpec(2, args.steps)
image_visualizer = GridImageVisualizer(fig, gs[0, :], args.steps, rows=1)
labels = ['pan', 'tilt']
limits = [action_space.low, action_space.high]
arrow_plotters = [ArrowPlotter(fig, gs[1, i], labels, limits) for i in range(args.steps)]
plt.show(block=False)
num_trajs, num_steps = container.get_data_shape('action')
assert container.get_data_shape('state') == (num_trajs, num_steps + 1)
assert num_steps % args.steps == 0
for traj_iter in range(num_trajs):
images = []
actions = []
for step_iter in range(num_steps):
image, action = container.get_datum(traj_iter, step_iter, ['image', 'action'])
images.append(image.copy())
actions.append(action.copy())
if len(images) == args.steps:
image_visualizer.update(images)
for action, arrow_plotter in zip(actions, arrow_plotters):
arrow_plotter.update(action)
import IPython as ipy; ipy.embed()
images = []
actions = []
container.close()
if __name__ == "__main__":
main()
|
Add script to visualize trajectories images and actionsimport argparse
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from gui.grid_image_visualizer import GridImageVisualizer
from gui.arrow_plotter import ArrowPlotter
import utils
def main():
parser = argparse.ArgumentParser()
parser.add_argument('data_fname', type=str, help='file name of data container')
parser.add_argument('--steps', '-t', type=int, default=10)
args = parser.parse_args()
container = utils.container.ImageDataContainer(args.data_fname)
environment_config = container.get_info('environment_config')
action_space = utils.config.from_config(environment_config['action_space'])
fig = plt.figure(figsize=(2*args.steps, 2), frameon=False, tight_layout=True)
gs = gridspec.GridSpec(2, args.steps)
image_visualizer = GridImageVisualizer(fig, gs[0, :], args.steps, rows=1)
labels = ['pan', 'tilt']
limits = [action_space.low, action_space.high]
arrow_plotters = [ArrowPlotter(fig, gs[1, i], labels, limits) for i in range(args.steps)]
plt.show(block=False)
num_trajs, num_steps = container.get_data_shape('action')
assert container.get_data_shape('state') == (num_trajs, num_steps + 1)
assert num_steps % args.steps == 0
for traj_iter in range(num_trajs):
images = []
actions = []
for step_iter in range(num_steps):
image, action = container.get_datum(traj_iter, step_iter, ['image', 'action'])
images.append(image.copy())
actions.append(action.copy())
if len(images) == args.steps:
image_visualizer.update(images)
for action, arrow_plotter in zip(actions, arrow_plotters):
arrow_plotter.update(action)
import IPython as ipy; ipy.embed()
images = []
actions = []
container.close()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to visualize trajectories images and actions<commit_after>import argparse
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from gui.grid_image_visualizer import GridImageVisualizer
from gui.arrow_plotter import ArrowPlotter
import utils
def main():
parser = argparse.ArgumentParser()
parser.add_argument('data_fname', type=str, help='file name of data container')
parser.add_argument('--steps', '-t', type=int, default=10)
args = parser.parse_args()
container = utils.container.ImageDataContainer(args.data_fname)
environment_config = container.get_info('environment_config')
action_space = utils.config.from_config(environment_config['action_space'])
fig = plt.figure(figsize=(2*args.steps, 2), frameon=False, tight_layout=True)
gs = gridspec.GridSpec(2, args.steps)
image_visualizer = GridImageVisualizer(fig, gs[0, :], args.steps, rows=1)
labels = ['pan', 'tilt']
limits = [action_space.low, action_space.high]
arrow_plotters = [ArrowPlotter(fig, gs[1, i], labels, limits) for i in range(args.steps)]
plt.show(block=False)
num_trajs, num_steps = container.get_data_shape('action')
assert container.get_data_shape('state') == (num_trajs, num_steps + 1)
assert num_steps % args.steps == 0
for traj_iter in range(num_trajs):
images = []
actions = []
for step_iter in range(num_steps):
image, action = container.get_datum(traj_iter, step_iter, ['image', 'action'])
images.append(image.copy())
actions.append(action.copy())
if len(images) == args.steps:
image_visualizer.update(images)
for action, arrow_plotter in zip(actions, arrow_plotters):
arrow_plotter.update(action)
import IPython as ipy; ipy.embed()
images = []
actions = []
container.close()
if __name__ == "__main__":
main()
|
|
1a534acf6038b35c8bba125c277d349ec967d5bd
|
lc0246_strobogrammatic_number.py
|
lc0246_strobogrammatic_number.py
|
"""Leetcode 246. Strobogrammatic Number
Easy
URL: https://leetcode.com/problems/strobogrammatic-number/
A strobogrammatic number is a number that looks the same when rotated 180 degrees (looked at upside down).
Write a function to determine if a number is strobogrammatic. The number is
represented as a string.
Example 1:
Input: "69"
Output: true
Example 2:
Input: "88"
Output: true
Example 3:
Input: "962"
Output: false
"""
class Solution(object):
def isStrobogrammatic(self, num):
"""
:type num: str
:rtype: bool
"""
pass
def main():
# Output: true
num = "69"
print Solution().isStrobogrammatic(num)
# Output: true
num = "88"
print Solution().isStrobogrammatic(num)
# Output: false
num = "962"
print Solution().isStrobogrammatic(num)
if __name__ == '__main__':
main()
|
"""Leetcode 246. Strobogrammatic Number
Easy
URL: https://leetcode.com/problems/strobogrammatic-number/
A strobogrammatic number is a number that looks the same when rotated 180 degrees (looked at upside down).
Write a function to determine if a number is strobogrammatic. The number is
represented as a string.
Example 1:
Input: "69"
Output: true
Example 2:
Input: "88"
Output: true
Example 3:
Input: "962"
Output: false
"""
class SolutionMapDictIter(object):
def isStrobogrammatic(self, num):
"""
:type num: str
:rtype: bool
Time complexity: O(n).
Space complexity: O(n).
"""
# Reverse num.
rev_num = num[::-1]
        # Map each digit to its rotated counterpart, or a space if it has none.
map_d = {
'0': '0',
'1': '1',
'6': '9',
'8': '8',
'9': '6'
}
mapped_num_ls = []
for n in rev_num:
if n in map_d:
mapped_num_ls.append(map_d[n])
else:
mapped_num_ls.append(' ')
mapped_num = ''.join(mapped_num_ls)
# Check if strobogrammatic.
return mapped_num == num
def main():
# Output: true
num = "69"
print SolutionMapDictIter().isStrobogrammatic(num)
# Output: true
num = "88"
print SolutionMapDictIter().isStrobogrammatic(num)
# Output: false
num = "962"
print SolutionMapDictIter().isStrobogrammatic(num)
# Output: false
num = "2"
print SolutionMapDictIter().isStrobogrammatic(num)
if __name__ == '__main__':
main()
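An equivalent check can be done with two pointers and O(1) extra space, comparing digits from both ends against the rotation map; a sketch of that variant (not the solution above):
def is_strobogrammatic_two_pointers(num):
    # Digit -> digit it becomes after a 180 degree rotation.
    rotated = {'0': '0', '1': '1', '6': '9', '8': '8', '9': '6'}
    left, right = 0, len(num) - 1
    while left <= right:
        if num[left] not in rotated or rotated[num[left]] != num[right]:
            return False
        left += 1
        right -= 1
    return True
assert is_strobogrammatic_two_pointers("69") is True
assert is_strobogrammatic_two_pointers("88") is True
assert is_strobogrammatic_two_pointers("962") is False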
|
Complete map dict sol w/ time/space complexity
|
Complete map dict sol w/ time/space complexity
|
Python
|
bsd-2-clause
|
bowen0701/algorithms_data_structures
|
"""Leetcode 246. Strobogrammatic Number
Easy
URL: https://leetcode.com/problems/strobogrammatic-number/
A strobogrammatic number is a number that looks the same when rotated 180 degrees (looked at upside down).
Write a function to determine if a number is strobogrammatic. The number is
represented as a string.
Example 1:
Input: "69"
Output: true
Example 2:
Input: "88"
Output: true
Example 3:
Input: "962"
Output: false
"""
class Solution(object):
def isStrobogrammatic(self, num):
"""
:type num: str
:rtype: bool
"""
pass
def main():
# Output: true
num = "69"
print Solution().isStrobogrammatic(num)
# Output: true
num = "88"
print Solution().isStrobogrammatic(num)
# Output: false
num = "962"
print Solution().isStrobogrammatic(num)
if __name__ == '__main__':
main()
Complete map dict sol w/ time/space complexity
|
"""Leetcode 246. Strobogrammatic Number
Easy
URL: https://leetcode.com/problems/strobogrammatic-number/
A strobogrammatic number is a number that looks the same when rotated 180 degrees (looked at upside down).
Write a function to determine if a number is strobogrammatic. The number is
represented as a string.
Example 1:
Input: "69"
Output: true
Example 2:
Input: "88"
Output: true
Example 3:
Input: "962"
Output: false
"""
class SolutionMapDictIter(object):
def isStrobogrammatic(self, num):
"""
:type num: str
:rtype: bool
Time complexity: O(n).
Space complexity: O(n).
"""
# Reverse num.
rev_num = num[::-1]
        # Map each digit to its rotated counterpart, or a space if it has none.
map_d = {
'0': '0',
'1': '1',
'6': '9',
'8': '8',
'9': '6'
}
mapped_num_ls = []
for n in rev_num:
if n in map_d:
mapped_num_ls.append(map_d[n])
else:
mapped_num_ls.append(' ')
mapped_num = ''.join(mapped_num_ls)
# Check if strobogrammatic.
return mapped_num == num
def main():
# Output: true
num = "69"
print SolutionMapDictIter().isStrobogrammatic(num)
# Output: true
num = "88"
print SolutionMapDictIter().isStrobogrammatic(num)
# Output: false
num = "962"
print SolutionMapDictIter().isStrobogrammatic(num)
# Output: false
num = "2"
print SolutionMapDictIter().isStrobogrammatic(num)
if __name__ == '__main__':
main()
|
<commit_before>"""Leetcode 246. Strobogrammatic Number
Easy
URL: https://leetcode.com/problems/strobogrammatic-number/
A strobogrammatic number is a number that looks the same when rotated 180 degrees (looked at upside down).
Write a function to determine if a number is strobogrammatic. The number is
represented as a string.
Example 1:
Input: "69"
Output: true
Example 2:
Input: "88"
Output: true
Example 3:
Input: "962"
Output: false
"""
class Solution(object):
def isStrobogrammatic(self, num):
"""
:type num: str
:rtype: bool
"""
pass
def main():
# Output: true
num = "69"
print Solution().isStrobogrammatic(num)
# Output: true
num = "88"
print Solution().isStrobogrammatic(num)
# Output: false
num = "962"
print Solution().isStrobogrammatic(num)
if __name__ == '__main__':
main()
<commit_msg>Complete map dict sol w/ time/space complexity<commit_after>
|
"""Leetcode 246. Strobogrammatic Number
Easy
URL: https://leetcode.com/problems/strobogrammatic-number/
A strobogrammatic number is a number that looks the same when rotated 180 degrees (looked at upside down).
Write a function to determine if a number is strobogrammatic. The number is
represented as a string.
Example 1:
Input: "69"
Output: true
Example 2:
Input: "88"
Output: true
Example 3:
Input: "962"
Output: false
"""
class SolutionMapDictIter(object):
def isStrobogrammatic(self, num):
"""
:type num: str
:rtype: bool
Time complexity: O(n).
Space complexity: O(n).
"""
# Reverse num.
rev_num = num[::-1]
        # Map each digit to its rotated counterpart, or a space if it has none.
map_d = {
'0': '0',
'1': '1',
'6': '9',
'8': '8',
'9': '6'
}
mapped_num_ls = []
for n in rev_num:
if n in map_d:
mapped_num_ls.append(map_d[n])
else:
mapped_num_ls.append(' ')
mapped_num = ''.join(mapped_num_ls)
# Check if strobogrammatic.
return mapped_num == num
def main():
# Output: true
num = "69"
print SolutionMapDictIter().isStrobogrammatic(num)
# Output: true
num = "88"
print SolutionMapDictIter().isStrobogrammatic(num)
# Output: false
num = "962"
print SolutionMapDictIter().isStrobogrammatic(num)
# Output: false
num = "2"
print SolutionMapDictIter().isStrobogrammatic(num)
if __name__ == '__main__':
main()
|
"""Leetcode 246. Strobogrammatic Number
Easy
URL: https://leetcode.com/problems/strobogrammatic-number/
A strobogrammatic number is a number that looks the same when rotated 180 degrees (looked at upside down).
Write a function to determine if a number is strobogrammatic. The number is
represented as a string.
Example 1:
Input: "69"
Output: true
Example 2:
Input: "88"
Output: true
Example 3:
Input: "962"
Output: false
"""
class Solution(object):
def isStrobogrammatic(self, num):
"""
:type num: str
:rtype: bool
"""
pass
def main():
# Output: true
num = "69"
print Solution().isStrobogrammatic(num)
# Output: true
num = "88"
print Solution().isStrobogrammatic(num)
# Output: false
num = "962"
print Solution().isStrobogrammatic(num)
if __name__ == '__main__':
main()
Complete map dict sol w/ time/space complexity"""Leetcode 246. Strobogrammatic Number
Easy
URL: https://leetcode.com/problems/strobogrammatic-number/
A strobogrammatic number is a number that looks the same when rotated 180 degrees (looked at upside down).
Write a function to determine if a number is strobogrammatic. The number is
represented as a string.
Example 1:
Input: "69"
Output: true
Example 2:
Input: "88"
Output: true
Example 3:
Input: "962"
Output: false
"""
class SolutionMapDictIter(object):
def isStrobogrammatic(self, num):
"""
:type num: str
:rtype: bool
Time complexity: O(n).
Space complexity: O(n).
"""
# Reverse num.
rev_num = num[::-1]
        # Map each digit to its rotated counterpart, or a space if it has none.
map_d = {
'0': '0',
'1': '1',
'6': '9',
'8': '8',
'9': '6'
}
mapped_num_ls = []
for n in rev_num:
if n in map_d:
mapped_num_ls.append(map_d[n])
else:
mapped_num_ls.append(' ')
mapped_num = ''.join(mapped_num_ls)
# Check if strobogrammatic.
return mapped_num == num
def main():
# Output: true
num = "69"
print SolutionMapDictIter().isStrobogrammatic(num)
# Output: true
num = "88"
print SolutionMapDictIter().isStrobogrammatic(num)
# Output: false
num = "962"
print SolutionMapDictIter().isStrobogrammatic(num)
# Output: false
num = "2"
print SolutionMapDictIter().isStrobogrammatic(num)
if __name__ == '__main__':
main()
|
<commit_before>"""Leetcode 246. Strobogrammatic Number
Easy
URL: https://leetcode.com/problems/strobogrammatic-number/
A strobogrammatic number is a number that looks the same when rotated 180 degrees (looked at upside down).
Write a function to determine if a number is strobogrammatic. The number is
represented as a string.
Example 1:
Input: "69"
Output: true
Example 2:
Input: "88"
Output: true
Example 3:
Input: "962"
Output: false
"""
class Solution(object):
def isStrobogrammatic(self, num):
"""
:type num: str
:rtype: bool
"""
pass
def main():
# Output: true
num = "69"
print Solution().isStrobogrammatic(num)
# Output: true
num = "88"
print Solution().isStrobogrammatic(num)
# Output: false
num = "962"
print Solution().isStrobogrammatic(num)
if __name__ == '__main__':
main()
<commit_msg>Complete map dict sol w/ time/space complexity<commit_after>"""Leetcode 246. Strobogrammatic Number
Easy
URL: https://leetcode.com/problems/strobogrammatic-number/
A strobogrammatic number is a number that looks the same when rotated 180 degrees (looked at upside down).
Write a function to determine if a number is strobogrammatic. The number is
represented as a string.
Example 1:
Input: "69"
Output: true
Example 2:
Input: "88"
Output: true
Example 3:
Input: "962"
Output: false
"""
class SolutionMapDictIter(object):
def isStrobogrammatic(self, num):
"""
:type num: str
:rtype: bool
Time complexity: O(n).
Space complexity: O(n).
"""
# Reverse num.
rev_num = num[::-1]
        # Map each digit to its rotated counterpart, or a space if it has none.
map_d = {
'0': '0',
'1': '1',
'6': '9',
'8': '8',
'9': '6'
}
mapped_num_ls = []
for n in rev_num:
if n in map_d:
mapped_num_ls.append(map_d[n])
else:
mapped_num_ls.append(' ')
mapped_num = ''.join(mapped_num_ls)
# Check if strobogrammatic.
return mapped_num == num
def main():
# Output: true
num = "69"
print SolutionMapDictIter().isStrobogrammatic(num)
# Output: true
num = "88"
print SolutionMapDictIter().isStrobogrammatic(num)
# Output: false
num = "962"
print SolutionMapDictIter().isStrobogrammatic(num)
# Output: false
num = "2"
print SolutionMapDictIter().isStrobogrammatic(num)
if __name__ == '__main__':
main()
|
77f4c96b4ee2a3964fb41396cd230b5a35b8ba00
|
dipy/utils/tests/test_tripwire.py
|
dipy/utils/tests/test_tripwire.py
|
""" Testing tripwire module.
"""
from ..tripwire import TripWire, is_tripwire, TripWireError
from nose import SkipTest
from nose.tools import (assert_true, assert_false, assert_raises,
assert_equal, assert_not_equal)
def test_is_tripwire():
assert_false(is_tripwire(object()))
assert_true(is_tripwire(TripWire('some message')))
def test_tripwire():
# Test tripwire object
silly_module_name = TripWire('We do not have silly_module_name')
assert_raises(TripWireError,
getattr,
silly_module_name,
'do_silly_thing')
# Check AttributeError can be checked too
try:
silly_module_name.__wrapped__
except TripWireError as err:
assert_true(isinstance(err, AttributeError))
else:
raise RuntimeError("No error raised, but expected")
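For reference, the behaviour exercised here (any attribute access raises a TripWireError, which is also an AttributeError) could be implemented roughly like this; a sketch only, the real classes live in dipy's tripwire module:
class TripWireError(AttributeError):
    """Raised when a TripWire stand-in object is used."""
class TripWire(object):
    """Stand-in for an optional dependency that failed to import."""
    def __init__(self, msg):
        self._msg = msg
    def __getattr__(self, name):
        # Only called for attributes not found normally, so any real use
        # of the stand-in trips the wire.
        raise TripWireError(self._msg)
def is_tripwire(obj):
    # Probe an attribute and see whether the wire trips.
    try:
        obj.any_attribute
    except TripWireError:
        return True
    except Exception:
        return False
    return False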
|
Add a test for TripWire.
|
TST: Add a test for TripWire.
|
Python
|
bsd-3-clause
|
nilgoyyou/dipy,FrancoisRheaultUS/dipy,villalonreina/dipy,matthieudumont/dipy,FrancoisRheaultUS/dipy,villalonreina/dipy,StongeEtienne/dipy,matthieudumont/dipy,nilgoyyou/dipy,StongeEtienne/dipy
|
TST: Add a test for TripWire.
|
""" Testing tripwire module.
"""
from ..tripwire import TripWire, is_tripwire, TripWireError
from nose import SkipTest
from nose.tools import (assert_true, assert_false, assert_raises,
assert_equal, assert_not_equal)
def test_is_tripwire():
assert_false(is_tripwire(object()))
assert_true(is_tripwire(TripWire('some message')))
def test_tripwire():
# Test tripwire object
silly_module_name = TripWire('We do not have silly_module_name')
assert_raises(TripWireError,
getattr,
silly_module_name,
'do_silly_thing')
# Check AttributeError can be checked too
try:
silly_module_name.__wrapped__
except TripWireError as err:
assert_true(isinstance(err, AttributeError))
else:
raise RuntimeError("No error raised, but expected")
|
<commit_before><commit_msg>TST: Add a test for TripWire.<commit_after>
|
""" Testing tripwire module.
"""
from ..tripwire import TripWire, is_tripwire, TripWireError
from nose import SkipTest
from nose.tools import (assert_true, assert_false, assert_raises,
assert_equal, assert_not_equal)
def test_is_tripwire():
assert_false(is_tripwire(object()))
assert_true(is_tripwire(TripWire('some message')))
def test_tripwire():
# Test tripwire object
silly_module_name = TripWire('We do not have silly_module_name')
assert_raises(TripWireError,
getattr,
silly_module_name,
'do_silly_thing')
# Check AttributeError can be checked too
try:
silly_module_name.__wrapped__
except TripWireError as err:
assert_true(isinstance(err, AttributeError))
else:
raise RuntimeError("No error raised, but expected")
|
TST: Add a test for TripWire.""" Testing tripwire module.
"""
from ..tripwire import TripWire, is_tripwire, TripWireError
from nose import SkipTest
from nose.tools import (assert_true, assert_false, assert_raises,
assert_equal, assert_not_equal)
def test_is_tripwire():
assert_false(is_tripwire(object()))
assert_true(is_tripwire(TripWire('some message')))
def test_tripwire():
# Test tripwire object
silly_module_name = TripWire('We do not have silly_module_name')
assert_raises(TripWireError,
getattr,
silly_module_name,
'do_silly_thing')
# Check AttributeError can be checked too
try:
silly_module_name.__wrapped__
except TripWireError as err:
assert_true(isinstance(err, AttributeError))
else:
raise RuntimeError("No error raised, but expected")
|
<commit_before><commit_msg>TST: Add a test for TripWire.<commit_after>""" Testing tripwire module.
"""
from ..tripwire import TripWire, is_tripwire, TripWireError
from nose import SkipTest
from nose.tools import (assert_true, assert_false, assert_raises,
assert_equal, assert_not_equal)
def test_is_tripwire():
assert_false(is_tripwire(object()))
assert_true(is_tripwire(TripWire('some message')))
def test_tripwire():
# Test tripwire object
silly_module_name = TripWire('We do not have silly_module_name')
assert_raises(TripWireError,
getattr,
silly_module_name,
'do_silly_thing')
# Check AttributeError can be checked too
try:
silly_module_name.__wrapped__
except TripWireError as err:
assert_true(isinstance(err, AttributeError))
else:
raise RuntimeError("No error raised, but expected")
|
|
b4292176e1bed76a24a9d1d83a6781c85adea069
|
diet_gtfs.py
|
diet_gtfs.py
|
import csv
import sys
def clean_agency_file(*agencies):
with open('agency.txt', 'r') as f:
reader = csv.reader(f)
next(f)
for row in reader:
if row[0] in agencies:
print(row)
def main():
agencies = sys.argv[1:]
clean_agency_file(*agencies)
if __name__ == '__main__':
main()
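A self-contained demo of the filtering (made-up GTFS rows; assumes the clean_agency_file function above is in scope):
with open('agency.txt', 'w') as f:
    f.write('agency_id,agency_name,agency_url,agency_timezone\n')
    f.write('ST,Sydney Trains,http://example.org,Australia/Sydney\n')
    f.write('SB,Sydney Buses,http://example.org,Australia/Sydney\n')
clean_agency_file('SB')  # prints only the 'SB' row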
|
Add initial code to open agency.txt
|
Add initial code to open agency.txt
|
Python
|
bsd-2-clause
|
sensiblecodeio/diet-gtfs
|
Add initial code to open agency.txt
|
import csv
import sys
def clean_agency_file(*agencies):
with open('agency.txt', 'r') as f:
reader = csv.reader(f)
next(f)
for row in reader:
if row[0] in agencies:
print(row)
def main():
agencies = sys.argv[1:]
clean_agency_file(*agencies)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add initial code to open agency.txt<commit_after>
|
import csv
import sys
def clean_agency_file(*agencies):
with open('agency.txt', 'r') as f:
reader = csv.reader(f)
next(f)
for row in reader:
if row[0] in agencies:
print(row)
def main():
agencies = sys.argv[1:]
clean_agency_file(*agencies)
if __name__ == '__main__':
main()
|
Add initial code to open agency.txtimport csv
import sys
def clean_agency_file(*agencies):
with open('agency.txt', 'r') as f:
reader = csv.reader(f)
next(f)
for row in reader:
if row[0] in agencies:
print(row)
def main():
agencies = sys.argv[1:]
clean_agency_file(*agencies)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add initial code to open agency.txt<commit_after>import csv
import sys
def clean_agency_file(*agencies):
with open('agency.txt', 'r') as f:
reader = csv.reader(f)
next(f)
for row in reader:
if row[0] in agencies:
print(row)
def main():
agencies = sys.argv[1:]
clean_agency_file(*agencies)
if __name__ == '__main__':
main()
|
|
a45dbf6720292ea5052a1da022aece11b3523c1e
|
channels/r_slimerancher/app.py
|
channels/r_slimerancher/app.py
|
#encoding:utf-8
from utils import get_url
# Subreddit that will be a source of content
subreddit = 'slimerancher'
# Telegram channel with @reddit2telegram_bot as an admin
t_channel = '@r_slimerancher'
def send_post(submission, r2t):
what, url, ext = get_url(submission)
# If this func returns:
# False – it means that we will not send
# this submission, let's move to the next.
# True – everything is ok, we send the submission
# None – we do not want to send anything this time,
# let's just sleep.
# Get all data from submission that we need
title = submission.title
link = submission.shortlink
text = '{}\n\n{}'.format(title, link)
if what == 'text':
punchline = submission.selftext
text = '{}\n\n{}\n\n\n{}'.format(title, punchline, link)
return r2t.send_text(text)
elif what == 'other':
base_url = submission.url
text = '{}\n{}\n\n{}'.format(title, base_url, link)
return r2t.send_text(text)
elif what == 'album':
base_url = submission.url
text = '{}\n{}\n\n{}'.format(title, base_url, link)
r2t.send_text(text)
r2t.send_album(url)
return True
elif what in ('gif', 'img'):
return r2t.send_gif_img(what, url, ext, text)
else:
return False
|
Add script to handle posts from r_slimerancher
|
Add script to handle posts from r_slimerancher
Based on various app.py from other channels
Don't know if that's correct cause I've never done anything in python before xD
|
Python
|
mit
|
Fillll/reddit2telegram,nsiregar/reddit2telegram,nsiregar/reddit2telegram,Fillll/reddit2telegram
|
Add script to handle posts from r_slimerancher
Based on various app.py from other channels
Don't know if that's correct cause I've never done anything in python before xD
|
#encoding:utf-8
from utils import get_url
# Subreddit that will be a source of content
subreddit = 'slimerancher'
# Telegram channel with @reddit2telegram_bot as an admin
t_channel = '@r_slimerancher'
def send_post(submission, r2t):
what, url, ext = get_url(submission)
# If this func returns:
# False – it means that we will not send
# this submission, let's move to the next.
# True – everything is ok, we send the submission
# None – we do not want to send anything this time,
# let's just sleep.
# Get all data from submission that we need
title = submission.title
link = submission.shortlink
text = '{}\n\n{}'.format(title, link)
if what == 'text':
punchline = submission.selftext
text = '{}\n\n{}\n\n\n{}'.format(title, punchline, link)
return r2t.send_text(text)
elif what == 'other':
base_url = submission.url
text = '{}\n{}\n\n{}'.format(title, base_url, link)
return r2t.send_text(text)
elif what == 'album':
base_url = submission.url
text = '{}\n{}\n\n{}'.format(title, base_url, link)
r2t.send_text(text)
r2t.send_album(url)
return True
elif what in ('gif', 'img'):
return r2t.send_gif_img(what, url, ext, text)
else:
return False
|
<commit_before><commit_msg>Add script to handle posts from r_slimerancher
Based on various app.py from other channels
Don't know if that's correct cause I've never done anything in python before xD<commit_after>
|
#encoding:utf-8
from utils import get_url
# Subreddit that will be a source of content
subreddit = 'slimerancher'
# Telegram channel with @reddit2telegram_bot as an admin
t_channel = '@r_slimerancher'
def send_post(submission, r2t):
what, url, ext = get_url(submission)
# If this func returns:
# False – it means that we will not send
# this submission, let's move to the next.
# True – everything is ok, we send the submission
# None – we do not want to send anything this time,
# let's just sleep.
# Get all data from submission that we need
title = submission.title
link = submission.shortlink
text = '{}\n\n{}'.format(title, link)
if what == 'text':
punchline = submission.selftext
text = '{}\n\n{}\n\n\n{}'.format(title, punchline, link)
return r2t.send_text(text)
elif what == 'other':
base_url = submission.url
text = '{}\n{}\n\n{}'.format(title, base_url, link)
return r2t.send_text(text)
elif what == 'album':
base_url = submission.url
text = '{}\n{}\n\n{}'.format(title, base_url, link)
r2t.send_text(text)
r2t.send_album(url)
return True
elif what in ('gif', 'img'):
return r2t.send_gif_img(what, url, ext, text)
else:
return False
|
Add script to handle posts from r_slimerancher
Based on various app.py from other channels
Don't know if that's correct cause I've never done anything in python before xD#encoding:utf-8
from utils import get_url
# Subreddit that will be a source of content
subreddit = 'slimerancher'
# Telegram channel with @reddit2telegram_bot as an admin
t_channel = '@r_slimerancher'
def send_post(submission, r2t):
what, url, ext = get_url(submission)
# If this func returns:
# False – it means that we will not send
# this submission, let's move to the next.
# True – everything is ok, we send the submission
# None – we do not want to send anything this time,
# let's just sleep.
# Get all data from submission that we need
title = submission.title
link = submission.shortlink
text = '{}\n\n{}'.format(title, link)
if what == 'text':
punchline = submission.selftext
text = '{}\n\n{}\n\n\n{}'.format(title, punchline, link)
return r2t.send_text(text)
elif what == 'other':
base_url = submission.url
text = '{}\n{}\n\n{}'.format(title, base_url, link)
return r2t.send_text(text)
elif what == 'album':
base_url = submission.url
text = '{}\n{}\n\n{}'.format(title, base_url, link)
r2t.send_text(text)
r2t.send_album(url)
return True
elif what in ('gif', 'img'):
return r2t.send_gif_img(what, url, ext, text)
else:
return False
|
<commit_before><commit_msg>Add script to handle posts from r_slimerancher
Based on various app.py from other channels
Don't know if that's correct cause I've never done anything in python before xD<commit_after>#encoding:utf-8
from utils import get_url
# Subreddit that will be a source of content
subreddit = 'slimerancher'
# Telegram channel with @reddit2telegram_bot as an admin
t_channel = '@r_slimerancher'
def send_post(submission, r2t):
what, url, ext = get_url(submission)
# If this func returns:
# False – it means that we will not send
# this submission, let's move to the next.
# True – everything is ok, we send the submission
# None – we do not want to send anything this time,
# let's just sleep.
# Get all data from submission that we need
title = submission.title
link = submission.shortlink
text = '{}\n\n{}'.format(title, link)
if what == 'text':
punchline = submission.selftext
text = '{}\n\n{}\n\n\n{}'.format(title, punchline, link)
return r2t.send_text(text)
elif what == 'other':
base_url = submission.url
text = '{}\n{}\n\n{}'.format(title, base_url, link)
return r2t.send_text(text)
elif what == 'album':
base_url = submission.url
text = '{}\n{}\n\n{}'.format(title, base_url, link)
r2t.send_text(text)
r2t.send_album(url)
return True
elif what in ('gif', 'img'):
return r2t.send_gif_img(what, url, ext, text)
else:
return False
|
|
ac0fa1df1569f81b0a469f8439cf7f9e916bb208
|
tests/unit/test_compress.py
|
tests/unit/test_compress.py
|
# -*- coding: utf-8 -*-
'''
Test database compression functions
'''
# Import sorbic libs
import sorbic.db
# Import python libs
import os
import shutil
import unittest
import tempfile
class TestCompress(unittest.TestCase):
'''
Cover compression possibilities
'''
def test_compress_no_changes(self):
'''
        Insert entries, compress with no changes, and verify all data is still readable
'''
entries = 100
w_dir = tempfile.mkdtemp()
root = os.path.join(w_dir, 'db_root')
db = sorbic.db.DB(root)
data = {1:1}
for num in xrange(entries):
key = str(num)
db.insert(key, data)
db.compress('', 0)
for num in xrange(entries):
key = str(num)
pull_data = db.get(key)
self.assertEqual(data, pull_data)
shutil.rmtree(w_dir)
|
Add initial basic test for compression
|
Add initial basic test for compression
|
Python
|
apache-2.0
|
s0undt3ch/sorbic,thatch45/sorbic
|
Add initial basic test for compression
|
# -*- coding: utf-8 -*-
'''
Test database compression functions
'''
# Import sorbic libs
import sorbic.db
# Import python libs
import os
import shutil
import unittest
import tempfile
class TestCompress(unittest.TestCase):
'''
Cover compression possibilities
'''
def test_compress_no_changes(self):
'''
        Insert entries, compress with no changes, and verify all data is still readable
'''
entries = 100
w_dir = tempfile.mkdtemp()
root = os.path.join(w_dir, 'db_root')
db = sorbic.db.DB(root)
data = {1:1}
for num in xrange(entries):
key = str(num)
db.insert(key, data)
db.compress('', 0)
for num in xrange(entries):
key = str(num)
pull_data = db.get(key)
self.assertEqual(data, pull_data)
shutil.rmtree(w_dir)
|
<commit_before><commit_msg>Add initial basic test for compression<commit_after>
|
# -*- coding: utf-8 -*-
'''
Test database compression functions
'''
# Import sorbic libs
import sorbic.db
# Import python libs
import os
import shutil
import unittest
import tempfile
class TestCompress(unittest.TestCase):
'''
Cover compression possibilities
'''
def test_compress_no_changes(self):
'''
        Insert entries, compress with no changes, and verify all data is still readable
'''
entries = 100
w_dir = tempfile.mkdtemp()
root = os.path.join(w_dir, 'db_root')
db = sorbic.db.DB(root)
data = {1:1}
for num in xrange(entries):
key = str(num)
db.insert(key, data)
db.compress('', 0)
for num in xrange(entries):
key = str(num)
pull_data = db.get(key)
self.assertEqual(data, pull_data)
shutil.rmtree(w_dir)
|
Add initial basic test for compression# -*- coding: utf-8 -*-
'''
Test database compression functions
'''
# Import sorbic libs
import sorbic.db
# Import python libs
import os
import shutil
import unittest
import tempfile
class TestCompress(unittest.TestCase):
'''
Cover compression possibilities
'''
def test_compress_no_changes(self):
'''
        Insert entries, compress with no changes, and verify all data is still readable
'''
entries = 100
w_dir = tempfile.mkdtemp()
root = os.path.join(w_dir, 'db_root')
db = sorbic.db.DB(root)
data = {1:1}
for num in xrange(entries):
key = str(num)
db.insert(key, data)
db.compress('', 0)
for num in xrange(entries):
key = str(num)
pull_data = db.get(key)
self.assertEqual(data, pull_data)
shutil.rmtree(w_dir)
|
<commit_before><commit_msg>Add initial basic test for compression<commit_after># -*- coding: utf-8 -*-
'''
Test database compression functions
'''
# Import sorbic libs
import sorbic.db
# Import python libs
import os
import shutil
import unittest
import tempfile
class TestCompress(unittest.TestCase):
'''
Cover compression possibilities
'''
def test_compress_no_changes(self):
'''
Run a scale db execution with the given db kwargs
'''
entries = 100
w_dir = tempfile.mkdtemp()
root = os.path.join(w_dir, 'db_root')
db = sorbic.db.DB(root)
data = {1:1}
for num in xrange(entries):
key = str(num)
db.insert(key, data)
db.compress('', 0)
for num in xrange(entries):
key = str(num)
pull_data = db.get(key)
self.assertEqual(data, pull_data)
shutil.rmtree(w_dir)
|
|
8793c14aee7e2fb482a173a8650e2a961ea6cc46
|
scripts/generate_agency_ranking.py
|
scripts/generate_agency_ranking.py
|
from collections import defaultdict
import sys
import django
django.setup()
from busshaming.models import Agency, RouteDate, Feed
FEED_SLUG = 'nsw-buses'
MIN_TRIPS = 500
MIN_RT_ENTRIES = 0
def main(best, verylate):
feed = Feed.objects.get(slug=FEED_SLUG)
routedates = defaultdict(list)
for rd in RouteDate.objects.filter(route__feed=feed).prefetch_related('route').all():
routedates[rd.route.agency.name].append(rd)
results = []
for agency_name in routedates:
rds = routedates[agency_name]
num_trips = sum([rd.num_scheduled_trips for rd in rds])
if num_trips == 0:
continue
total_ontime = sum([rd.scheduled_trip_ontime_count for rd in rds])
total_verylate = sum([rd.scheduled_trip_verylate_count for rd in rds])
if num_trips < MIN_TRIPS:
continue
result = [
agency_name,
num_trips,
100 * total_ontime / num_trips,
100 * total_verylate / num_trips,
]
results.append(result)
if verylate:
results.sort(key=lambda x: x[3], reverse=not best)
else:
results.sort(key=lambda x: x[2], reverse=best)
for i, res in enumerate(results):
desc = res[0]
out = f'{i+1}\t{res[1]}\t{res[2]:.2f}\t{res[3]:.2f}\t' + desc
print(out)
if __name__ == '__main__':
if len(sys.argv) < 3:
print(f'Usage: {sys.argv[0]} <best|worst> <ontime|verylate>')
sys.exit(1)
best = sys.argv[1] == 'best'
verylate = sys.argv[2] == 'verylate'
main(best, verylate)
|
Add script to show agency rankings.
|
Add script to show agency rankings.
|
Python
|
mit
|
katharosada/bus-shaming,katharosada/bus-shaming,katharosada/bus-shaming,katharosada/bus-shaming,katharosada/bus-shaming
|
Add script to show agency rankings.
|
from collections import defaultdict
import sys
import django
django.setup()
from busshaming.models import Agency, RouteDate, Feed
FEED_SLUG = 'nsw-buses'
MIN_TRIPS = 500
MIN_RT_ENTRIES = 0
def main(best, verylate):
feed = Feed.objects.get(slug=FEED_SLUG)
routedates = defaultdict(list)
for rd in RouteDate.objects.filter(route__feed=feed).prefetch_related('route').all():
routedates[rd.route.agency.name].append(rd)
results = []
for agency_name in routedates:
rds = routedates[agency_name]
num_trips = sum([rd.num_scheduled_trips for rd in rds])
if num_trips == 0:
continue
total_ontime = sum([rd.scheduled_trip_ontime_count for rd in rds])
total_verylate = sum([rd.scheduled_trip_verylate_count for rd in rds])
if num_trips < MIN_TRIPS:
continue
result = [
agency_name,
num_trips,
100 * total_ontime / num_trips,
100 * total_verylate / num_trips,
]
results.append(result)
if verylate:
results.sort(key=lambda x: x[3], reverse=not best)
else:
results.sort(key=lambda x: x[2], reverse=best)
for i, res in enumerate(results):
desc = res[0]
out = f'{i+1}\t{res[1]}\t{res[2]:.2f}\t{res[3]:.2f}\t' + desc
print(out)
if __name__ == '__main__':
if len(sys.argv) < 3:
print(f'Usage: {sys.argv[0]} <best|worst> <ontime|verylate>')
sys.exit(1)
best = sys.argv[1] == 'best'
verylate = sys.argv[2] == 'verylate'
main(best, verylate)
|
<commit_before><commit_msg>Add script to show agency rankings.<commit_after>
|
from collections import defaultdict
import sys
import django
django.setup()
from busshaming.models import Agency, RouteDate, Feed
FEED_SLUG = 'nsw-buses'
MIN_TRIPS = 500
MIN_RT_ENTRIES = 0
def main(best, verylate):
feed = Feed.objects.get(slug=FEED_SLUG)
routedates = defaultdict(list)
for rd in RouteDate.objects.filter(route__feed=feed).prefetch_related('route').all():
routedates[rd.route.agency.name].append(rd)
results = []
for agency_name in routedates:
rds = routedates[agency_name]
num_trips = sum([rd.num_scheduled_trips for rd in rds])
if num_trips == 0:
continue
total_ontime = sum([rd.scheduled_trip_ontime_count for rd in rds])
total_verylate = sum([rd.scheduled_trip_verylate_count for rd in rds])
if num_trips < MIN_TRIPS:
continue
result = [
agency_name,
num_trips,
100 * total_ontime / num_trips,
100 * total_verylate / num_trips,
]
results.append(result)
if verylate:
results.sort(key=lambda x: x[3], reverse=not best)
else:
results.sort(key=lambda x: x[2], reverse=best)
for i, res in enumerate(results):
desc = res[0]
out = f'{i+1}\t{res[1]}\t{res[2]:.2f}\t{res[3]:.2f}\t' + desc
print(out)
if __name__ == '__main__':
if len(sys.argv) < 3:
print(f'Usage: {sys.argv[0]} <best|worst> <ontime|verylate>')
sys.exit(1)
best = sys.argv[1] == 'best'
verylate = sys.argv[2] == 'verylate'
main(best, verylate)
|
Add script to show agency rankings.from collections import defaultdict
import sys
import django
django.setup()
from busshaming.models import Agency, RouteDate, Feed
FEED_SLUG = 'nsw-buses'
MIN_TRIPS = 500
MIN_RT_ENTRIES = 0
def main(best, verylate):
feed = Feed.objects.get(slug=FEED_SLUG)
routedates = defaultdict(list)
for rd in RouteDate.objects.filter(route__feed=feed).prefetch_related('route').all():
routedates[rd.route.agency.name].append(rd)
results = []
for agency_name in routedates:
rds = routedates[agency_name]
num_trips = sum([rd.num_scheduled_trips for rd in rds])
if num_trips == 0:
continue
total_ontime = sum([rd.scheduled_trip_ontime_count for rd in rds])
total_verylate = sum([rd.scheduled_trip_verylate_count for rd in rds])
if num_trips < MIN_TRIPS:
continue
result = [
agency_name,
num_trips,
100 * total_ontime / num_trips,
100 * total_verylate / num_trips,
]
results.append(result)
if verylate:
results.sort(key=lambda x: x[3], reverse=not best)
else:
results.sort(key=lambda x: x[2], reverse=best)
for i, res in enumerate(results):
desc = res[0]
out = f'{i+1}\t{res[1]}\t{res[2]:.2f}\t{res[3]:.2f}\t' + desc
print(out)
if __name__ == '__main__':
if len(sys.argv) < 3:
print(f'Usage: {sys.argv[0]} <best|worst> <ontime|verylate>')
sys.exit(1)
best = sys.argv[1] == 'best'
verylate = sys.argv[2] == 'verylate'
main(best, verylate)
|
<commit_before><commit_msg>Add script to show agency rankings.<commit_after>from collections import defaultdict
import sys
import django
django.setup()
from busshaming.models import Agency, RouteDate, Feed
FEED_SLUG = 'nsw-buses'
MIN_TRIPS = 500
MIN_RT_ENTRIES = 0
def main(best, verylate):
feed = Feed.objects.get(slug=FEED_SLUG)
routedates = defaultdict(list)
for rd in RouteDate.objects.filter(route__feed=feed).prefetch_related('route').all():
routedates[rd.route.agency.name].append(rd)
results = []
for agency_name in routedates:
rds = routedates[agency_name]
num_trips = sum([rd.num_scheduled_trips for rd in rds])
if num_trips == 0:
continue
total_ontime = sum([rd.scheduled_trip_ontime_count for rd in rds])
total_verylate = sum([rd.scheduled_trip_verylate_count for rd in rds])
if num_trips < MIN_TRIPS:
continue
result = [
agency_name,
num_trips,
100 * total_ontime / num_trips,
100 * total_verylate / num_trips,
]
results.append(result)
if verylate:
results.sort(key=lambda x: x[3], reverse=not best)
else:
results.sort(key=lambda x: x[2], reverse=best)
for i, res in enumerate(results):
desc = res[0]
out = f'{i+1}\t{res[1]}\t{res[2]:.2f}\t{res[3]:.2f}\t' + desc
print(out)
if __name__ == '__main__':
if len(sys.argv) < 3:
print(f'Usage: {sys.argv[0]} <best|worst> <ontime|verylate>')
sys.exit(1)
best = sys.argv[1] == 'best'
verylate = sys.argv[2] == 'verylate'
main(best, verylate)
|
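A toy illustration of the two sort directions used in the ranking script above; the agency rows and numbers are invented for the example.
# Each row: [agency name, scheduled trips, % on time, % very late]
results = [
    ['Agency A', 1200, 75.0, 5.0],
    ['Agency B', 3400, 60.0, 12.0],
]
best = True
# Ranking by on-time percentage: "best" puts the highest percentage first.
results.sort(key=lambda x: x[2], reverse=best)
# Ranking by very-late percentage: "best" puts the lowest percentage first.
results.sort(key=lambda x: x[3], reverse=not best)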
|
149ce526143265d1d265761a9fb57bb66ee7a75a
|
functional/tests/volume/v1/test_volume_type.py
|
functional/tests/volume/v1/test_volume_type.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from functional.common import test
class VolumeTypeTests(test.TestCase):
"""Functional tests for volume type. """
NAME = uuid.uuid4().hex
HEADERS = ['"Name"']
FIELDS = ['name']
@classmethod
def setUpClass(cls):
opts = cls.get_show_opts(cls.FIELDS)
raw_output = cls.openstack('volume type create ' + cls.NAME + opts)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
raw_output = cls.openstack('volume type delete ' + cls.NAME)
cls.assertOutput('', raw_output)
def test_volume_type_list(self):
opts = self.get_list_opts(self.HEADERS)
raw_output = self.openstack('volume type list' + opts)
self.assertIn(self.NAME, raw_output)
|
Add functional tests for volume type list
|
Add functional tests for volume type list
add tests for `os volume type list`
Change-Id: Icd874b9cfac9376cc410041806fac64f1ff0c59d
|
Python
|
apache-2.0
|
dtroyer/python-openstackclient,dtroyer/python-openstackclient,BjoernT/python-openstackclient,openstack/python-openstackclient,openstack/python-openstackclient,redhat-openstack/python-openstackclient,redhat-openstack/python-openstackclient,BjoernT/python-openstackclient
|
Add functional tests for volume type list
add tests for `os volume type list`
Change-Id: Icd874b9cfac9376cc410041806fac64f1ff0c59d
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from functional.common import test
class VolumeTypeTests(test.TestCase):
"""Functional tests for volume type. """
NAME = uuid.uuid4().hex
HEADERS = ['"Name"']
FIELDS = ['name']
@classmethod
def setUpClass(cls):
opts = cls.get_show_opts(cls.FIELDS)
raw_output = cls.openstack('volume type create ' + cls.NAME + opts)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
raw_output = cls.openstack('volume type delete ' + cls.NAME)
cls.assertOutput('', raw_output)
def test_volume_type_list(self):
opts = self.get_list_opts(self.HEADERS)
raw_output = self.openstack('volume type list' + opts)
self.assertIn(self.NAME, raw_output)
|
<commit_before><commit_msg>Add functional tests for volume type list
add tests for `os volume type list`
Change-Id: Icd874b9cfac9376cc410041806fac64f1ff0c59d<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from functional.common import test
class VolumeTypeTests(test.TestCase):
"""Functional tests for volume type. """
NAME = uuid.uuid4().hex
HEADERS = ['"Name"']
FIELDS = ['name']
@classmethod
def setUpClass(cls):
opts = cls.get_show_opts(cls.FIELDS)
raw_output = cls.openstack('volume type create ' + cls.NAME + opts)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
raw_output = cls.openstack('volume type delete ' + cls.NAME)
cls.assertOutput('', raw_output)
def test_volume_type_list(self):
opts = self.get_list_opts(self.HEADERS)
raw_output = self.openstack('volume type list' + opts)
self.assertIn(self.NAME, raw_output)
|
Add functional tests for volume type list
add tests for `os volume type list`
Change-Id: Icd874b9cfac9376cc410041806fac64f1ff0c59d# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from functional.common import test
class VolumeTypeTests(test.TestCase):
"""Functional tests for volume type. """
NAME = uuid.uuid4().hex
HEADERS = ['"Name"']
FIELDS = ['name']
@classmethod
def setUpClass(cls):
opts = cls.get_show_opts(cls.FIELDS)
raw_output = cls.openstack('volume type create ' + cls.NAME + opts)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
raw_output = cls.openstack('volume type delete ' + cls.NAME)
cls.assertOutput('', raw_output)
def test_volume_type_list(self):
opts = self.get_list_opts(self.HEADERS)
raw_output = self.openstack('volume type list' + opts)
self.assertIn(self.NAME, raw_output)
|
<commit_before><commit_msg>Add functional tests for volume type list
add tests for `os volume type list`
Change-Id: Icd874b9cfac9376cc410041806fac64f1ff0c59d<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from functional.common import test
class VolumeTypeTests(test.TestCase):
"""Functional tests for volume type. """
NAME = uuid.uuid4().hex
HEADERS = ['"Name"']
FIELDS = ['name']
@classmethod
def setUpClass(cls):
opts = cls.get_show_opts(cls.FIELDS)
raw_output = cls.openstack('volume type create ' + cls.NAME + opts)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
raw_output = cls.openstack('volume type delete ' + cls.NAME)
cls.assertOutput('', raw_output)
def test_volume_type_list(self):
opts = self.get_list_opts(self.HEADERS)
raw_output = self.openstack('volume type list' + opts)
self.assertIn(self.NAME, raw_output)
|
|
bbf34e5fc3f1728f889065c252b32f175d63cbe1
|
nn_patterns/utils/fileio.py
|
nn_patterns/utils/fileio.py
|
# Begin: Python 2/3 compatibility header small
# Get Python 3 functionality:
from __future__ import\
absolute_import, print_function, division, unicode_literals
from future.utils import raise_with_traceback, raise_from
# catch exception with: except Exception as e
from builtins import range, map, zip, filter
from io import open
import six
# End: Python 2/3 compatibility header small
import numpy as np
def save_parameters(filename, l):
f = np.savez(filename, *l)
pass
def load_parameters(filename):
f = np.load(filename)
ret = [f["arr_%i" % i] for i in range(len(f.keys()))]
return ret
def store_patterns(filename, p):
d = {}
for prefix in ["A", "r", "mu"]:
if prefix in p:
d.update({"%s_%i" % (prefix, i): x
for i, x in enumerate(p[prefix])})
np.savez(filename, **d)
pass
def load_patterns(filename):
f = np.load(filename)
ret = {}
for prefix in ["A", "r", "mu"]:
l = sum([x.startswith(prefix) for x in f.keys()])
ret.update({prefix: [f["%s_%i" % (prefix, i)] for i in range(l)]})
return ret
|
Add simple interface to store parameters and patterns across python 2 and 3.
|
Add simple interface to store parameters and patterns across python 2 and 3.
|
Python
|
mit
|
pikinder/nn-patterns
|
Add simple interface to store parameters and patterns across python 2 and 3.
|
# Begin: Python 2/3 compatibility header small
# Get Python 3 functionality:
from __future__ import\
absolute_import, print_function, division, unicode_literals
from future.utils import raise_with_traceback, raise_from
# catch exception with: except Exception as e
from builtins import range, map, zip, filter
from io import open
import six
# End: Python 2/3 compatibility header small
import numpy as np
def save_parameters(filename, l):
f = np.savez(filename, *l)
pass
def load_parameters(filename):
f = np.load(filename)
ret = [f["arr_%i" % i] for i in range(len(f.keys()))]
return ret
def store_patterns(filename, p):
d = {}
for prefix in ["A", "r", "mu"]:
if prefix in p:
d.update({"%s_%i" % (prefix, i): x
for i, x in enumerate(p[prefix])})
np.savez(filename, **d)
pass
def load_patterns(filename):
f = np.load(filename)
ret = {}
for prefix in ["A", "r", "mu"]:
l = sum([x.startswith(prefix) for x in f.keys()])
ret.update({prefix: [f["%s_%i" % (prefix, i)] for i in range(l)]})
return ret
|
<commit_before><commit_msg>Add simple interface to store parameters and patterns across python 2 and 3.<commit_after>
|
# Begin: Python 2/3 compatibility header small
# Get Python 3 functionality:
from __future__ import\
absolute_import, print_function, division, unicode_literals
from future.utils import raise_with_traceback, raise_from
# catch exception with: except Exception as e
from builtins import range, map, zip, filter
from io import open
import six
# End: Python 2/3 compatibility header small
import numpy as np
def save_parameters(filename, l):
f = np.savez(filename, *l)
pass
def load_parameters(filename):
f = np.load(filename)
ret = [f["arr_%i" % i] for i in range(len(f.keys()))]
return ret
def store_patterns(filename, p):
d = {}
for prefix in ["A", "r", "mu"]:
if prefix in p:
d.update({"%s_%i" % (prefix, i): x
for i, x in enumerate(p[prefix])})
np.savez(filename, **d)
pass
def load_patterns(filename):
f = np.load(filename)
ret = {}
for prefix in ["A", "r", "mu"]:
l = sum([x.startswith(prefix) for x in f.keys()])
ret.update({prefix: [f["%s_%i" % (prefix, i)] for i in range(l)]})
return ret
|
Add simple interface to store parameters and patterns across python 2 and 3.# Begin: Python 2/3 compatibility header small
# Get Python 3 functionality:
from __future__ import\
absolute_import, print_function, division, unicode_literals
from future.utils import raise_with_traceback, raise_from
# catch exception with: except Exception as e
from builtins import range, map, zip, filter
from io import open
import six
# End: Python 2/3 compatibility header small
import numpy as np
def save_parameters(filename, l):
f = np.savez(filename, *l)
pass
def load_parameters(filename):
f = np.load(filename)
ret = [f["arr_%i" % i] for i in range(len(f.keys()))]
return ret
def store_patterns(filename, p):
d = {}
for prefix in ["A", "r", "mu"]:
if prefix in p:
d.update({"%s_%i" % (prefix, i): x
for i, x in enumerate(p[prefix])})
np.savez(filename, **d)
pass
def load_patterns(filename):
f = np.load(filename)
ret = {}
for prefix in ["A", "r", "mu"]:
l = sum([x.startswith(prefix) for x in f.keys()])
ret.update({prefix: [f["%s_%i" % (prefix, i)] for i in range(l)]})
return ret
|
<commit_before><commit_msg>Add simple interface to store parameters and patterns across python 2 and 3.<commit_after># Begin: Python 2/3 compatibility header small
# Get Python 3 functionality:
from __future__ import\
absolute_import, print_function, division, unicode_literals
from future.utils import raise_with_traceback, raise_from
# catch exception with: except Exception as e
from builtins import range, map, zip, filter
from io import open
import six
# End: Python 2/3 compatibility header small
import numpy as np
def save_parameters(filename, l):
f = np.savez(filename, *l)
pass
def load_parameters(filename):
f = np.load(filename)
ret = [f["arr_%i" % i] for i in range(len(f.keys()))]
return ret
def store_patterns(filename, p):
d = {}
for prefix in ["A", "r", "mu"]:
if prefix in p:
d.update({"%s_%i" % (prefix, i): x
for i, x in enumerate(p[prefix])})
np.savez(filename, **d)
pass
def load_patterns(filename):
f = np.load(filename)
ret = {}
for prefix in ["A", "r", "mu"]:
l = sum([x.startswith(prefix) for x in f.keys()])
ret.update({prefix: [f["%s_%i" % (prefix, i)] for i in range(l)]})
return ret
|
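A minimal round-trip sketch for the fileio helpers above; the array shapes, file names, and pattern keys are illustrative assumptions rather than values taken from the nn-patterns code base.
import numpy as np
from nn_patterns.utils.fileio import (save_parameters, load_parameters,
                                      store_patterns, load_patterns)
# Hypothetical parameter list: two arbitrary arrays.
params = [np.random.rand(3, 4), np.random.rand(4)]
save_parameters('params.npz', params)
restored = load_parameters('params.npz')
assert all(np.allclose(a, b) for a, b in zip(params, restored))
# Hypothetical pattern dict: only the 'A' and 'r' prefixes are stored here,
# so load_patterns returns an empty list under 'mu'.
patterns = {'A': [np.random.rand(3, 4)], 'r': [np.random.rand(3, 4)]}
store_patterns('patterns.npz', patterns)
loaded = load_patterns('patterns.npz')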
|
c395da80c02b6c39514fcc46a7b951c71ae2c12b
|
usingnamespace/api/views/v1/root.py
|
usingnamespace/api/views/v1/root.py
|
from pyramid.view import view_config
from ....views.finalisecontext import FinaliseContext
class APIV1(FinaliseContext):
@view_config(context='...traversal.v1.Root', route_name='api', renderer='json')
def main(self):
sites = []
for site in self.context.sites:
sites.append(
{
'id': site.id,
'title': site.title,
'tagline': site.tagline,
}
)
return {
'sites': sites,
}
|
Add view for API v1 Root
|
Add view for API v1 Root
This sends back a JSON response containing sites with IDs as well as
title/tagline.
|
Python
|
isc
|
usingnamespace/usingnamespace
|
Add view for API v1 Root
This sends back a JSON response containing sites with IDs as well as
title/tagline.
|
from pyramid.view import view_config
from ....views.finalisecontext import FinaliseContext
class APIV1(FinaliseContext):
@view_config(context='...traversal.v1.Root', route_name='api', renderer='json')
def main(self):
sites = []
for site in self.context.sites:
sites.append(
{
'id': site.id,
'title': site.title,
'tagline': site.tagline,
}
)
return {
'sites': sites,
}
|
<commit_before><commit_msg>Add view for API v1 Root
This sends back a JSON response containing sites with IDs as well as
title/tagline.<commit_after>
|
from pyramid.view import view_config
from ....views.finalisecontext import FinaliseContext
class APIV1(FinaliseContext):
@view_config(context='...traversal.v1.Root', route_name='api', renderer='json')
def main(self):
sites = []
for site in self.context.sites:
sites.append(
{
'id': site.id,
'title': site.title,
'tagline': site.tagline,
}
)
return {
'sites': sites,
}
|
Add view for API v1 Root
This sends back a JSON response containing sites with IDs as well as
title/tagline.from pyramid.view import view_config
from ....views.finalisecontext import FinaliseContext
class APIV1(FinaliseContext):
@view_config(context='...traversal.v1.Root', route_name='api', renderer='json')
def main(self):
sites = []
for site in self.context.sites:
sites.append(
{
'id': site.id,
'title': site.title,
'tagline': site.tagline,
}
)
return {
'sites': sites,
}
|
<commit_before><commit_msg>Add view for API v1 Root
This sends back a JSON response containing sites with IDs as well as
title/tagline.<commit_after>from pyramid.view import view_config
from ....views.finalisecontext import FinaliseContext
class APIV1(FinaliseContext):
@view_config(context='...traversal.v1.Root', route_name='api', renderer='json')
def main(self):
sites = []
for site in self.context.sites:
sites.append(
{
'id': site.id,
'title': site.title,
'tagline': site.tagline,
}
)
return {
'sites': sites,
}
|
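For illustration only, the JSON document rendered from the view above would take roughly the following shape; the site values are made up.
# Hypothetical output of APIV1.main() for a user with two sites:
{
    'sites': [
        {'id': 1, 'title': 'Example Site', 'tagline': 'An example tagline'},
        {'id': 2, 'title': 'Another Site', 'tagline': None},
    ]
}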
|
da16bec07e245e440acea629ad953e4a56085f7e
|
scripts/util.py
|
scripts/util.py
|
import time
import logging
from cassandra.cqlengine.query import Token
from scrapi.database import _manager
from scrapi.processing.cassandra import DocumentModel
_manager.setup()
logger = logging.getLogger(__name__)
def documents(*sources):
q = DocumentModel.objects.timeout(500).allow_filtering().all().limit(1000)
querysets = (q.filter(source=source) for source in sources) if sources else [q]
for query in querysets:
page = try_forever(list, query)
while len(page) > 0:
for doc in page:
yield doc
page = try_forever(next_page, query, page)
def next_page(query, page):
return list(query.filter(pk__token__gt=Token(page[-1].pk)))
def try_forever(action, *args, **kwargs):
while True:
try:
return action(*args, **kwargs)
except Exception as e:
logger.exception(e)
time.sleep(5)
logger.info("Trying again...")
|
import time
import logging
from cassandra.cqlengine.query import Token
from scrapi.database import _manager
from scrapi.processing.cassandra import DocumentModel, DocumentModelV2
_manager.setup()
logger = logging.getLogger(__name__)
def ModelIteratorFactory(model, next_page):
def model_iterator(*sources):
q = model.objects.timeout(500).allow_filtering().all().limit(1000)
querysets = (q.filter(source=source) for source in sources) if sources else [q]
for query in querysets:
page = try_forever(list, query)
while len(page) > 0:
for doc in page:
yield doc
page = try_forever(next_page, query, page)
return model_iterator
def next_page_v1(query, page):
return list(query.filter(pk__token__gt=Token(page[-1].pk)))
def next_page_v2(query, page):
return list(query.filter(docID__gt=page[-1].docID))
documents_v1 = ModelIteratorFactory(DocumentModel, next_page_v1)
documents_v2 = ModelIteratorFactory(DocumentModelV2, next_page_v2)
def try_forever(action, *args, **kwargs):
while True:
try:
return action(*args, **kwargs)
except Exception as e:
logger.exception(e)
time.sleep(5)
logger.info("Trying again...")
|
Add ability to iterate over old document model and new document model for migrations
|
Add ability to iterate over old document model and new document model for migrations
|
Python
|
apache-2.0
|
CenterForOpenScience/scrapi,ostwald/scrapi,fabianvf/scrapi,mehanig/scrapi,alexgarciac/scrapi,felliott/scrapi,icereval/scrapi,CenterForOpenScience/scrapi,fabianvf/scrapi,mehanig/scrapi,erinspace/scrapi,felliott/scrapi,jeffreyliu3230/scrapi,erinspace/scrapi
|
import time
import logging
from cassandra.cqlengine.query import Token
from scrapi.database import _manager
from scrapi.processing.cassandra import DocumentModel
_manager.setup()
logger = logging.getLogger(__name__)
def documents(*sources):
q = DocumentModel.objects.timeout(500).allow_filtering().all().limit(1000)
querysets = (q.filter(source=source) for source in sources) if sources else [q]
for query in querysets:
page = try_forever(list, query)
while len(page) > 0:
for doc in page:
yield doc
page = try_forever(next_page, query, page)
def next_page(query, page):
return list(query.filter(pk__token__gt=Token(page[-1].pk)))
def try_forever(action, *args, **kwargs):
while True:
try:
return action(*args, **kwargs)
except Exception as e:
logger.exception(e)
time.sleep(5)
logger.info("Trying again...")
Add ability to iterate over old document model and new document model for migrations
|
import time
import logging
from cassandra.cqlengine.query import Token
from scrapi.database import _manager
from scrapi.processing.cassandra import DocumentModel, DocumentModelV2
_manager.setup()
logger = logging.getLogger(__name__)
def ModelIteratorFactory(model, next_page):
def model_iterator(*sources):
q = model.objects.timeout(500).allow_filtering().all().limit(1000)
querysets = (q.filter(source=source) for source in sources) if sources else [q]
for query in querysets:
page = try_forever(list, query)
while len(page) > 0:
for doc in page:
yield doc
page = try_forever(next_page, query, page)
return model_iterator
def next_page_v1(query, page):
return list(query.filter(pk__token__gt=Token(page[-1].pk)))
def next_page_v2(query, page):
return list(query.filter(docID__gt=page[-1].docID))
documents_v1 = ModelIteratorFactory(DocumentModel, next_page_v1)
documents_v2 = ModelIteratorFactory(DocumentModelV2, next_page_v2)
def try_forever(action, *args, **kwargs):
while True:
try:
return action(*args, **kwargs)
except Exception as e:
logger.exception(e)
time.sleep(5)
logger.info("Trying again...")
|
<commit_before>import time
import logging
from cassandra.cqlengine.query import Token
from scrapi.database import _manager
from scrapi.processing.cassandra import DocumentModel
_manager.setup()
logger = logging.getLogger(__name__)
def documents(*sources):
q = DocumentModel.objects.timeout(500).allow_filtering().all().limit(1000)
querysets = (q.filter(source=source) for source in sources) if sources else [q]
for query in querysets:
page = try_forever(list, query)
while len(page) > 0:
for doc in page:
yield doc
page = try_forever(next_page, query, page)
def next_page(query, page):
return list(query.filter(pk__token__gt=Token(page[-1].pk)))
def try_forever(action, *args, **kwargs):
while True:
try:
return action(*args, **kwargs)
except Exception as e:
logger.exception(e)
time.sleep(5)
logger.info("Trying again...")
<commit_msg>Add ability to iterate over old document model and new document model for migrations<commit_after>
|
import time
import logging
from cassandra.cqlengine.query import Token
from scrapi.database import _manager
from scrapi.processing.cassandra import DocumentModel, DocumentModelV2
_manager.setup()
logger = logging.getLogger(__name__)
def ModelIteratorFactory(model, next_page):
def model_iterator(*sources):
q = model.objects.timeout(500).allow_filtering().all().limit(1000)
querysets = (q.filter(source=source) for source in sources) if sources else [q]
for query in querysets:
page = try_forever(list, query)
while len(page) > 0:
for doc in page:
yield doc
page = try_forever(next_page, query, page)
return model_iterator
def next_page_v1(query, page):
return list(query.filter(pk__token__gt=Token(page[-1].pk)))
def next_page_v2(query, page):
return list(query.filter(docID__gt=page[-1].docID))
documents_v1 = ModelIteratorFactory(DocumentModel, next_page_v1)
documents_v2 = ModelIteratorFactory(DocumentModelV2, next_page_v2)
def try_forever(action, *args, **kwargs):
while True:
try:
return action(*args, **kwargs)
except Exception as e:
logger.exception(e)
time.sleep(5)
logger.info("Trying again...")
|
import time
import logging
from cassandra.cqlengine.query import Token
from scrapi.database import _manager
from scrapi.processing.cassandra import DocumentModel
_manager.setup()
logger = logging.getLogger(__name__)
def documents(*sources):
q = DocumentModel.objects.timeout(500).allow_filtering().all().limit(1000)
querysets = (q.filter(source=source) for source in sources) if sources else [q]
for query in querysets:
page = try_forever(list, query)
while len(page) > 0:
for doc in page:
yield doc
page = try_forever(next_page, query, page)
def next_page(query, page):
return list(query.filter(pk__token__gt=Token(page[-1].pk)))
def try_forever(action, *args, **kwargs):
while True:
try:
return action(*args, **kwargs)
except Exception as e:
logger.exception(e)
time.sleep(5)
logger.info("Trying again...")
Add ability to iterate over old document model and new document model for migrationsimport time
import logging
from cassandra.cqlengine.query import Token
from scrapi.database import _manager
from scrapi.processing.cassandra import DocumentModel, DocumentModelV2
_manager.setup()
logger = logging.getLogger(__name__)
def ModelIteratorFactory(model, next_page):
def model_iterator(*sources):
q = model.objects.timeout(500).allow_filtering().all().limit(1000)
querysets = (q.filter(source=source) for source in sources) if sources else [q]
for query in querysets:
page = try_forever(list, query)
while len(page) > 0:
for doc in page:
yield doc
page = try_forever(next_page, query, page)
return model_iterator
def next_page_v1(query, page):
return list(query.filter(pk__token__gt=Token(page[-1].pk)))
def next_page_v2(query, page):
return list(query.filter(docID__gt=page[-1].docID))
documents_v1 = ModelIteratorFactory(DocumentModel, next_page_v1)
documents_v2 = ModelIteratorFactory(DocumentModelV2, next_page_v2)
def try_forever(action, *args, **kwargs):
while True:
try:
return action(*args, **kwargs)
except Exception as e:
logger.exception(e)
time.sleep(5)
logger.info("Trying again...")
|
<commit_before>import time
import logging
from cassandra.cqlengine.query import Token
from scrapi.database import _manager
from scrapi.processing.cassandra import DocumentModel
_manager.setup()
logger = logging.getLogger(__name__)
def documents(*sources):
q = DocumentModel.objects.timeout(500).allow_filtering().all().limit(1000)
querysets = (q.filter(source=source) for source in sources) if sources else [q]
for query in querysets:
page = try_forever(list, query)
while len(page) > 0:
for doc in page:
yield doc
page = try_forever(next_page, query, page)
def next_page(query, page):
return list(query.filter(pk__token__gt=Token(page[-1].pk)))
def try_forever(action, *args, **kwargs):
while True:
try:
return action(*args, **kwargs)
except Exception as e:
logger.exception(e)
time.sleep(5)
logger.info("Trying again...")
<commit_msg>Add ability to iterate over old document model and new document model for migrations<commit_after>import time
import logging
from cassandra.cqlengine.query import Token
from scrapi.database import _manager
from scrapi.processing.cassandra import DocumentModel, DocumentModelV2
_manager.setup()
logger = logging.getLogger(__name__)
def ModelIteratorFactory(model, next_page):
def model_iterator(*sources):
q = model.objects.timeout(500).allow_filtering().all().limit(1000)
querysets = (q.filter(source=source) for source in sources) if sources else [q]
for query in querysets:
page = try_forever(list, query)
while len(page) > 0:
for doc in page:
yield doc
page = try_forever(next_page, query, page)
return model_iterator
def next_page_v1(query, page):
return list(query.filter(pk__token__gt=Token(page[-1].pk)))
def next_page_v2(query, page):
return list(query.filter(docID__gt=page[-1].docID))
documents_v1 = ModelIteratorFactory(DocumentModel, next_page_v1)
documents_v2 = ModelIteratorFactory(DocumentModelV2, next_page_v2)
def try_forever(action, *args, **kwargs):
while True:
try:
return action(*args, **kwargs)
except Exception as e:
logger.exception(e)
time.sleep(5)
logger.info("Trying again...")
|
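A brief usage sketch for the iterators defined above; the source names and the per-document steps are placeholders, not part of the scrapi code.
from scripts.util import documents_v1, documents_v2
# Stream every document from the old model, optionally limited to sources.
for doc in documents_v1('crossref', 'pubmed'):
    migrate(doc)   # hypothetical per-document migration step
# Walk the new model the same way to verify the migrated records.
for doc in documents_v2():
    verify(doc)    # hypothetical validation step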
c1bec6e99e04785e102d4de1e1a82d8a78d2b9ff
|
config/fuzz_pox_proactive.py
|
config/fuzz_pox_proactive.py
|
from config.experiment_config_lib import ControllerConfig
from sts.topology import FatTree
from sts.control_flow import Fuzzer
from sts.input_traces.input_logger import InputLogger
from sts.simulation_state import SimulationConfig
from sts.util.convenience import backtick
def get_additional_metadata():
path = "dart_pox"
return {
'commit' : backtick("git rev-parse HEAD", cwd=path),
'branch' : backtick("git rev-parse --abbrev-ref HEAD", cwd=path),
'remote' : backtick("git remote show origin", cwd=path),
}
# Use POX as our controller
start_cmd = ('''./pox.py --verbose --unthreaded-sh '''
'''sts.util.socket_mux.pox_monkeypatcher '''
# --snapshot_address=/Users/cs/Research/UCB/code/sts/snapshot_socket'''
#'''sts.syncproto.pox_syncer --blocking=False '''
'''openflow.discovery forwarding.topo_proactive '''
'''openflow.of_01 --address=__address__ --port=__port__''')
controllers = [ControllerConfig(start_cmd, cwd="dart_pox")]
topology_class = FatTree
topology_params = "num_pods=3,use_portland_addressing=False"
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
multiplex_sockets=True,
topology_params=topology_params)
control_flow = Fuzzer(simulation_config, check_interval=1,
halt_on_violation=True,
input_logger=InputLogger(),
invariant_check_name="check_for_invalid_ports")
|
Add config for fuzzing POX proactive mode
|
Add config for fuzzing POX proactive mode
|
Python
|
apache-2.0
|
ucb-sts/sts,jmiserez/sts,jmiserez/sts,ucb-sts/sts
|
Add config for fuzzing POX proactive mode
|
from config.experiment_config_lib import ControllerConfig
from sts.topology import FatTree
from sts.control_flow import Fuzzer
from sts.input_traces.input_logger import InputLogger
from sts.simulation_state import SimulationConfig
from sts.util.convenience import backtick
def get_additional_metadata():
path = "dart_pox"
return {
'commit' : backtick("git rev-parse HEAD", cwd=path),
'branch' : backtick("git rev-parse --abbrev-ref HEAD", cwd=path),
'remote' : backtick("git remote show origin", cwd=path),
}
# Use POX as our controller
start_cmd = ('''./pox.py --verbose --unthreaded-sh '''
'''sts.util.socket_mux.pox_monkeypatcher '''
# --snapshot_address=/Users/cs/Research/UCB/code/sts/snapshot_socket'''
#'''sts.syncproto.pox_syncer --blocking=False '''
'''openflow.discovery forwarding.topo_proactive '''
'''openflow.of_01 --address=__address__ --port=__port__''')
controllers = [ControllerConfig(start_cmd, cwd="dart_pox")]
topology_class = FatTree
topology_params = "num_pods=3,use_portland_addressing=False"
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
multiplex_sockets=True,
topology_params=topology_params)
control_flow = Fuzzer(simulation_config, check_interval=1,
halt_on_violation=True,
input_logger=InputLogger(),
invariant_check_name="check_for_invalid_ports")
|
<commit_before><commit_msg>Add config for fuzzing POX proactive mode<commit_after>
|
from config.experiment_config_lib import ControllerConfig
from sts.topology import FatTree
from sts.control_flow import Fuzzer
from sts.input_traces.input_logger import InputLogger
from sts.simulation_state import SimulationConfig
from sts.util.convenience import backtick
def get_additional_metadata():
path = "dart_pox"
return {
'commit' : backtick("git rev-parse HEAD", cwd=path),
'branch' : backtick("git rev-parse --abbrev-ref HEAD", cwd=path),
'remote' : backtick("git remote show origin", cwd=path),
}
# Use POX as our controller
start_cmd = ('''./pox.py --verbose --unthreaded-sh '''
'''sts.util.socket_mux.pox_monkeypatcher '''
# --snapshot_address=/Users/cs/Research/UCB/code/sts/snapshot_socket'''
#'''sts.syncproto.pox_syncer --blocking=False '''
'''openflow.discovery forwarding.topo_proactive '''
'''openflow.of_01 --address=__address__ --port=__port__''')
controllers = [ControllerConfig(start_cmd, cwd="dart_pox")]
topology_class = FatTree
topology_params = "num_pods=3,use_portland_addressing=False"
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
multiplex_sockets=True,
topology_params=topology_params)
control_flow = Fuzzer(simulation_config, check_interval=1,
halt_on_violation=True,
input_logger=InputLogger(),
invariant_check_name="check_for_invalid_ports")
|
Add config for fuzzing POX proactive mode
from config.experiment_config_lib import ControllerConfig
from sts.topology import FatTree
from sts.control_flow import Fuzzer
from sts.input_traces.input_logger import InputLogger
from sts.simulation_state import SimulationConfig
from sts.util.convenience import backtick
def get_additional_metadata():
path = "dart_pox"
return {
'commit' : backtick("git rev-parse HEAD", cwd=path),
'branch' : backtick("git rev-parse --abbrev-ref HEAD", cwd=path),
'remote' : backtick("git remote show origin", cwd=path),
}
# Use POX as our controller
start_cmd = ('''./pox.py --verbose --unthreaded-sh '''
'''sts.util.socket_mux.pox_monkeypatcher '''
# --snapshot_address=/Users/cs/Research/UCB/code/sts/snapshot_socket'''
#'''sts.syncproto.pox_syncer --blocking=False '''
'''openflow.discovery forwarding.topo_proactive '''
'''openflow.of_01 --address=__address__ --port=__port__''')
controllers = [ControllerConfig(start_cmd, cwd="dart_pox")]
topology_class = FatTree
topology_params = "num_pods=3,use_portland_addressing=False"
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
multiplex_sockets=True,
topology_params=topology_params)
control_flow = Fuzzer(simulation_config, check_interval=1,
halt_on_violation=True,
input_logger=InputLogger(),
invariant_check_name="check_for_invalid_ports")
|
<commit_before><commit_msg>Add config for fuzzing POX proactive mode<commit_after>
from config.experiment_config_lib import ControllerConfig
from sts.topology import FatTree
from sts.control_flow import Fuzzer
from sts.input_traces.input_logger import InputLogger
from sts.simulation_state import SimulationConfig
from sts.util.convenience import backtick
def get_additional_metadata():
path = "dart_pox"
return {
'commit' : backtick("git rev-parse HEAD", cwd=path),
'branch' : backtick("git rev-parse --abbrev-ref HEAD", cwd=path),
'remote' : backtick("git remote show origin", cwd=path),
}
# Use POX as our controller
start_cmd = ('''./pox.py --verbose --unthreaded-sh '''
'''sts.util.socket_mux.pox_monkeypatcher '''
# --snapshot_address=/Users/cs/Research/UCB/code/sts/snapshot_socket'''
#'''sts.syncproto.pox_syncer --blocking=False '''
'''openflow.discovery forwarding.topo_proactive '''
'''openflow.of_01 --address=__address__ --port=__port__''')
controllers = [ControllerConfig(start_cmd, cwd="dart_pox")]
topology_class = FatTree
topology_params = "num_pods=3,use_portland_addressing=False"
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
multiplex_sockets=True,
topology_params=topology_params)
control_flow = Fuzzer(simulation_config, check_interval=1,
halt_on_violation=True,
input_logger=InputLogger(),
invariant_check_name="check_for_invalid_ports")
|
|
1293197f86e85d346b65f5fb065ce6099031a5d3
|
src/ggrc_basic_permissions/migrations/versions/20131210004352_1a22bb208258_auditors_have_docume.py
|
src/ggrc_basic_permissions/migrations/versions/20131210004352_1a22bb208258_auditors_have_docume.py
|
"""Auditors have document and meeting permissions in audit context.
Revision ID: 1a22bb208258
Revises: 1f865f61312
Create Date: 2013-12-10 00:43:52.151598
"""
# revision identifiers, used by Alembic.
revision = '1a22bb208258'
down_revision = '1f865f61312'
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column, select
import json
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.String)
)
def get_role_permissions(role):
connection = op.get_bind()
role = connection.execute(
select([roles_table.c.permissions_json])\
.where(roles_table.c.name == role)).fetchone()
return json.loads(role.permissions_json)
def update_role_permissions(role, permissions):
op.execute(roles_table\
.update()\
.values(permissions_json = json.dumps(permissions))\
.where(roles_table.c.name == role))
def upgrade():
permissions = get_role_permissions('Auditor')
permissions['read'].extend(['Document', 'Meeting'])
update_role_permissions('Auditor', permissions)
def downgrade():
permissions = get_role_permissions('Auditor')
permissions['read'].remove('Document')
permissions['read'].remove('Meeting')
update_role_permissions('Auditor', permissions)
|
Add permission to read Document and Meeting resources in the Audit context for Auditor role.
|
Add permission to read Document and Meeting resources in the Audit
context for Auditor role.
|
Python
|
apache-2.0
|
uskudnik/ggrc-core,andrei-karalionak/ggrc-core,hyperNURb/ggrc-core,edofic/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,vladan-m/ggrc-core,vladan-m/ggrc-core,hasanalom/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,hyperNURb/ggrc-core,josthkko/ggrc-core,uskudnik/ggrc-core,vladan-m/ggrc-core,j0gurt/ggrc-core,prasannav7/ggrc-core,uskudnik/ggrc-core,j0gurt/ggrc-core,NejcZupec/ggrc-core,jmakov/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,prasannav7/ggrc-core,josthkko/ggrc-core,jmakov/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,kr41/ggrc-core,jmakov/ggrc-core,vladan-m/ggrc-core,AleksNeStu/ggrc-core,jmakov/ggrc-core,josthkko/ggrc-core,jmakov/ggrc-core,prasannav7/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,NejcZupec/ggrc-core,hyperNURb/ggrc-core,prasannav7/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,plamut/ggrc-core,vladan-m/ggrc-core,edofic/ggrc-core,NejcZupec/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,uskudnik/ggrc-core,edofic/ggrc-core,hasanalom/ggrc-core,kr41/ggrc-core,hasanalom/ggrc-core,VinnieJohns/ggrc-core,hasanalom/ggrc-core,hyperNURb/ggrc-core,uskudnik/ggrc-core,hasanalom/ggrc-core,VinnieJohns/ggrc-core,hyperNURb/ggrc-core
|
Add permission to read Document and Meeting resources in the Audit
context for Auditor role.
|
"""Auditors have document and meeting permissions in audit context.
Revision ID: 1a22bb208258
Revises: 1f865f61312
Create Date: 2013-12-10 00:43:52.151598
"""
# revision identifiers, used by Alembic.
revision = '1a22bb208258'
down_revision = '1f865f61312'
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column, select
import json
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.String)
)
def get_role_permissions(role):
connection = op.get_bind()
role = connection.execute(
select([roles_table.c.permissions_json])\
.where(roles_table.c.name == role)).fetchone()
return json.loads(role.permissions_json)
def update_role_permissions(role, permissions):
op.execute(roles_table\
.update()\
.values(permissions_json = json.dumps(permissions))\
.where(roles_table.c.name == role))
def upgrade():
permissions = get_role_permissions('Auditor')
permissions['read'].extend(['Document', 'Meeting'])
update_role_permissions('Auditor', permissions)
def downgrade():
permissions = get_role_permissions('Auditor')
permissions['read'].remove('Document')
permissions['read'].remove('Meeting')
update_role_permissions('Auditor', permissions)
|
<commit_before><commit_msg>Add permission to read Document and Meeting resources in the Audit
context for Auditor role.<commit_after>
|
"""Auditors have document and meeting permissions in audit context.
Revision ID: 1a22bb208258
Revises: 1f865f61312
Create Date: 2013-12-10 00:43:52.151598
"""
# revision identifiers, used by Alembic.
revision = '1a22bb208258'
down_revision = '1f865f61312'
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column, select
import json
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.String)
)
def get_role_permissions(role):
connection = op.get_bind()
role = connection.execute(
select([roles_table.c.permissions_json])\
.where(roles_table.c.name == role)).fetchone()
return json.loads(role.permissions_json)
def update_role_permissions(role, permissions):
op.execute(roles_table\
.update()\
.values(permissions_json = json.dumps(permissions))\
.where(roles_table.c.name == role))
def upgrade():
permissions = get_role_permissions('Auditor')
permissions['read'].extend(['Document', 'Meeting'])
update_role_permissions('Auditor', permissions)
def downgrade():
permissions = get_role_permissions('Auditor')
permissions['read'].remove('Document')
permissions['read'].remove('Meeting')
update_role_permissions('Auditor', permissions)
|
Add permission to read Document and Meeting resources in the Audit
context for Auditor role.
"""Auditors have document and meeting permissions in audit context.
Revision ID: 1a22bb208258
Revises: 1f865f61312
Create Date: 2013-12-10 00:43:52.151598
"""
# revision identifiers, used by Alembic.
revision = '1a22bb208258'
down_revision = '1f865f61312'
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column, select
import json
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.String)
)
def get_role_permissions(role):
connection = op.get_bind()
role = connection.execute(
select([roles_table.c.permissions_json])\
.where(roles_table.c.name == role)).fetchone()
return json.loads(role.permissions_json)
def update_role_permissions(role, permissions):
op.execute(roles_table\
.update()\
.values(permissions_json = json.dumps(permissions))\
.where(roles_table.c.name == role))
def upgrade():
permissions = get_role_permissions('Auditor')
permissions['read'].extend(['Document', 'Meeting'])
update_role_permissions('Auditor', permissions)
def downgrade():
permissions = get_role_permissions('Auditor')
permissions['read'].remove('Document')
permissions['read'].remove('Meeting')
update_role_permissions('Auditor', permissions)
|
<commit_before><commit_msg>Add permission to read Document and Meeting resources in the Audit
context for Auditor role.<commit_after>
"""Auditors have document and meeting permissions in audit context.
Revision ID: 1a22bb208258
Revises: 1f865f61312
Create Date: 2013-12-10 00:43:52.151598
"""
# revision identifiers, used by Alembic.
revision = '1a22bb208258'
down_revision = '1f865f61312'
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column, select
import json
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.String)
)
def get_role_permissions(role):
connection = op.get_bind()
role = connection.execute(
select([roles_table.c.permissions_json])\
.where(roles_table.c.name == role)).fetchone()
return json.loads(role.permissions_json)
def update_role_permissions(role, permissions):
op.execute(roles_table\
.update()\
.values(permissions_json = json.dumps(permissions))\
.where(roles_table.c.name == role))
def upgrade():
permissions = get_role_permissions('Auditor')
permissions['read'].extend(['Document', 'Meeting'])
update_role_permissions('Auditor', permissions)
def downgrade():
permissions = get_role_permissions('Auditor')
permissions['read'].remove('Document')
permissions['read'].remove('Meeting')
update_role_permissions('Auditor', permissions)
|
|
c7bbec07e9b1bd87a48bdae87071b59c0a575153
|
test/test_conn.py
|
test/test_conn.py
|
# pylint: skip-file
from __future__ import absolute_import
from errno import EALREADY, EINPROGRESS, EISCONN, ECONNRESET
import socket
import time
import pytest
from kafka.conn import BrokerConnection, ConnectionStates
@pytest.fixture
def socket(mocker):
socket = mocker.MagicMock()
socket.connect_ex.return_value = 0
mocker.patch('socket.socket', return_value=socket)
return socket
@pytest.fixture
def conn(socket):
conn = BrokerConnection('localhost', 9092, socket.AF_INET)
return conn
@pytest.mark.parametrize("states", [
(([EINPROGRESS, EALREADY], ConnectionStates.CONNECTING),),
(([EALREADY, EALREADY], ConnectionStates.CONNECTING),),
(([0], ConnectionStates.CONNECTED),),
(([EINPROGRESS, EALREADY], ConnectionStates.CONNECTING),
([ECONNRESET], ConnectionStates.DISCONNECTED)),
(([EINPROGRESS, EALREADY], ConnectionStates.CONNECTING),
([EALREADY], ConnectionStates.CONNECTING),
([EISCONN], ConnectionStates.CONNECTED)),
])
def test_connect(socket, conn, states):
assert conn.state is ConnectionStates.DISCONNECTED
for errno, state in states:
socket.connect_ex.side_effect = errno
conn.connect()
assert conn.state is state
def test_connect_timeout(socket, conn):
assert conn.state is ConnectionStates.DISCONNECTED
# Initial connect returns EINPROGRESS
# immediate inline connect returns EALREADY
# second explicit connect returns EALREADY
# third explicit connect returns EALREADY and times out via last_attempt
socket.connect_ex.side_effect = [EINPROGRESS, EALREADY, EALREADY, EALREADY]
conn.connect()
assert conn.state is ConnectionStates.CONNECTING
conn.connect()
assert conn.state is ConnectionStates.CONNECTING
conn.last_attempt = 0
conn.connect()
assert conn.state is ConnectionStates.DISCONNECTED
def test_blacked_out(conn):
assert not conn.blacked_out()
conn.last_attempt = time.time()
assert conn.blacked_out()
def test_connected(conn):
assert not conn.connected()
conn.state = ConnectionStates.CONNECTED
assert conn.connected()
def test_connecting(conn):
assert not conn.connecting()
conn.state = ConnectionStates.CONNECTING
assert conn.connecting()
conn.state = ConnectionStates.CONNECTED
assert not conn.connecting()
# TODO: test_send, test_recv, test_can_send_more, test_close
|
Add basic unit test coverage for BrokerConnection
|
Add basic unit test coverage for BrokerConnection
|
Python
|
apache-2.0
|
mumrah/kafka-python,scrapinghub/kafka-python,Yelp/kafka-python,dpkp/kafka-python,wikimedia/operations-debs-python-kafka,DataDog/kafka-python,scrapinghub/kafka-python,wikimedia/operations-debs-python-kafka,ohmu/kafka-python,Aloomaio/kafka-python,zackdever/kafka-python,mumrah/kafka-python,Yelp/kafka-python,dpkp/kafka-python,zackdever/kafka-python,Aloomaio/kafka-python,ohmu/kafka-python
|
Add basic unit test coverage for BrokerConnection
|
# pylint: skip-file
from __future__ import absolute_import
from errno import EALREADY, EINPROGRESS, EISCONN, ECONNRESET
import socket
import time
import pytest
from kafka.conn import BrokerConnection, ConnectionStates
@pytest.fixture
def socket(mocker):
socket = mocker.MagicMock()
socket.connect_ex.return_value = 0
mocker.patch('socket.socket', return_value=socket)
return socket
@pytest.fixture
def conn(socket):
conn = BrokerConnection('localhost', 9092, socket.AF_INET)
return conn
@pytest.mark.parametrize("states", [
(([EINPROGRESS, EALREADY], ConnectionStates.CONNECTING),),
(([EALREADY, EALREADY], ConnectionStates.CONNECTING),),
(([0], ConnectionStates.CONNECTED),),
(([EINPROGRESS, EALREADY], ConnectionStates.CONNECTING),
([ECONNRESET], ConnectionStates.DISCONNECTED)),
(([EINPROGRESS, EALREADY], ConnectionStates.CONNECTING),
([EALREADY], ConnectionStates.CONNECTING),
([EISCONN], ConnectionStates.CONNECTED)),
])
def test_connect(socket, conn, states):
assert conn.state is ConnectionStates.DISCONNECTED
for errno, state in states:
socket.connect_ex.side_effect = errno
conn.connect()
assert conn.state is state
def test_connect_timeout(socket, conn):
assert conn.state is ConnectionStates.DISCONNECTED
# Initial connect returns EINPROGRESS
# immediate inline connect returns EALREADY
# second explicit connect returns EALREADY
# third explicit connect returns EALREADY and times out via last_attempt
socket.connect_ex.side_effect = [EINPROGRESS, EALREADY, EALREADY, EALREADY]
conn.connect()
assert conn.state is ConnectionStates.CONNECTING
conn.connect()
assert conn.state is ConnectionStates.CONNECTING
conn.last_attempt = 0
conn.connect()
assert conn.state is ConnectionStates.DISCONNECTED
def test_blacked_out(conn):
assert not conn.blacked_out()
conn.last_attempt = time.time()
assert conn.blacked_out()
def test_connected(conn):
assert not conn.connected()
conn.state = ConnectionStates.CONNECTED
assert conn.connected()
def test_connecting(conn):
assert not conn.connecting()
conn.state = ConnectionStates.CONNECTING
assert conn.connecting()
conn.state = ConnectionStates.CONNECTED
assert not conn.connecting()
# TODO: test_send, test_recv, test_can_send_more, test_close
|
<commit_before><commit_msg>Add basic unit test coverage for BrokerConnection<commit_after>
|
# pylint: skip-file
from __future__ import absolute_import
from errno import EALREADY, EINPROGRESS, EISCONN, ECONNRESET
import socket
import time
import pytest
from kafka.conn import BrokerConnection, ConnectionStates
@pytest.fixture
def socket(mocker):
socket = mocker.MagicMock()
socket.connect_ex.return_value = 0
mocker.patch('socket.socket', return_value=socket)
return socket
@pytest.fixture
def conn(socket):
conn = BrokerConnection('localhost', 9092, socket.AF_INET)
return conn
@pytest.mark.parametrize("states", [
(([EINPROGRESS, EALREADY], ConnectionStates.CONNECTING),),
(([EALREADY, EALREADY], ConnectionStates.CONNECTING),),
(([0], ConnectionStates.CONNECTED),),
(([EINPROGRESS, EALREADY], ConnectionStates.CONNECTING),
([ECONNRESET], ConnectionStates.DISCONNECTED)),
(([EINPROGRESS, EALREADY], ConnectionStates.CONNECTING),
([EALREADY], ConnectionStates.CONNECTING),
([EISCONN], ConnectionStates.CONNECTED)),
])
def test_connect(socket, conn, states):
assert conn.state is ConnectionStates.DISCONNECTED
for errno, state in states:
socket.connect_ex.side_effect = errno
conn.connect()
assert conn.state is state
def test_connect_timeout(socket, conn):
assert conn.state is ConnectionStates.DISCONNECTED
# Initial connect returns EINPROGRESS
# immediate inline connect returns EALREADY
# second explicit connect returns EALREADY
# third explicit connect returns EALREADY and times out via last_attempt
socket.connect_ex.side_effect = [EINPROGRESS, EALREADY, EALREADY, EALREADY]
conn.connect()
assert conn.state is ConnectionStates.CONNECTING
conn.connect()
assert conn.state is ConnectionStates.CONNECTING
conn.last_attempt = 0
conn.connect()
assert conn.state is ConnectionStates.DISCONNECTED
def test_blacked_out(conn):
assert not conn.blacked_out()
conn.last_attempt = time.time()
assert conn.blacked_out()
def test_connected(conn):
assert not conn.connected()
conn.state = ConnectionStates.CONNECTED
assert conn.connected()
def test_connecting(conn):
assert not conn.connecting()
conn.state = ConnectionStates.CONNECTING
assert conn.connecting()
conn.state = ConnectionStates.CONNECTED
assert not conn.connecting()
# TODO: test_send, test_recv, test_can_send_more, test_close
|
Add basic unit test coverage for BrokerConnection# pylint: skip-file
from __future__ import absolute_import
from errno import EALREADY, EINPROGRESS, EISCONN, ECONNRESET
import socket
import time
import pytest
from kafka.conn import BrokerConnection, ConnectionStates
@pytest.fixture
def socket(mocker):
socket = mocker.MagicMock()
socket.connect_ex.return_value = 0
mocker.patch('socket.socket', return_value=socket)
return socket
@pytest.fixture
def conn(socket):
conn = BrokerConnection('localhost', 9092, socket.AF_INET)
return conn
@pytest.mark.parametrize("states", [
(([EINPROGRESS, EALREADY], ConnectionStates.CONNECTING),),
(([EALREADY, EALREADY], ConnectionStates.CONNECTING),),
(([0], ConnectionStates.CONNECTED),),
(([EINPROGRESS, EALREADY], ConnectionStates.CONNECTING),
([ECONNRESET], ConnectionStates.DISCONNECTED)),
(([EINPROGRESS, EALREADY], ConnectionStates.CONNECTING),
([EALREADY], ConnectionStates.CONNECTING),
([EISCONN], ConnectionStates.CONNECTED)),
])
def test_connect(socket, conn, states):
assert conn.state is ConnectionStates.DISCONNECTED
for errno, state in states:
socket.connect_ex.side_effect = errno
conn.connect()
assert conn.state is state
def test_connect_timeout(socket, conn):
assert conn.state is ConnectionStates.DISCONNECTED
# Initial connect returns EINPROGRESS
# immediate inline connect returns EALREADY
# second explicit connect returns EALREADY
# third explicit connect returns EALREADY and times out via last_attempt
socket.connect_ex.side_effect = [EINPROGRESS, EALREADY, EALREADY, EALREADY]
conn.connect()
assert conn.state is ConnectionStates.CONNECTING
conn.connect()
assert conn.state is ConnectionStates.CONNECTING
conn.last_attempt = 0
conn.connect()
assert conn.state is ConnectionStates.DISCONNECTED
def test_blacked_out(conn):
assert not conn.blacked_out()
conn.last_attempt = time.time()
assert conn.blacked_out()
def test_connected(conn):
assert not conn.connected()
conn.state = ConnectionStates.CONNECTED
assert conn.connected()
def test_connecting(conn):
assert not conn.connecting()
conn.state = ConnectionStates.CONNECTING
assert conn.connecting()
conn.state = ConnectionStates.CONNECTED
assert not conn.connecting()
# TODO: test_send, test_recv, test_can_send_more, test_close
|
<commit_before><commit_msg>Add basic unit test coverage for BrokerConnection<commit_after># pylint: skip-file
from __future__ import absolute_import
from errno import EALREADY, EINPROGRESS, EISCONN, ECONNRESET
import socket
import time
import pytest
from kafka.conn import BrokerConnection, ConnectionStates
@pytest.fixture
def socket(mocker):
socket = mocker.MagicMock()
socket.connect_ex.return_value = 0
mocker.patch('socket.socket', return_value=socket)
return socket
@pytest.fixture
def conn(socket):
conn = BrokerConnection('localhost', 9092, socket.AF_INET)
return conn
@pytest.mark.parametrize("states", [
(([EINPROGRESS, EALREADY], ConnectionStates.CONNECTING),),
(([EALREADY, EALREADY], ConnectionStates.CONNECTING),),
(([0], ConnectionStates.CONNECTED),),
(([EINPROGRESS, EALREADY], ConnectionStates.CONNECTING),
([ECONNRESET], ConnectionStates.DISCONNECTED)),
(([EINPROGRESS, EALREADY], ConnectionStates.CONNECTING),
([EALREADY], ConnectionStates.CONNECTING),
([EISCONN], ConnectionStates.CONNECTED)),
])
def test_connect(socket, conn, states):
assert conn.state is ConnectionStates.DISCONNECTED
for errno, state in states:
socket.connect_ex.side_effect = errno
conn.connect()
assert conn.state is state
def test_connect_timeout(socket, conn):
assert conn.state is ConnectionStates.DISCONNECTED
# Initial connect returns EINPROGRESS
# immediate inline connect returns EALREADY
# second explicit connect returns EALREADY
# third explicit connect returns EALREADY and times out via last_attempt
socket.connect_ex.side_effect = [EINPROGRESS, EALREADY, EALREADY, EALREADY]
conn.connect()
assert conn.state is ConnectionStates.CONNECTING
conn.connect()
assert conn.state is ConnectionStates.CONNECTING
conn.last_attempt = 0
conn.connect()
assert conn.state is ConnectionStates.DISCONNECTED
def test_blacked_out(conn):
assert not conn.blacked_out()
conn.last_attempt = time.time()
assert conn.blacked_out()
def test_connected(conn):
assert not conn.connected()
conn.state = ConnectionStates.CONNECTED
assert conn.connected()
def test_connecting(conn):
assert not conn.connecting()
conn.state = ConnectionStates.CONNECTING
assert conn.connecting()
conn.state = ConnectionStates.CONNECTED
assert not conn.connecting()
# TODO: test_send, test_recv, test_can_send_more, test_close
|
|
8fe3932189d483efc72e0d34a9417ffbb34f5809
|
test/test_sync.py
|
test/test_sync.py
|
import unittest
import canopen
class TestSync(unittest.TestCase):
def test_sync_producer(self):
network = canopen.Network()
network.connect(bustype="virtual", receive_own_messages=True)
producer = canopen.sync.SyncProducer(network)
producer.transmit()
msg = network.bus.recv(1)
network.disconnect()
self.assertEqual(msg.arbitration_id, 0x80)
self.assertEqual(msg.dlc, 0)
def test_sync_producer_counter(self):
network = canopen.Network()
network.connect(bustype="virtual", receive_own_messages=True)
producer = canopen.sync.SyncProducer(network)
producer.transmit(2)
msg = network.bus.recv(1)
network.disconnect()
self.assertEqual(msg.arbitration_id, 0x80)
self.assertEqual(msg.dlc, 1)
self.assertEqual(msg.data, b"\x02")
if __name__ == "__main__":
unittest.main()
|
Add unit test for SYNC producer
|
Add unit test for SYNC producer
Test one-shot SYNC transmission with and without counter value.
|
Python
|
mit
|
christiansandberg/canopen,christiansandberg/canopen
|
Add unit test for SYNC producer
Test one-shot SYNC transmission with and without counter value.
|
import unittest
import canopen
class TestSync(unittest.TestCase):
def test_sync_producer(self):
network = canopen.Network()
network.connect(bustype="virtual", receive_own_messages=True)
producer = canopen.sync.SyncProducer(network)
producer.transmit()
msg = network.bus.recv(1)
network.disconnect()
self.assertEqual(msg.arbitration_id, 0x80)
self.assertEqual(msg.dlc, 0)
def test_sync_producer_counter(self):
network = canopen.Network()
network.connect(bustype="virtual", receive_own_messages=True)
producer = canopen.sync.SyncProducer(network)
producer.transmit(2)
msg = network.bus.recv(1)
network.disconnect()
self.assertEqual(msg.arbitration_id, 0x80)
self.assertEqual(msg.dlc, 1)
self.assertEqual(msg.data, b"\x02")
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unit test for SYNC producer
Test one-shot SYNC transmission with and without counter value.<commit_after>
|
import unittest
import canopen
class TestSync(unittest.TestCase):
def test_sync_producer(self):
network = canopen.Network()
network.connect(bustype="virtual", receive_own_messages=True)
producer = canopen.sync.SyncProducer(network)
producer.transmit()
msg = network.bus.recv(1)
network.disconnect()
self.assertEqual(msg.arbitration_id, 0x80)
self.assertEqual(msg.dlc, 0)
def test_sync_producer_counter(self):
network = canopen.Network()
network.connect(bustype="virtual", receive_own_messages=True)
producer = canopen.sync.SyncProducer(network)
producer.transmit(2)
msg = network.bus.recv(1)
network.disconnect()
self.assertEqual(msg.arbitration_id, 0x80)
self.assertEqual(msg.dlc, 1)
self.assertEqual(msg.data, b"\x02")
if __name__ == "__main__":
unittest.main()
|
Add unit test for SYNC producer
Test one-shot SYNC transmission with and without counter value.import unittest
import canopen
class TestSync(unittest.TestCase):
def test_sync_producer(self):
network = canopen.Network()
network.connect(bustype="virtual", receive_own_messages=True)
producer = canopen.sync.SyncProducer(network)
producer.transmit()
msg = network.bus.recv(1)
network.disconnect()
self.assertEqual(msg.arbitration_id, 0x80)
self.assertEqual(msg.dlc, 0)
def test_sync_producer_counter(self):
network = canopen.Network()
network.connect(bustype="virtual", receive_own_messages=True)
producer = canopen.sync.SyncProducer(network)
producer.transmit(2)
msg = network.bus.recv(1)
network.disconnect()
self.assertEqual(msg.arbitration_id, 0x80)
self.assertEqual(msg.dlc, 1)
self.assertEqual(msg.data, b"\x02")
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unit test for SYNC producer
Test one-shot SYNC transmission with and without counter value.<commit_after>import unittest
import canopen
class TestSync(unittest.TestCase):
def test_sync_producer(self):
network = canopen.Network()
network.connect(bustype="virtual", receive_own_messages=True)
producer = canopen.sync.SyncProducer(network)
producer.transmit()
msg = network.bus.recv(1)
network.disconnect()
self.assertEqual(msg.arbitration_id, 0x80)
self.assertEqual(msg.dlc, 0)
def test_sync_producer_counter(self):
network = canopen.Network()
network.connect(bustype="virtual", receive_own_messages=True)
producer = canopen.sync.SyncProducer(network)
producer.transmit(2)
msg = network.bus.recv(1)
network.disconnect()
self.assertEqual(msg.arbitration_id, 0x80)
self.assertEqual(msg.dlc, 1)
self.assertEqual(msg.data, b"\x02")
if __name__ == "__main__":
unittest.main()
|
|
32e66fe95a60becdb3e15152f2cea1bad0ac8460
|
artists/migrations/0001_initial.py
|
artists/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import model_utils.fields
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Artist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('created', model_utils.fields.AutoCreatedField(editable=False, default=django.utils.timezone.now, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(editable=False, default=django.utils.timezone.now, verbose_name='modified')),
('name', models.CharField(unique=True, max_length=100)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='JamendoArtist',
fields=[
('created', model_utils.fields.AutoCreatedField(editable=False, default=django.utils.timezone.now, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(editable=False, default=django.utils.timezone.now, verbose_name='modified')),
('id', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(unique=True, max_length=100)),
('website', models.URLField()),
('joindate', models.DateField()),
('image', models.URLField()),
('shorturl', models.URLField()),
('shareurl', models.URLField()),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MagnatuneArtist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('created', model_utils.fields.AutoCreatedField(editable=False, default=django.utils.timezone.now, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(editable=False, default=django.utils.timezone.now, verbose_name='modified')),
('artist', models.CharField(unique=True, max_length=100)),
('description', models.TextField(max_length=500)),
('bio', models.TextField(max_length=12000)),
('homepage', models.CharField(max_length=100)),
('bandphoto', models.CharField(max_length=100)),
('city', models.CharField(max_length=50)),
('state', models.CharField(max_length=50)),
('country', models.CharField(max_length=50)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
|
Add migrations for artists app
|
Add migrations for artists app
|
Python
|
bsd-3-clause
|
FreeMusicNinja/api.freemusic.ninja
|
Add migrations for artists app
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import model_utils.fields
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Artist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('created', model_utils.fields.AutoCreatedField(editable=False, default=django.utils.timezone.now, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(editable=False, default=django.utils.timezone.now, verbose_name='modified')),
('name', models.CharField(unique=True, max_length=100)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='JamendoArtist',
fields=[
('created', model_utils.fields.AutoCreatedField(editable=False, default=django.utils.timezone.now, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(editable=False, default=django.utils.timezone.now, verbose_name='modified')),
('id', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(unique=True, max_length=100)),
('website', models.URLField()),
('joindate', models.DateField()),
('image', models.URLField()),
('shorturl', models.URLField()),
('shareurl', models.URLField()),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MagnatuneArtist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('created', model_utils.fields.AutoCreatedField(editable=False, default=django.utils.timezone.now, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(editable=False, default=django.utils.timezone.now, verbose_name='modified')),
('artist', models.CharField(unique=True, max_length=100)),
('description', models.TextField(max_length=500)),
('bio', models.TextField(max_length=12000)),
('homepage', models.CharField(max_length=100)),
('bandphoto', models.CharField(max_length=100)),
('city', models.CharField(max_length=50)),
('state', models.CharField(max_length=50)),
('country', models.CharField(max_length=50)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
|
<commit_before><commit_msg>Add migrations for artists app<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import model_utils.fields
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Artist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('created', model_utils.fields.AutoCreatedField(editable=False, default=django.utils.timezone.now, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(editable=False, default=django.utils.timezone.now, verbose_name='modified')),
('name', models.CharField(unique=True, max_length=100)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='JamendoArtist',
fields=[
('created', model_utils.fields.AutoCreatedField(editable=False, default=django.utils.timezone.now, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(editable=False, default=django.utils.timezone.now, verbose_name='modified')),
('id', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(unique=True, max_length=100)),
('website', models.URLField()),
('joindate', models.DateField()),
('image', models.URLField()),
('shorturl', models.URLField()),
('shareurl', models.URLField()),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MagnatuneArtist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('created', model_utils.fields.AutoCreatedField(editable=False, default=django.utils.timezone.now, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(editable=False, default=django.utils.timezone.now, verbose_name='modified')),
('artist', models.CharField(unique=True, max_length=100)),
('description', models.TextField(max_length=500)),
('bio', models.TextField(max_length=12000)),
('homepage', models.CharField(max_length=100)),
('bandphoto', models.CharField(max_length=100)),
('city', models.CharField(max_length=50)),
('state', models.CharField(max_length=50)),
('country', models.CharField(max_length=50)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
|
Add migrations for artists app# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import model_utils.fields
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Artist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('created', model_utils.fields.AutoCreatedField(editable=False, default=django.utils.timezone.now, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(editable=False, default=django.utils.timezone.now, verbose_name='modified')),
('name', models.CharField(unique=True, max_length=100)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='JamendoArtist',
fields=[
('created', model_utils.fields.AutoCreatedField(editable=False, default=django.utils.timezone.now, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(editable=False, default=django.utils.timezone.now, verbose_name='modified')),
('id', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(unique=True, max_length=100)),
('website', models.URLField()),
('joindate', models.DateField()),
('image', models.URLField()),
('shorturl', models.URLField()),
('shareurl', models.URLField()),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MagnatuneArtist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('created', model_utils.fields.AutoCreatedField(editable=False, default=django.utils.timezone.now, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(editable=False, default=django.utils.timezone.now, verbose_name='modified')),
('artist', models.CharField(unique=True, max_length=100)),
('description', models.TextField(max_length=500)),
('bio', models.TextField(max_length=12000)),
('homepage', models.CharField(max_length=100)),
('bandphoto', models.CharField(max_length=100)),
('city', models.CharField(max_length=50)),
('state', models.CharField(max_length=50)),
('country', models.CharField(max_length=50)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
|
<commit_before><commit_msg>Add migrations for artists app<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import model_utils.fields
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Artist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('created', model_utils.fields.AutoCreatedField(editable=False, default=django.utils.timezone.now, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(editable=False, default=django.utils.timezone.now, verbose_name='modified')),
('name', models.CharField(unique=True, max_length=100)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='JamendoArtist',
fields=[
('created', model_utils.fields.AutoCreatedField(editable=False, default=django.utils.timezone.now, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(editable=False, default=django.utils.timezone.now, verbose_name='modified')),
('id', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(unique=True, max_length=100)),
('website', models.URLField()),
('joindate', models.DateField()),
('image', models.URLField()),
('shorturl', models.URLField()),
('shareurl', models.URLField()),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MagnatuneArtist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('created', model_utils.fields.AutoCreatedField(editable=False, default=django.utils.timezone.now, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(editable=False, default=django.utils.timezone.now, verbose_name='modified')),
('artist', models.CharField(unique=True, max_length=100)),
('description', models.TextField(max_length=500)),
('bio', models.TextField(max_length=12000)),
('homepage', models.CharField(max_length=100)),
('bandphoto', models.CharField(max_length=100)),
('city', models.CharField(max_length=50)),
('state', models.CharField(max_length=50)),
('country', models.CharField(max_length=50)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
|
|
9008062fc37dcc2f4aefe570daf94018b7e76223
|
hiora_cartpole/fourier_fa_int.py
|
hiora_cartpole/fourier_fa_int.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import itertools
import numpy as np
import sympy as sp
def dot(es1, es2):
return sum([e[0]*e[1] for e in zip(es1, es2)])
# Copied from hiora_cartpole.fourier_fa
def c_matrix(order, n_dims):
"""
Generates the parameter (C) vectors for all terms in the Fourier FA.
"""
# All entries from cartesian product {0, …, order+1}^n_dims.
return np.array(
list( itertools.product(range(order+1), repeat=n_dims) ),
dtype=np.int32)
def sum_term(integral, c, c_vec):
return integral.subs(zip(c, c_vec))
def make_sym_Q_s0(state_ranges, order):
n_dims = state_ranges.shape[0]
    C = sp.symbols("c0:%d" % n_dims, integer=True)
    S = sp.symbols("s0:%d" % n_dims, real=True)
integral = reduce(lambda f, s: sp.Integral(f, (s, 0, 1)),
S[1:], sp.cos(sp.pi * dot(S, C))).doit()
sum_terms = [sum_term(integral, C, c_vec)
for c_vec in c_matrix(order, n_dims)]
np_sum_terms = [sp.lambdify(S[0], t, 'numpy')
for t in sum_terms]
def sym_Q_s0_inner(theta, a, s0):
ns0 = (s0 - state_ranges[0]) / (state_ranges[1] - state_ranges[0])
theta_a = theta[a * theta.shape[0]:(a+1) * theta.shape[0]]
return np.dot(theta_a,
                      np.array([npst(ns0) for npst in np_sum_terms]))
return sym_Q_s0_inner
|
Add module symbolic Fourier FA integration
|
Add module symbolic Fourier FA integration
|
Python
|
mit
|
rmoehn/cartpole
|
Add module symbolic Fourier FA integration
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import itertools
import numpy as np
import sympy as sp
def dot(es1, es2):
return sum([e[0]*e[1] for e in zip(es1, es2)])
# Copied from hiora_cartpole.fourier_fa
def c_matrix(order, n_dims):
"""
Generates the parameter (C) vectors for all terms in the Fourier FA.
"""
# All entries from cartesian product {0, …, order+1}^n_dims.
return np.array(
list( itertools.product(range(order+1), repeat=n_dims) ),
dtype=np.int32)
def sum_term(integral, c, c_vec):
return integral.subs(zip(c, c_vec))
def make_sym_Q_s0(state_ranges, order):
n_dims = state_ranges.shape[0]
    C = sp.symbols("c0:%d" % n_dims, integer=True)
    S = sp.symbols("s0:%d" % n_dims, real=True)
integral = reduce(lambda f, s: sp.Integral(f, (s, 0, 1)),
S[1:], sp.cos(sp.pi * dot(S, C))).doit()
sum_terms = [sum_term(integral, C, c_vec)
for c_vec in c_matrix(order, n_dims)]
np_sum_terms = [sp.lambdify(S[0], t, 'numpy')
for t in sum_terms]
def sym_Q_s0_inner(theta, a, s0):
ns0 = (s0 - state_ranges[0]) / (state_ranges[1] - state_ranges[0])
theta_a = theta[a * theta.shape[0]:(a+1) * theta.shape[0]]
return np.dot(theta_a,
                      np.array([npst(ns0) for npst in np_sum_terms]))
return sym_Q_s0_inner
|
<commit_before><commit_msg>Add module symbolic Fourier FA integration<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import itertools
import numpy as np
import sympy as sp
def dot(es1, es2):
return sum([e[0]*e[1] for e in zip(es1, es2)])
# Copied from hiora_cartpole.fourier_fa
def c_matrix(order, n_dims):
"""
Generates the parameter (C) vectors for all terms in the Fourier FA.
"""
# All entries from cartesian product {0, …, order+1}^n_dims.
return np.array(
list( itertools.product(range(order+1), repeat=n_dims) ),
dtype=np.int32)
def sum_term(integral, c, c_vec):
return integral.subs(zip(c, c_vec))
def make_sym_Q_s0(state_ranges, order):
n_dims = state_ranges.shape[0]
    C = sp.symbols("c0:%d" % n_dims, integer=True)
    S = sp.symbols("s0:%d" % n_dims, real=True)
integral = reduce(lambda f, s: sp.Integral(f, (s, 0, 1)),
S[1:], sp.cos(sp.pi * dot(S, C))).doit()
sum_terms = [sum_term(integral, C, c_vec)
for c_vec in c_matrix(order, n_dims)]
np_sum_terms = [sp.lambdify(S[0], t, 'numpy')
for t in sum_terms]
def sym_Q_s0_inner(theta, a, s0):
ns0 = (s0 - state_ranges[0]) / (state_ranges[1] - state_ranges[0])
theta_a = theta[a * theta.shape[0]:(a+1) * theta.shape[0]]
return np.dot(theta_a,
                      np.array([npst(ns0) for npst in np_sum_terms]))
return sym_Q_s0_inner
|
Add module symbolic Fourier FA integration# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import itertools
import numpy as np
import sympy as sp
def dot(es1, es2):
return sum([e[0]*e[1] for e in zip(es1, es2)])
# Copied from hiora_cartpole.fourier_fa
def c_matrix(order, n_dims):
"""
Generates the parameter (C) vectors for all terms in the Fourier FA.
"""
# All entries from cartesian product {0, …, order+1}^n_dims.
return np.array(
list( itertools.product(range(order+1), repeat=n_dims) ),
dtype=np.int32)
def sum_term(integral, c, c_vec):
return integral.subs(zip(c, c_vec))
def make_sym_Q_s0(state_ranges, order):
n_dims = state_ranges.shape[0]
    C = sp.symbols("c0:%d" % n_dims, integer=True)
    S = sp.symbols("s0:%d" % n_dims, real=True)
integral = reduce(lambda f, s: sp.Integral(f, (s, 0, 1)),
S[1:], sp.cos(sp.pi * dot(S, C))).doit()
sum_terms = [sum_term(integral, C, c_vec)
for c_vec in c_matrix(order, n_dims)]
np_sum_terms = [sp.lambdify(S[0], t, 'numpy')
for t in sum_terms]
def sym_Q_s0_inner(theta, a, s0):
ns0 = (s0 - state_ranges[0]) / (state_ranges[1] - state_ranges[0])
theta_a = theta[a * theta.shape[0]:(a+1) * theta.shape[0]]
return np.dot(theta_a,
                      np.array([npst(ns0) for npst in np_sum_terms]))
return sym_Q_s0_inner
|
<commit_before><commit_msg>Add module symbolic Fourier FA integration<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
import itertools
import numpy as np
import sympy as sp
def dot(es1, es2):
return sum([e[0]*e[1] for e in zip(es1, es2)])
# Copied from hiora_cartpole.fourier_fa
def c_matrix(order, n_dims):
"""
Generates the parameter (C) vectors for all terms in the Fourier FA.
"""
# All entries from cartesian product {0, …, order+1}^n_dims.
return np.array(
list( itertools.product(range(order+1), repeat=n_dims) ),
dtype=np.int32)
def sum_term(integral, c, c_vec):
return integral.subs(zip(c, c_vec))
def make_sym_Q_s0(state_ranges, order):
n_dims = state_ranges.shape[0]
    C = sp.symbols("c0:%d" % n_dims, integer=True)
    S = sp.symbols("s0:%d" % n_dims, real=True)
integral = reduce(lambda f, s: sp.Integral(f, (s, 0, 1)),
S[1:], sp.cos(sp.pi * dot(S, C))).doit()
sum_terms = [sum_term(integral, C, c_vec)
for c_vec in c_matrix(order, n_dims)]
np_sum_terms = [sp.lambdify(S[0], t, 'numpy')
for t in sum_terms]
def sym_Q_s0_inner(theta, a, s0):
ns0 = (s0 - state_ranges[0]) / (state_ranges[1] - state_ranges[0])
theta_a = theta[a * theta.shape[0]:(a+1) * theta.shape[0]]
return np.dot(theta_a,
                      np.array([npst(ns0) for npst in np_sum_terms]))
return sym_Q_s0_inner
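# For orientation, a rough sketch of what c_matrix yields for a hypothetical call
# (order=1, n_dims=2); the rows follow directly from itertools.product(range(2), repeat=2):
#   c_matrix(1, 2)
#   -> array([[0, 0],
#             [0, 1],
#             [1, 0],
#             [1, 1]], dtype=int32)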
|
|
0ffe004cdb15296db20edc0c5bbebe761cad387a
|
tests/test_cmi.py
|
tests/test_cmi.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import contextlib
import itertools
import math
import numpy as np
import pytest
from cgpm.cgpm import CGpm
from cgpm.dummy.fourway import FourWay
from cgpm.dummy.piecewise import PieceWise
from cgpm.utils import general as gu
from bayeslite import bayesdb_open
from bayeslite import bayesdb_register_metamodel
from bayeslite.exception import BQLError
from bayeslite.metamodels.cgpm_metamodel import CGPM_Metamodel
from bayeslite.util import cursor_value
bdb = bayesdb_open(':memory:')
bdb.sql_execute('CREATE TABLE t (a, b, c, d, e)')
for a, b, c, d, e in itertools.product(*([range(2)]*5)):
# XXX Insert synthetic data generator here.
bdb.sql_execute('''
INSERT INTO t (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)
''', (a, b, c, d, e))
bdb.execute('''
CREATE POPULATION p FOR t WITH SCHEMA (
MODEL a, b, c, d, e AS NUMERICAL
)
''')
bdb.execute('CREATE METAMODEL m1 FOR p;')
bdb.execute('INITIALIZE 10 MODELS FOR m1;')
bdb.execute('CREATE METAMODEL m2 FOR p;')
bdb.execute('INITIALIZE 10 MODELS FOR m2;')
bdb.execute('ESTIMATE MUTUAL INFORMATION OF a WITH b USING 10 SAMPLES FROM p')
bdb.execute('ESTIMATE MUTUAL INFORMATION OF a WITH b FROM p MODELED BY m1')
bdb.execute('ESTIMATE MUTUAL INFORMATION OF a WITH b GIVEN (c = 1) FROM p;')
bdb.execute('ESTIMATE MUTUAL INFORMATION OF a WITH b GIVEN (d, c = 1) FROM p;')
bdb.execute('''
SIMULATE MUTUAL INFORMATION OF a WITH b FROM MODELS OF p
''')
bdb.execute('''
SIMULATE MUTUAL INFORMATION OF a WITH b GIVEN (c = 1) USING 10 SAMPLES
FROM MODELS OF p USING 10 SAMPLES MODELED BY m2;
''')
bdb.execute('''
SIMULATE MUTUAL INFORMATION OF a WITH b GIVEN (d, c = 1)
FROM MODELS OF p;
''')
|
Add barebones bql query test cases.
|
Add barebones bql query test cases.
|
Python
|
apache-2.0
|
probcomp/bayeslite,probcomp/bayeslite
|
Add barebones bql query test cases.
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import contextlib
import itertools
import math
import numpy as np
import pytest
from cgpm.cgpm import CGpm
from cgpm.dummy.fourway import FourWay
from cgpm.dummy.piecewise import PieceWise
from cgpm.utils import general as gu
from bayeslite import bayesdb_open
from bayeslite import bayesdb_register_metamodel
from bayeslite.exception import BQLError
from bayeslite.metamodels.cgpm_metamodel import CGPM_Metamodel
from bayeslite.util import cursor_value
bdb = bayesdb_open(':memory:')
bdb.sql_execute('CREATE TABLE t (a, b, c, d, e)')
for a, b, c, d, e in itertools.product(*([range(2)]*5)):
# XXX Insert synthetic data generator here.
bdb.sql_execute('''
INSERT INTO t (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)
''', (a, b, c, d, e))
bdb.execute('''
CREATE POPULATION p FOR t WITH SCHEMA (
MODEL a, b, c, d, e AS NUMERICAL
)
''')
bdb.execute('CREATE METAMODEL m1 FOR p;')
bdb.execute('INITIALIZE 10 MODELS FOR m1;')
bdb.execute('CREATE METAMODEL m2 FOR p;')
bdb.execute('INITIALIZE 10 MODELS FOR m2;')
bdb.execute('ESTIMATE MUTUAL INFORMATION OF a WITH b USING 10 SAMPLES FROM p')
bdb.execute('ESTIMATE MUTUAL INFORMATION OF a WITH b FROM p MODELED BY m1')
bdb.execute('ESTIMATE MUTUAL INFORMATION OF a WITH b GIVEN (c = 1) FROM p;')
bdb.execute('ESTIMATE MUTUAL INFORMATION OF a WITH b GIVEN (d, c = 1) FROM p;')
bdb.execute('''
SIMULATE MUTUAL INFORMATION OF a WITH b FROM MODELS OF p
''')
bdb.execute('''
SIMULATE MUTUAL INFORMATION OF a WITH b GIVEN (c = 1) USING 10 SAMPLES
FROM MODELS OF p USING 10 SAMPLES MODELED BY m2;
''')
bdb.execute('''
SIMULATE MUTUAL INFORMATION OF a WITH b GIVEN (d, c = 1)
FROM MODELS OF p;
''')
|
<commit_before><commit_msg>Add barebones bql query test cases.<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import contextlib
import itertools
import math
import numpy as np
import pytest
from cgpm.cgpm import CGpm
from cgpm.dummy.fourway import FourWay
from cgpm.dummy.piecewise import PieceWise
from cgpm.utils import general as gu
from bayeslite import bayesdb_open
from bayeslite import bayesdb_register_metamodel
from bayeslite.exception import BQLError
from bayeslite.metamodels.cgpm_metamodel import CGPM_Metamodel
from bayeslite.util import cursor_value
bdb = bayesdb_open(':memory:')
bdb.sql_execute('CREATE TABLE t (a, b, c, d, e)')
for a, b, c, d, e in itertools.product(*([range(2)]*5)):
# XXX Insert synthetic data generator here.
bdb.sql_execute('''
INSERT INTO t (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)
''', (a, b, c, d, e))
bdb.execute('''
CREATE POPULATION p FOR t WITH SCHEMA (
MODEL a, b, c, d, e AS NUMERICAL
)
''')
bdb.execute('CREATE METAMODEL m1 FOR p;')
bdb.execute('INITIALIZE 10 MODELS FOR m1;')
bdb.execute('CREATE METAMODEL m2 FOR p;')
bdb.execute('INITIALIZE 10 MODELS FOR m2;')
bdb.execute('ESTIMATE MUTUAL INFORMATION OF a WITH b USING 10 SAMPLES FROM p')
bdb.execute('ESTIMATE MUTUAL INFORMATION OF a WITH b FROM p MODELED BY m1')
bdb.execute('ESTIMATE MUTUAL INFORMATION OF a WITH b GIVEN (c = 1) FROM p;')
bdb.execute('ESTIMATE MUTUAL INFORMATION OF a WITH b GIVEN (d, c = 1) FROM p;')
bdb.execute('''
SIMULATE MUTUAL INFORMATION OF a WITH b FROM MODELS OF p
''')
bdb.execute('''
SIMULATE MUTUAL INFORMATION OF a WITH b GIVEN (c = 1) USING 10 SAMPLES
FROM MODELS OF p USING 10 SAMPLES MODELED BY m2;
''')
bdb.execute('''
SIMULATE MUTUAL INFORMATION OF a WITH b GIVEN (d, c = 1)
FROM MODELS OF p;
''')
|
Add barebones bql query test cases.# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import contextlib
import itertools
import math
import numpy as np
import pytest
from cgpm.cgpm import CGpm
from cgpm.dummy.fourway import FourWay
from cgpm.dummy.piecewise import PieceWise
from cgpm.utils import general as gu
from bayeslite import bayesdb_open
from bayeslite import bayesdb_register_metamodel
from bayeslite.exception import BQLError
from bayeslite.metamodels.cgpm_metamodel import CGPM_Metamodel
from bayeslite.util import cursor_value
bdb = bayesdb_open(':memory:')
bdb.sql_execute('CREATE TABLE t (a, b, c, d, e)')
for a, b, c, d, e in itertools.product(*([range(2)]*5)):
# XXX Insert synthetic data generator here.
bdb.sql_execute('''
INSERT INTO t (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)
''', (a, b, c, d, e))
bdb.execute('''
CREATE POPULATION p FOR t WITH SCHEMA (
MODEL a, b, c, d, e AS NUMERICAL
)
''')
bdb.execute('CREATE METAMODEL m1 FOR p;')
bdb.execute('INITIALIZE 10 MODELS FOR m1;')
bdb.execute('CREATE METAMODEL m2 FOR p;')
bdb.execute('INITIALIZE 10 MODELS FOR m2;')
bdb.execute('ESTIMATE MUTUAL INFORMATION OF a WITH b USING 10 SAMPLES FROM p')
bdb.execute('ESTIMATE MUTUAL INFORMATION OF a WITH b FROM p MODELED BY m1')
bdb.execute('ESTIMATE MUTUAL INFORMATION OF a WITH b GIVEN (c = 1) FROM p;')
bdb.execute('ESTIMATE MUTUAL INFORMATION OF a WITH b GIVEN (d, c = 1) FROM p;')
bdb.execute('''
SIMULATE MUTUAL INFORMATION OF a WITH b FROM MODELS OF p
''')
bdb.execute('''
SIMULATE MUTUAL INFORMATION OF a WITH b GIVEN (c = 1) USING 10 SAMPLES
FROM MODELS OF p USING 10 SAMPLES MODELED BY m2;
''')
bdb.execute('''
SIMULATE MUTUAL INFORMATION OF a WITH b GIVEN (d, c = 1)
FROM MODELS OF p;
''')
|
<commit_before><commit_msg>Add barebones bql query test cases.<commit_after># -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import contextlib
import itertools
import math
import numpy as np
import pytest
from cgpm.cgpm import CGpm
from cgpm.dummy.fourway import FourWay
from cgpm.dummy.piecewise import PieceWise
from cgpm.utils import general as gu
from bayeslite import bayesdb_open
from bayeslite import bayesdb_register_metamodel
from bayeslite.exception import BQLError
from bayeslite.metamodels.cgpm_metamodel import CGPM_Metamodel
from bayeslite.util import cursor_value
bdb = bayesdb_open(':memory:')
bdb.sql_execute('CREATE TABLE t (a, b, c, d, e)')
for a, b, c, d, e in itertools.product(*([range(2)]*5)):
# XXX Insert synthetic data generator here.
bdb.sql_execute('''
INSERT INTO t (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)
''', (a, b, c, d, e))
bdb.execute('''
CREATE POPULATION p FOR t WITH SCHEMA (
MODEL a, b, c, d, e AS NUMERICAL
)
''')
bdb.execute('CREATE METAMODEL m1 FOR p;')
bdb.execute('INITIALIZE 10 MODELS FOR m1;')
bdb.execute('CREATE METAMODEL m2 FOR p;')
bdb.execute('INITIALIZE 10 MODELS FOR m2;')
bdb.execute('ESTIMATE MUTUAL INFORMATION OF a WITH b USING 10 SAMPLES FROM p')
bdb.execute('ESTIMATE MUTUAL INFORMATION OF a WITH b FROM p MODELED BY m1')
bdb.execute('ESTIMATE MUTUAL INFORMATION OF a WITH b GIVEN (c = 1) FROM p;')
bdb.execute('ESTIMATE MUTUAL INFORMATION OF a WITH b GIVEN (d, c = 1) FROM p;')
bdb.execute('''
SIMULATE MUTUAL INFORMATION OF a WITH b FROM MODELS OF p
''')
bdb.execute('''
SIMULATE MUTUAL INFORMATION OF a WITH b GIVEN (c = 1) USING 10 SAMPLES
FROM MODELS OF p USING 10 SAMPLES MODELED BY m2;
''')
bdb.execute('''
SIMULATE MUTUAL INFORMATION OF a WITH b GIVEN (d, c = 1)
FROM MODELS OF p;
''')
|
|
908f61e4cc19f0ee1586480389afe494c900154a
|
test/test_commands.py
|
test/test_commands.py
|
import os
import pytest
TEST_URL = str(os.environ.get('TEST_URL'))
if TEST_URL != 'None' and TEST_URL.strip():
pass
else:
TEST_URL="https://192.168.10.10:8443/api"
@pytest.mark.parametrize("command", [
("curl -k " + TEST_URL + " | grep 'kind.*:.*APIVersions'"),
("oc get node | grep '10.0.2.15.*Ready'"),
("oc get pod -n default | grep 'docker-registry.*Running'"),
("oc get pod -n default | grep 'router.*Running'"),
])
def test_commands(host, command):
cmd = host.run(command)
assert cmd.rc == 0
|
Add test for command output
|
Add test for command output
|
Python
|
mit
|
wicksy/vagrant-openshift,wicksy/vagrant-openshift,wicksy/vagrant-openshift
|
Add test for command output
|
import os
import pytest
TEST_URL = str(os.environ.get('TEST_URL'))
if TEST_URL != 'None' and TEST_URL.strip():
pass
else:
TEST_URL="https://192.168.10.10:8443/api"
@pytest.mark.parametrize("command", [
("curl -k " + TEST_URL + " | grep 'kind.*:.*APIVersions'"),
("oc get node | grep '10.0.2.15.*Ready'"),
("oc get pod -n default | grep 'docker-registry.*Running'"),
("oc get pod -n default | grep 'router.*Running'"),
])
def test_commands(host, command):
cmd = host.run(command)
assert cmd.rc == 0
|
<commit_before><commit_msg>Add test for command output<commit_after>
|
import os
import pytest
TEST_URL = str(os.environ.get('TEST_URL'))
if TEST_URL != 'None' and TEST_URL.strip():
pass
else:
TEST_URL="https://192.168.10.10:8443/api"
@pytest.mark.parametrize("command", [
("curl -k " + TEST_URL + " | grep 'kind.*:.*APIVersions'"),
("oc get node | grep '10.0.2.15.*Ready'"),
("oc get pod -n default | grep 'docker-registry.*Running'"),
("oc get pod -n default | grep 'router.*Running'"),
])
def test_commands(host, command):
cmd = host.run(command)
assert cmd.rc == 0
|
Add test for command outputimport os
import pytest
TEST_URL = str(os.environ.get('TEST_URL'))
if TEST_URL != 'None' and TEST_URL.strip():
pass
else:
TEST_URL="https://192.168.10.10:8443/api"
@pytest.mark.parametrize("command", [
("curl -k " + TEST_URL + " | grep 'kind.*:.*APIVersions'"),
("oc get node | grep '10.0.2.15.*Ready'"),
("oc get pod -n default | grep 'docker-registry.*Running'"),
("oc get pod -n default | grep 'router.*Running'"),
])
def test_commands(host, command):
cmd = host.run(command)
assert cmd.rc == 0
|
<commit_before><commit_msg>Add test for command output<commit_after>import os
import pytest
TEST_URL = str(os.environ.get('TEST_URL'))
if TEST_URL != 'None' and TEST_URL.strip():
pass
else:
TEST_URL="https://192.168.10.10:8443/api"
@pytest.mark.parametrize("command", [
("curl -k " + TEST_URL + " | grep 'kind.*:.*APIVersions'"),
("oc get node | grep '10.0.2.15.*Ready'"),
("oc get pod -n default | grep 'docker-registry.*Running'"),
("oc get pod -n default | grep 'router.*Running'"),
])
def test_commands(host, command):
cmd = host.run(command)
assert cmd.rc == 0
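# A minimal sketch of an equivalent TEST_URL fallback (hypothetical simplification,
# not part of the committed test): an unset or blank variable falls back to the vagrant default.
#   TEST_URL = (os.environ.get('TEST_URL') or '').strip() or "https://192.168.10.10:8443/api"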
|
|
d566acfc2d7f4ac33b64c1c950d4ec7d32199d86
|
tests/test_helpers.py
|
tests/test_helpers.py
|
from api.search import helpers
def test_reverse_complement():
seq = "ATGCCCTGA"
rc_seq = "TCAGGGCAT"
assert helpers.reverse_completement(seq) == rc_seq
def test_calculate_sequence():
seq = "ATGCCCTGA"
assert helpers.calculate_sequence('+', seq) == seq
assert helpers.calculate_sequence('-', seq) == helpers.reverse_completement(seq)
|
Increase test coverage for helper functions
|
search: Increase test coverage for helper functions
Signed-off-by: Kai Blin <ad3597797f6179d503c382b2627cc19939309418@biosustain.dtu.dk>
|
Python
|
agpl-3.0
|
antismash/db-api,antismash/db-api
|
search: Increase test coverage for helper functions
Signed-off-by: Kai Blin <ad3597797f6179d503c382b2627cc19939309418@biosustain.dtu.dk>
|
from api.search import helpers
def test_reverse_complement():
seq = "ATGCCCTGA"
rc_seq = "TCAGGGCAT"
assert helpers.reverse_completement(seq) == rc_seq
def test_calculate_sequence():
seq = "ATGCCCTGA"
assert helpers.calculate_sequence('+', seq) == seq
assert helpers.calculate_sequence('-', seq) == helpers.reverse_completement(seq)
|
<commit_before><commit_msg>search: Increase test coverage for helper functions
Signed-off-by: Kai Blin <ad3597797f6179d503c382b2627cc19939309418@biosustain.dtu.dk><commit_after>
|
from api.search import helpers
def test_reverse_complement():
seq = "ATGCCCTGA"
rc_seq = "TCAGGGCAT"
assert helpers.reverse_completement(seq) == rc_seq
def test_calculate_sequence():
seq = "ATGCCCTGA"
assert helpers.calculate_sequence('+', seq) == seq
assert helpers.calculate_sequence('-', seq) == helpers.reverse_completement(seq)
|
search: Increase test coverage for helper functions
Signed-off-by: Kai Blin <ad3597797f6179d503c382b2627cc19939309418@biosustain.dtu.dk>from api.search import helpers
def test_reverse_complement():
seq = "ATGCCCTGA"
rc_seq = "TCAGGGCAT"
assert helpers.reverse_completement(seq) == rc_seq
def test_calculate_sequence():
seq = "ATGCCCTGA"
assert helpers.calculate_sequence('+', seq) == seq
assert helpers.calculate_sequence('-', seq) == helpers.reverse_completement(seq)
|
<commit_before><commit_msg>search: Increase test coverage for helper functions
Signed-off-by: Kai Blin <ad3597797f6179d503c382b2627cc19939309418@biosustain.dtu.dk><commit_after>from api.search import helpers
def test_reverse_complement():
seq = "ATGCCCTGA"
rc_seq = "TCAGGGCAT"
assert helpers.reverse_completement(seq) == rc_seq
def test_calculate_sequence():
seq = "ATGCCCTGA"
assert helpers.calculate_sequence('+', seq) == seq
assert helpers.calculate_sequence('-', seq) == helpers.reverse_completement(seq)
|
|
0958e53a8132b168d0adcfad30b56251fb9e7132
|
jobmon/test/test_event_server.py
|
jobmon/test/test_event_server.py
|
import os
import select
import socket
import time
import unittest
from jobmon.protocol import *
from jobmon import event_server, transport
PORT = 9999
class TestEventServer(unittest.TestCase):
def test_event_server(self):
event_srv = event_server.EventServer(PORT)
event_srv.start()
time.sleep(5) # Allow the event server time to accept clients
event_client_a = transport.EventStream(PORT)
event_client_b = transport.EventStream(PORT)
event_client_c = transport.EventStream(PORT)
time.sleep(5) # Wait for all the accepts to process, to ensure events
# aren't dropped
try:
event_codes = (EVENT_STARTJOB, EVENT_STOPJOB, EVENT_RESTARTJOB,
EVENT_TERMINATE)
events = [Event('some_job', code) for code in event_codes]
for event in events:
event_srv.send(event.job_name, event.event_code)
self.assertEqual(event_client_a.next_event(), event)
self.assertEqual(event_client_b.next_event(), event)
self.assertEqual(event_client_c.next_event(), event)
finally:
event_srv.terminate()
event_client_a.destroy()
event_client_b.destroy()
event_client_c.destroy()
|
Add a test for the event server
|
Add a test for the event server
|
Python
|
bsd-2-clause
|
adamnew123456/jobmon
|
Add a test for the event server
|
import os
import select
import socket
import time
import unittest
from jobmon.protocol import *
from jobmon import event_server, transport
PORT = 9999
class TestEventServer(unittest.TestCase):
def test_event_server(self):
event_srv = event_server.EventServer(PORT)
event_srv.start()
time.sleep(5) # Allow the event server time to accept clients
event_client_a = transport.EventStream(PORT)
event_client_b = transport.EventStream(PORT)
event_client_c = transport.EventStream(PORT)
time.sleep(5) # Wait for all the accepts to process, to ensure events
# aren't dropped
try:
event_codes = (EVENT_STARTJOB, EVENT_STOPJOB, EVENT_RESTARTJOB,
EVENT_TERMINATE)
events = [Event('some_job', code) for code in event_codes]
for event in events:
event_srv.send(event.job_name, event.event_code)
self.assertEqual(event_client_a.next_event(), event)
self.assertEqual(event_client_b.next_event(), event)
self.assertEqual(event_client_c.next_event(), event)
finally:
event_srv.terminate()
event_client_a.destroy()
event_client_b.destroy()
event_client_c.destroy()
|
<commit_before><commit_msg>Add a test for the event server<commit_after>
|
import os
import select
import socket
import time
import unittest
from jobmon.protocol import *
from jobmon import event_server, transport
PORT = 9999
class TestEventServer(unittest.TestCase):
def test_event_server(self):
event_srv = event_server.EventServer(PORT)
event_srv.start()
time.sleep(5) # Allow the event server time to accept clients
event_client_a = transport.EventStream(PORT)
event_client_b = transport.EventStream(PORT)
event_client_c = transport.EventStream(PORT)
time.sleep(5) # Wait for all the accepts to process, to ensure events
# aren't dropped
try:
event_codes = (EVENT_STARTJOB, EVENT_STOPJOB, EVENT_RESTARTJOB,
EVENT_TERMINATE)
events = [Event('some_job', code) for code in event_codes]
for event in events:
event_srv.send(event.job_name, event.event_code)
self.assertEqual(event_client_a.next_event(), event)
self.assertEqual(event_client_b.next_event(), event)
self.assertEqual(event_client_c.next_event(), event)
finally:
event_srv.terminate()
event_client_a.destroy()
event_client_b.destroy()
event_client_c.destroy()
|
Add a test for the event serverimport os
import select
import socket
import time
import unittest
from jobmon.protocol import *
from jobmon import event_server, transport
PORT = 9999
class TestEventServer(unittest.TestCase):
def test_event_server(self):
event_srv = event_server.EventServer(PORT)
event_srv.start()
time.sleep(5) # Allow the event server time to accept clients
event_client_a = transport.EventStream(PORT)
event_client_b = transport.EventStream(PORT)
event_client_c = transport.EventStream(PORT)
time.sleep(5) # Wait for all the accepts to process, to ensure events
# aren't dropped
try:
event_codes = (EVENT_STARTJOB, EVENT_STOPJOB, EVENT_RESTARTJOB,
EVENT_TERMINATE)
events = [Event('some_job', code) for code in event_codes]
for event in events:
event_srv.send(event.job_name, event.event_code)
self.assertEqual(event_client_a.next_event(), event)
self.assertEqual(event_client_b.next_event(), event)
self.assertEqual(event_client_c.next_event(), event)
finally:
event_srv.terminate()
event_client_a.destroy()
event_client_b.destroy()
event_client_c.destroy()
|
<commit_before><commit_msg>Add a test for the event server<commit_after>import os
import select
import socket
import time
import unittest
from jobmon.protocol import *
from jobmon import event_server, transport
PORT = 9999
class TestEventServer(unittest.TestCase):
def test_event_server(self):
event_srv = event_server.EventServer(PORT)
event_srv.start()
time.sleep(5) # Allow the event server time to accept clients
event_client_a = transport.EventStream(PORT)
event_client_b = transport.EventStream(PORT)
event_client_c = transport.EventStream(PORT)
time.sleep(5) # Wait for all the accepts to process, to ensure events
# aren't dropped
try:
event_codes = (EVENT_STARTJOB, EVENT_STOPJOB, EVENT_RESTARTJOB,
EVENT_TERMINATE)
events = [Event('some_job', code) for code in event_codes]
for event in events:
event_srv.send(event.job_name, event.event_code)
self.assertEqual(event_client_a.next_event(), event)
self.assertEqual(event_client_b.next_event(), event)
self.assertEqual(event_client_c.next_event(), event)
finally:
event_srv.terminate()
event_client_a.destroy()
event_client_b.destroy()
event_client_c.destroy()
|
|
e52b58e823e803c0cce47a1e24e41577088a25a0
|
future/tests/test_imports_urllib.py
|
future/tests/test_imports_urllib.py
|
import unittest
import sys
print([m for m in sys.modules if m.startswith('urllib')])
class MyTest(unittest.TestCase):
def test_urllib(self):
import urllib
print(urllib.__file__)
from future import standard_library
with standard_library.hooks():
import urllib.response
print(urllib.__file__)
print(urllib.response.__file__)
unittest.main()
|
Add a new little urllib import test
|
Add a new little urllib import test
|
Python
|
mit
|
michaelpacer/python-future,michaelpacer/python-future,QuLogic/python-future,krischer/python-future,krischer/python-future,PythonCharmers/python-future,PythonCharmers/python-future,QuLogic/python-future
|
Add a new little urllib import test
|
import unittest
import sys
print([m for m in sys.modules if m.startswith('urllib')])
class MyTest(unittest.TestCase):
def test_urllib(self):
import urllib
print(urllib.__file__)
from future import standard_library
with standard_library.hooks():
import urllib.response
print(urllib.__file__)
print(urllib.response.__file__)
unittest.main()
|
<commit_before><commit_msg>Add a new little urllib import test<commit_after>
|
import unittest
import sys
print([m for m in sys.modules if m.startswith('urllib')])
class MyTest(unittest.TestCase):
def test_urllib(self):
import urllib
print(urllib.__file__)
from future import standard_library
with standard_library.hooks():
import urllib.response
print(urllib.__file__)
print(urllib.response.__file__)
unittest.main()
|
Add a new little urllib import testimport unittest
import sys
print([m for m in sys.modules if m.startswith('urllib')])
class MyTest(unittest.TestCase):
def test_urllib(self):
import urllib
print(urllib.__file__)
from future import standard_library
with standard_library.hooks():
import urllib.response
print(urllib.__file__)
print(urllib.response.__file__)
unittest.main()
|
<commit_before><commit_msg>Add a new little urllib import test<commit_after>import unittest
import sys
print([m for m in sys.modules if m.startswith('urllib')])
class MyTest(unittest.TestCase):
def test_urllib(self):
import urllib
print(urllib.__file__)
from future import standard_library
with standard_library.hooks():
import urllib.response
print(urllib.__file__)
print(urllib.response.__file__)
unittest.main()
|
|
91c36cd8cccae258dc9dfd51415b328eac9bdbbe
|
scripts/OpenFOAM/plotCFL.py
|
scripts/OpenFOAM/plotCFL.py
|
import numpy as np
from matplotlib.pyplot import *
CFL = []
inFile = open('./log_0_20','r')
for line in inFile:
if ('Courant Number mean' in str(line)):
field = line.split()
CFL.append(float(field[len(field)-1]))
inFile.close()
inFile = open('./log_20_30','r')
for line in inFile:
if ('Courant Number mean' in str(line)):
field = line.split()
CFL.append(float(field[len(field)-1]))
inFile.close()
inFile = open('./log_30_40','r')
for line in inFile:
if ('Courant Number mean' in str(line)):
field = line.split()
CFL.append(float(field[len(field)-1]))
inFile.close()
figure(0)
grid(True)
plot(CFL,linewidth=2)
show()
|
Add script to plot CFL - needs to be modified
|
Add script to plot CFL - needs to be modified
|
Python
|
mit
|
mesnardo/snake
|
Add script to plot CFL - needs to be modified
|
import numpy as np
from matplotlib.pyplot import *
CFL = []
inFile = open('./log_0_20','r')
for line in inFile:
if ('Courant Number mean' in str(line)):
field = line.split()
CFL.append(float(field[len(field)-1]))
inFile.close()
inFile = open('./log_20_30','r')
for line in inFile:
if ('Courant Number mean' in str(line)):
field = line.split()
CFL.append(float(field[len(field)-1]))
inFile.close()
inFile = open('./log_30_40','r')
for line in inFile:
if ('Courant Number mean' in str(line)):
field = line.split()
CFL.append(float(field[len(field)-1]))
inFile.close()
figure(0)
grid(True)
plot(CFL,linewidth=2)
show()
|
<commit_before><commit_msg>Add script to plot CFL - needs to be modified<commit_after>
|
import numpy as np
from matplotlib.pyplot import *
CFL = []
inFile = open('./log_0_20','r')
for line in inFile:
if ('Courant Number mean' in str(line)):
field = line.split()
CFL.append(float(field[len(field)-1]))
inFile.close()
inFile = open('./log_20_30','r')
for line in inFile:
if ('Courant Number mean' in str(line)):
field = line.split()
CFL.append(float(field[len(field)-1]))
inFile.close()
inFile = open('./log_30_40','r')
for line in inFile:
if ('Courant Number mean' in str(line)):
field = line.split()
CFL.append(float(field[len(field)-1]))
inFile.close()
figure(0)
grid(True)
plot(CFL,linewidth=2)
show()
|
Add script to plot CFL - needs to be modifiedimport numpy as np
from matplotlib.pyplot import *
CFL = []
inFile = open('./log_0_20','r')
for line in inFile:
if ('Courant Number mean' in str(line)):
field = line.split()
CFL.append(float(field[len(field)-1]))
inFile.close()
inFile = open('./log_20_30','r')
for line in inFile:
if ('Courant Number mean' in str(line)):
field = line.split()
CFL.append(float(field[len(field)-1]))
inFile.close()
inFile = open('./log_30_40','r')
for line in inFile:
if ('Courant Number mean' in str(line)):
field = line.split()
CFL.append(float(field[len(field)-1]))
inFile.close()
figure(0)
grid(True)
plot(CFL,linewidth=2)
show()
|
<commit_before><commit_msg>Add script to plot CFL - needs to be modified<commit_after>import numpy as np
from matplotlib.pyplot import *
CFL = []
inFile = open('./log_0_20','r')
for line in inFile:
if ('Courant Number mean' in str(line)):
field = line.split()
CFL.append(float(field[len(field)-1]))
inFile.close()
inFile = open('./log_20_30','r')
for line in inFile:
if ('Courant Number mean' in str(line)):
field = line.split()
CFL.append(float(field[len(field)-1]))
inFile.close()
inFile = open('./log_30_40','r')
for line in inFile:
if ('Courant Number mean' in str(line)):
field = line.split()
CFL.append(float(field[len(field)-1]))
inFile.close()
figure(0)
grid(True)
plot(CFL,linewidth=2)
show()
|
|
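The plotCFL script in the row above repeats an identical parsing loop for each hard-coded log file. A minimal consolidated sketch of the same idea is given below; the log paths are taken from the script and the OpenFOAM "Courant Number mean" line format is assumed unchanged, so this is an illustrative rewrite rather than the committed code.

import matplotlib.pyplot as plt

# Log paths taken from the script above; adjust as needed.
log_files = ['./log_0_20', './log_20_30', './log_30_40']
cfl = []
for path in log_files:
    with open(path, 'r') as infile:
        for line in infile:
            if 'Courant Number mean' in line:
                # the last whitespace-separated field holds the max Courant number
                cfl.append(float(line.split()[-1]))

plt.figure()
plt.grid(True)
plt.plot(cfl, linewidth=2)
plt.show()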
06fc94beb1f0e557be11010eaaf9635350940865
|
insertion_sort.py
|
insertion_sort.py
|
def insertion_sort(un_list):
for idx in range(1, len(un_list)):
current = un_list[idx]
position = idx
while position > 0 and un_list[position-1] > current:
un_list[position] = un_list[position-1]
position = position - 1
un_list[position] = current
if __name__ == '__main__':
pass
|
Add insertion sort method to module.
|
Add insertion sort method to module.
|
Python
|
mit
|
jonathanstallings/data-structures
|
Add insertion sort method to module.
|
def insertion_sort(un_list):
for idx in range(1, len(un_list)):
current = un_list[idx]
position = idx
while position > 0 and un_list[position-1] > current:
un_list[position] = un_list[position-1]
position = position - 1
un_list[position] = current
if __name__ == '__main__':
pass
|
<commit_before><commit_msg>Add insertion sort method to module.<commit_after>
|
def insertion_sort(un_list):
for idx in range(1, len(un_list)):
current = un_list[idx]
position = idx
while position > 0 and un_list[position-1] > current:
un_list[position] = un_list[position-1]
position = position - 1
un_list[position] = current
if __name__ == '__main__':
pass
|
Add insertion sort method to module.
def insertion_sort(un_list):
for idx in range(1, len(un_list)):
current = un_list[idx]
position = idx
while position > 0 and un_list[position-1] > current:
un_list[position] = un_list[position-1]
position = position - 1
un_list[position] = current
if __name__ == '__main__':
pass
|
<commit_before><commit_msg>Add insertion sort method to module.<commit_after>def insertion_sort(un_list):
for idx in range(1, len(un_list)):
current = un_list[idx]
position = idx
while position > 0 and un_list[position-1] > current:
un_list[position] = un_list[position-1]
position = position - 1
un_list[position] = current
if __name__ == '__main__':
pass
|
|
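A quick usage check for the in-place sort in the row above can be sketched as follows; the module path used in the import is an assumption.

from random import shuffle
from insertion_sort import insertion_sort  # assumed module path

data = list(range(10))
shuffle(data)             # scramble a known sequence
insertion_sort(data)      # sorts the list in place; the function returns None
assert data == list(range(10))
print(data)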
facf99d88c09dbc1661bfd9a5151d15756ffb901
|
__init__.py
|
__init__.py
|
import httplib2
import simplejson
import logging
from urlparse import urljoin
import types
# TODO configurable?
BASE_URL = 'http://127.0.0.1:8080/'
class RemoteObject(object):
fields = ()
objects = {}
def __init__(self, **kwargs):
# create object properties for all desired fields
for field_name in self.__class__.fields:
value = kwargs.get(field_name)
setattr(self, field_name, value)
# create children objects from a property name : RemoteObject pair
for obj_name, obj_class in self.objects.iteritems():
value = kwargs.get(obj_name)
if type(value) is types.ListType or type(value) is types.TupleType:
obj = []
for item in value:
o = obj_class(**item)
o.parent = self
obj.append(o)
else:
obj = obj_class(**value)
obj.parent = self # e.g. reference to blog from entry
setattr(self, obj_name, obj)
@classmethod
def get(cls, id, http=None):
# TODO accept atom or whatever other response format
url = cls.url % {'id': id}
url = urljoin(BASE_URL, url)
logging.debug('Fetching %s' % (url,))
if http is None:
http = httplib2.Http()
(response, content) = http.request(url)
logging.debug('Got content %s' % (content,))
# TODO make sure astropad is returning the proper content type
#if data and resp.get('content-type') == 'application/json':
data = simplejson.loads(content)
str_data = dict([(k.encode('ascii', 'ignore'), v) for k, v in data.iteritems()])
return cls(**str_data)
class User(RemoteObject):
"""User from TypePad API.
>>> user = User.get(1)
>>> user.name
u'Mike Malone'
>>> user.email
u'mjmalone@gmail.com'
"""
fields = ('name', 'email', 'uri')
url = r'/users/%(id)s.json'
class Entry(RemoteObject):
fields = ('slug', 'title', 'content', 'pub_date', 'mod_date')
objects = {'authors': User}
class Blog(RemoteObject):
"""Blog from TypePad API.
>>> blog = Blog.get(1)
>>> blog.title
u'Fred'
"""
fields = ('title', 'subtitle')
objects = {'entries': Entry}
url = r'/blogs/%(id)s.json'
def get_entries(self):
pass
|
Move typepad module into a directory so it can be externals-ed
|
Move typepad module into a directory so it can be externals-ed
|
Python
|
bsd-3-clause
|
typepad/python-typepad-api
|
Move typepad module into a directory so it can be externals-ed
|
import httplib2
import simplejson
import logging
from urlparse import urljoin
import types
# TODO configurable?
BASE_URL = 'http://127.0.0.1:8080/'
class RemoteObject(object):
fields = ()
objects = {}
def __init__(self, **kwargs):
# create object properties for all desired fields
for field_name in self.__class__.fields:
value = kwargs.get(field_name)
setattr(self, field_name, value)
# create children objects from a property name : RemoteObject pair
for obj_name, obj_class in self.objects.iteritems():
value = kwargs.get(obj_name)
if type(value) is types.ListType or type(value) is types.TupleType:
obj = []
for item in value:
o = obj_class(**item)
o.parent = self
obj.append(o)
else:
obj = obj_class(**value)
obj.parent = self # e.g. reference to blog from entry
setattr(self, obj_name, obj)
@classmethod
def get(cls, id, http=None):
# TODO accept atom or whatever other response format
url = cls.url % {'id': id}
url = urljoin(BASE_URL, url)
logging.debug('Fetching %s' % (url,))
if http is None:
http = httplib2.Http()
(response, content) = http.request(url)
logging.debug('Got content %s' % (content,))
# TODO make sure astropad is returning the proper content type
#if data and resp.get('content-type') == 'application/json':
data = simplejson.loads(content)
str_data = dict([(k.encode('ascii', 'ignore'), v) for k, v in data.iteritems()])
return cls(**str_data)
class User(RemoteObject):
"""User from TypePad API.
>>> user = User.get(1)
>>> user.name
u'Mike Malone'
>>> user.email
u'mjmalone@gmail.com'
"""
fields = ('name', 'email', 'uri')
url = r'/users/%(id)s.json'
class Entry(RemoteObject):
fields = ('slug', 'title', 'content', 'pub_date', 'mod_date')
objects = {'authors': User}
class Blog(RemoteObject):
"""Blog from TypePad API.
>>> blog = Blog.get(1)
>>> blog.title
u'Fred'
"""
fields = ('title', 'subtitle')
objects = {'entries': Entry}
url = r'/blogs/%(id)s.json'
def get_entries(self):
pass
|
<commit_before><commit_msg>Move typepad module into a directory so it can be externals-ed<commit_after>
|
import httplib2
import simplejson
import logging
from urlparse import urljoin
import types
# TODO configurable?
BASE_URL = 'http://127.0.0.1:8080/'
class RemoteObject(object):
fields = ()
objects = {}
def __init__(self, **kwargs):
# create object properties for all desired fields
for field_name in self.__class__.fields:
value = kwargs.get(field_name)
setattr(self, field_name, value)
# create children objects from a property name : RemoteObject pair
for obj_name, obj_class in self.objects.iteritems():
value = kwargs.get(obj_name)
if type(value) is types.ListType or type(value) is types.TupleType:
obj = []
for item in value:
o = obj_class(**item)
o.parent = self
obj.append(o)
else:
obj = obj_class(**value)
obj.parent = self # e.g. reference to blog from entry
setattr(self, obj_name, obj)
@classmethod
def get(cls, id, http=None):
# TODO accept atom or whatever other response format
url = cls.url % {'id': id}
url = urljoin(BASE_URL, url)
logging.debug('Fetching %s' % (url,))
if http is None:
http = httplib2.Http()
(response, content) = http.request(url)
logging.debug('Got content %s' % (content,))
# TODO make sure astropad is returning the proper content type
#if data and resp.get('content-type') == 'application/json':
data = simplejson.loads(content)
str_data = dict([(k.encode('ascii', 'ignore'), v) for k, v in data.iteritems()])
return cls(**str_data)
class User(RemoteObject):
"""User from TypePad API.
>>> user = User.get(1)
>>> user.name
u'Mike Malone'
>>> user.email
u'mjmalone@gmail.com'
"""
fields = ('name', 'email', 'uri')
url = r'/users/%(id)s.json'
class Entry(RemoteObject):
fields = ('slug', 'title', 'content', 'pub_date', 'mod_date')
objects = {'authors': User}
class Blog(RemoteObject):
"""Blog from TypePad API.
>>> blog = Blog.get(1)
>>> blog.title
u'Fred'
"""
fields = ('title', 'subtitle')
objects = {'entries': Entry}
url = r'/blogs/%(id)s.json'
def get_entries(self):
pass
|
Move typepad module into a directory so it can be externals-ed
import httplib2
import simplejson
import logging
from urlparse import urljoin
import types
# TODO configurable?
BASE_URL = 'http://127.0.0.1:8080/'
class RemoteObject(object):
fields = ()
objects = {}
def __init__(self, **kwargs):
# create object properties for all desired fields
for field_name in self.__class__.fields:
value = kwargs.get(field_name)
setattr(self, field_name, value)
# create children objects from a property name : RemoteObject pair
for obj_name, obj_class in self.objects.iteritems():
value = kwargs.get(obj_name)
if type(value) is types.ListType or type(value) is types.TupleType:
obj = []
for item in value:
o = obj_class(**item)
o.parent = self
obj.append(o)
else:
obj = obj_class(**value)
obj.parent = self # e.g. reference to blog from entry
setattr(self, obj_name, obj)
@classmethod
def get(cls, id, http=None):
# TODO accept atom or whatever other response format
url = cls.url % {'id': id}
url = urljoin(BASE_URL, url)
logging.debug('Fetching %s' % (url,))
if http is None:
http = httplib2.Http()
(response, content) = http.request(url)
logging.debug('Got content %s' % (content,))
# TODO make sure astropad is returning the proper content type
#if data and resp.get('content-type') == 'application/json':
data = simplejson.loads(content)
str_data = dict([(k.encode('ascii', 'ignore'), v) for k, v in data.iteritems()])
return cls(**str_data)
class User(RemoteObject):
"""User from TypePad API.
>>> user = User.get(1)
>>> user.name
u'Mike Malone'
>>> user.email
u'mjmalone@gmail.com'
"""
fields = ('name', 'email', 'uri')
url = r'/users/%(id)s.json'
class Entry(RemoteObject):
fields = ('slug', 'title', 'content', 'pub_date', 'mod_date')
objects = {'authors': User}
class Blog(RemoteObject):
"""Blog from TypePad API.
>>> blog = Blog.get(1)
>>> blog.title
u'Fred'
"""
fields = ('title', 'subtitle')
objects = {'entries': Entry}
url = r'/blogs/%(id)s.json'
def get_entries(self):
pass
|
<commit_before><commit_msg>Move typepad module into a directory so it can be externals-ed<commit_after>import httplib2
import simplejson
import logging
from urlparse import urljoin
import types
# TODO configurable?
BASE_URL = 'http://127.0.0.1:8080/'
class RemoteObject(object):
fields = ()
objects = {}
def __init__(self, **kwargs):
# create object properties for all desired fields
for field_name in self.__class__.fields:
value = kwargs.get(field_name)
setattr(self, field_name, value)
# create children objects from a property name : RemoteObject pair
for obj_name, obj_class in self.objects.iteritems():
value = kwargs.get(obj_name)
if type(value) is types.ListType or type(value) is types.TupleType:
obj = []
for item in value:
o = obj_class(**item)
o.parent = self
obj.append(o)
else:
obj = obj_class(**value)
obj.parent = self # e.g. reference to blog from entry
setattr(self, obj_name, obj)
@classmethod
def get(cls, id, http=None):
# TODO accept atom or whatever other response format
url = cls.url % {'id': id}
url = urljoin(BASE_URL, url)
logging.debug('Fetching %s' % (url,))
if http is None:
http = httplib2.Http()
(response, content) = http.request(url)
logging.debug('Got content %s' % (content,))
# TODO make sure astropad is returning the proper content type
#if data and resp.get('content-type') == 'application/json':
data = simplejson.loads(content)
str_data = dict([(k.encode('ascii', 'ignore'), v) for k, v in data.iteritems()])
return cls(**str_data)
class User(RemoteObject):
"""User from TypePad API.
>>> user = User.get(1)
>>> user.name
u'Mike Malone'
>>> user.email
u'mjmalone@gmail.com'
"""
fields = ('name', 'email', 'uri')
url = r'/users/%(id)s.json'
class Entry(RemoteObject):
fields = ('slug', 'title', 'content', 'pub_date', 'mod_date')
objects = {'authors': User}
class Blog(RemoteObject):
"""Blog from TypePad API.
>>> blog = Blog.get(1)
>>> blog.title
u'Fred'
"""
fields = ('title', 'subtitle')
objects = {'entries': Entry}
url = r'/blogs/%(id)s.json'
def get_entries(self):
pass
|
|
4a5bfa4113a25ba128c0ee8d2a75b12e3f0c3e18
|
scripts/gtr-pages.py
|
scripts/gtr-pages.py
|
from __future__ import print_function
import json, os, sys
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Usage: gtr-pages.py <input directory> <output directory>', file=sys.stderr)
exit(1)
indir = sys.argv[1]
outdir = sys.argv[2]
os.mkdir(outdir)
for file in os.listdir(os.fsencode(indir)):
fname = os.fsdecode(file)
if fname.endswith('.json'):
for line in open(os.path.join(indir, fname), 'r'):
rec = json.loads(line)
out = open(os.path.join(outdir, rec['name'] + '.html'), 'w')
print(rec['text'], file=out)
out.close()
|
Add script to write cluster HTML files for GtR.
|
Add script to write cluster HTML files for GtR.
|
Python
|
apache-2.0
|
ViralTexts/vt-passim,ViralTexts/vt-passim,ViralTexts/vt-passim
|
Add script to write cluster HTML files for GtR.
|
from __future__ import print_function
import json, os, sys
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Usage: gtr-pages.py <input directory> <output directory>', file=sys.stderr)
exit(1)
indir = sys.argv[1]
outdir = sys.argv[2]
os.mkdir(outdir)
for file in os.listdir(os.fsencode(indir)):
fname = os.fsdecode(file)
if fname.endswith('.json'):
for line in open(os.path.join(indir, fname), 'r'):
rec = json.loads(line)
out = open(os.path.join(outdir, rec['name'] + '.html'), 'w')
print(rec['text'], file=out)
out.close()
|
<commit_before><commit_msg>Add script to write cluster HTML files for GtR.<commit_after>
|
from __future__ import print_function
import json, os, sys
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Usage: gtr-pages.py <input directory> <output directory>', file=sys.stderr)
exit(1)
indir = sys.argv[1]
outdir = sys.argv[2]
os.mkdir(outdir)
for file in os.listdir(os.fsencode(indir)):
fname = os.fsdecode(file)
if fname.endswith('.json'):
for line in open(os.path.join(indir, fname), 'r'):
rec = json.loads(line)
out = open(os.path.join(outdir, rec['name'] + '.html'), 'w')
print(rec['text'], file=out)
out.close()
|
Add script to write cluster HTML files for GtR.
from __future__ import print_function
import json, os, sys
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Usage: gtr-pages.py <input directory> <output directory>', file=sys.stderr)
exit(1)
indir = sys.argv[1]
outdir = sys.argv[2]
os.mkdir(outdir)
for file in os.listdir(os.fsencode(indir)):
fname = os.fsdecode(file)
if fname.endswith('.json'):
for line in open(os.path.join(indir, fname), 'r'):
rec = json.loads(line)
out = open(os.path.join(outdir, rec['name'] + '.html'), 'w')
print(rec['text'], file=out)
out.close()
|
<commit_before><commit_msg>Add script to write cluster HTML files for GtR.<commit_after>from __future__ import print_function
import json, os, sys
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Usage: gtr-pages.py <input directory> <output directory>', file=sys.stderr)
exit(1)
indir = sys.argv[1]
outdir = sys.argv[2]
os.mkdir(outdir)
for file in os.listdir(os.fsencode(indir)):
fname = os.fsdecode(file)
if fname.endswith('.json'):
for line in open(os.path.join(indir, fname), 'r'):
rec = json.loads(line)
out = open(os.path.join(outdir, rec['name'] + '.html'), 'w')
print(rec['text'], file=out)
out.close()
|
|
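The gtr-pages script in the row above iterates over input files without closing them and builds paths by hand. A minimal pathlib-based sketch of the same loop, using context managers and keeping the original command-line interface, might look like this (directory layout and record fields are assumed to match the script):

import json
import sys
from pathlib import Path

if __name__ == '__main__':
    if len(sys.argv) < 3:
        print('Usage: gtr-pages.py <input directory> <output directory>', file=sys.stderr)
        sys.exit(1)
    indir, outdir = Path(sys.argv[1]), Path(sys.argv[2])
    outdir.mkdir(exist_ok=True)
    for src in sorted(indir.glob('*.json')):
        with src.open() as infile:
            for line in infile:
                rec = json.loads(line)
                # one HTML page per cluster record, named after the record's "name" field
                (outdir / (rec['name'] + '.html')).write_text(rec['text'] + '\n')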
3b047ef48ae89d49d98675a0720a856cee829deb
|
scripts/read_kest.py
|
scripts/read_kest.py
|
"""
Reads thermal conductivity and statistics for a list of trials and saves results to a yaml file
"""
import os
import yaml
from thermof.parameters import k_parameters
from thermof.read import read_run_info, read_trial
# ---------------------------------------------------
main = '' # Directory of trials
results_file = '%s-kest-results.yaml' % os.path.basename(main) # Name of results file
# ---------------------------------------------------
trial_list = [os.path.join(main, i) for i in os.listdir(main) if os.path.isdir(os.path.join(main, i))]
results = dict(k=[], max=[], min=[], std=[], sigma=[], epsilon=[], trial=[])
k_par = k_parameters.copy()
for trial_index, trial in enumerate(trial_list, start=1):
trial_name = os.path.basename(trial)
print('\n%i / %i | %s #################################' % (trial_index, len(trial_list), trial_name), flush=True)
ri = read_run_info(os.path.join(trial, 'Run1'))
results['sigma'].append(ri['sigma'])
results['epsilon'].append(ri['epsilon'])
results['trial'].append(os.path.basename(trial))
if trial_name not in ['S6.00-E0.80', 'S6.00-E1.00']:
# sim = Simulation(read=trial, setup='trial', parameters=k_par)
trial = read_trial(trial, k_par=k_par, t0=5, t1=10, verbose=False)
results['k'].append(trial['avg']['k_est']['iso'])
results['max'].append(trial['avg']['k_est']['stats']['iso']['max'])
results['min'].append(trial['avg']['k_est']['stats']['iso']['min'])
results['std'].append(float(trial['avg']['k_est']['stats']['iso']['std']))
print('k: %.2f | std: %.2f | max: %.2f | min: %.2f'
% (results['k'][-1], results['std'][-1], results['max'][-1], results['min'][-1]))
else:
results['k'].append(None)
results['std'].append(None)
results['max'].append(None)
results['min'].append(None)
with open(results_file, 'w') as rfile:
yaml.dump(results, rfile)
|
Add script for reading k for list of trials
|
Add script for reading k for list of trials
|
Python
|
mit
|
kbsezginel/tee_mof,kbsezginel/tee_mof
|
Add script for reading k for list of trials
|
"""
Reads thermal conductivity and statistics for a list of trials and saves results to a yaml file
"""
import os
import yaml
from thermof.parameters import k_parameters
from thermof.read import read_run_info, read_trial
# ---------------------------------------------------
main = '' # Directory of trials
results_file = '%s-kest-results.yaml' % os.path.basename(main) # Name of results file
# ---------------------------------------------------
trial_list = [os.path.join(main, i) for i in os.listdir(main) if os.path.isdir(os.path.join(main, i))]
results = dict(k=[], max=[], min=[], std=[], sigma=[], epsilon=[], trial=[])
k_par = k_parameters.copy()
for trial_index, trial in enumerate(trial_list, start=1):
trial_name = os.path.basename(trial)
print('\n%i / %i | %s #################################' % (trial_index, len(trial_list), trial_name), flush=True)
ri = read_run_info(os.path.join(trial, 'Run1'))
results['sigma'].append(ri['sigma'])
results['epsilon'].append(ri['epsilon'])
results['trial'].append(os.path.basename(trial))
if trial_name not in ['S6.00-E0.80', 'S6.00-E1.00']:
# sim = Simulation(read=trial, setup='trial', parameters=k_par)
trial = read_trial(trial, k_par=k_par, t0=5, t1=10, verbose=False)
results['k'].append(trial['avg']['k_est']['iso'])
results['max'].append(trial['avg']['k_est']['stats']['iso']['max'])
results['min'].append(trial['avg']['k_est']['stats']['iso']['min'])
results['std'].append(float(trial['avg']['k_est']['stats']['iso']['std']))
print('k: %.2f | std: %.2f | max: %.2f | min: %.2f'
% (results['k'][-1], results['std'][-1], results['max'][-1], results['min'][-1]))
else:
results['k'].append(None)
results['std'].append(None)
results['max'].append(None)
results['min'].append(None)
with open(results_file, 'w') as rfile:
yaml.dump(results, rfile)
|
<commit_before><commit_msg>Add script for reading k for list of trials<commit_after>
|
"""
Reads thermal conductivity and statistics for a list of trials and saves results to a yaml file
"""
import os
import yaml
from thermof.parameters import k_parameters
from thermof.read import read_run_info, read_trial
# ---------------------------------------------------
main = '' # Directory of trials
results_file = '%s-kest-results.yaml' % os.path.basename(main) # Name of results file
# ---------------------------------------------------
trial_list = [os.path.join(main, i) for i in os.listdir(main) if os.path.isdir(os.path.join(main, i))]
results = dict(k=[], max=[], min=[], std=[], sigma=[], epsilon=[], trial=[])
k_par = k_parameters.copy()
for trial_index, trial in enumerate(trial_list, start=1):
trial_name = os.path.basename(trial)
print('\n%i / %i | %s #################################' % (trial_index, len(trial_list), trial_name), flush=True)
ri = read_run_info(os.path.join(trial, 'Run1'))
results['sigma'].append(ri['sigma'])
results['epsilon'].append(ri['epsilon'])
results['trial'].append(os.path.basename(trial))
if trial_name not in ['S6.00-E0.80', 'S6.00-E1.00']:
# sim = Simulation(read=trial, setup='trial', parameters=k_par)
trial = read_trial(trial, k_par=k_par, t0=5, t1=10, verbose=False)
results['k'].append(trial['avg']['k_est']['iso'])
results['max'].append(trial['avg']['k_est']['stats']['iso']['max'])
results['min'].append(trial['avg']['k_est']['stats']['iso']['min'])
results['std'].append(float(trial['avg']['k_est']['stats']['iso']['std']))
print('k: %.2f | std: %.2f | max: %.2f | min: %.2f'
% (results['k'][-1], results['std'][-1], results['max'][-1], results['min'][-1]))
else:
results['k'].append(None)
results['std'].append(None)
results['max'].append(None)
results['min'].append(None)
with open(results_file, 'w') as rfile:
yaml.dump(results, rfile)
|
Add script for reading k for list of trials
"""
Reads thermal conductivity and statistics for a list of trials and saves results to a yaml file
"""
import os
import yaml
from thermof.parameters import k_parameters
from thermof.read import read_run_info, read_trial
# ---------------------------------------------------
main = '' # Directory of trials
results_file = '%s-kest-results.yaml' % os.path.basename(main) # Name of results file
# ---------------------------------------------------
trial_list = [os.path.join(main, i) for i in os.listdir(main) if os.path.isdir(os.path.join(main, i))]
results = dict(k=[], max=[], min=[], std=[], sigma=[], epsilon=[], trial=[])
k_par = k_parameters.copy()
for trial_index, trial in enumerate(trial_list, start=1):
trial_name = os.path.basename(trial)
print('\n%i / %i | %s #################################' % (trial_index, len(trial_list), trial_name), flush=True)
ri = read_run_info(os.path.join(trial, 'Run1'))
results['sigma'].append(ri['sigma'])
results['epsilon'].append(ri['epsilon'])
results['trial'].append(os.path.basename(trial))
if trial_name not in ['S6.00-E0.80', 'S6.00-E1.00']:
# sim = Simulation(read=trial, setup='trial', parameters=k_par)
trial = read_trial(trial, k_par=k_par, t0=5, t1=10, verbose=False)
results['k'].append(trial['avg']['k_est']['iso'])
results['max'].append(trial['avg']['k_est']['stats']['iso']['max'])
results['min'].append(trial['avg']['k_est']['stats']['iso']['min'])
results['std'].append(float(trial['avg']['k_est']['stats']['iso']['std']))
print('k: %.2f | std: %.2f | max: %.2f | min: %.2f'
% (results['k'][-1], results['std'][-1], results['max'][-1], results['min'][-1]))
else:
results['k'].append(None)
results['std'].append(None)
results['max'].append(None)
results['min'].append(None)
with open(results_file, 'w') as rfile:
yaml.dump(results, rfile)
|
<commit_before><commit_msg>Add script for reading k for list of trials<commit_after>"""
Reads thermal conductivity and statistics for a list of trials and saves results to a yaml file
"""
import os
import yaml
from thermof.parameters import k_parameters
from thermof.read import read_run_info, read_trial
# ---------------------------------------------------
main = '' # Directory of trials
results_file = '%s-kest-results.yaml' % os.path.basename(main) # Name of results file
# ---------------------------------------------------
trial_list = [os.path.join(main, i) for i in os.listdir(main) if os.path.isdir(os.path.join(main, i))]
results = dict(k=[], max=[], min=[], std=[], sigma=[], epsilon=[], trial=[])
k_par = k_parameters.copy()
for trial_index, trial in enumerate(trial_list, start=1):
trial_name = os.path.basename(trial)
print('\n%i / %i | %s #################################' % (trial_index, len(trial_list), trial_name), flush=True)
ri = read_run_info(os.path.join(trial, 'Run1'))
results['sigma'].append(ri['sigma'])
results['epsilon'].append(ri['epsilon'])
results['trial'].append(os.path.basename(trial))
if trial_name not in ['S6.00-E0.80', 'S6.00-E1.00']:
# sim = Simulation(read=trial, setup='trial', parameters=k_par)
trial = read_trial(trial, k_par=k_par, t0=5, t1=10, verbose=False)
results['k'].append(trial['avg']['k_est']['iso'])
results['max'].append(trial['avg']['k_est']['stats']['iso']['max'])
results['min'].append(trial['avg']['k_est']['stats']['iso']['min'])
results['std'].append(float(trial['avg']['k_est']['stats']['iso']['std']))
print('k: %.2f | std: %.2f | max: %.2f | min: %.2f'
% (results['k'][-1], results['std'][-1], results['max'][-1], results['min'][-1]))
else:
results['k'].append(None)
results['std'].append(None)
results['max'].append(None)
results['min'].append(None)
with open(results_file, 'w') as rfile:
yaml.dump(results, rfile)
|