| column | type | lengths / classes |
|---|---|---|
| commit | string | 40 – 40 |
| old_file | string | 4 – 118 |
| new_file | string | 4 – 118 |
| old_contents | string | 0 – 2.94k |
| new_contents | string | 1 – 4.43k |
| subject | string | 15 – 444 |
| message | string | 16 – 3.45k |
| lang | string (class) | 1 value |
| license | string (class) | 13 values |
| repos | string | 5 – 43.2k |
| prompt | string | 17 – 4.58k |
| response | string | 1 – 4.43k |
| prompt_tagged | string | 58 – 4.62k |
| response_tagged | string | 1 – 4.43k |
| text | string | 132 – 7.29k |
| text_tagged | string | 173 – 7.33k |
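
In the preview rows below, every commit adds a new file, so old_contents is empty, and the last six columns are derived from the earlier ones: prompt repeats the commit message, response and response_tagged repeat new_contents, prompt_tagged wraps old_contents and the message in <commit_before>, <commit_msg> and <commit_after> markers, and text / text_tagged concatenate the plain or tagged prompt with new_contents. The Python sketch below reconstructs those fields from one row; it is inferred from the preview rows alone, not taken from the dataset's build script, so treat the exact concatenation (and the derive_columns name) as an assumption.

def derive_columns(row):
    # Inferred from the preview rows, not from the dataset's actual build code:
    # reassemble the derived text columns from the base commit columns.
    prompt_tagged = (
        "<commit_before>" + row["old_contents"]
        + "<commit_msg>" + row["message"]
        + "<commit_after>"
    )
    return {
        "prompt": row["message"],
        "response": row["new_contents"],
        "prompt_tagged": prompt_tagged,
        "response_tagged": row["new_contents"],
        "text": row["message"] + row["new_contents"],
        "text_tagged": prompt_tagged + row["new_contents"],
    }


# Example with a toy row; real rows carry full file contents and commit messages.
example = {"old_contents": "", "message": "Add hello script", "new_contents": "print('hi')\n"}
print(derive_columns(example)["prompt_tagged"])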

commit: 54c26b6d7dbc26b155ace172cebb9a0ac722060b
old_file: pymatgen/symmetry/tests/test_spacegroup.py
new_file: pymatgen/symmetry/tests/test_spacegroup.py
old_contents: (empty)
new_contents:

#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division

__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"

import unittest
import os

from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen

test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')


class SpacegroupTest(unittest.TestCase):

    def setUp(self):
        p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
        self.structure = p.struct
        self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
        self.sg2 = Spacegroup.from_spacegroup_number(62)

    def test_are_symmetrically_equivalent(self):
        sites1 = [self.structure[i] for i in [0,1]]
        sites2 = [self.structure[i] for i in [2,3]]
        self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
        self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
        sites1 = [self.structure[i] for i in [0,1]]
        sites2 = [self.structure[i] for i in [0,2]]
        self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
        self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))


if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()

subject: Add a unittest for spacegroup. Still very basic.
message: Add a unittest for spacegroup. Still very basic.
Former-commit-id: d446365ad2622bc7871817613f98cebbefb7125f [formerly dbf71a7f4973d22259e81e095402d39acb823651]
Former-commit-id: a5817cbc15c7c0e6f568773b47b29db34200dd87
lang: Python
license: mit
repos:
gpetretto/pymatgen,tschaume/pymatgen,czhengsci/pymatgen,dongsenfo/pymatgen,matk86/pymatgen,tallakahath/pymatgen,nisse3000/pymatgen,fraricci/pymatgen,aykol/pymatgen,czhengsci/pymatgen,montoyjh/pymatgen,johnson1228/pymatgen,Bismarrck/pymatgen,gmatteo/pymatgen,ndardenne/pymatgen,richardtran415/pymatgen,gVallverdu/pymatgen,matk86/pymatgen,dongsenfo/pymatgen,fraricci/pymatgen,richardtran415/pymatgen,Bismarrck/pymatgen,gpetretto/pymatgen,nisse3000/pymatgen,vorwerkc/pymatgen,montoyjh/pymatgen,mbkumar/pymatgen,mbkumar/pymatgen,gpetretto/pymatgen,setten/pymatgen,fraricci/pymatgen,dongsenfo/pymatgen,matk86/pymatgen,tschaume/pymatgen,xhqu1981/pymatgen,matk86/pymatgen,richardtran415/pymatgen,tallakahath/pymatgen,vorwerkc/pymatgen,setten/pymatgen,nisse3000/pymatgen,gpetretto/pymatgen,gmatteo/pymatgen,johnson1228/pymatgen,aykol/pymatgen,xhqu1981/pymatgen,xhqu1981/pymatgen,blondegeek/pymatgen,mbkumar/pymatgen,tallakahath/pymatgen,montoyjh/pymatgen,aykol/pymatgen,tschaume/pymatgen,tschaume/pymatgen,davidwaroquiers/pymatgen,setten/pymatgen,blondegeek/pymatgen,dongsenfo/pymatgen,davidwaroquiers/pymatgen,Bismarrck/pymatgen,ndardenne/pymatgen,Bismarrck/pymatgen,richardtran415/pymatgen,tschaume/pymatgen,vorwerkc/pymatgen,mbkumar/pymatgen,vorwerkc/pymatgen,davidwaroquiers/pymatgen,johnson1228/pymatgen,nisse3000/pymatgen,fraricci/pymatgen,setten/pymatgen,czhengsci/pymatgen,ndardenne/pymatgen,Bismarrck/pymatgen,czhengsci/pymatgen,gVallverdu/pymatgen,gVallverdu/pymatgen,blondegeek/pymatgen,montoyjh/pymatgen,gVallverdu/pymatgen,johnson1228/pymatgen,blondegeek/pymatgen,davidwaroquiers/pymatgen

commit: 800ed9afbedc83f608c7228de55fc638db79bfdc
old_file: modernrpc/tests/test_auth_helpers.py
new_file: modernrpc/tests/test_auth_helpers.py
old_contents: (empty)
new_contents:

import pytest
import pytest_django
from django.contrib.auth.models import AnonymousUser, Permission
from django.contrib.contenttypes.models import ContentType

from modernrpc.auth import user_is_logged, user_is_superuser, user_has_perm, user_has_perms


def test_user_is_logged(anonymous_user, john_doe, superuser):
    assert user_is_logged(anonymous_user) is False
    assert user_is_logged(john_doe) is True
    assert user_is_logged(superuser) is True


def test_user_is_superuser(anonymous_user, john_doe, superuser):
    assert user_is_superuser(anonymous_user) is False
    assert user_is_superuser(john_doe) is False
    assert user_is_superuser(superuser) is True


def test_user_has_perm(anonymous_user, john_doe, superuser, auth_permissions):
    p = auth_permissions[0]
    assert user_has_perm(anonymous_user, p) is False
    assert user_has_perm(john_doe, p) is False
    assert user_has_perm(superuser, p) is True
    john_doe.user_permissions.add(p)
    assert user_has_perm(john_doe, p) is True


def test_user_has_perms(anonymous_user, john_doe, superuser, auth_permissions):
    perms = auth_permissions[0], auth_permissions[1]
    assert user_has_perms(anonymous_user, perms) is False
    assert user_has_perms(john_doe, perms) is False
    assert user_has_perms(superuser, perms) is True
    john_doe.user_permissions.add(auth_permissions[0])
    assert user_has_perms(john_doe, perms) is False
    john_doe.user_permissions.add(auth_permissions[1])
    assert user_has_perms(john_doe, perms) is True

subject: Create new tests, for auth helpers
message: Create new tests, for auth helpers
lang: Python
license: mit
repos:
alorence/django-modern-rpc,alorence/django-modern-rpc

commit: d2acba5a6a6d9049e46be89185c6b69803316ed2
old_file: scripts/fcn_learning_movie.py
new_file: scripts/fcn_learning_movie.py
old_contents: (empty)
new_contents:

#!/usr/bin/env python

import argparse
import glob
import os.path as osp
import re
import subprocess
import tempfile

import scipy.misc

import fcn


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'log_dir', help='Log dir which contains log_XX.png and viz_XX.png')
    args = parser.parse_args()
    log_dir = args.log_dir

    tmpdir = tempfile.mkdtemp()
    for log_file in glob.glob(osp.join(log_dir, 'log_*.png')):
        # get log image
        img_log = scipy.misc.imread(log_file, mode='RGB')
        # get visualized image
        match = re.match('log_([0-9]*).png', osp.basename(log_file))
        iter_stop = int(match.groups()[0])
        viz_file = osp.join(log_dir, 'viz_{}.png'.format(iter_stop))
        img_viz = scipy.misc.imread(viz_file, mode='RGB')
        # save tiled image
        img_tiled = fcn.util.get_tile_image(
            [img_log, img_viz], tile_shape=(2, 1),
            margin_color=(255, 255, 255))
        out_file = osp.join(tmpdir, '{}.png'.format(iter_stop))
        scipy.misc.imsave(out_file, img_tiled)

    # generate gif from images
    tmp_file = osp.join(tmpdir, '*.png')
    out_file = osp.join(log_dir, 'learning.gif')
    cmd = 'convert $(ls -v {}) gif:- | gifsicle -O3 --colors 256 > {}'\
        .format(tmp_file, out_file)
    subprocess.call(cmd, shell=True)
    print('wrote result: {}'.format(out_file))


if __name__ == '__main__':
    main()

subject: Add script to generate gif from images
message: Add script to generate gif from images
lang: Python
license: mit
repos:
wkentaro/fcn

commit: 8825d67bd8913cafcd1ff3215a9ef145a92b18c2
old_file: scripts/impute_names_model.py
new_file: scripts/impute_names_model.py
old_contents: (empty)
new_contents:

"""
"""
from framework.auth.utils import parse_name
from website.app import init_app
from website import models

app = init_app('website.settings', set_backends=True, routes=True)


def impute_names():
    for user in models.User.find():
        parsed = parse_name(user.fullname)
        for field, value in parsed.items():
            print field, value
            setattr(user, field, value)
        user.save()


if __name__ == '__main__':
    impute_names()

subject: Add script to impute name parts
message: Add script to impute name parts
lang: Python
license: apache-2.0
repos:
mattclark/osf.io,doublebits/osf.io,doublebits/osf.io,crcresearch/osf.io,cwisecarver/osf.io,cosenal/osf.io,cslzchen/osf.io,doublebits/osf.io,chennan47/osf.io,bdyetton/prettychart,bdyetton/prettychart,icereval/osf.io,CenterForOpenScience/osf.io,danielneis/osf.io,cosenal/osf.io,KAsante95/osf.io,icereval/osf.io,jinluyuan/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,sloria/osf.io,jnayak1/osf.io,kushG/osf.io,felliott/osf.io,KAsante95/osf.io,zamattiac/osf.io,TomHeatwole/osf.io,jmcarp/osf.io,arpitar/osf.io,emetsger/osf.io,barbour-em/osf.io,jnayak1/osf.io,kwierman/osf.io,ZobairAlijan/osf.io,rdhyee/osf.io,jeffreyliu3230/osf.io,cldershem/osf.io,lyndsysimon/osf.io,jmcarp/osf.io,billyhunt/osf.io,zamattiac/osf.io,himanshuo/osf.io,kushG/osf.io,cosenal/osf.io,dplorimer/osf,zkraime/osf.io,danielneis/osf.io,TomBaxter/osf.io,mfraezz/osf.io,billyhunt/osf.io,billyhunt/osf.io,haoyuchen1992/osf.io,zkraime/osf.io,arpitar/osf.io,amyshi188/osf.io,sbt9uc/osf.io,revanthkolli/osf.io,MerlinZhang/osf.io,asanfilippo7/osf.io,fabianvf/osf.io,HarryRybacki/osf.io,njantrania/osf.io,erinspace/osf.io,reinaH/osf.io,ckc6cz/osf.io,samanehsan/osf.io,rdhyee/osf.io,acshi/osf.io,arpitar/osf.io,emetsger/osf.io,zachjanicki/osf.io,petermalcolm/osf.io,crcresearch/osf.io,jinluyuan/osf.io,alexschiller/osf.io,icereval/osf.io,felliott/osf.io,mluke93/osf.io,KAsante95/osf.io,laurenrevere/osf.io,asanfilippo7/osf.io,RomanZWang/osf.io,Johnetordoff/osf.io,kch8qx/osf.io,caseyrollins/osf.io,Ghalko/osf.io,KAsante95/osf.io,brianjgeiger/osf.io,kch8qx/osf.io,dplorimer/osf,SSJohns/osf.io,Ghalko/osf.io,monikagrabowska/osf.io,ticklemepierce/osf.io,njantrania/osf.io,abought/osf.io,adlius/osf.io,billyhunt/osf.io,chrisseto/osf.io,RomanZWang/osf.io,brandonPurvis/osf.io,GaryKriebel/osf.io,chrisseto/osf.io,mattclark/osf.io,samchrisinger/osf.io,fabianvf/osf.io,leb2dg/osf.io,mluo613/osf.io,cosenal/osf.io,felliott/osf.io,brandonPurvis/osf.io,mluke93/osf.io,lyndsysimon/osf.io,reinaH/osf.io,wearpants/osf.io,asanfilippo7/osf.io,lamdnhan/osf.io,doublebits/osf.io,mfraezz/osf.io,haoyuchen1992/osf.io,ticklemepierce/osf.io,MerlinZhang/osf.io,jinluyuan/osf.io,ckc6cz/osf.io,baylee-d/osf.io,RomanZWang/osf.io,jolene-esposito/osf.io,ZobairAlijan/osf.io,mluo613/osf.io,aaxelb/osf.io,barbour-em/osf.io,kushG/osf.io,kch8qx/osf.io,wearpants/osf.io,pattisdr/osf.io,reinaH/osf.io,baylee-d/osf.io,monikagrabowska/osf.io,jolene-esposito/osf.io,cldershem/osf.io,binoculars/osf.io,TomHeatwole/osf.io,RomanZWang/osf.io,caneruguz/osf.io,revanthkolli/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,chrisseto/osf.io,GaryKriebel/osf.io,sbt9uc/osf.io,HarryRybacki/osf.io,sloria/osf.io,acshi/osf.io,erinspace/osf.io,pattisdr/osf.io,monikagrabowska/osf.io,petermalcolm/osf.io,wearpants/osf.io,Johnetordoff/osf.io,revanthkolli/osf.io,sbt9uc/osf.io,lyndsysimon/osf.io,haoyuchen1992/osf.io,RomanZWang/osf.io,AndrewSallans/osf.io,mfraezz/osf.io,caseyrygt/osf.io,mfraezz/osf.io,DanielSBrown/osf.io,mluke93/osf.io,chennan47/osf.io,cldershem/osf.io,kushG/osf.io,cslzchen/osf.io,monikagrabowska/osf.io,SSJohns/osf.io,ckc6cz/osf.io,zachjanicki/osf.io,cwisecarver/osf.io,doublebits/osf.io,ticklemepierce/osf.io,adlius/osf.io,GageGaskins/osf.io,HalcyonChimera/osf.io,caseyrygt/osf.io,zkraime/osf.io,acshi/osf.io,jeffreyliu3230/osf.io,jnayak1/osf.io,ZobairAlijan/osf.io,caseyrollins/osf.io,alexschiller/osf.io,dplorimer/osf,lamdnhan/osf.io,brianjgeiger/osf.io,leb2dg/osf.io,HarryRybacki/osf.io,barbour-em/osf.io,acshi/osf.io,zkraime/osf.io,cslzchen/osf.io,fabianvf/osf.io,ZobairAlijan/osf.io,danielneis/osf.io,leb2dg/osf.io,rdhyee/osf.io,SSJo
hns/osf.io,brandonPurvis/osf.io,himanshuo/osf.io,lyndsysimon/osf.io,samchrisinger/osf.io,caseyrollins/osf.io,TomBaxter/osf.io,samchrisinger/osf.io,jeffreyliu3230/osf.io,petermalcolm/osf.io,emetsger/osf.io,caneruguz/osf.io,zamattiac/osf.io,mattclark/osf.io,haoyuchen1992/osf.io,jnayak1/osf.io,samanehsan/osf.io,emetsger/osf.io,GageGaskins/osf.io,binoculars/osf.io,abought/osf.io,SSJohns/osf.io,GageGaskins/osf.io,jolene-esposito/osf.io,jmcarp/osf.io,dplorimer/osf,laurenrevere/osf.io,adlius/osf.io,zachjanicki/osf.io,chennan47/osf.io,AndrewSallans/osf.io,arpitar/osf.io,DanielSBrown/osf.io,bdyetton/prettychart,binoculars/osf.io,kch8qx/osf.io,abought/osf.io,amyshi188/osf.io,lamdnhan/osf.io,asanfilippo7/osf.io,chrisseto/osf.io,amyshi188/osf.io,sloria/osf.io,kwierman/osf.io,samchrisinger/osf.io,brianjgeiger/osf.io,sbt9uc/osf.io,ticklemepierce/osf.io,hmoco/osf.io,aaxelb/osf.io,KAsante95/osf.io,adlius/osf.io,pattisdr/osf.io,billyhunt/osf.io,TomBaxter/osf.io,leb2dg/osf.io,aaxelb/osf.io,Johnetordoff/osf.io,jinluyuan/osf.io,DanielSBrown/osf.io,alexschiller/osf.io,zachjanicki/osf.io,DanielSBrown/osf.io,baylee-d/osf.io,danielneis/osf.io,acshi/osf.io,zamattiac/osf.io,himanshuo/osf.io,bdyetton/prettychart,HarryRybacki/osf.io,hmoco/osf.io,reinaH/osf.io,CenterForOpenScience/osf.io,Nesiehr/osf.io,felliott/osf.io,laurenrevere/osf.io,wearpants/osf.io,crcresearch/osf.io,MerlinZhang/osf.io,mluke93/osf.io,cwisecarver/osf.io,brandonPurvis/osf.io,MerlinZhang/osf.io,cldershem/osf.io,njantrania/osf.io,himanshuo/osf.io,kch8qx/osf.io,CenterForOpenScience/osf.io,mluo613/osf.io,caneruguz/osf.io,kwierman/osf.io,Ghalko/osf.io,caseyrygt/osf.io,TomHeatwole/osf.io,rdhyee/osf.io,jolene-esposito/osf.io,caneruguz/osf.io,monikagrabowska/osf.io,jmcarp/osf.io,mluo613/osf.io,saradbowman/osf.io,HalcyonChimera/osf.io,HalcyonChimera/osf.io,caseyrygt/osf.io,abought/osf.io,GageGaskins/osf.io,kwierman/osf.io,Nesiehr/osf.io,jeffreyliu3230/osf.io,brandonPurvis/osf.io,fabianvf/osf.io,hmoco/osf.io,Johnetordoff/osf.io,samanehsan/osf.io,hmoco/osf.io,GaryKriebel/osf.io,Ghalko/osf.io,lamdnhan/osf.io,revanthkolli/osf.io,alexschiller/osf.io,erinspace/osf.io,amyshi188/osf.io,GageGaskins/osf.io,njantrania/osf.io,petermalcolm/osf.io,samanehsan/osf.io,Nesiehr/osf.io,TomHeatwole/osf.io,CenterForOpenScience/osf.io,alexschiller/osf.io,cwisecarver/osf.io,GaryKriebel/osf.io,ckc6cz/osf.io,saradbowman/osf.io,barbour-em/osf.io,Nesiehr/osf.io,mluo613/osf.io

commit: 15a0bb09b704d4abfbf35ba693f66853c6ed030d
old_file: corehq/apps/cleanup/management/commands/republish_forms_rebuild_cases.py
new_file: corehq/apps/cleanup/management/commands/republish_forms_rebuild_cases.py
old_contents: (empty)
new_contents:

from __future__ import absolute_import, print_function, unicode_literals

import logging
from io import open

from django.core.management.base import BaseCommand

from casexml.apps.case.xform import get_case_ids_from_form
from corehq.form_processor.backends.couch.dbaccessors import FormAccessorCouch
from corehq.form_processor.backends.sql.dbaccessors import FormAccessorSQL
from corehq.form_processor.change_publishers import publish_form_saved
from corehq.form_processor.exceptions import XFormNotFound
from corehq.form_processor.interfaces.processor import FormProcessorInterface
from corehq.form_processor.models import FormReprocessRebuild
from corehq.util.log import with_progress_bar

logger = logging.getLogger(__name__)
logger.setLevel('DEBUG')


class Command(BaseCommand):
    help = ('Republish form changes and rebuild cases')

    def add_arguments(self, parser):
        parser.add_argument('form_ids_file')

    def handle(self, form_ids_file, **options):
        cases_rebuilt = 0
        errored_form_ids = set()
        with open(form_ids_file, 'r') as f:
            lines = f.readlines()
            form_ids = [l.strip() for l in lines]
        for form_id in with_progress_bar(form_ids):
            try:
                form = get_form(form_id)
                publish_form_saved(form)
                cases_rebuilt += rebuild_case_changes(form)
            except Exception:
                errored_form_ids.add(form_id)
        logger.info("Rebuilt {} cases from {} forms. {} errors".format(
            cases_rebuilt, len(form_ids), len(errored_form_ids)))
        if errored_form_ids:
            logger.error("errors in forms:\n{}".format("\n".join(errored_form_ids)))
            with open('form_rebuild_errors.txt', 'w+') as f:
                print("\n".join(errored_form_ids), file=f)


def rebuild_case_changes(form, rebuild_reason=None):
    """
    Publishes changes for the form and rebuilds any touched cases.
    """
    domain = form.domain
    case_ids = get_case_ids_from_form(form)
    for case_id in case_ids:
        detail = FormReprocessRebuild(form_id=form.form_id)
        FormProcessorInterface(domain).hard_rebuild_case(case_id, detail)
    return len(case_ids)


def get_form(form_id):
    try:
        return FormAccessorSQL.get_form(form_id)
    except XFormNotFound:
        pass
    return FormAccessorCouch.get_form(form_id)

subject: Add command to rebuild cases and republish forms
message: Add command to rebuild cases and republish forms
lang: Python
license: bsd-3-clause
repos:
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq

commit: 5ae3bf220b08c28596729c682c44d2aa9e599f3c
old_file: TWLight/users/migrations/0063_check_terms_and_bundle_eligibility.py
new_file: TWLight/users/migrations/0063_check_terms_and_bundle_eligibility.py
old_contents: (empty)
new_contents:

from django.db import migrations


def remove_bundle_eligibility_on_users_with_unaccepted_terms(apps, schema_editor):
    Editor = apps.get_model("users", "Editor")
    for editor in Editor.objects.all():
        # If a user has not accepted the terms of use and has bundle eligibility,
        # remove the eligibility until user accepts the terms of use
        if not editor.user.userprofile.terms_of_use and editor.wp_bundle_eligible:
            editor.wp_bundle_eligible = False
            editor.save()


class Migration(migrations.Migration):
    dependencies = [("users", "0062_delete_hanging_userless_bundle_auths")]
    operations = [
        migrations.RunPython(remove_bundle_eligibility_on_users_with_unaccepted_terms)
    ]

subject: Add migration to remove eligibility is terms are not accepted
message: Add migration to remove eligibility is terms are not accepted
lang: Python
license: mit
repos:
WikipediaLibrary/TWLight,WikipediaLibrary/TWLight,WikipediaLibrary/TWLight,WikipediaLibrary/TWLight,WikipediaLibrary/TWLight

commit: 65c97b3ccdafb3df3d3328901541729c553f06df
old_file: bump-version.py
new_file: bump-version.py
old_contents: (empty)
new_contents:

"""Bump the version of this project:
Look for likely files: setup.py, doc/conf.py, etc. and update
version strings to whatever's passed in. At present, no check
is made for sanity purposes.
"""
import os, sys
import re

patterns = [
    ("winshell.py", r'__VERSION__ = "[^"]+"', '__VERSION__ = "%s"'),
    ("docs/conf.py", r"version\s*=\s*'[^']+'", "version = '%s'"),
    ("docs/conf.py", r"release\s*=\s*'[^']+'", "release= '%s'"),
    ("setup.py", r'version = "[^"]+",', r'version = "%s",'),
]


def replace_version (filename, pattern, replacement):
    with open (filename) as inf:
        text = inf.read ()
    with open (filename, "w") as outf:
        outf.write (re.sub (pattern, replacement, text))


def main (new_version):
    for filename, pattern, replacement in patterns:
        if os.path.exists (filename):
            print "Rewriting ", filename
            replace_version (filename, pattern, replacement % new_version)


if __name__ == '__main__':
    main (*sys.argv[1:])

subject: Add a way to bump the version number consistently
message: Add a way to bump the version number consistently
lang: Python
license: mit
repos:
tjguk/winshell,tjguk/winshell,tjguk/winshell

commit: 9bf713321f309a7652e53015aa9de4671593de74
old_file: puzzles/shared_count.py
new_file: puzzles/shared_count.py
old_contents: (empty)
new_contents:

from threading import Thread, Semaphore
import time

# shared count value
count = 0


def thread_finished(current_count=None):
    print "count:", current_count


class Incrementer(object):
    def __init__(self, mutex):
        self.mutex = mutex

    def run(self):
        global count
        self.mutex.acquire()
        new_count = count + 1
        # make a context switch on os job scheduler.
        time.sleep(0.001)
        count = new_count
        # callback to get current count value
        thread_finished(count)
        self.mutex.release()


def main():
    mutex = Semaphore(1)
    for i in xrange(10):
        Thread(target=Incrementer(mutex).run).start()


if __name__ == '__main__':
    main()

subject: Add shared count incrementer puzzle
message: Add shared count incrementer puzzle
lang: Python
license: mit
repos:
emre/semaphores
|
Add shared count incrementer puzzle
|
from threading import Thread, Semaphore
import time
# shared count value
count = 0
def thread_finished(current_count=None):
print "count:", current_count
class Incrementer(object):
def __init__(self, mutex):
self.mutex = mutex
def run(self):
global count
self.mutex.acquire()
new_count = count + 1
# make a context switch on os job scheduler.
time.sleep(0.001)
count = new_count
# callback to get current count value
thread_finished(count)
self.mutex.release()
def main():
mutex = Semaphore(1)
for i in xrange(10):
Thread(target=Incrementer(mutex).run).start()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add shared count incrementer puzzle<commit_after>
|
from threading import Thread, Semaphore
import time
# shared count value
count = 0
def thread_finished(current_count=None):
print "count:", current_count
class Incrementer(object):
def __init__(self, mutex):
self.mutex = mutex
def run(self):
global count
self.mutex.acquire()
new_count = count + 1
# make a context switch on os job scheduler.
time.sleep(0.001)
count = new_count
# callback to get current count value
thread_finished(count)
self.mutex.release()
def main():
mutex = Semaphore(1)
for i in xrange(10):
Thread(target=Incrementer(mutex).run).start()
if __name__ == '__main__':
main()
|
Add shared count incrementer puzzle
from threading import Thread, Semaphore
import time
# shared count value
count = 0
def thread_finished(current_count=None):
print "count:", current_count
class Incrementer(object):
def __init__(self, mutex):
self.mutex = mutex
def run(self):
global count
self.mutex.acquire()
new_count = count + 1
# make a context switch on os job scheduler.
time.sleep(0.001)
count = new_count
# callback to get current count value
thread_finished(count)
self.mutex.release()
def main():
mutex = Semaphore(1)
for i in xrange(10):
Thread(target=Incrementer(mutex).run).start()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add shared count incrementer puzzle<commit_after>
from threading import Thread, Semaphore
import time
# shared count value
count = 0
def thread_finished(current_count=None):
print "count:", current_count
class Incrementer(object):
def __init__(self, mutex):
self.mutex = mutex
def run(self):
global count
self.mutex.acquire()
new_count = count + 1
# make a context switch on os job scheduler.
time.sleep(0.001)
count = new_count
# callback to get current count value
thread_finished(count)
self.mutex.release()
def main():
mutex = Semaphore(1)
for i in xrange(10):
Thread(target=Incrementer(mutex).run).start()
if __name__ == '__main__':
main()
|
|
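The shared-count puzzle above relies on the 0.001 s sleep between reading and writing count to force a context switch, and on Semaphore(1) acting as a mutex so the ten increments are not lost. A minimal Python 3 re-run of the same experiment, sketched with threading.Lock and a join so the final value can be printed once every worker finishes (the names here are illustrative, not part of the original puzzle):

import threading
import time

count = 0

def increment(lock):
    global count
    with lock:                 # drop this lock (and dedent) to watch updates get lost
        value = count
        time.sleep(0.001)      # encourage a context switch mid-update
        count = value + 1

def main():
    lock = threading.Lock()
    threads = [threading.Thread(target=increment, args=(lock,)) for _ in range(10)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print("count:", count)     # 10 with the lock; typically 1 without it

if __name__ == '__main__':
    main()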
84474c08451825a6632e6c8e9c1b60702dab0ad3
|
urbanTwitBot.py
|
urbanTwitBot.py
|
#!/usr/bin/env python
from bs4 import BeautifulSoup
import requests
import tweepy
from config import *
url = 'http://www.urbandictionary.com/'
page = requests.get(url)
soup = BeautifulSoup(page.text, "lxml")
data = dict()
data['def'] = soup(class_ = 'meaning')[0].text
data['word'] = soup(class_ = 'word')[0].text
word = data['word'].strip('u').strip('\n')
meaning = data['def'].strip('u').strip('\n')
short = 'https://goo.gl/gZMF'
payLoad = 'Daily #UrbanDictionary> %s: %s ... %s' % (word, meaning[:65], short)
# Set up OAuth and integrate with API
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(auth)
def tweet(payLoad):
try:
print(payLoad)
if payLoad != '\n':
api.update_status(payLoad)
else:
pass
except tweepy.TweepError as e:
print(e.reason)
tweet(payLoad)
|
Add twitbot for urban dictionary
|
Add twitbot for urban dictionary
|
Python
|
unlicense
|
r3dact3d/tweeter
|
Add twitbot for urban dictionary
|
#!/usr/bin/env python
from bs4 import BeautifulSoup
import requests
import tweepy
from config import *
url = 'http://www.urbandictionary.com/'
page = requests.get(url)
soup = BeautifulSoup(page.text, "lxml")
data = dict()
data['def'] = soup(class_ = 'meaning')[0].text
data['word'] = soup(class_ = 'word')[0].text
word = data['word'].strip('u').strip('\n')
meaning = data['def'].strip('u').strip('\n')
short = 'https://goo.gl/gZMF'
payLoad = 'Daily #UrbanDictionary> %s: %s ... %s' % (word, meaning[:65], short)
# Set up OAuth and integrate with API
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(auth)
def tweet(payLoad):
try:
print(payLoad)
if payLoad != '\n':
api.update_status(payLoad)
else:
pass
except tweepy.TweepError as e:
print(e.reason)
tweet(payLoad)
|
<commit_before><commit_msg>Add twitbot for urban dictionary<commit_after>
|
#!/usr/bin/env python
from bs4 import BeautifulSoup
import requests
import tweepy
from config import *
url = 'http://www.urbandictionary.com/'
page = requests.get(url)
soup = BeautifulSoup(page.text, "lxml")
data = dict()
data['def'] = soup(class_ = 'meaning')[0].text
data['word'] = soup(class_ = 'word')[0].text
word = data['word'].strip('u').strip('\n')
meaning = data['def'].strip('u').strip('\n')
short = 'https://goo.gl/gZMF'
payLoad = 'Daily #UrbanDictionary> %s: %s ... %s' % (word, meaning[:65], short)
# Set up OAuth and integrate with API
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(auth)
def tweet(payLoad):
try:
print(payLoad)
if payLoad != '\n':
api.update_status(payLoad)
else:
pass
except tweepy.TweepError as e:
print(e.reason)
tweet(payLoad)
|
Add twitbot for urban dictionary#!/usr/bin/env python
from bs4 import BeautifulSoup
import requests
import tweepy
from config import *
url = 'http://www.urbandictionary.com/'
page = requests.get(url)
soup = BeautifulSoup(page.text, "lxml")
data = dict()
data['def'] = soup(class_ = 'meaning')[0].text
data['word'] = soup(class_ = 'word')[0].text
word = data['word'].strip('u').strip('\n')
meaning = data['def'].strip('u').strip('\n')
short = 'https://goo.gl/gZMF'
payLoad = 'Daily #UrbanDictionary> %s: %s ... %s' % (word, meaning[:65], short)
# Set up OAuth and integrate with API
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(auth)
def tweet(payLoad):
try:
print(payLoad)
if payLoad != '\n':
api.update_status(payLoad)
else:
pass
except tweepy.TweepError as e:
print(e.reason)
tweet(payLoad)
|
<commit_before><commit_msg>Add twitbot for urban dictionary<commit_after>#!/usr/bin/env python
from bs4 import BeautifulSoup
import requests
import tweepy
from config import *
url = 'http://www.urbandictionary.com/'
page = requests.get(url)
soup = BeautifulSoup(page.text, "lxml")
data = dict()
data['def'] = soup(class_ = 'meaning')[0].text
data['word'] = soup(class_ = 'word')[0].text
word = data['word'].strip('u').strip('\n')
meaning = data['def'].strip('u').strip('\n')
short = 'https://goo.gl/gZMF'
payLoad = 'Daily #UrbanDictionary> %s: %s ... %s' % (word, meaning[:65], short)
# Set up OAuth and integrate with API
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(auth)
def tweet(payLoad):
try:
print(payLoad)
if payLoad != '\n':
api.update_status(payLoad)
else:
pass
except tweepy.TweepError as e:
print(e.reason)
tweet(payLoad)
|
|
225249f5e677e155da036bb86b157707b9c8ba3d
|
corehq/apps/userreports/management/commands/count_ucrs.py
|
corehq/apps/userreports/management/commands/count_ucrs.py
|
from __future__ import absolute_import
from __future__ import print_function
import textwrap
from datetime import datetime
from django.conf import settings
from django.core.management import BaseCommand
from dimagi.utils.couch.database import iter_docs
from corehq.dbaccessors.couchapps.all_docs import get_doc_ids_by_class
from corehq.util.log import with_progress_bar
from corehq.apps.userreports.models import ReportConfiguration
class Command(BaseCommand):
help = "Pull stats about UCR and report-builder reports server-wide"
def handle(self, **options):
config_ids = get_doc_ids_by_class(ReportConfiguration)
total_count = len(config_ids)
builder_count, ucr_count = 0, 0
for doc in with_progress_bar(iter_docs(ReportConfiguration.get_db(), config_ids), total_count):
if doc['report_meta']['created_by_builder']:
builder_count += 1
else:
ucr_count += 1
print(textwrap.dedent("""
As of {}, on {} there are {} total UCRs:
{} Report Builder Reports
{} UCR Report Configs
""".format(datetime.utcnow().date(), settings.SERVER_ENVIRONMENT, total_count,
builder_count, ucr_count)))
|
Add command to count UCRs on an environment
|
Add command to count UCRs on an environment
This is something we pull ~annually. It's simple enough to write, but
hopefully this'll give some confidence that we're pulling the same thing
every time.
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add command to count UCRs on an environment
This is something we pull ~annually. It's simple enough to write, but
hopefully this'll give some confidence that we're pulling the same thing
every time.
|
from __future__ import absolute_import
from __future__ import print_function
import textwrap
from datetime import datetime
from django.conf import settings
from django.core.management import BaseCommand
from dimagi.utils.couch.database import iter_docs
from corehq.dbaccessors.couchapps.all_docs import get_doc_ids_by_class
from corehq.util.log import with_progress_bar
from corehq.apps.userreports.models import ReportConfiguration
class Command(BaseCommand):
help = "Pull stats about UCR and report-builder reports server-wide"
def handle(self, **options):
config_ids = get_doc_ids_by_class(ReportConfiguration)
total_count = len(config_ids)
builder_count, ucr_count = 0, 0
for doc in with_progress_bar(iter_docs(ReportConfiguration.get_db(), config_ids), total_count):
if doc['report_meta']['created_by_builder']:
builder_count += 1
else:
ucr_count += 1
print(textwrap.dedent("""
As of {}, on {} there are {} total UCRs:
{} Report Builder Reports
{} UCR Report Configs
""".format(datetime.utcnow().date(), settings.SERVER_ENVIRONMENT, total_count,
builder_count, ucr_count)))
|
<commit_before><commit_msg>Add command to count UCRs on an environment
This is something we pull ~annually. It's simple enough to write, but
hopefully this'll give some confidence that we're pulling the same thing
every time.<commit_after>
|
from __future__ import absolute_import
from __future__ import print_function
import textwrap
from datetime import datetime
from django.conf import settings
from django.core.management import BaseCommand
from dimagi.utils.couch.database import iter_docs
from corehq.dbaccessors.couchapps.all_docs import get_doc_ids_by_class
from corehq.util.log import with_progress_bar
from corehq.apps.userreports.models import ReportConfiguration
class Command(BaseCommand):
help = "Pull stats about UCR and report-builder reports server-wide"
def handle(self, **options):
config_ids = get_doc_ids_by_class(ReportConfiguration)
total_count = len(config_ids)
builder_count, ucr_count = 0, 0
for doc in with_progress_bar(iter_docs(ReportConfiguration.get_db(), config_ids), total_count):
if doc['report_meta']['created_by_builder']:
builder_count += 1
else:
ucr_count += 1
print(textwrap.dedent("""
As of {}, on {} there are {} total UCRs:
{} Report Builder Reports
{} UCR Report Configs
""".format(datetime.utcnow().date(), settings.SERVER_ENVIRONMENT, total_count,
builder_count, ucr_count)))
|
Add command to count UCRs on an environment
This is something we pull ~annually. It's simple enough to write, but
hopefully this'll give some confidence that we're pulling the same thing
every time.from __future__ import absolute_import
from __future__ import print_function
import textwrap
from datetime import datetime
from django.conf import settings
from django.core.management import BaseCommand
from dimagi.utils.couch.database import iter_docs
from corehq.dbaccessors.couchapps.all_docs import get_doc_ids_by_class
from corehq.util.log import with_progress_bar
from corehq.apps.userreports.models import ReportConfiguration
class Command(BaseCommand):
help = "Pull stats about UCR and report-builder reports server-wide"
def handle(self, **options):
config_ids = get_doc_ids_by_class(ReportConfiguration)
total_count = len(config_ids)
builder_count, ucr_count = 0, 0
for doc in with_progress_bar(iter_docs(ReportConfiguration.get_db(), config_ids), total_count):
if doc['report_meta']['created_by_builder']:
builder_count += 1
else:
ucr_count += 1
print(textwrap.dedent("""
As of {}, on {} there are {} total UCRs:
{} Report Builder Reports
{} UCR Report Configs
""".format(datetime.utcnow().date(), settings.SERVER_ENVIRONMENT, total_count,
builder_count, ucr_count)))
|
<commit_before><commit_msg>Add command to count UCRs on an environment
This is something we pull ~annually. It's simple enough to write, but
hopefully this'll give some confidence that we're pulling the same thing
every time.<commit_after>from __future__ import absolute_import
from __future__ import print_function
import textwrap
from datetime import datetime
from django.conf import settings
from django.core.management import BaseCommand
from dimagi.utils.couch.database import iter_docs
from corehq.dbaccessors.couchapps.all_docs import get_doc_ids_by_class
from corehq.util.log import with_progress_bar
from corehq.apps.userreports.models import ReportConfiguration
class Command(BaseCommand):
help = "Pull stats about UCR and report-builder reports server-wide"
def handle(self, **options):
config_ids = get_doc_ids_by_class(ReportConfiguration)
total_count = len(config_ids)
builder_count, ucr_count = 0, 0
for doc in with_progress_bar(iter_docs(ReportConfiguration.get_db(), config_ids), total_count):
if doc['report_meta']['created_by_builder']:
builder_count += 1
else:
ucr_count += 1
print(textwrap.dedent("""
As of {}, on {} there are {} total UCRs:
{} Report Builder Reports
{} UCR Report Configs
""".format(datetime.utcnow().date(), settings.SERVER_ENVIRONMENT, total_count,
builder_count, ucr_count)))
|
|
2eeab9e35badba0c271b1d1671f08347a5c5e06e
|
penchy/tests/test_elements.py
|
penchy/tests/test_elements.py
|
import unittest2
from penchy.tests.util import MockPipelineElement
class PipelineElementHookTest(unittest2.TestCase):
def setUp(self):
self.e = MockPipelineElement()
self.list_ = [23, 42, 5]
def test_pre_hooks(self):
self.e.prehooks = [
lambda: self.list_.__setitem__(0, 1),
lambda: self.list_.__setitem__(1, 1),
lambda: self.list_.__setitem__(2, 1)]
self.e.run()
self.assertListEqual(self.list_, [1, 1, 1])
def test_post_hooks(self):
self.e.posthooks = [
lambda: self.list_.__setitem__(0, 1),
lambda: self.list_.__setitem__(1, 1),
lambda: self.list_.__setitem__(2, 1)]
self.e.run()
self.assertListEqual(self.list_, [1, 1, 1])
|
Add test for PipelineElement hooks.
|
tests: Add test for PipelineElement hooks.
Signed-off-by: Michael Markert <5eb998b7ac86da375651a4cd767b88c9dad25896@googlemail.com>
|
Python
|
mit
|
fhirschmann/penchy,fhirschmann/penchy
|
tests: Add test for PipelineElement hooks.
Signed-off-by: Michael Markert <5eb998b7ac86da375651a4cd767b88c9dad25896@googlemail.com>
|
import unittest2
from penchy.tests.util import MockPipelineElement
class PipelineElementHookTest(unittest2.TestCase):
def setUp(self):
self.e = MockPipelineElement()
self.list_ = [23, 42, 5]
def test_pre_hooks(self):
self.e.prehooks = [
lambda: self.list_.__setitem__(0, 1),
lambda: self.list_.__setitem__(1, 1),
lambda: self.list_.__setitem__(2, 1)]
self.e.run()
self.assertListEqual(self.list_, [1, 1, 1])
def test_post_hooks(self):
self.e.posthooks = [
lambda: self.list_.__setitem__(0, 1),
lambda: self.list_.__setitem__(1, 1),
lambda: self.list_.__setitem__(2, 1)]
self.e.run()
self.assertListEqual(self.list_, [1, 1, 1])
|
<commit_before><commit_msg>tests: Add test for PipelineElement hooks.
Signed-off-by: Michael Markert <5eb998b7ac86da375651a4cd767b88c9dad25896@googlemail.com><commit_after>
|
import unittest2
from penchy.tests.util import MockPipelineElement
class PipelineElementHookTest(unittest2.TestCase):
def setUp(self):
self.e = MockPipelineElement()
self.list_ = [23, 42, 5]
def test_pre_hooks(self):
self.e.prehooks = [
lambda: self.list_.__setitem__(0, 1),
lambda: self.list_.__setitem__(1, 1),
lambda: self.list_.__setitem__(2, 1)]
self.e.run()
self.assertListEqual(self.list_, [1, 1, 1])
def test_post_hooks(self):
self.e.posthooks = [
lambda: self.list_.__setitem__(0, 1),
lambda: self.list_.__setitem__(1, 1),
lambda: self.list_.__setitem__(2, 1)]
self.e.run()
self.assertListEqual(self.list_, [1, 1, 1])
|
tests: Add test for PipelineElement hooks.
Signed-off-by: Michael Markert <5eb998b7ac86da375651a4cd767b88c9dad25896@googlemail.com>import unittest2
from penchy.tests.util import MockPipelineElement
class PipelineElementHookTest(unittest2.TestCase):
def setUp(self):
self.e = MockPipelineElement()
self.list_ = [23, 42, 5]
def test_pre_hooks(self):
self.e.prehooks = [
lambda: self.list_.__setitem__(0, 1),
lambda: self.list_.__setitem__(1, 1),
lambda: self.list_.__setitem__(2, 1)]
self.e.run()
self.assertListEqual(self.list_, [1, 1, 1])
def test_post_hooks(self):
self.e.posthooks = [
lambda: self.list_.__setitem__(0, 1),
lambda: self.list_.__setitem__(1, 1),
lambda: self.list_.__setitem__(2, 1)]
self.e.run()
self.assertListEqual(self.list_, [1, 1, 1])
|
<commit_before><commit_msg>tests: Add test for PipelineElement hooks.
Signed-off-by: Michael Markert <5eb998b7ac86da375651a4cd767b88c9dad25896@googlemail.com><commit_after>import unittest2
from penchy.tests.util import MockPipelineElement
class PipelineElementHookTest(unittest2.TestCase):
def setUp(self):
self.e = MockPipelineElement()
self.list_ = [23, 42, 5]
def test_pre_hooks(self):
self.e.prehooks = [
lambda: self.list_.__setitem__(0, 1),
lambda: self.list_.__setitem__(1, 1),
lambda: self.list_.__setitem__(2, 1)]
self.e.run()
self.assertListEqual(self.list_, [1, 1, 1])
def test_post_hooks(self):
self.e.posthooks = [
lambda: self.list_.__setitem__(0, 1),
lambda: self.list_.__setitem__(1, 1),
lambda: self.list_.__setitem__(2, 1)]
self.e.run()
self.assertListEqual(self.list_, [1, 1, 1])
|
|
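The assertions above only pass if run() invokes each registered prehook before the element's own work and each posthook after it. The real MockPipelineElement lives in penchy.tests.util and is not shown in this record, so the following is merely a guess at the minimal shape it would need:

class PipelineElement:
    """Runs prehooks, then the element body, then posthooks."""
    def __init__(self):
        self.prehooks = []
        self.posthooks = []

    def run(self):
        for hook in self.prehooks:
            hook()
        self._run()            # subclasses put their real work here
        for hook in self.posthooks:
            hook()

    def _run(self):
        pass

class MockPipelineElement(PipelineElement):
    pass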
a7a887706c30afb861d0db2e00297e3f918c0f22
|
euler009.py
|
euler009.py
|
#!/usr/bin/python
"""
First attempt to resolve this, with a brute force algorithm,
it may not find the answer
"""
from math import sqrt
TARGET = 1000
LIMIT = int(sqrt(TARGET))
a, b, c = 0, 0, 0
m = 1
while(a + b + c != TARGET and m < LIMIT):
m += 1
a, b, c = 0, 0, 0
n = 1
while(a + b + c < TARGET and n < m):
# This generate almost all the primitive Pythagorean triplets
# if we're lucky the one we search for is primitive
a = (m * m) - (n * n)
b = 2 * m * n
c = (m * m) + (n * n)
n += 1
if a + b + c == TARGET:
print(a * b * c)
else:
print("Answer not found")
|
Add solution for problem 9
|
Add solution for problem 9
|
Python
|
mit
|
cifvts/PyEuler
|
Add solution for problem 9
|
#!/usr/bin/python
"""
First attempt to resolve this, with a brute force algorithm,
it may not find the answer
"""
from math import sqrt
TARGET = 1000
LIMIT = int(sqrt(TARGET))
a, b, c = 0, 0, 0
m = 1
while(a + b + c != TARGET and m < LIMIT):
m += 1
a, b, c = 0, 0, 0
n = 1
while(a + b + c < TARGET and n < m):
# This generate almost all the primitive Pythagorean triplets
# if we're lucky the one we search for is primitive
a = (m * m) - (n * n)
b = 2 * m * n
c = (m * m) + (n * n)
n += 1
if a + b + c == TARGET:
print(a * b * c)
else:
print("Answer not found")
|
<commit_before><commit_msg>Add solution for problem 9<commit_after>
|
#!/usr/bin/python
"""
First attempt to resolve this, with a brute force algorithm,
it may not find the answer
"""
from math import sqrt
TARGET = 1000
LIMIT = int(sqrt(TARGET))
a, b, c = 0, 0, 0
m = 1
while(a + b + c != TARGET and m < LIMIT):
m += 1
a, b, c = 0, 0, 0
n = 1
while(a + b + c < TARGET and n < m):
# This generate almost all the primitive Pythagorean triplets
# if we're lucky the one we search for is primitive
a = (m * m) - (n * n)
b = 2 * m * n
c = (m * m) + (n * n)
n += 1
if a + b + c == TARGET:
print(a * b * c)
else:
print("Answer not found")
|
Add solution for problem 9#!/usr/bin/python
"""
First attempt to resolve this, with a brute force algorithm,
it may not find the answer
"""
from math import sqrt
TARGET = 1000
LIMIT = int(sqrt(TARGET))
a, b, c = 0, 0, 0
m = 1
while(a + b + c != TARGET and m < LIMIT):
m += 1
a, b, c = 0, 0, 0
n = 1
while(a + b + c < TARGET and n < m):
# This generate almost all the primitive Pythagorean triplets
# if we're lucky the one we search for is primitive
a = (m * m) - (n * n)
b = 2 * m * n
c = (m * m) + (n * n)
n += 1
if a + b + c == TARGET:
print(a * b * c)
else:
print("Answer not found")
|
<commit_before><commit_msg>Add solution for problem 9<commit_after>#!/usr/bin/python
"""
First attempt to resolve this, with a brute force algorithm,
it may not find the answer
"""
from math import sqrt
TARGET = 1000
LIMIT = int(sqrt(TARGET))
a, b, c = 0, 0, 0
m = 1
while(a + b + c != TARGET and m < LIMIT):
m += 1
a, b, c = 0, 0, 0
n = 1
while(a + b + c < TARGET and n < m):
# This generate almost all the primitive Pythagorean triplets
# if we're lucky the one we search for is primitive
a = (m * m) - (n * n)
b = 2 * m * n
c = (m * m) + (n * n)
n += 1
if a + b + c == TARGET:
print(a * b * c)
else:
print("Answer not found")
|
|
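The docstring above hedges that the m/n enumeration "may not find the answer"; for a + b + c = 1000 it actually does, because the code never requires m and n to be coprime, so m = 20, n = 5 produces 375, 200, 425, a multiple of the primitive 8-15-17 triplet. A direct brute-force cross-check is still cheap at this size and easier to convince yourself of; a sketch assuming the same target:

TARGET = 1000

def product_of_target_triplet(target=TARGET):
    # a < b < c and a + b + c = target, so a never exceeds target // 3
    for a in range(1, target // 3):
        for b in range(a + 1, (target - a) // 2 + 1):
            c = target - a - b
            if a * a + b * b == c * c:
                return a * b * c
    return None

print(product_of_target_triplet())   # 31875000, from the 200-375-425 triplet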
9b4f05df901c0e44cfdda2b4753253e0a263b03a
|
contrib/scrape-azure-prices.py
|
contrib/scrape-azure-prices.py
|
import json
import time
import os
import requests
PRICES_URL = ("https://azure.microsoft.com/api/v3/pricing/"
"virtual-machines/calculator/")
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
PRICING_FILE_PATH = os.path.join(BASE_PATH, '../libcloud/data/pricing.json')
PRICING_FILE_PATH = os.path.abspath(PRICING_FILE_PATH)
def get_azure_prices():
prices_raw = requests.get(PRICES_URL).json()
region_map = {}
regions = []
for region in prices_raw['regions']:
regions.append(region['slug'])
region_map[region['slug']] = region['displayName']
result = {"windows": {}, "linux": {}}
parsed_sizes = {"lowpriority", "basic", "standard"}
for offer, value in prices_raw['offers'].items():
size_raw = offer.split("-")
# Servers that go by the core with global price are not yet added
if len(size_raw) != 3 or size_raw[2] not in parsed_sizes:
continue
if size_raw[0] not in {'linux', 'windows'}:
continue
size = size_raw[2] + size_raw[1]
prices = {}
if not value['prices'].get('perhour'):
continue
for reg, price in value['prices']['perhour'].items():
region = region_map[reg].lower().replace(" ", "")
region = region.replace("(public)", "") # for germany
region = region.replace("(sovereign)", "") # for germany
prices[region] = price['value']
result[size_raw[0]][size] = prices
return result
def write_azure_prices(file_path, prices):
with open(file_path, 'r') as f:
content = f.read()
data = json.loads(content)
data['updated'] = int(time.time())
data['compute']['azure_linux'] = prices['linux']
data['compute']['azure_windows'] = prices['windows']
content = json.dumps(data, indent=4)
lines = content.splitlines()
lines = [line.rstrip() for line in lines]
content = '\n'.join(lines)
with open(file_path, 'w') as fp:
fp.write(content)
def main():
res = get_azure_prices()
write_azure_prices(PRICING_FILE_PATH, res)
if __name__ == "__main__":
main()
|
Add new file for price scraping for azure arm cloud
|
Add new file for price scraping for azure arm cloud
|
Python
|
apache-2.0
|
Kami/libcloud,andrewsomething/libcloud,mistio/libcloud,mistio/libcloud,Kami/libcloud,andrewsomething/libcloud,mistio/libcloud,apache/libcloud,apache/libcloud,andrewsomething/libcloud,apache/libcloud,Kami/libcloud
|
Add new file for price scraping for azure arm cloud
|
import json
import time
import os
import requests
PRICES_URL = ("https://azure.microsoft.com/api/v3/pricing/"
"virtual-machines/calculator/")
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
PRICING_FILE_PATH = os.path.join(BASE_PATH, '../libcloud/data/pricing.json')
PRICING_FILE_PATH = os.path.abspath(PRICING_FILE_PATH)
def get_azure_prices():
prices_raw = requests.get(PRICES_URL).json()
region_map = {}
regions = []
for region in prices_raw['regions']:
regions.append(region['slug'])
region_map[region['slug']] = region['displayName']
result = {"windows": {}, "linux": {}}
parsed_sizes = {"lowpriority", "basic", "standard"}
for offer, value in prices_raw['offers'].items():
size_raw = offer.split("-")
# Servers that go by the core with global price are not yet added
if len(size_raw) != 3 or size_raw[2] not in parsed_sizes:
continue
if size_raw[0] not in {'linux', 'windows'}:
continue
size = size_raw[2] + size_raw[1]
prices = {}
if not value['prices'].get('perhour'):
continue
for reg, price in value['prices']['perhour'].items():
region = region_map[reg].lower().replace(" ", "")
region = region.replace("(public)", "") # for germany
region = region.replace("(sovereign)", "") # for germany
prices[region] = price['value']
result[size_raw[0]][size] = prices
return result
def write_azure_prices(file_path, prices):
with open(file_path, 'r') as f:
content = f.read()
data = json.loads(content)
data['updated'] = int(time.time())
data['compute']['azure_linux'] = prices['linux']
data['compute']['azure_windows'] = prices['windows']
content = json.dumps(data, indent=4)
lines = content.splitlines()
lines = [line.rstrip() for line in lines]
content = '\n'.join(lines)
with open(file_path, 'w') as fp:
fp.write(content)
def main():
res = get_azure_prices()
write_azure_prices(PRICING_FILE_PATH, res)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add new file for price scraping for azure arm cloud<commit_after>
|
import json
import time
import os
import requests
PRICES_URL = ("https://azure.microsoft.com/api/v3/pricing/"
"virtual-machines/calculator/")
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
PRICING_FILE_PATH = os.path.join(BASE_PATH, '../libcloud/data/pricing.json')
PRICING_FILE_PATH = os.path.abspath(PRICING_FILE_PATH)
def get_azure_prices():
prices_raw = requests.get(PRICES_URL).json()
region_map = {}
regions = []
for region in prices_raw['regions']:
regions.append(region['slug'])
region_map[region['slug']] = region['displayName']
result = {"windows": {}, "linux": {}}
parsed_sizes = {"lowpriority", "basic", "standard"}
for offer, value in prices_raw['offers'].items():
size_raw = offer.split("-")
# Servers that go by the core with global price are not yet added
if len(size_raw) != 3 or size_raw[2] not in parsed_sizes:
continue
if size_raw[0] not in {'linux', 'windows'}:
continue
size = size_raw[2] + size_raw[1]
prices = {}
if not value['prices'].get('perhour'):
continue
for reg, price in value['prices']['perhour'].items():
region = region_map[reg].lower().replace(" ", "")
region = region.replace("(public)", "") # for germany
region = region.replace("(sovereign)", "") # for germany
prices[region] = price['value']
result[size_raw[0]][size] = prices
return result
def write_azure_prices(file_path, prices):
with open(file_path, 'r') as f:
content = f.read()
data = json.loads(content)
data['updated'] = int(time.time())
data['compute']['azure_linux'] = prices['linux']
data['compute']['azure_windows'] = prices['windows']
content = json.dumps(data, indent=4)
lines = content.splitlines()
lines = [line.rstrip() for line in lines]
content = '\n'.join(lines)
with open(file_path, 'w') as fp:
fp.write(content)
def main():
res = get_azure_prices()
write_azure_prices(PRICING_FILE_PATH, res)
if __name__ == "__main__":
main()
|
Add new file for price scraping for azure arm cloudimport json
import time
import os
import requests
PRICES_URL = ("https://azure.microsoft.com/api/v3/pricing/"
"virtual-machines/calculator/")
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
PRICING_FILE_PATH = os.path.join(BASE_PATH, '../libcloud/data/pricing.json')
PRICING_FILE_PATH = os.path.abspath(PRICING_FILE_PATH)
def get_azure_prices():
prices_raw = requests.get(PRICES_URL).json()
region_map = {}
regions = []
for region in prices_raw['regions']:
regions.append(region['slug'])
region_map[region['slug']] = region['displayName']
result = {"windows": {}, "linux": {}}
parsed_sizes = {"lowpriority", "basic", "standard"}
for offer, value in prices_raw['offers'].items():
size_raw = offer.split("-")
# Servers that go by the core with global price are not yet added
if len(size_raw) != 3 or size_raw[2] not in parsed_sizes:
continue
if size_raw[0] not in {'linux', 'windows'}:
continue
size = size_raw[2] + size_raw[1]
prices = {}
if not value['prices'].get('perhour'):
continue
for reg, price in value['prices']['perhour'].items():
region = region_map[reg].lower().replace(" ", "")
region = region.replace("(public)", "") # for germany
region = region.replace("(sovereign)", "") # for germany
prices[region] = price['value']
result[size_raw[0]][size] = prices
return result
def write_azure_prices(file_path, prices):
with open(file_path, 'r') as f:
content = f.read()
data = json.loads(content)
data['updated'] = int(time.time())
data['compute']['azure_linux'] = prices['linux']
data['compute']['azure_windows'] = prices['windows']
content = json.dumps(data, indent=4)
lines = content.splitlines()
lines = [line.rstrip() for line in lines]
content = '\n'.join(lines)
with open(file_path, 'w') as fp:
fp.write(content)
def main():
res = get_azure_prices()
write_azure_prices(PRICING_FILE_PATH, res)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add new file for price scraping for azure arm cloud<commit_after>import json
import time
import os
import requests
PRICES_URL = ("https://azure.microsoft.com/api/v3/pricing/"
"virtual-machines/calculator/")
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
PRICING_FILE_PATH = os.path.join(BASE_PATH, '../libcloud/data/pricing.json')
PRICING_FILE_PATH = os.path.abspath(PRICING_FILE_PATH)
def get_azure_prices():
prices_raw = requests.get(PRICES_URL).json()
region_map = {}
regions = []
for region in prices_raw['regions']:
regions.append(region['slug'])
region_map[region['slug']] = region['displayName']
result = {"windows": {}, "linux": {}}
parsed_sizes = {"lowpriority", "basic", "standard"}
for offer, value in prices_raw['offers'].items():
size_raw = offer.split("-")
# Servers that go by the core with global price are not yet added
if len(size_raw) != 3 or size_raw[2] not in parsed_sizes:
continue
if size_raw[0] not in {'linux', 'windows'}:
continue
size = size_raw[2] + size_raw[1]
prices = {}
if not value['prices'].get('perhour'):
continue
for reg, price in value['prices']['perhour'].items():
region = region_map[reg].lower().replace(" ", "")
region = region.replace("(public)", "") # for germany
region = region.replace("(sovereign)", "") # for germany
prices[region] = price['value']
result[size_raw[0]][size] = prices
return result
def write_azure_prices(file_path, prices):
with open(file_path, 'r') as f:
content = f.read()
data = json.loads(content)
data['updated'] = int(time.time())
data['compute']['azure_linux'] = prices['linux']
data['compute']['azure_windows'] = prices['windows']
content = json.dumps(data, indent=4)
lines = content.splitlines()
lines = [line.rstrip() for line in lines]
content = '\n'.join(lines)
with open(file_path, 'w') as fp:
fp.write(content)
def main():
res = get_azure_prices()
write_azure_prices(PRICING_FILE_PATH, res)
if __name__ == "__main__":
main()
|
|
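write_azure_prices rewrites pricing.json in place, so a crash mid-write leaves a truncated file. A defensive variation (an aside, not part of the scraped commit) is to serialize to a sibling temporary file and swap it in with os.replace, which is atomic on POSIX and effectively so on modern Windows:

import json
import os
import tempfile

def write_json_atomically(path, data):
    """Serialize data next to path, then atomically swap it into place."""
    directory = os.path.dirname(os.path.abspath(path))
    fd, tmp_path = tempfile.mkstemp(dir=directory, suffix=".tmp")
    try:
        with os.fdopen(fd, "w") as tmp:
            json.dump(data, tmp, indent=4)
        os.replace(tmp_path, path)   # readers see either the old file or the new one
    except BaseException:
        os.unlink(tmp_path)
        raise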
7a5c284505dfb0b94d2bc9a1f2bc264cbffa5d83
|
tests/app/soc/views/models/test_sponsor.py
|
tests/app/soc/views/models/test_sponsor.py
|
#!/usr/bin/env python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__authors__ = [
'"Leo (Chong Liu)" <HiddenPython@gmail.com>',
]
import httplib
from django.http import HttpRequest
from django.core import urlresolvers
from django.utils import simplejson
from tests.test_utils import DjangoTestCase
from google.appengine.api import users
from soc.logic.models.user import logic as user_logic
from soc.logic.models.sponsor import logic as sponsor_logic
from soc.middleware.xsrf import XsrfMiddleware
from soc.logic.helper import xsrfutil
from django.test.client import Client
|
Add test for the sponsor views
|
Add test for the sponsor views
|
Python
|
apache-2.0
|
SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange
|
Add test for the sponsor views
|
#!/usr/bin/env python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__authors__ = [
'"Leo (Chong Liu)" <HiddenPython@gmail.com>',
]
import httplib
from django.http import HttpRequest
from django.core import urlresolvers
from django.utils import simplejson
from tests.test_utils import DjangoTestCase
from google.appengine.api import users
from soc.logic.models.user import logic as user_logic
from soc.logic.models.sponsor import logic as sponsor_logic
from soc.middleware.xsrf import XsrfMiddleware
from soc.logic.helper import xsrfutil
from django.test.client import Client
|
<commit_before><commit_msg>Add test for the sponsor views<commit_after>
|
#!/usr/bin/env python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__authors__ = [
'"Leo (Chong Liu)" <HiddenPython@gmail.com>',
]
import httplib
from django.http import HttpRequest
from django.core import urlresolvers
from django.utils import simplejson
from tests.test_utils import DjangoTestCase
from google.appengine.api import users
from soc.logic.models.user import logic as user_logic
from soc.logic.models.sponsor import logic as sponsor_logic
from soc.middleware.xsrf import XsrfMiddleware
from soc.logic.helper import xsrfutil
from django.test.client import Client
|
Add test for the sponsor views#!/usr/bin/env python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__authors__ = [
'"Leo (Chong Liu)" <HiddenPython@gmail.com>',
]
import httplib
from django.http import HttpRequest
from django.core import urlresolvers
from django.utils import simplejson
from tests.test_utils import DjangoTestCase
from google.appengine.api import users
from soc.logic.models.user import logic as user_logic
from soc.logic.models.sponsor import logic as sponsor_logic
from soc.middleware.xsrf import XsrfMiddleware
from soc.logic.helper import xsrfutil
from django.test.client import Client
|
<commit_before><commit_msg>Add test for the sponsor views<commit_after>#!/usr/bin/env python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__authors__ = [
'"Leo (Chong Liu)" <HiddenPython@gmail.com>',
]
import httplib
from django.http import HttpRequest
from django.core import urlresolvers
from django.utils import simplejson
from tests.test_utils import DjangoTestCase
from google.appengine.api import users
from soc.logic.models.user import logic as user_logic
from soc.logic.models.sponsor import logic as sponsor_logic
from soc.middleware.xsrf import XsrfMiddleware
from soc.logic.helper import xsrfutil
from django.test.client import Client
|
|
68f4a9f7393a1f29841463c9b6b7d6bec9a00d6b
|
namuhub/__init__.py
|
namuhub/__init__.py
|
"""namuhub --- namu.wiki contribution graph"""
from flask import Flask, jsonify, render_template, request, url_for
app = Flask('namuhub')
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/<user>', methods=['GET'])
def index_user(user=''):
return render_template('index.html', **{'user': user})
|
"""namuhub --- namu.wiki contribution graph"""
from flask import Flask, jsonify, render_template, request, url_for
app = Flask('namuhub')
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/<user>', methods=['GET'])
def index_user(user=''):
return render_template('index.html', **{'user': user})
@app.route('/', methods=['POST'])
def namu():
user = request.POST.get('user', None)
if not user:
return '', 501
|
Return http status code 501 when a malformed message is received
|
Return http status code 501 when a malformed message is received
|
Python
|
apache-2.0
|
ssut/namuhub,ssut/namuhub,ssut/namuhub
|
"""namuhub --- namu.wiki contribution graph"""
from flask import Flask, jsonify, render_template, request, url_for
app = Flask('namuhub')
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/<user>', methods=['GET'])
def index_user(user=''):
return render_template('index.html', **{'user': user})
Return http status code 501 when a malformed message is received
|
"""namuhub --- namu.wiki contribution graph"""
from flask import Flask, jsonify, render_template, request, url_for
app = Flask('namuhub')
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/<user>', methods=['GET'])
def index_user(user=''):
return render_template('index.html', **{'user': user})
@app.route('/', methods=['POST'])
def namu():
user = request.POST.get('user', None)
if not user:
return '', 501
|
<commit_before>"""namuhub --- namu.wiki contribution graph"""
from flask import Flask, jsonify, render_template, request, url_for
app = Flask('namuhub')
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/<user>', methods=['GET'])
def index_user(user=''):
return render_template('index.html', **{'user': user})
<commit_msg>Return http status code 501 when a malformed message is received<commit_after>
|
"""namuhub --- namu.wiki contribution graph"""
from flask import Flask, jsonify, render_template, request, url_for
app = Flask('namuhub')
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/<user>', methods=['GET'])
def index_user(user=''):
return render_template('index.html', **{'user': user})
@app.route('/', methods=['POST'])
def namu():
user = request.POST.get('user', None)
if not user:
return '', 501
|
"""namuhub --- namu.wiki contribution graph"""
from flask import Flask, jsonify, render_template, request, url_for
app = Flask('namuhub')
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/<user>', methods=['GET'])
def index_user(user=''):
return render_template('index.html', **{'user': user})
Return http status code 501 when a malformed message is received"""namuhub --- namu.wiki contribution graph"""
from flask import Flask, jsonify, render_template, request, url_for
app = Flask('namuhub')
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/<user>', methods=['GET'])
def index_user(user=''):
return render_template('index.html', **{'user': user})
@app.route('/', methods=['POST'])
def namu():
user = request.POST.get('user', None)
if not user:
return '', 501
|
<commit_before>"""namuhub --- namu.wiki contribution graph"""
from flask import Flask, jsonify, render_template, request, url_for
app = Flask('namuhub')
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/<user>', methods=['GET'])
def index_user(user=''):
return render_template('index.html', **{'user': user})
<commit_msg>Return http status code 501 when a malformed message is received<commit_after>"""namuhub --- namu.wiki contribution graph"""
from flask import Flask, jsonify, render_template, request, url_for
app = Flask('namuhub')
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/<user>', methods=['GET'])
def index_user(user=''):
return render_template('index.html', **{'user': user})
@app.route('/', methods=['POST'])
def namu():
user = request.POST.get('user', None)
if not user:
return '', 501
|
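One detail worth flagging in the new handler: request.POST is Django's interface; Flask exposes submitted form fields on request.form. A Flask-flavoured sketch of the same endpoint (the success branch is a placeholder, since the record stops at the 501 return):

from flask import Flask, jsonify, request

app = Flask('namuhub')

@app.route('/', methods=['POST'])
def namu():
    user = request.form.get('user')   # Flask keeps POST form data on request.form
    if not user:
        # 501 mirrors the original commit; 400 Bad Request would be the conventional choice
        return '', 501
    # placeholder: the real handler would gather contribution data for `user`
    return jsonify({'user': user})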
56bd6c6a0363323cc1f4b3fbbcd460ba446b0c6d
|
cubes/stores.py
|
cubes/stores.py
|
from .errors import *
from .browser import AggregationBrowser
from .extensions import get_namespace, initialize_namespace
__all__ = (
"open_store",
"Store"
)
def open_store(name, **options):
"""Gets a new instance of a model provider with name `name`."""
ns = get_namespace("stores")
if not ns:
ns = initialize_namespace("stores", root_class=Store,
suffix="_store")
try:
factory = ns[name]
except KeyError:
raise CubesError("Unable to find store '%s'" % name)
return factory(**options)
def create_browser(type_, cube, store, locale, **options):
"""Creates a new browser."""
ns = get_namespace("browsers")
if not ns:
ns = initialize_namespace("browsers", root_class=AggregationBrowser,
suffix="_browser")
try:
factory = ns[type_]
except KeyError:
raise CubesError("Unable to find browser of type '%s'" % type_)
return factory(cube=cube, store=store, locale=locale, **options)
class Store(object):
"""Abstract class to find other stores through the class hierarchy."""
pass
|
from .errors import *
from .browser import AggregationBrowser
from .extensions import get_namespace, initialize_namespace
__all__ = (
"open_store",
"Store"
)
def open_store(name, **options):
"""Gets a new instance of a model provider with name `name`."""
ns = get_namespace("stores")
if not ns:
ns = initialize_namespace("stores", root_class=Store,
suffix="_store")
try:
factory = ns[name]
except KeyError:
raise ConfigurationError("Unknown store '%s'" % name)
return factory(**options)
def create_browser(type_, cube, store, locale, **options):
"""Creates a new browser."""
ns = get_namespace("browsers")
if not ns:
ns = initialize_namespace("browsers", root_class=AggregationBrowser,
suffix="_browser")
try:
factory = ns[type_]
except KeyError:
raise ConfigurationError("Unable to find browser of type '%s'" % type_)
return factory(cube=cube, store=store, locale=locale, **options)
class Store(object):
"""Abstract class to find other stores through the class hierarchy."""
pass
|
Raise ConfigurationError error that causes server to fail and dump whole stacktrace
|
Raise ConfigurationError error that causes server to fail and dump whole stacktrace
|
Python
|
mit
|
noyeitan/cubes,ubreddy/cubes,she11c0de/cubes,zejn/cubes,zejn/cubes,pombredanne/cubes,she11c0de/cubes,pombredanne/cubes,ubreddy/cubes,jell0720/cubes,cesarmarinhorj/cubes,noyeitan/cubes,ubreddy/cubes,cesarmarinhorj/cubes,cesarmarinhorj/cubes,zejn/cubes,jell0720/cubes,noyeitan/cubes,pombredanne/cubes,jell0720/cubes,she11c0de/cubes
|
from .errors import *
from .browser import AggregationBrowser
from .extensions import get_namespace, initialize_namespace
__all__ = (
"open_store",
"Store"
)
def open_store(name, **options):
"""Gets a new instance of a model provider with name `name`."""
ns = get_namespace("stores")
if not ns:
ns = initialize_namespace("stores", root_class=Store,
suffix="_store")
try:
factory = ns[name]
except KeyError:
raise CubesError("Unable to find store '%s'" % name)
return factory(**options)
def create_browser(type_, cube, store, locale, **options):
"""Creates a new browser."""
ns = get_namespace("browsers")
if not ns:
ns = initialize_namespace("browsers", root_class=AggregationBrowser,
suffix="_browser")
try:
factory = ns[type_]
except KeyError:
raise CubesError("Unable to find browser of type '%s'" % type_)
return factory(cube=cube, store=store, locale=locale, **options)
class Store(object):
"""Abstract class to find other stores through the class hierarchy."""
pass
Raise ConfigurationError error that causes server to fail and dump whole stacktrace
|
from .errors import *
from .browser import AggregationBrowser
from .extensions import get_namespace, initialize_namespace
__all__ = (
"open_store",
"Store"
)
def open_store(name, **options):
"""Gets a new instance of a model provider with name `name`."""
ns = get_namespace("stores")
if not ns:
ns = initialize_namespace("stores", root_class=Store,
suffix="_store")
try:
factory = ns[name]
except KeyError:
raise ConfigurationError("Unknown store '%s'" % name)
return factory(**options)
def create_browser(type_, cube, store, locale, **options):
"""Creates a new browser."""
ns = get_namespace("browsers")
if not ns:
ns = initialize_namespace("browsers", root_class=AggregationBrowser,
suffix="_browser")
try:
factory = ns[type_]
except KeyError:
raise ConfigurationError("Unable to find browser of type '%s'" % type_)
return factory(cube=cube, store=store, locale=locale, **options)
class Store(object):
"""Abstract class to find other stores through the class hierarchy."""
pass
|
<commit_before>from .errors import *
from .browser import AggregationBrowser
from .extensions import get_namespace, initialize_namespace
__all__ = (
"open_store",
"Store"
)
def open_store(name, **options):
"""Gets a new instance of a model provider with name `name`."""
ns = get_namespace("stores")
if not ns:
ns = initialize_namespace("stores", root_class=Store,
suffix="_store")
try:
factory = ns[name]
except KeyError:
raise CubesError("Unable to find store '%s'" % name)
return factory(**options)
def create_browser(type_, cube, store, locale, **options):
"""Creates a new browser."""
ns = get_namespace("browsers")
if not ns:
ns = initialize_namespace("browsers", root_class=AggregationBrowser,
suffix="_browser")
try:
factory = ns[type_]
except KeyError:
raise CubesError("Unable to find browser of type '%s'" % type_)
return factory(cube=cube, store=store, locale=locale, **options)
class Store(object):
"""Abstract class to find other stores through the class hierarchy."""
pass
<commit_msg>Raise ConfigurationError error that causes server to fail and dump whole stacktrace<commit_after>
|
from .errors import *
from .browser import AggregationBrowser
from .extensions import get_namespace, initialize_namespace
__all__ = (
"open_store",
"Store"
)
def open_store(name, **options):
"""Gets a new instance of a model provider with name `name`."""
ns = get_namespace("stores")
if not ns:
ns = initialize_namespace("stores", root_class=Store,
suffix="_store")
try:
factory = ns[name]
except KeyError:
raise ConfigurationError("Unknown store '%s'" % name)
return factory(**options)
def create_browser(type_, cube, store, locale, **options):
"""Creates a new browser."""
ns = get_namespace("browsers")
if not ns:
ns = initialize_namespace("browsers", root_class=AggregationBrowser,
suffix="_browser")
try:
factory = ns[type_]
except KeyError:
raise ConfigurationError("Unable to find browser of type '%s'" % type_)
return factory(cube=cube, store=store, locale=locale, **options)
class Store(object):
"""Abstract class to find other stores through the class hierarchy."""
pass
|
from .errors import *
from .browser import AggregationBrowser
from .extensions import get_namespace, initialize_namespace
__all__ = (
"open_store",
"Store"
)
def open_store(name, **options):
"""Gets a new instance of a model provider with name `name`."""
ns = get_namespace("stores")
if not ns:
ns = initialize_namespace("stores", root_class=Store,
suffix="_store")
try:
factory = ns[name]
except KeyError:
raise CubesError("Unable to find store '%s'" % name)
return factory(**options)
def create_browser(type_, cube, store, locale, **options):
"""Creates a new browser."""
ns = get_namespace("browsers")
if not ns:
ns = initialize_namespace("browsers", root_class=AggregationBrowser,
suffix="_browser")
try:
factory = ns[type_]
except KeyError:
raise CubesError("Unable to find browser of type '%s'" % type_)
return factory(cube=cube, store=store, locale=locale, **options)
class Store(object):
"""Abstract class to find other stores through the class hierarchy."""
pass
Raise ConfigurationError error that causes server to fail and dump whole stacktracefrom .errors import *
from .browser import AggregationBrowser
from .extensions import get_namespace, initialize_namespace
__all__ = (
"open_store",
"Store"
)
def open_store(name, **options):
"""Gets a new instance of a model provider with name `name`."""
ns = get_namespace("stores")
if not ns:
ns = initialize_namespace("stores", root_class=Store,
suffix="_store")
try:
factory = ns[name]
except KeyError:
raise ConfigurationError("Unknown store '%s'" % name)
return factory(**options)
def create_browser(type_, cube, store, locale, **options):
"""Creates a new browser."""
ns = get_namespace("browsers")
if not ns:
ns = initialize_namespace("browsers", root_class=AggregationBrowser,
suffix="_browser")
try:
factory = ns[type_]
except KeyError:
raise ConfigurationError("Unable to find browser of type '%s'" % type_)
return factory(cube=cube, store=store, locale=locale, **options)
class Store(object):
"""Abstract class to find other stores through the class hierarchy."""
pass
|
<commit_before>from .errors import *
from .browser import AggregationBrowser
from .extensions import get_namespace, initialize_namespace
__all__ = (
"open_store",
"Store"
)
def open_store(name, **options):
"""Gets a new instance of a model provider with name `name`."""
ns = get_namespace("stores")
if not ns:
ns = initialize_namespace("stores", root_class=Store,
suffix="_store")
try:
factory = ns[name]
except KeyError:
raise CubesError("Unable to find store '%s'" % name)
return factory(**options)
def create_browser(type_, cube, store, locale, **options):
"""Creates a new browser."""
ns = get_namespace("browsers")
if not ns:
ns = initialize_namespace("browsers", root_class=AggregationBrowser,
suffix="_browser")
try:
factory = ns[type_]
except KeyError:
raise CubesError("Unable to find browser of type '%s'" % type_)
return factory(cube=cube, store=store, locale=locale, **options)
class Store(object):
"""Abstract class to find other stores through the class hierarchy."""
pass
<commit_msg>Raise ConfigurationError error that causes server to fail and dump whole stacktrace<commit_after>from .errors import *
from .browser import AggregationBrowser
from .extensions import get_namespace, initialize_namespace
__all__ = (
"open_store",
"Store"
)
def open_store(name, **options):
"""Gets a new instance of a model provider with name `name`."""
ns = get_namespace("stores")
if not ns:
ns = initialize_namespace("stores", root_class=Store,
suffix="_store")
try:
factory = ns[name]
except KeyError:
raise ConfigurationError("Unknown store '%s'" % name)
return factory(**options)
def create_browser(type_, cube, store, locale, **options):
"""Creates a new browser."""
ns = get_namespace("browsers")
if not ns:
ns = initialize_namespace("browsers", root_class=AggregationBrowser,
suffix="_browser")
try:
factory = ns[type_]
except KeyError:
raise ConfigurationError("Unable to find browser of type '%s'" % type_)
return factory(cube=cube, store=store, locale=locale, **options)
class Store(object):
"""Abstract class to find other stores through the class hierarchy."""
pass
|
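open_store and create_browser share one pattern: lazily build a name-to-factory namespace, look the requested name up, and convert a KeyError into a configuration failure so a bad server config dies loudly with a full stack trace. A framework-free sketch of that registry idea (the names below are illustrative, not cubes' actual extension API):

class ConfigurationError(Exception):
    """Raised when a requested extension name is not registered."""

_STORES = {}

def register_store(name):
    """Class decorator recording a store implementation under `name`."""
    def decorator(cls):
        _STORES[name] = cls
        return cls
    return decorator

def open_store(name, **options):
    try:
        factory = _STORES[name]
    except KeyError:
        raise ConfigurationError("Unknown store '%s'" % name)
    return factory(**options)

@register_store("memory")
class MemoryStore:
    def __init__(self, **options):
        self.options = options

store = open_store("memory", prefix="demo")   # open_store("missing") raises ConfigurationError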
3da5a4dd70b50b7988d0023087a9d73274f273ba
|
tests/test_smartypants.py
|
tests/test_smartypants.py
|
# -*- coding: utf-8 -*-
# This file is part of python-markups test suite
# License: BSD
# Copyright: (C) Dmitry Shachnev, 2012
from markups.common import educate as ed
import unittest
class SmartyTest(unittest.TestCase):
def test_quotes(self):
self.assertEqual(ed('"Isn\'t this fun?"'), '“Isn’t this fun?”')
self.assertEqual(ed('"\'Quoted\' words in a larger quote."'),
'“‘Quoted’ words in a larger quote.”')
def test_dates(self):
self.assertEqual(ed("1440--80's"), "1440–80’s")
self.assertEqual(ed("'80s"), "’80s")
def test_ellipses_and_dashes(self):
self.assertEqual(ed('em-dashes (---) and ellipes (...)'),
'em-dashes (—) and ellipes (…)')
if __name__ == '__main__':
unittest.main()
|
Add (currently failing) smartypants test
|
Add (currently failing) smartypants test
|
Python
|
bsd-3-clause
|
mitya57/pymarkups,retext-project/pymarkups
|
Add (currently failing) smartypants test
|
# -*- coding: utf-8 -*-
# This file is part of python-markups test suite
# License: BSD
# Copyright: (C) Dmitry Shachnev, 2012
from markups.common import educate as ed
import unittest
class SmartyTest(unittest.TestCase):
def test_quotes(self):
self.assertEqual(ed('"Isn\'t this fun?"'), '“Isn’t this fun?”')
self.assertEqual(ed('"\'Quoted\' words in a larger quote."'),
'“‘Quoted’ words in a larger quote.”')
def test_dates(self):
self.assertEqual(ed("1440--80's"), "1440–80’s")
self.assertEqual(ed("'80s"), "’80s")
def test_ellipses_and_dashes(self):
self.assertEqual(ed('em-dashes (---) and ellipes (...)'),
'em-dashes (—) and ellipes (…)')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add (currently failing) smartypants test<commit_after>
|
# -*- coding: utf-8 -*-
# This file is part of python-markups test suite
# License: BSD
# Copyright: (C) Dmitry Shachnev, 2012
from markups.common import educate as ed
import unittest
class SmartyTest(unittest.TestCase):
def test_quotes(self):
self.assertEqual(ed('"Isn\'t this fun?"'), '“Isn’t this fun?”')
self.assertEqual(ed('"\'Quoted\' words in a larger quote."'),
'“‘Quoted’ words in a larger quote.”')
def test_dates(self):
self.assertEqual(ed("1440--80's"), "1440–80’s")
self.assertEqual(ed("'80s"), "’80s")
def test_ellipses_and_dashes(self):
self.assertEqual(ed('em-dashes (---) and ellipes (...)'),
'em-dashes (—) and ellipes (…)')
if __name__ == '__main__':
unittest.main()
|
Add (currently failing) smartypants test# -*- coding: utf-8 -*-
# This file is part of python-markups test suite
# License: BSD
# Copyright: (C) Dmitry Shachnev, 2012
from markups.common import educate as ed
import unittest
class SmartyTest(unittest.TestCase):
def test_quotes(self):
self.assertEqual(ed('"Isn\'t this fun?"'), '“Isn’t this fun?”')
self.assertEqual(ed('"\'Quoted\' words in a larger quote."'),
'“‘Quoted’ words in a larger quote.”')
def test_dates(self):
self.assertEqual(ed("1440--80's"), "1440–80’s")
self.assertEqual(ed("'80s"), "’80s")
def test_ellipses_and_dashes(self):
self.assertEqual(ed('em-dashes (---) and ellipes (...)'),
'em-dashes (—) and ellipes (…)')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add (currently failing) smartypants test<commit_after># -*- coding: utf-8 -*-
# This file is part of python-markups test suite
# License: BSD
# Copyright: (C) Dmitry Shachnev, 2012
from markups.common import educate as ed
import unittest
class SmartyTest(unittest.TestCase):
def test_quotes(self):
self.assertEqual(ed('"Isn\'t this fun?"'), '“Isn’t this fun?”')
self.assertEqual(ed('"\'Quoted\' words in a larger quote."'),
'“‘Quoted’ words in a larger quote.”')
def test_dates(self):
self.assertEqual(ed("1440--80's"), "1440–80’s")
self.assertEqual(ed("'80s"), "’80s")
def test_ellipses_and_dashes(self):
self.assertEqual(ed('em-dashes (---) and ellipes (...)'),
'em-dashes (—) and ellipes (…)')
if __name__ == '__main__':
unittest.main()
|
|
7c94fd2484e5ba3528b44e0e38271c6f6126de1b
|
src/framematcher.py
|
src/framematcher.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Match Frame to appropriate VerbNet structures"""
from framestructure import *
import unittest
class EmptyFrameError(Exception):
"""Trying to use an empty frame in a match
:var frame1: VerbnetFrame, first frame
:var frame2: VerbnetFrame, second frame
"""
def __init__(self, frame1, frame2):
self.frame1 = frame1
self.frame2 = frame2
def __str__(self):
return ("Error : tried to use a frame without any slot in frame matching\n"
"frame 1 : {}\nframe 2 : {}".format(self.frame1, self.frame2))
def match_score(frame, model):
"""Compute the matching score between two frames
:param frame: real frame to test.
:type frame: VerbnetFrame.
:param model: VerbNet model with which to compare it.
:type mode: VerbnetFrame.
"""
num_match = 0
frame_size = 0
model_size = 0
stop_matching = False
for i,elem in enumerate(frame.structure):
if is_a_slot(elem): frame_size += 1
if i >= len(model.structure): stop_matching = True
if not stop_matching:
if is_a_match(elem, model.structure[i]):
if is_a_slot(elem): num_match += 1
else: stop_matching = True
for elem in model.structure:
if is_a_slot(elem): model_size += 1
if frame_size == 0 or model_size == 0:
raise EmptyFrameError(frame, model)
return int(100 * (num_match / frame_size + num_match / model_size))
def is_a_slot(elem):
return elem.isupper() and elem != "V"
def is_a_match(elem1, elem2):
return elem1 in elem2.split("/")
class frameMatcherTest(unittest.TestCase):
def test_1(self):
frame1 = VerbnetFrame(["NP", "V", "NP", "with", "NP"], [])
frame2 = VerbnetFrame(["NP", "V", "NP", "with", "NP"], [])
score = match_score(frame1, frame2)
self.assertEqual(score, 200)
print(score)
def test_2(self):
frame1 = VerbnetFrame(["to", "be"], [])
frame2 = VerbnetFrame(["NP", "V", "NP", "with", "NP"], [])
with self.assertRaises(EmptyFrameError):
match_score(frame1, frame2)
def test_3(self):
frame1 = VerbnetFrame(["NP", "V", "with", "NP"], [])
frame2 = VerbnetFrame(["NP", "V", "NP", "with", "NP"], [])
score = match_score(frame1, frame2)
self.assertEqual(score, int(100*1/2+100*1/3))
print(score)
if __name__ == "__main__":
unittest.main()
|
Add a module to handle frame matching
|
Add a module to handle frame matching
git-svn-id: a2d0af3c19596d99b5c1e07a0b4fed4eaca14ddf@18208 7fff26f0-e11d-0410-b8d0-f4b6ff9b0dc5
|
Python
|
agpl-3.0
|
aymara/knowledgesrl,aymara/knowledgesrl
|
Add a module to handle frame matching
git-svn-id: a2d0af3c19596d99b5c1e07a0b4fed4eaca14ddf@18208 7fff26f0-e11d-0410-b8d0-f4b6ff9b0dc5
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Match Frame to appropriate VerbNet structures"""
from framestructure import *
import unittest
class EmptyFrameError(Exception):
"""Trying to use an empty frame in a match
:var frame1: VerbnetFrame, first frame
:var frame2: VerbnetFrame, second frame
"""
def __init__(self, frame1, frame2):
self.frame1 = frame1
self.frame2 = frame2
def __str__(self):
return ("Error : tried to use a frame without any slot in frame matching\n"
"frame 1 : {}\nframe 2 : {}".format(self.frame1, self.frame2))
def match_score(frame, model):
"""Compute the matching score between two frames
:param frame: real frame to test.
:type frame: VerbnetFrame.
:param model: VerbNet model with which to compare it.
:type model: VerbnetFrame.
"""
num_match = 0
frame_size = 0
model_size = 0
stop_matching = False
for i,elem in enumerate(frame.structure):
if is_a_slot(elem): frame_size += 1
if i >= len(model.structure): stop_matching = True
if not stop_matching:
if is_a_match(elem, model.structure[i]):
if is_a_slot(elem): num_match += 1
else: stop_matching = True
for elem in model.structure:
if is_a_slot(elem): model_size += 1
if frame_size == 0 or model_size == 0:
raise EmptyFrameError(frame, model)
return int(100 * (num_match / frame_size + num_match / model_size))
def is_a_slot(elem):
return elem.isupper() and elem != "V"
def is_a_match(elem1, elem2):
return elem1 in elem2.split("/")
class frameMatcherTest(unittest.TestCase):
def test_1(self):
frame1 = VerbnetFrame(["NP", "V", "NP", "with", "NP"], [])
frame2 = VerbnetFrame(["NP", "V", "NP", "with", "NP"], [])
score = match_score(frame1, frame2)
self.assertEqual(score, 200)
print(score)
def test_2(self):
frame1 = VerbnetFrame(["to", "be"], [])
frame2 = VerbnetFrame(["NP", "V", "NP", "with", "NP"], [])
with self.assertRaises(EmptyFrameError):
match_score(frame1, frame2)
def test_3(self):
frame1 = VerbnetFrame(["NP", "V", "with", "NP"], [])
frame2 = VerbnetFrame(["NP", "V", "NP", "with", "NP"], [])
score = match_score(frame1, frame2)
self.assertEqual(score, int(100*1/2+100*1/3))
print(score)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add a module to handle frame matching
git-svn-id: a2d0af3c19596d99b5c1e07a0b4fed4eaca14ddf@18208 7fff26f0-e11d-0410-b8d0-f4b6ff9b0dc5<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Match Frame to appropriate VerbNet structures"""
from framestructure import *
import unittest
class EmptyFrameError(Exception):
"""Trying to use an empty frame in a match
:var frame1: VerbnetFrame, first frame
:var frame2: VerbnetFrame, second frame
"""
def __init__(self, frame1, frame2):
self.frame1 = frame1
self.frame2 = frame2
def __str__(self):
return ("Error : tried to use a frame without any slot in frame matching\n"
"frame 1 : {}\nframe 2 : {}".format(self.frame1, self.frame2))
def match_score(frame, model):
"""Compute the matching score between two frames
:param frame: real frame to test.
:type frame: VerbnetFrame.
:param model: VerbNet model with which to compare it.
:type model: VerbnetFrame.
"""
num_match = 0
frame_size = 0
model_size = 0
stop_matching = False
for i,elem in enumerate(frame.structure):
if is_a_slot(elem): frame_size += 1
if i >= len(model.structure): stop_matching = True
if not stop_matching:
if is_a_match(elem, model.structure[i]):
if is_a_slot(elem): num_match += 1
else: stop_matching = True
for elem in model.structure:
if is_a_slot(elem): model_size += 1
if frame_size == 0 or model_size == 0:
raise EmptyFrameError(frame, model)
return int(100 * (num_match / frame_size + num_match / model_size))
def is_a_slot(elem):
return elem.isupper() and elem != "V"
def is_a_match(elem1, elem2):
return elem1 in elem2.split("/")
class frameMatcherTest(unittest.TestCase):
def test_1(self):
frame1 = VerbnetFrame(["NP", "V", "NP", "with", "NP"], [])
frame2 = VerbnetFrame(["NP", "V", "NP", "with", "NP"], [])
score = match_score(frame1, frame2)
self.assertEqual(score, 200)
print(score)
def test_2(self):
frame1 = VerbnetFrame(["to", "be"], [])
frame2 = VerbnetFrame(["NP", "V", "NP", "with", "NP"], [])
with self.assertRaises(EmptyFrameError):
match_score(frame1, frame2)
def test_3(self):
frame1 = VerbnetFrame(["NP", "V", "with", "NP"], [])
frame2 = VerbnetFrame(["NP", "V", "NP", "with", "NP"], [])
score = match_score(frame1, frame2)
self.assertEqual(score, int(100*1/2+100*1/3))
print(score)
if __name__ == "__main__":
unittest.main()
|
Add a module to handle frame matching
git-svn-id: a2d0af3c19596d99b5c1e07a0b4fed4eaca14ddf@18208 7fff26f0-e11d-0410-b8d0-f4b6ff9b0dc5#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Match Frame to appropriate VerbNet structures"""
from framestructure import *
import unittest
class EmptyFrameError(Exception):
"""Trying to use an empty frame in a match
:var frame1: VerbnetFrame, first frame
:var frame2: VerbnetFrame, second frame
"""
def __init__(self, frame1, frame2):
self.frame1 = frame1
self.frame2 = frame2
def __str__(self):
return ("Error : tried to use a frame without any slot in frame matching\n"
"frame 1 : {}\nframe 2 : {}".format(self.frame1, self.frame2))
def match_score(frame, model):
"""Compute the matching score between two frames
:param frame: real frame to test.
:type frame: VerbnetFrame.
:param model: VerbNet model with which to compare it.
:type model: VerbnetFrame.
"""
num_match = 0
frame_size = 0
model_size = 0
stop_matching = False
for i,elem in enumerate(frame.structure):
if is_a_slot(elem): frame_size += 1
if i >= len(model.structure): stop_matching = True
if not stop_matching:
if is_a_match(elem, model.structure[i]):
if is_a_slot(elem): num_match += 1
else: stop_matching = True
for elem in model.structure:
if is_a_slot(elem): model_size += 1
if frame_size == 0 or model_size == 0:
raise EmptyFrameError(frame, model)
return int(100 * (num_match / frame_size + num_match / model_size))
def is_a_slot(elem):
return elem.isupper() and elem != "V"
def is_a_match(elem1, elem2):
return elem1 in elem2.split("/")
class frameMatcherTest(unittest.TestCase):
def test_1(self):
frame1 = VerbnetFrame(["NP", "V", "NP", "with", "NP"], [])
frame2 = VerbnetFrame(["NP", "V", "NP", "with", "NP"], [])
score = match_score(frame1, frame2)
self.assertEqual(score, 200)
print(score)
def test_2(self):
frame1 = VerbnetFrame(["to", "be"], [])
frame2 = VerbnetFrame(["NP", "V", "NP", "with", "NP"], [])
with self.assertRaises(EmptyFrameError):
match_score(frame1, frame2)
def test_3(self):
frame1 = VerbnetFrame(["NP", "V", "with", "NP"], [])
frame2 = VerbnetFrame(["NP", "V", "NP", "with", "NP"], [])
score = match_score(frame1, frame2)
self.assertEqual(score, int(100*1/2+100*1/3))
print(score)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add a module to handle frame matching
git-svn-id: a2d0af3c19596d99b5c1e07a0b4fed4eaca14ddf@18208 7fff26f0-e11d-0410-b8d0-f4b6ff9b0dc5<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Match Frame to appropriate VerbNet structures"""
from framestructure import *
import unittest
class EmptyFrameError(Exception):
"""Trying to use an empty frame in a match
:var frame1: VerbnetFrame, first frame
:var frame2: VerbnetFrame, second frame
"""
def __init__(self, frame1, frame2):
self.frame1 = frame1
self.frame2 = frame2
def __str__(self):
return ("Error : tried to use a frame without any slot in frame matching\n"
"frame 1 : {}\nframe 2 : {}".format(self.frame1, self.frame2))
def match_score(frame, model):
"""Compute the matching score between two frames
:param frame: real frame to test.
:type frame: VerbnetFrame.
:param model: VerbNet model with which to compare it.
:type model: VerbnetFrame.
"""
num_match = 0
frame_size = 0
model_size = 0
stop_matching = False
for i,elem in enumerate(frame.structure):
if is_a_slot(elem): frame_size += 1
if i >= len(model.structure): stop_matching = True
if not stop_matching:
if is_a_match(elem, model.structure[i]):
if is_a_slot(elem): num_match += 1
else: stop_matching = True
for elem in model.structure:
if is_a_slot(elem): model_size += 1
if frame_size == 0 or model_size == 0:
raise EmptyFrameError(frame, model)
return int(100 * (num_match / frame_size + num_match / model_size))
def is_a_slot(elem):
return elem.isupper() and elem != "V"
def is_a_match(elem1, elem2):
return elem1 in elem2.split("/")
class frameMatcherTest(unittest.TestCase):
def test_1(self):
frame1 = VerbnetFrame(["NP", "V", "NP", "with", "NP"], [])
frame2 = VerbnetFrame(["NP", "V", "NP", "with", "NP"], [])
score = match_score(frame1, frame2)
self.assertEqual(score, 200)
print(score)
def test_2(self):
frame1 = VerbnetFrame(["to", "be"], [])
frame2 = VerbnetFrame(["NP", "V", "NP", "with", "NP"], [])
with self.assertRaises(EmptyFrameError):
match_score(frame1, frame2)
def test_3(self):
frame1 = VerbnetFrame(["NP", "V", "with", "NP"], [])
frame2 = VerbnetFrame(["NP", "V", "NP", "with", "NP"], [])
score = match_score(frame1, frame2)
self.assertEqual(score, int(100*1/2+100*1/3))
print(score)
if __name__ == "__main__":
unittest.main()
|
|
5746ee3a5c07ea58631db85d43f103992d9e1e38
|
mdot_rest/migrations/0001_initial.py
|
mdot_rest/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='ResourceLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
('short_desc', models.CharField(max_length=200)),
('feature_desc', models.TextField()),
('web_url', models.URLField(blank=True)),
('iTunes_url', models.URLField(blank=True)),
('Google_Play_url', models.URLField(blank=True)),
('Windows_Store_url', models.URLField(blank=True)),
('support_url', models.URLField(blank=True)),
('created_date', models.DateTimeField(auto_now_add=True)),
('last_modified', models.DateTimeField(auto_now=True)),
],
),
]
|
Add the migration for initial model.
|
Add the migration for initial model.
|
Python
|
apache-2.0
|
uw-it-aca/mdot-rest,uw-it-aca/mdot-rest
|
Add the migration for initial model.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='ResourceLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
('short_desc', models.CharField(max_length=200)),
('feature_desc', models.TextField()),
('web_url', models.URLField(blank=True)),
('iTunes_url', models.URLField(blank=True)),
('Google_Play_url', models.URLField(blank=True)),
('Windows_Store_url', models.URLField(blank=True)),
('support_url', models.URLField(blank=True)),
('created_date', models.DateTimeField(auto_now_add=True)),
('last_modified', models.DateTimeField(auto_now=True)),
],
),
]
|
<commit_before><commit_msg>Add the migration for initial model.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='ResourceLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
('short_desc', models.CharField(max_length=200)),
('feature_desc', models.TextField()),
('web_url', models.URLField(blank=True)),
('iTunes_url', models.URLField(blank=True)),
('Google_Play_url', models.URLField(blank=True)),
('Windows_Store_url', models.URLField(blank=True)),
('support_url', models.URLField(blank=True)),
('created_date', models.DateTimeField(auto_now_add=True)),
('last_modified', models.DateTimeField(auto_now=True)),
],
),
]
|
Add the migration for initial model.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='ResourceLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
('short_desc', models.CharField(max_length=200)),
('feature_desc', models.TextField()),
('web_url', models.URLField(blank=True)),
('iTunes_url', models.URLField(blank=True)),
('Google_Play_url', models.URLField(blank=True)),
('Windows_Store_url', models.URLField(blank=True)),
('support_url', models.URLField(blank=True)),
('created_date', models.DateTimeField(auto_now_add=True)),
('last_modified', models.DateTimeField(auto_now=True)),
],
),
]
|
<commit_before><commit_msg>Add the migration for initial model.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='ResourceLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
('short_desc', models.CharField(max_length=200)),
('feature_desc', models.TextField()),
('web_url', models.URLField(blank=True)),
('iTunes_url', models.URLField(blank=True)),
('Google_Play_url', models.URLField(blank=True)),
('Windows_Store_url', models.URLField(blank=True)),
('support_url', models.URLField(blank=True)),
('created_date', models.DateTimeField(auto_now_add=True)),
('last_modified', models.DateTimeField(auto_now=True)),
],
),
]
|
|
f6c10e4a70e6fa91bf5abf63c50bc3ec9c3ac90c
|
scenarios/update_user_replace.py
|
scenarios/update_user_replace.py
|
c_uuid = None
def store_c_uuid():
global c_uuid
c_uuid = next(iter(reality.resources_by_logical_name('C')))
def check_c_replaced():
test.assertNotEqual(c_uuid,
next(iter(reality.resources_by_logical_name('newC'))))
test.assertIsNot(c_uuid, None)
example_template = Template({
'A': RsrcDef({'a': 'initial'}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', '!a')}, []),
})
engine.create_stack('foo', example_template)
engine.noop(5)
engine.call(verify, example_template)
engine.call(store_c_uuid)
example_template_updated = Template({
'A': RsrcDef({'a': 'updated'}, []),
'B': RsrcDef({}, []),
'newC': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('newC')}, []),
'E': RsrcDef({'ca': GetAtt('newC', '!a')}, []),
})
engine.update_stack('foo', example_template_updated)
engine.noop(11)
engine.call(verify, example_template_updated)
example_template_long = Template({
'A': RsrcDef({'a': 'updated'}, []),
'B': RsrcDef({}, []),
'newC': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('newC')}, []),
'E': RsrcDef({'ca': GetAtt('newC', '!a')}, []),
'F': RsrcDef({}, ['D', 'E']),
})
engine.update_stack('foo', example_template_long)
engine.noop(12)
engine.call(verify, example_template_long)
engine.call(check_c_replaced)
engine.delete_stack('foo')
engine.noop(6)
engine.call(verify, Template({}))
|
Add a test for when the user renames a resource
|
Add a test for when the user renames a resource
|
Python
|
apache-2.0
|
zaneb/heat-convergence-prototype
|
Add a test for when the user renames a resource
|
c_uuid = None
def store_c_uuid():
global c_uuid
c_uuid = next(iter(reality.resources_by_logical_name('C')))
def check_c_replaced():
test.assertNotEqual(c_uuid,
next(iter(reality.resources_by_logical_name('newC'))))
test.assertIsNot(c_uuid, None)
example_template = Template({
'A': RsrcDef({'a': 'initial'}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', '!a')}, []),
})
engine.create_stack('foo', example_template)
engine.noop(5)
engine.call(verify, example_template)
engine.call(store_c_uuid)
example_template_updated = Template({
'A': RsrcDef({'a': 'updated'}, []),
'B': RsrcDef({}, []),
'newC': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('newC')}, []),
'E': RsrcDef({'ca': GetAtt('newC', '!a')}, []),
})
engine.update_stack('foo', example_template_updated)
engine.noop(11)
engine.call(verify, example_template_updated)
example_template_long = Template({
'A': RsrcDef({'a': 'updated'}, []),
'B': RsrcDef({}, []),
'newC': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('newC')}, []),
'E': RsrcDef({'ca': GetAtt('newC', '!a')}, []),
'F': RsrcDef({}, ['D', 'E']),
})
engine.update_stack('foo', example_template_long)
engine.noop(12)
engine.call(verify, example_template_long)
engine.call(check_c_replaced)
engine.delete_stack('foo')
engine.noop(6)
engine.call(verify, Template({}))
|
<commit_before><commit_msg>Add a test for when the user renames a resource<commit_after>
|
c_uuid = None
def store_c_uuid():
global c_uuid
c_uuid = next(iter(reality.resources_by_logical_name('C')))
def check_c_replaced():
test.assertNotEqual(c_uuid,
next(iter(reality.resources_by_logical_name('newC'))))
test.assertIsNot(c_uuid, None)
example_template = Template({
'A': RsrcDef({'a': 'initial'}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', '!a')}, []),
})
engine.create_stack('foo', example_template)
engine.noop(5)
engine.call(verify, example_template)
engine.call(store_c_uuid)
example_template_updated = Template({
'A': RsrcDef({'a': 'updated'}, []),
'B': RsrcDef({}, []),
'newC': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('newC')}, []),
'E': RsrcDef({'ca': GetAtt('newC', '!a')}, []),
})
engine.update_stack('foo', example_template_updated)
engine.noop(11)
engine.call(verify, example_template_updated)
example_template_long = Template({
'A': RsrcDef({'a': 'updated'}, []),
'B': RsrcDef({}, []),
'newC': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('newC')}, []),
'E': RsrcDef({'ca': GetAtt('newC', '!a')}, []),
'F': RsrcDef({}, ['D', 'E']),
})
engine.update_stack('foo', example_template_long)
engine.noop(12)
engine.call(verify, example_template_long)
engine.call(check_c_replaced)
engine.delete_stack('foo')
engine.noop(6)
engine.call(verify, Template({}))
|
Add a test for when the user renames a resourcec_uuid = None
def store_c_uuid():
global c_uuid
c_uuid = next(iter(reality.resources_by_logical_name('C')))
def check_c_replaced():
test.assertNotEqual(c_uuid,
next(iter(reality.resources_by_logical_name('newC'))))
test.assertIsNot(c_uuid, None)
example_template = Template({
'A': RsrcDef({'a': 'initial'}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', '!a')}, []),
})
engine.create_stack('foo', example_template)
engine.noop(5)
engine.call(verify, example_template)
engine.call(store_c_uuid)
example_template_updated = Template({
'A': RsrcDef({'a': 'updated'}, []),
'B': RsrcDef({}, []),
'newC': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('newC')}, []),
'E': RsrcDef({'ca': GetAtt('newC', '!a')}, []),
})
engine.update_stack('foo', example_template_updated)
engine.noop(11)
engine.call(verify, example_template_updated)
example_template_long = Template({
'A': RsrcDef({'a': 'updated'}, []),
'B': RsrcDef({}, []),
'newC': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('newC')}, []),
'E': RsrcDef({'ca': GetAtt('newC', '!a')}, []),
'F': RsrcDef({}, ['D', 'E']),
})
engine.update_stack('foo', example_template_long)
engine.noop(12)
engine.call(verify, example_template_long)
engine.call(check_c_replaced)
engine.delete_stack('foo')
engine.noop(6)
engine.call(verify, Template({}))
|
<commit_before><commit_msg>Add a test for when the user renames a resource<commit_after>c_uuid = None
def store_c_uuid():
global c_uuid
c_uuid = next(iter(reality.resources_by_logical_name('C')))
def check_c_replaced():
test.assertNotEqual(c_uuid,
next(iter(reality.resources_by_logical_name('newC'))))
test.assertIsNot(c_uuid, None)
example_template = Template({
'A': RsrcDef({'a': 'initial'}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', '!a')}, []),
})
engine.create_stack('foo', example_template)
engine.noop(5)
engine.call(verify, example_template)
engine.call(store_c_uuid)
example_template_updated = Template({
'A': RsrcDef({'a': 'updated'}, []),
'B': RsrcDef({}, []),
'newC': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('newC')}, []),
'E': RsrcDef({'ca': GetAtt('newC', '!a')}, []),
})
engine.update_stack('foo', example_template_updated)
engine.noop(11)
engine.call(verify, example_template_updated)
example_template_long = Template({
'A': RsrcDef({'a': 'updated'}, []),
'B': RsrcDef({}, []),
'newC': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('newC')}, []),
'E': RsrcDef({'ca': GetAtt('newC', '!a')}, []),
'F': RsrcDef({}, ['D', 'E']),
})
engine.update_stack('foo', example_template_long)
engine.noop(12)
engine.call(verify, example_template_long)
engine.call(check_c_replaced)
engine.delete_stack('foo')
engine.noop(6)
engine.call(verify, Template({}))
|
|
48d9c31c80efc1bccc3ef66df2262323c626198a
|
Home/xsOros.py
|
Home/xsOros.py
|
def checkio(array):
if array[0][0] == array[0][1] == array[0][2] or array[0][0] == array[1][0] == array[2][0] or array[0][0] == array[1][1] == array[2][2]:
return array[0][0]
if array[1][0] == array[1][1] == array[1][2] or array[0][1] == array[1][1] == array[2][1] or array[2][0] == array[1][1] == array[0][2]:
return array[1][1]
if array[2][0] == array[2][1] == array[2][2] or array[0][2] == array[1][2] == array[2][2]:
return array[2][2]
return "D"
if __name__ == '__main__':
assert checkio([
"X.O",
"XX.",
"XOO"]) == "X", "Xs wins"
assert checkio([
"OO.",
"XOX",
"XOX"]) == "O", "Os wins"
assert checkio([
"OOX",
"XXO",
"OXX"]) == "D", "Draw"
|
Solve "Xs or Os Referee" problem
|
Solve "Xs or Os Referee" problem
|
Python
|
mit
|
edwardzhu/checkio-solution
|
Solve "Xs or Os Referee" problem
|
def checkio(array):
if array[0][0] == array[0][1] == array[0][2] or array[0][0] == array[1][0] == array[2][0] or array[0][0] == array[1][1] == array[2][2]:
return array[0][0]
if array[1][0] == array[1][1] == array[1][2] or array[0][1] == array[1][1] == array[2][1] or array[2][0] == array[1][1] == array[0][2]:
return array[1][1]
if array[2][0] == array[2][1] == array[2][2] or array[0][2] == array[1][2] == array[2][2]:
return array[2][2]
return "D"
if __name__ == '__main__':
assert checkio([
"X.O",
"XX.",
"XOO"]) == "X", "Xs wins"
assert checkio([
"OO.",
"XOX",
"XOX"]) == "O", "Os wins"
assert checkio([
"OOX",
"XXO",
"OXX"]) == "D", "Draw"
|
<commit_before><commit_msg>Solve "Xs or Os Referee" problem<commit_after>
|
def checkio(array):
if array[0][0] == array[0][1] == array[0][2] or array[0][0] == array[1][0] == array[2][0] or array[0][0] == array[1][1] == array[2][2]:
return array[0][0]
if array[1][0] == array[1][1] == array[1][2] or array[0][1] == array[1][1] == array[2][1] or array[2][0] == array[1][1] == array[0][2]:
return array[1][1]
if array[2][0] == array[2][1] == array[2][2] or array[0][2] == array[1][2] == array[2][2]:
return array[2][2]
return "D"
if __name__ == '__main__':
assert checkio([
"X.O",
"XX.",
"XOO"]) == "X", "Xs wins"
assert checkio([
"OO.",
"XOX",
"XOX"]) == "O", "Os wins"
assert checkio([
"OOX",
"XXO",
"OXX"]) == "D", "Draw"
|
Solve "Xs or Os Referee" problemdef checkio(array):
if array[0][0] == array[0][1] == array[0][2] or array[0][0] == array[1][0] == array[2][0] or array[0][0] == array[1][1] == array[2][2]:
return array[0][0]
if array[1][0] == array[1][1] == array[1][2] or array[0][1] == array[1][1] == array[2][1] or array[2][0] == array[1][1] == array[0][2]:
return array[1][1]
if array[2][0] == array[2][1] == array[2][2] or array[0][2] == array[1][2] == array[2][2]:
return array[2][2]
return "D"
if __name__ == '__main__':
assert checkio([
"X.O",
"XX.",
"XOO"]) == "X", "Xs wins"
assert checkio([
"OO.",
"XOX",
"XOX"]) == "O", "Os wins"
assert checkio([
"OOX",
"XXO",
"OXX"]) == "D", "Draw"
|
<commit_before><commit_msg>Solve "Xs or Os Referee" problem<commit_after>def checkio(array):
if array[0][0] == array[0][1] == array[0][2] or array[0][0] == array[1][0] == array[2][0] or array[0][0] == array[1][1] == array[2][2]:
return array[0][0]
if array[1][0] == array[1][1] == array[1][2] or array[0][1] == array[1][1] == array[2][1] or array[2][0] == array[1][1] == array[0][2]:
return array[1][1]
if array[2][0] == array[2][1] == array[2][2] or array[0][2] == array[1][2] == array[2][2]:
return array[2][2]
return "D"
if __name__ == '__main__':
assert checkio([
"X.O",
"XX.",
"XOO"]) == "X", "Xs wins"
assert checkio([
"OO.",
"XOX",
"XOX"]) == "O", "Os wins"
assert checkio([
"OOX",
"XXO",
"OXX"]) == "D", "Draw"
|
|
032a4a545a8b07b2cc96482dfee557b013b132eb
|
build_index.py
|
build_index.py
|
#!/usr/bin/env python
import glob
import os
import sys
sys.path.insert(0, 'publisher')
import options
output_dir = 'output'
dirs = [d for d in glob.glob('%s/*' % output_dir) if os.path.isdir(d)]
pages = []
cum_pages = [1]
for d in sorted(dirs):
try:
stats = options.cfg2dict(os.path.join(d, 'paper_stats.cfg'))
pages.append(int(stats['pages']))
cum_pages.append(cum_pages[-1] + pages[-1])
print '"%s" from p. %s to %s' % (os.path.basename(d), cum_pages[-2],
cum_pages[-1] - 1)
f = open(os.path.join(d, 'page_numbers.tex'), 'w')
f.write('\setcounter{page}{%s}' % cum_pages[-2])
f.close()
except IOError, e:
continue
|
Add script that generates page numbers.
|
Add script that generates page numbers.
|
Python
|
bsd-2-clause
|
sbenthall/scipy_proceedings,katyhuff/scipy_proceedings,mikaem/euroscipy_proceedings,michaelpacer/scipy_proceedings,mjklemm/euroscipy_proceedings,mikaem/euroscipy_proceedings,michaelpacer/scipy_proceedings,SepidehAlassi/euroscipy_proceedings,michaelpacer/scipy_proceedings,SepidehAlassi/euroscipy_proceedings,euroscipy/euroscipy_proceedings,sbenthall/scipy_proceedings,euroscipy/euroscipy_proceedings,chendaniely/scipy_proceedings,helgee/euroscipy_proceedings,euroscipy/euroscipy_proceedings,dotsdl/scipy_proceedings,mjklemm/euroscipy_proceedings,katyhuff/scipy_proceedings,springcoil/euroscipy_proceedings,sbenthall/scipy_proceedings,springcoil/euroscipy_proceedings,Stewori/euroscipy_proceedings,katyhuff/scipy_proceedings,helgee/euroscipy_proceedings,mikaem/euroscipy_proceedings,chendaniely/scipy_proceedings,mwcraig/scipy_proceedings,dotsdl/scipy_proceedings,helgee/euroscipy_proceedings,juhasch/euroscipy_proceedings,juhasch/euroscipy_proceedings,SepidehAlassi/euroscipy_proceedings,mwcraig/scipy_proceedings,mjklemm/euroscipy_proceedings,juhasch/euroscipy_proceedings,chendaniely/scipy_proceedings,Stewori/euroscipy_proceedings,springcoil/euroscipy_proceedings,mwcraig/scipy_proceedings,Stewori/euroscipy_proceedings,dotsdl/scipy_proceedings
|
Add script that generates page numbers.
|
#!/usr/bin/env python
import glob
import os
import sys
sys.path.insert(0, 'publisher')
import options
output_dir = 'output'
dirs = [d for d in glob.glob('%s/*' % output_dir) if os.path.isdir(d)]
pages = []
cum_pages = [1]
for d in sorted(dirs):
try:
stats = options.cfg2dict(os.path.join(d, 'paper_stats.cfg'))
pages.append(int(stats['pages']))
cum_pages.append(cum_pages[-1] + pages[-1])
print '"%s" from p. %s to %s' % (os.path.basename(d), cum_pages[-2],
cum_pages[-1] - 1)
f = open(os.path.join(d, 'page_numbers.tex'), 'w')
f.write('\setcounter{page}{%s}' % cum_pages[-2])
f.close()
except IOError, e:
continue
|
<commit_before><commit_msg>Add script that generates page numbers.<commit_after>
|
#!/usr/bin/env python
import glob
import os
import sys
sys.path.insert(0, 'publisher')
import options
output_dir = 'output'
dirs = [d for d in glob.glob('%s/*' % output_dir) if os.path.isdir(d)]
pages = []
cum_pages = [1]
for d in sorted(dirs):
try:
stats = options.cfg2dict(os.path.join(d, 'paper_stats.cfg'))
pages.append(int(stats['pages']))
cum_pages.append(cum_pages[-1] + pages[-1])
print '"%s" from p. %s to %s' % (os.path.basename(d), cum_pages[-2],
cum_pages[-1] - 1)
f = open(os.path.join(d, 'page_numbers.tex'), 'w')
f.write('\setcounter{page}{%s}' % cum_pages[-2])
f.close()
except IOError, e:
continue
|
Add script that generates page numbers.#!/usr/bin/env python
import glob
import os
import sys
sys.path.insert(0, 'publisher')
import options
output_dir = 'output'
dirs = [d for d in glob.glob('%s/*' % output_dir) if os.path.isdir(d)]
pages = []
cum_pages = [1]
for d in sorted(dirs):
try:
stats = options.cfg2dict(os.path.join(d, 'paper_stats.cfg'))
pages.append(int(stats['pages']))
cum_pages.append(cum_pages[-1] + pages[-1])
print '"%s" from p. %s to %s' % (os.path.basename(d), cum_pages[-2],
cum_pages[-1] - 1)
f = open(os.path.join(d, 'page_numbers.tex'), 'w')
f.write('\setcounter{page}{%s}' % cum_pages[-2])
f.close()
except IOError, e:
continue
|
<commit_before><commit_msg>Add script that generates page numbers.<commit_after>#!/usr/bin/env python
import glob
import os
import sys
sys.path.insert(0, 'publisher')
import options
output_dir = 'output'
dirs = [d for d in glob.glob('%s/*' % output_dir) if os.path.isdir(d)]
pages = []
cum_pages = [1]
for d in sorted(dirs):
try:
stats = options.cfg2dict(os.path.join(d, 'paper_stats.cfg'))
pages.append(int(stats['pages']))
cum_pages.append(cum_pages[-1] + pages[-1])
print '"%s" from p. %s to %s' % (os.path.basename(d), cum_pages[-2],
cum_pages[-1] - 1)
f = open(os.path.join(d, 'page_numbers.tex'), 'w')
f.write('\setcounter{page}{%s}' % cum_pages[-2])
f.close()
except IOError, e:
continue
|
|
07271f2edc56a7a01913eb6dd2aa27dcd4cb84ec
|
scripts/client_credentials.py
|
scripts/client_credentials.py
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the license.
"""
import argparse
# Load in Vimeo auth info, requires adding parent dir to import paths.
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
from vimeo.auth import get_client_credentials
"""
Convenience for generating a client credential access token.
It is not necessary to run this script before using the python Vimeo library.
This script is provided only as a convenience. For a service this should be used
to generate tokens that can make requests on behalf of the app.
The function for this flow can be found in vimeo/auth.py
"""
if __name__ == "__main__":
args = None
parser = argparse.ArgumentParser(description='Generate an API access token')
parser.add_argument('--cid', '-i', help="Your client ID", nargs=1, required=True)
parser.add_argument('--secret', '-s', help="Your client secret", nargs=1, required=True)
parser.add_argument('--scopes', '-o', help="Your requested scopes",
nargs=argparse.REMAINDER, required=False)
parser.add_argument('--dev', '-d', action="store_true", help="Use dev server")
args = parser.parse_args()
api_root = "http://api.vimeo."
if args.dev:
api_root += "dev"
else:
api_root += "com"
def do_auth_flow(api_root, cid, secret, scopes, redirect):
print "Visit %s in a browser" % get_auth_url(api_root, cid, scopes, redirect)
auth_code = raw_input("Enter auth code: ")
return get_access_token(auth_code, api_root, cid, secret, redirect)
print "Client token is %s" % get_client_credentials(args.cid[0],
args.secret[0], scopes=args.scopes,
api_root=api_root)
|
Add in demo script to get client credentials.
|
Add in demo script to get client credentials.
|
Python
|
apache-2.0
|
blorenz/vimeo.py,gabrielgisoldo/vimeo.py,greedo/vimeo.py,vimeo/vimeo.py
|
Add in demo script to get client credentials.
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the license.
"""
import argparse
# Load in Vimeo auth info, requires adding parent dir to import paths.
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
from vimeo.auth import get_client_credentials
"""
Convenience for generating a client credential access token.
It is not necessary to run this script before using the python Vimeo library.
This script is provided only as a convenience. For a service this should be used
to generate tokens that can make requests on behalf of the app.
The function for this flow can be found in vimeo/auth.py
"""
if __name__ == "__main__":
args = None
parser = argparse.ArgumentParser(description='Generate an API access token')
parser.add_argument('--cid', '-i', help="Your client ID", nargs=1, required=True)
parser.add_argument('--secret', '-s', help="Your client secret", nargs=1, required=True)
parser.add_argument('--scopes', '-o', help="Your requested scopes",
nargs=argparse.REMAINDER, required=False)
parser.add_argument('--dev', '-d', action="store_true", help="Use dev server")
args = parser.parse_args()
api_root = "http://api.vimeo."
if args.dev:
api_root += "dev"
else:
api_root += "com"
def do_auth_flow(api_root, cid, secret, scopes, redirect):
print "Visit %s in a browser" % get_auth_url(api_root, cid, scopes, redirect)
auth_code = raw_input("Enter auth code: ")
return get_access_token(auth_code, api_root, cid, secret, redirect)
print "Client token is %s" % get_client_credentials(args.cid[0],
args.secret[0], scopes=args.scopes,
api_root=api_root)
|
<commit_before><commit_msg>Add in demo script to get client credentials.<commit_after>
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the license.
"""
import argparse
# Load in Vimeo auth info, requires adding parent dir to import paths.
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
from vimeo.auth import get_client_credentials
"""
Convenience for generating a client credential access token.
It is not necessary to run this script before using the python Vimeo library.
This script is provided only as a convenience. For a service this should be used
to generate tokens that can make requests on behalf of the app.
The function for this flow can be found in vimeo/auth.py
"""
if __name__ == "__main__":
args = None
parser = argparse.ArgumentParser(description='Generate an API access token')
parser.add_argument('--cid', '-i', help="Your client ID", nargs=1, required=True)
parser.add_argument('--secret', '-s', help="Your client secret", nargs=1, required=True)
parser.add_argument('--scopes', '-o', help="Your requested scopes",
nargs=argparse.REMAINDER, required=False)
parser.add_argument('--dev', '-d', action="store_true", help="Use dev server")
args = parser.parse_args()
api_root = "http://api.vimeo."
if args.dev:
api_root += "dev"
else:
api_root += "com"
def do_auth_flow(api_root, cid, secret, scopes, redirect):
print "Visit %s in a browser" % get_auth_url(api_root, cid, scopes, redirect)
auth_code = raw_input("Enter auth code: ")
return get_access_token(auth_code, api_root, cid, secret, redirect)
print "Client token is %s" % get_client_credentials(args.cid[0],
args.secret[0], scopes=args.scopes,
api_root=api_root)
|
Add in demo script to get client credentials."""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the license.
"""
import argparse
# Load in Vimeo auth info, requires adding parent dir to import paths.
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
from vimeo.auth import get_client_credentials
"""
Convenience for generating a client credential access token.
It is not necessary to run this script before using the python Vimeo library.
This script is provided only as a convenience. For a service this should be used
to generate tokens that can make requests on behalf of the app.
The function for this flow can be found in vimeo/auth.py
"""
if __name__ == "__main__":
args = None
parser = argparse.ArgumentParser(description='Generate an API access token')
parser.add_argument('--cid', '-i', help="Your client ID", nargs=1, required=True)
parser.add_argument('--secret', '-s', help="Your client secret", nargs=1, required=True)
parser.add_argument('--scopes', '-o', help="Your requested scopes",
nargs=argparse.REMAINDER, required=False)
parser.add_argument('--dev', '-d', action="store_true", help="Use dev server")
args = parser.parse_args()
api_root = "http://api.vimeo."
if args.dev:
api_root += "dev"
else:
api_root += "com"
def do_auth_flow(api_root, cid, secret, scopes, redirect):
print "Visit %s in a browser" % get_auth_url(api_root, cid, scopes, redirect)
auth_code = raw_input("Enter auth code: ")
return get_access_token(auth_code, api_root, cid, secret, redirect)
print "Client token is %s" % get_client_credentials(args.cid[0],
args.secret[0], scopes=args.scopes,
api_root=api_root)
|
<commit_before><commit_msg>Add in demo script to get client credentials.<commit_after>"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the license.
"""
import argparse
# Load in Vimeo auth info, requires adding parent dir to import paths.
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
from vimeo.auth import get_client_credentials
"""
Convenience for generating a client credential access token.
It is not necessary to run this script before using the python Vimeo library.
This script is provided only as a convenience. For a service this should be used
to generate tokens that can make requests on behalf of the app.
The function for this flow can be found in vimeo/auth.py
"""
if __name__ == "__main__":
args = None
parser = argparse.ArgumentParser(description='Generate an API access token')
parser.add_argument('--cid', '-i', help="Your client ID", nargs=1, required=True)
parser.add_argument('--secret', '-s', help="Your client secret", nargs=1, required=True)
parser.add_argument('--scopes', '-o', help="Your requested scopes",
nargs=argparse.REMAINDER, required=False)
parser.add_argument('--dev', '-d', action="store_true", help="Use dev server")
args = parser.parse_args()
api_root = "http://api.vimeo."
if args.dev:
api_root += "dev"
else:
api_root += "com"
def do_auth_flow(api_root, cid, secret, scopes, redirect):
print "Visit %s in a browser" % get_auth_url(api_root, cid, scopes, redirect)
auth_code = raw_input("Enter auth code: ")
return get_access_token(auth_code, api_root, cid, secret, redirect)
print "Client token is %s" % get_client_credentials(args.cid[0],
args.secret[0], scopes=args.scopes,
api_root=api_root)
|
|
1122b162965201ee860ced829e6d9352da2d1e7f
|
handle_exception.py
|
handle_exception.py
|
""" Be DRY (don't repeat yourself), use exception handler function for try-except """
def handle_exception(func):
def inner(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as error:
print('[ERROR] {}'.format(error))
exit(1)
return inner
@handle_exception
def divide(x, y):
return x/y
def show_result(x, y):
print('{x} divided by {y} is {result}'.format(x=x, y=y, result=divide(x, y)))
# It should work.
show_result(16, 2)
# Check the exception handler. Division by zero.
show_result(8, 0)
|
Use exception handler function for try-except
|
Use exception handler function for try-except
|
Python
|
mit
|
foobar167/junkyard,foobar167/junkyard,foobar167/junkyard,foobar167/junkyard,foobar167/junkyard,foobar167/junkyard
|
Use exception handler function for try-except
|
""" Be DRY (don't repeat yourself), use exception handler function for try-except """
def handle_exception(func):
def inner(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as error:
print('[ERROR] {}'.format(error))
exit(1)
return inner
@handle_exception
def divide(x, y):
return x/y
def show_result(x, y):
print('{x} divided by {y} is {result}'.format(x=x, y=y, result=divide(x, y)))
# It should work.
show_result(16, 2)
# Check the exception handler. Division by zero.
show_result(8, 0)
|
<commit_before><commit_msg>Use exception handler function for try-except<commit_after>
|
""" Be DRY (don't repeat yourself), use exception handler function for try-except """
def handle_exception(func):
def inner(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as error:
print('[ERROR] {}'.format(error))
exit(1)
return inner
@handle_exception
def divide(x, y):
return x/y
def show_result(x, y):
print('{x} divided by {y} is {result}'.format(x=x, y=y, result=divide(x, y)))
# It should work.
show_result(16, 2)
# Check the exception handler. Division by zero.
show_result(8, 0)
|
Use exception handler function for try-except""" Be DRY (don't repeat yourself), use exception handler function for try-except """
def handle_exception(func):
def inner(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as error:
print('[ERROR] {}'.format(error))
exit(1)
return inner
@handle_exception
def divide(x, y):
return x/y
def show_result(x, y):
print('{x} divided by {y} is {result}'.format(x=x, y=y, result=divide(x, y)))
# It should work.
show_result(16, 2)
# Check the exception handler. Division by zero.
show_result(8, 0)
|
<commit_before><commit_msg>Use exception handler function for try-except<commit_after>""" Be DRY (don't repeat yourself), use exception handler function for try-except """
def handle_exception(func):
def inner(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as error:
print('[ERROR] {}'.format(error))
exit(1)
return inner
@handle_exception
def divide(x, y):
return x/y
def show_result(x, y):
print('{x} divided by {y} is {result}'.format(x=x, y=y, result=divide(x, y)))
# It should work.
show_result(16, 2)
# Check the exception handler. Division by zero.
show_result(8, 0)
|
|
915b13a3940ec2bcde6784b30a97a44929ab3a02
|
docker-nodev.py
|
docker-nodev.py
|
from __future__ import print_function
import subprocess
import sys
DOCKER_CREATE_IN = 'docker create -it nodev {}'
DOCKER_SIMPLE_CMD_IN = 'docker {} {container_id}'
def nodev(argv=()):
container_id = subprocess.check_output(DOCKER_CREATE_IN.format(' '.join(argv)), shell=True).strip()
print('creating container: {container_id}'.format(**locals()))
try:
subprocess.check_call('docker cp . {container_id}:/src '.format(**locals()), shell=True)
subprocess.check_call('docker start -ai {container_id}'.format(**locals()), shell=True)
finally:
print('removing container: {container_id}'.format(**locals()))
subprocess.check_output(DOCKER_SIMPLE_CMD_IN.format('rm -f', **locals()), shell=True)
if __name__ == '__main__':
nodev(sys.argv)
|
Make the script work on python 2 and print actions.
|
Make the script work on python 2 and print actions.
|
Python
|
mit
|
nodev-io/nodev-starter-kit,nodev-io/nodev-starter-kit,nodev-io/nodev-tutorial
|
Make the script work on python 2 and print actions.
|
from __future__ import print_function
import subprocess
import sys
DOCKER_CREATE_IN = 'docker create -it nodev {}'
DOCKER_SIMPLE_CMD_IN = 'docker {} {container_id}'
def nodev(argv=()):
container_id = subprocess.check_output(DOCKER_CREATE_IN.format(' '.join(argv)), shell=True).strip()
print('creating container: {container_id}'.format(**locals()))
try:
subprocess.check_call('docker cp . {container_id}:/src '.format(**locals()), shell=True)
subprocess.check_call('docker start -ai {container_id}'.format(**locals()), shell=True)
finally:
print('removing container: {container_id}'.format(**locals()))
subprocess.check_output(DOCKER_SIMPLE_CMD_IN.format('rm -f', **locals()), shell=True)
if __name__ == '__main__':
nodev(sys.argv)
|
<commit_before><commit_msg>Make the script work on python 2 and print actions.<commit_after>
|
from __future__ import print_function
import subprocess
import sys
DOCKER_CREATE_IN = 'docker create -it nodev {}'
DOCKER_SIMPLE_CMD_IN = 'docker {} {container_id}'
def nodev(argv=()):
container_id = subprocess.check_output(DOCKER_CREATE_IN.format(' '.join(argv)), shell=True).strip()
print('creating container: {container_id}'.format(**locals()))
try:
subprocess.check_call('docker cp . {container_id}:/src '.format(**locals()), shell=True)
subprocess.check_call('docker start -ai {container_id}'.format(**locals()), shell=True)
finally:
print('removing container: {container_id}'.format(**locals()))
subprocess.check_output(DOCKER_SIMPLE_CMD_IN.format('rm -f', **locals()), shell=True)
if __name__ == '__main__':
nodev(sys.argv)
|
Make the script work on python 2 and print actions.
from __future__ import print_function
import subprocess
import sys
DOCKER_CREATE_IN = 'docker create -it nodev {}'
DOCKER_SIMPLE_CMD_IN = 'docker {} {container_id}'
def nodev(argv=()):
container_id = subprocess.check_output(DOCKER_CREATE_IN.format(' '.join(argv)), shell=True).strip()
print('creating container: {container_id}'.format(**locals()))
try:
subprocess.check_call('docker cp . {container_id}:/src '.format(**locals()), shell=True)
subprocess.check_call('docker start -ai {container_id}'.format(**locals()), shell=True)
finally:
print('removing container: {container_id}'.format(**locals()))
subprocess.check_output(DOCKER_SIMPLE_CMD_IN.format('rm -f', **locals()), shell=True)
if __name__ == '__main__':
nodev(sys.argv)
|
<commit_before><commit_msg>Make the script work on python 2 and print actions.<commit_after>
from __future__ import print_function
import subprocess
import sys
DOCKER_CREATE_IN = 'docker create -it nodev {}'
DOCKER_SIMPLE_CMD_IN = 'docker {} {container_id}'
def nodev(argv=()):
container_id = subprocess.check_output(DOCKER_CREATE_IN.format(' '.join(argv)), shell=True).strip()
print('creating container: {container_id}'.format(**locals()))
try:
subprocess.check_call('docker cp . {container_id}:/src '.format(**locals()), shell=True)
subprocess.check_call('docker start -ai {container_id}'.format(**locals()), shell=True)
finally:
print('removing container: {container_id}'.format(**locals()))
subprocess.check_output(DOCKER_SIMPLE_CMD_IN.format('rm -f', **locals()), shell=True)
if __name__ == '__main__':
nodev(sys.argv)
|
|
5be2c671197ac8e0192341e1cd3a3cdaabe23353
|
corehq/apps/commtrack/management/commands/check_multiple_parentage.py
|
corehq/apps/commtrack/management/commands/check_multiple_parentage.py
|
from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write("Populating site codes...\n")
domains = Domain.get_all()
for d in domains:
if d.commtrack_enabled:
for loc_type in d.commtrack_settings.location_types:
if len(loc_type.allowed_parents) > 1:
self.stdout.write(
"Found multiple parent options in domain: " +
d.name
)
|
Add management command to check for multiple parent types
|
Add management command to check for multiple parent types
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,SEL-Columbia/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq
|
Add management command to check for multiple parent types
|
from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write("Populating site codes...\n")
domains = Domain.get_all()
for d in domains:
if d.commtrack_enabled:
for loc_type in d.commtrack_settings.location_types:
if len(loc_type.allowed_parents) > 1:
self.stdout.write(
"Found multiple parent options in domain: " +
d.name
)
|
<commit_before><commit_msg>Add management command to check for multiple parent types<commit_after>
|
from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write("Populating site codes...\n")
domains = Domain.get_all()
for d in domains:
if d.commtrack_enabled:
for loc_type in d.commtrack_settings.location_types:
if len(loc_type.allowed_parents) > 1:
self.stdout.write(
"Found multiple parent options in domain: " +
d.name
)
|
Add management command to check for multiple parent typesfrom django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write("Populating site codes...\n")
domains = Domain.get_all()
for d in domains:
if d.commtrack_enabled:
for loc_type in d.commtrack_settings.location_types:
if len(loc_type.allowed_parents) > 1:
self.stdout.write(
"Found multiple parent options in domain: " +
d.name
)
|
<commit_before><commit_msg>Add management command to check for multiple parent types<commit_after>from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write("Populating site codes...\n")
domains = Domain.get_all()
for d in domains:
if d.commtrack_enabled:
for loc_type in d.commtrack_settings.location_types:
if len(loc_type.allowed_parents) > 1:
self.stdout.write(
"Found multiple parent options in domain: " +
d.name
)
|
|
f9637bba0068b078728824d76cadf2f0c11d7c06
|
hbase-tables/load_binary.py
|
hbase-tables/load_binary.py
|
##
## Insert various data into HBase
##
## cd $HUE_HOME (e.g. cd /usr/share/hue(/opt/cloudera/parcels/CDH-XXXXX/share/hue if using parcels))
## build/env/bin/hue shell
##
from hbase.api import HbaseApi
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:txt': 'Hue is awesome!'})
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:json': '{"user": "hue", "coolness": "extra"}'})
HbaseApi().putRow('Cluster', 'events', 'hue-20130802', {'doc:version': '<xml>I like HBase</xml>'})
HbaseApi().putRow('Cluster', 'events', 'hue-20130802', {'doc:version': '<xml>I LOVE HBase</xml>'})
## From https://github.com/romainr/hadoop-tutorials-examples
## cd /tmp
## git clone https://github.com/romainr/hadoop-tutorials-examples.git
root='/tmp/hadoop-tutorials-examples'
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:img': open(root + '/hbase-tables/data/hue-logo.png', "rb").read()})
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:html': open(root + '/hbase-tables/data/gethue.com.html', "rb").read()})
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:pdf': open(root + '/hbase-tables/data/gethue.pdf', "rb").read()})
|
Load binary data into HBase
|
[hbase] Load binary data into HBase
|
Python
|
apache-2.0
|
romainr/hadoop-tutorials-examples,skale1990/hadoop-tutorials-examples,skale1990/hadoop-tutorials-examples,skale1990/hadoop-tutorials-examples,romainr/hadoop-tutorials-examples,skale1990/hadoop-tutorials-examples,romainr/hadoop-tutorials-examples,skale1990/hadoop-tutorials-examples,romainr/hadoop-tutorials-examples,romainr/hadoop-tutorials-examples
|
[hbase] Load binary data into HBase
|
##
## Insert various data into HBase
##
## cd $HUE_HOME (e.g. cd /usr/share/hue(/opt/cloudera/parcels/CDH-XXXXX/share/hue if using parcels))
## build/env/bin/hue shell
##
from hbase.api import HbaseApi
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:txt': 'Hue is awesome!'})
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:json': '{"user": "hue", "coolness": "extra"}'})
HbaseApi().putRow('Cluster', 'events', 'hue-20130802', {'doc:version': '<xml>I like HBase</xml>'})
HbaseApi().putRow('Cluster', 'events', 'hue-20130802', {'doc:version': '<xml>I LOVE HBase</xml>'})
## From https://github.com/romainr/hadoop-tutorials-examples
## cd /tmp
## git clone https://github.com/romainr/hadoop-tutorials-examples.git
root='/tmp/hadoop-tutorials-examples'
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:img': open(root + '/hbase-tables/data/hue-logo.png', "rb").read()})
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:html': open(root + '/hbase-tables/data/gethue.com.html', "rb").read()})
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:pdf': open(root + '/hbase-tables/data/gethue.pdf', "rb").read()})
|
<commit_before><commit_msg>[hbase] Load binary data into HBase<commit_after>
|
##
## Insert various data into HBase
##
## cd $HUE_HOME (e.g. cd /usr/share/hue(/opt/cloudera/parcels/CDH-XXXXX/share/hue if using parcels))
## build/env/bin/hue shell
##
from hbase.api import HbaseApi
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:txt': 'Hue is awesome!'})
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:json': '{"user": "hue", "coolness": "extra"}'})
HbaseApi().putRow('Cluster', 'events', 'hue-20130802', {'doc:version': '<xml>I like HBase</xml>'})
HbaseApi().putRow('Cluster', 'events', 'hue-20130802', {'doc:version': '<xml>I LOVE HBase</xml>'})
## From https://github.com/romainr/hadoop-tutorials-examples
## cd /tmp
## git clone https://github.com/romainr/hadoop-tutorials-examples.git
root='/tmp/hadoop-tutorials-examples'
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:img': open(root + '/hbase-tables/data/hue-logo.png', "rb").read()})
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:html': open(root + '/hbase-tables/data/gethue.com.html', "rb").read()})
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:pdf': open(root + '/hbase-tables/data/gethue.pdf', "rb").read()})
|
[hbase] Load binary data into HBase##
## Insert various data into HBase
##
## cd $HUE_HOME (e.g. cd /usr/share/hue(/opt/cloudera/parcels/CDH-XXXXX/share/hue if using parcels))
## build/env/bin/hue shell
##
from hbase.api import HbaseApi
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:txt': 'Hue is awesome!'})
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:json': '{"user": "hue", "coolness": "extra"}'})
HbaseApi().putRow('Cluster', 'events', 'hue-20130802', {'doc:version': '<xml>I like HBase</xml>'})
HbaseApi().putRow('Cluster', 'events', 'hue-20130802', {'doc:version': '<xml>I LOVE HBase</xml>'})
## From https://github.com/romainr/hadoop-tutorials-examples
## cd /tmp
## git clone https://github.com/romainr/hadoop-tutorials-examples.git
root='/tmp/hadoop-tutorials-examples'
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:img': open(root + '/hbase-tables/data/hue-logo.png', "rb").read()})
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:html': open(root + '/hbase-tables/data/gethue.com.html', "rb").read()})
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:pdf': open(root + '/hbase-tables/data/gethue.pdf', "rb").read()})
|
<commit_before><commit_msg>[hbase] Load binary data into HBase<commit_after>##
## Insert various data into HBase
##
## cd $HUE_HOME (e.g. cd /usr/share/hue(/opt/cloudera/parcels/CDH-XXXXX/share/hue if using parcels))
## build/env/bin/hue shell
##
from hbase.api import HbaseApi
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:txt': 'Hue is awesome!'})
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:json': '{"user": "hue", "coolness": "extra"}'})
HbaseApi().putRow('Cluster', 'events', 'hue-20130802', {'doc:version': '<xml>I like HBase</xml>'})
HbaseApi().putRow('Cluster', 'events', 'hue-20130802', {'doc:version': '<xml>I LOVE HBase</xml>'})
## From https://github.com/romainr/hadoop-tutorials-examples
## cd /tmp
## git clone https://github.com/romainr/hadoop-tutorials-examples.git
root='/tmp/hadoop-tutorials-examples'
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:img': open(root + '/hbase-tables/data/hue-logo.png', "rb").read()})
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:html': open(root + '/hbase-tables/data/gethue.com.html', "rb").read()})
HbaseApi().putRow('Cluster', 'events', 'hue-20130801', {'doc:pdf': open(root + '/hbase-tables/data/gethue.pdf', "rb").read()})
|
|
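A hedged companion sketch for the record above: it reuses only HbaseApi.putRow as shown there, while the loop, the file list and the column qualifiers are illustrative assumptions, and it assumes the same Hue shell environment.
# Sketch: batch-load several local files as binary cells with putRow.
# File names and column qualifiers below are hypothetical examples.
import os
from hbase.api import HbaseApi
root = '/tmp/hadoop-tutorials-examples/hbase-tables/data'
columns = {'doc:img': 'hue-logo.png', 'doc:html': 'gethue.com.html', 'doc:pdf': 'gethue.pdf'}
api = HbaseApi()
for column, name in columns.items():
    path = os.path.join(root, name)
    with open(path, 'rb') as handle:  # the context manager closes each file promptly
        api.putRow('Cluster', 'events', 'hue-20130801', {column: handle.read()})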
ee68ac19f7417a16f4250486a064bfc8a8f37e47
|
wafer/talks/migrations/0011_talk_status_data_migration.py
|
wafer/talks/migrations/0011_talk_status_data_migration.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
# Data migration for the changed talk status
def change_pending(apps, schema_editor):
# Change Pending (P) status to
# (S) Submitted
# Use apps to ensure we have the correct version
Talk = apps.get_model("talks", "Talk")
for talk in Talk.objects.filter(status='P'):
talk.status = 'S'
talk.save()
class Migration(migrations.Migration):
dependencies = [
('talks', '0010_auto_20161121_2134'),
]
operations = [
migrations.RunPython(change_pending),
]
|
Add data migration to update already existing talk states
|
Add data migration to update already existing talk states
|
Python
|
isc
|
CTPUG/wafer,CTPUG/wafer,CTPUG/wafer,CTPUG/wafer
|
Add data migration to update already existing talk states
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
# Data migration for the changed talk status
def change_pending(apps, schema_editor):
# Change Pending (P) status to
# (S) Submitted
# Use apps to ensure we have the correct version
Talk = apps.get_model("talks", "Talk")
for talk in Talk.objects.filter(status='P'):
talk.status = 'S'
talk.save()
class Migration(migrations.Migration):
dependencies = [
('talks', '0010_auto_20161121_2134'),
]
operations = [
migrations.RunPython(change_pending),
]
|
<commit_before><commit_msg>Add data migration to update already existing talk states<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
# Data migration for the changed talk status
def change_pending(apps, schema_editor):
# Change Pending (P) status to
# (S) Submitted
# Use apps to ensure we have the correct version
Talk = apps.get_model("talks", "Talk")
for talk in Talk.objects.filter(status='P'):
talk.status = 'S'
talk.save()
class Migration(migrations.Migration):
dependencies = [
('talks', '0010_auto_20161121_2134'),
]
operations = [
migrations.RunPython(change_pending),
]
|
Add data migration to update already existing talk states# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
# Data migration for the changed talk status
def change_pending(apps, schema_editor):
# Change Pending (P) status to
# (S) Submitted
# Use apps to ensure we have the correct version
Talk = apps.get_model("talks", "Talk")
for talk in Talk.objects.filter(status='P'):
talk.status = 'S'
talk.save()
class Migration(migrations.Migration):
dependencies = [
('talks', '0010_auto_20161121_2134'),
]
operations = [
migrations.RunPython(change_pending),
]
|
<commit_before><commit_msg>Add data migration to update already existing talk states<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
# Data migration for the changed talk status
def change_pending(apps, schema_editor):
# Change Pending (P) status to
# (S) Submitted
# Use apps to ensure we have the correct version
Talk = apps.get_model("talks", "Talk")
for talk in Talk.objects.filter(status='P'):
talk.status = 'S'
talk.save()
class Migration(migrations.Migration):
dependencies = [
('talks', '0010_auto_20161121_2134'),
]
operations = [
migrations.RunPython(change_pending),
]
|
|
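A hedged sketch of how the same data migration could be made reversible; the S-to-P reverse mapping and the reverse_code wiring are illustrative additions, not part of the original commit.
# Sketch: forward and reverse functions for the status change, so the migration can be unapplied.
from django.db import migrations
def change_pending(apps, schema_editor):
    Talk = apps.get_model("talks", "Talk")
    for talk in Talk.objects.filter(status='P'):
        talk.status = 'S'
        talk.save()
def revert_pending(apps, schema_editor):
    # Assumed inverse: map Submitted back to Pending.
    Talk = apps.get_model("talks", "Talk")
    for talk in Talk.objects.filter(status='S'):
        talk.status = 'P'
        talk.save()
class Migration(migrations.Migration):
    dependencies = [('talks', '0010_auto_20161121_2134')]
    operations = [migrations.RunPython(change_pending, reverse_code=revert_pending)]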
2c118ab3a6b516fae87280dac69cb9c5d7caa5a9
|
spacy/tests/doc/test_creation.py
|
spacy/tests/doc/test_creation.py
|
'''Test Doc sets up tokens correctly.'''
from __future__ import unicode_literals
import pytest
from ...vocab import Vocab
from ...tokens.doc import Doc
from ...lemmatizerlookup import Lemmatizer
@pytest.fixture
def lemmatizer():
return Lemmatizer({'dogs': 'dog', 'boxen': 'box', 'mice': 'mouse'})
@pytest.fixture
def vocab(lemmatizer):
return Vocab(lemmatizer=lemmatizer)
def test_empty_doc(vocab):
doc = Doc(vocab)
assert len(doc) == 0
def test_single_word(vocab):
doc = Doc(vocab, words=['a'])
assert doc.text == 'a '
doc = Doc(vocab, words=['a'], spaces=[False])
assert doc.text == 'a'
def test_lookup_lemmatization(vocab):
doc = Doc(vocab, words=['dogs', 'dogses'])
assert doc[0].text == 'dogs'
assert doc[0].lemma_ == 'dog'
assert doc[1].text == 'dogses'
assert doc[1].lemma_ == 'dogses'
|
Add tests for Doc creation
|
Add tests for Doc creation
|
Python
|
mit
|
recognai/spaCy,aikramer2/spaCy,honnibal/spaCy,honnibal/spaCy,spacy-io/spaCy,explosion/spaCy,spacy-io/spaCy,honnibal/spaCy,recognai/spaCy,spacy-io/spaCy,explosion/spaCy,spacy-io/spaCy,aikramer2/spaCy,explosion/spaCy,honnibal/spaCy,recognai/spaCy,recognai/spaCy,explosion/spaCy,spacy-io/spaCy,aikramer2/spaCy,spacy-io/spaCy,aikramer2/spaCy,explosion/spaCy,aikramer2/spaCy,recognai/spaCy,aikramer2/spaCy,recognai/spaCy,explosion/spaCy
|
Add tests for Doc creation
|
'''Test Doc sets up tokens correctly.'''
from __future__ import unicode_literals
import pytest
from ...vocab import Vocab
from ...tokens.doc import Doc
from ...lemmatizerlookup import Lemmatizer
@pytest.fixture
def lemmatizer():
return Lemmatizer({'dogs': 'dog', 'boxen': 'box', 'mice': 'mouse'})
@pytest.fixture
def vocab(lemmatizer):
return Vocab(lemmatizer=lemmatizer)
def test_empty_doc(vocab):
doc = Doc(vocab)
assert len(doc) == 0
def test_single_word(vocab):
doc = Doc(vocab, words=['a'])
assert doc.text == 'a '
doc = Doc(vocab, words=['a'], spaces=[False])
assert doc.text == 'a'
def test_lookup_lemmatization(vocab):
doc = Doc(vocab, words=['dogs', 'dogses'])
assert doc[0].text == 'dogs'
assert doc[0].lemma_ == 'dog'
assert doc[1].text == 'dogses'
assert doc[1].lemma_ == 'dogses'
|
<commit_before><commit_msg>Add tests for Doc creation<commit_after>
|
'''Test Doc sets up tokens correctly.'''
from __future__ import unicode_literals
import pytest
from ...vocab import Vocab
from ...tokens.doc import Doc
from ...lemmatizerlookup import Lemmatizer
@pytest.fixture
def lemmatizer():
return Lemmatizer({'dogs': 'dog', 'boxen': 'box', 'mice': 'mouse'})
@pytest.fixture
def vocab(lemmatizer):
return Vocab(lemmatizer=lemmatizer)
def test_empty_doc(vocab):
doc = Doc(vocab)
assert len(doc) == 0
def test_single_word(vocab):
doc = Doc(vocab, words=['a'])
assert doc.text == 'a '
doc = Doc(vocab, words=['a'], spaces=[False])
assert doc.text == 'a'
def test_lookup_lemmatization(vocab):
doc = Doc(vocab, words=['dogs', 'dogses'])
assert doc[0].text == 'dogs'
assert doc[0].lemma_ == 'dog'
assert doc[1].text == 'dogses'
assert doc[1].lemma_ == 'dogses'
|
Add tests for Doc creation'''Test Doc sets up tokens correctly.'''
from __future__ import unicode_literals
import pytest
from ...vocab import Vocab
from ...tokens.doc import Doc
from ...lemmatizerlookup import Lemmatizer
@pytest.fixture
def lemmatizer():
return Lemmatizer({'dogs': 'dog', 'boxen': 'box', 'mice': 'mouse'})
@pytest.fixture
def vocab(lemmatizer):
return Vocab(lemmatizer=lemmatizer)
def test_empty_doc(vocab):
doc = Doc(vocab)
assert len(doc) == 0
def test_single_word(vocab):
doc = Doc(vocab, words=['a'])
assert doc.text == 'a '
doc = Doc(vocab, words=['a'], spaces=[False])
assert doc.text == 'a'
def test_lookup_lemmatization(vocab):
doc = Doc(vocab, words=['dogs', 'dogses'])
assert doc[0].text == 'dogs'
assert doc[0].lemma_ == 'dog'
assert doc[1].text == 'dogses'
assert doc[1].lemma_ == 'dogses'
|
<commit_before><commit_msg>Add tests for Doc creation<commit_after>'''Test Doc sets up tokens correctly.'''
from __future__ import unicode_literals
import pytest
from ...vocab import Vocab
from ...tokens.doc import Doc
from ...lemmatizerlookup import Lemmatizer
@pytest.fixture
def lemmatizer():
return Lemmatizer({'dogs': 'dog', 'boxen': 'box', 'mice': 'mouse'})
@pytest.fixture
def vocab(lemmatizer):
return Vocab(lemmatizer=lemmatizer)
def test_empty_doc(vocab):
doc = Doc(vocab)
assert len(doc) == 0
def test_single_word(vocab):
doc = Doc(vocab, words=['a'])
assert doc.text == 'a '
doc = Doc(vocab, words=['a'], spaces=[False])
assert doc.text == 'a'
def test_lookup_lemmatization(vocab):
doc = Doc(vocab, words=['dogs', 'dogses'])
assert doc[0].text == 'dogs'
assert doc[0].lemma_ == 'dog'
assert doc[1].text == 'dogses'
assert doc[1].lemma_ == 'dogses'
|
|
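A hedged usage sketch of the Doc constructor exercised by the tests above; the public import paths and the sample tokens are assumptions, but the words/spaces alignment mirrors what the tests assert.
# Sketch: words and spaces must have equal length; doc.text is rebuilt from them verbatim.
from spacy.vocab import Vocab
from spacy.tokens import Doc
doc = Doc(Vocab(), words=['Hello', 'world', '!'], spaces=[True, False, False])
assert doc.text == 'Hello world!'
assert len(doc) == 3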
5e7838d013dad41e4508e3736d3f2b401b1a0156
|
cli/command.py
|
cli/command.py
|
"""
In this module we define the interface between the cli input provided
by the user and the analytics required by the user
"""
def cmd_list_basic(args):
"""
List the basic analytics commands and their summary
"""
pass
def cmd_list_all(args):
"""
List all the analytics commands and their summary
"""
pass
def cmd_run_basic(args):
"""
Run basic analytics
"""
pass
|
Define interface between cli and data analysis
|
Define interface between cli and data analysis
|
Python
|
mit
|
McGillX/edx_data_research,McGillX/edx_data_research,McGillX/edx_data_research
|
Define interface between cli and data analysis
|
"""
In this module we define the interface between the cli input provided
by the user and the analytics required by the user
"""
def cmd_list_basic(args):
"""
List the basic analytics commands and their summary
"""
pass
def cmd_list_all(args):
"""
List all the analytics commands and their summary
"""
pass
def cmd_run_basic(args):
"""
Run basic analytics
"""
pass
|
<commit_before><commit_msg>Define interface between cli and data analysis<commit_after>
|
"""
In this module we define the interface between the cli input provided
by the user and the analytics required by the user
"""
def cmd_list_basic(args):
"""
List the basic analytics commands and their summary
"""
pass
def cmd_list_all(args):
"""
List all the analytics commands and their summary
"""
pass
def cmd_run_basic(args):
"""
Run basic analytics
"""
pass
|
Define interface between cli and data analysis"""
In this module we define the interface between the cli input provided
by the user and the analytics required by the user
"""
def cmd_list_basic(args):
"""
List the basic analytics commands and their summary
"""
pass
def cmd_list_all(args):
"""
List all the analytics commands and their summary
"""
pass
def cmd_run_basic(args):
"""
Run basic analytics
"""
pass
|
<commit_before><commit_msg>Define interface between cli and data analysis<commit_after>"""
In this module we define the interface between the cli input provided
by the user and the analytics required by the user
"""
def cmd_list_basic(args):
"""
List the basic analytics commands and their summary
"""
pass
def cmd_list_all(args):
"""
List all the analytics commands and their summary
"""
pass
def cmd_run_basic(args):
"""
Run basic analytics
"""
pass
|
|
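A hedged sketch of how the stub interface above might be wired to argparse subcommands; the subcommand names, program name and import path are assumptions made for illustration.
# Sketch: map each cmd_* handler to a subcommand and dispatch on the parsed args.
import argparse
from cli.command import cmd_list_basic, cmd_list_all, cmd_run_basic
parser = argparse.ArgumentParser(prog='edx-analytics')
subparsers = parser.add_subparsers(dest='command')
for name, handler in [('list-basic', cmd_list_basic),
                      ('list-all', cmd_list_all),
                      ('run-basic', cmd_run_basic)]:
    sub = subparsers.add_parser(name)
    sub.set_defaults(func=handler)  # each handler receives the parsed namespace
args = parser.parse_args()
args.func(args)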
87c4a770b717fdfca819ce387163b0282a839ada
|
pombola/south_africa/management/commands/south_africa_sync_wikidata_ids_from_everypolitician.py
|
pombola/south_africa/management/commands/south_africa_sync_wikidata_ids_from_everypolitician.py
|
from everypolitician import EveryPolitician
from django.core.management.base import BaseCommand
from pombola.core.models import Person
class Command(BaseCommand):
help = "Sync Wikidata IDs from Everypolitician to Person's identifiers array"
def add_arguments(self, parser):
parser.add_argument('everypolitician_countries_json_git_ref',
default='master', nargs='?',
help="A git ref from the everypolitician-data repo")
def handle(self, **options):
verbose_level = options['verbosity']
url_template = ('https://cdn.rawgit.com/everypolitician/everypolitician-data/'
'{git_ref}/countries.json')
url = url_template.format(git_ref=options['everypolitician_countries_json_git_ref'])
ep = EveryPolitician(countries_json_url=url)
south_africa_assembly = ep.country('South-Africa').legislature('Assembly').popolo()
id_lookup = {}
for popolo_person in south_africa_assembly.persons:
id_lookup[popolo_person.identifier_value('peoples_assembly')] = popolo_person.identifier_value('wikidata')
error_msg = u"No EveryPolitician UUID found for {0.id} {0.name} https://www.pa.org.za/person/{0.slug}/\n"
for person in Person.objects.filter(hidden=False):
wikidata_id = id_lookup.get(str(person.id))
if wikidata_id is None:
verbose_level > 1 and self.stderr.write(error_msg.format(person))
continue
identifier, created = person.identifiers.get_or_create(
scheme='wikidata',
identifier=wikidata_id,
)
if verbose_level > 0:
if created:
msg = u"Created new identifier for {name}: {identifier}"
else:
msg = u"Existing identifier found for {name}: {identifier}"
self.stdout.write(msg.format(name=person.name, identifier=identifier.identifier))
|
Add script to import Wikidata IDs from EveryPolitician
|
Add script to import Wikidata IDs from EveryPolitician
|
Python
|
agpl-3.0
|
mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola
|
Add script to import Wikidata IDs from EveryPolitician
|
from everypolitician import EveryPolitician
from django.core.management.base import BaseCommand
from pombola.core.models import Person
class Command(BaseCommand):
help = "Sync Wikidata IDs from Everypolitician to Person's identifiers array"
def add_arguments(self, parser):
parser.add_argument('everypolitician_countries_json_git_ref',
default='master', nargs='?',
help="A git ref from the everypolitician-data repo")
def handle(self, **options):
verbose_level = options['verbosity']
url_template = ('https://cdn.rawgit.com/everypolitician/everypolitician-data/'
'{git_ref}/countries.json')
url = url_template.format(git_ref=options['everypolitician_countries_json_git_ref'])
ep = EveryPolitician(countries_json_url=url)
south_africa_assembly = ep.country('South-Africa').legislature('Assembly').popolo()
id_lookup = {}
for popolo_person in south_africa_assembly.persons:
id_lookup[popolo_person.identifier_value('peoples_assembly')] = popolo_person.identifier_value('wikidata')
error_msg = u"No EveryPolitician UUID found for {0.id} {0.name} https://www.pa.org.za/person/{0.slug}/\n"
for person in Person.objects.filter(hidden=False):
wikidata_id = id_lookup.get(str(person.id))
if wikidata_id is None:
verbose_level > 1 and self.stderr.write(error_msg.format(person))
continue
identifier, created = person.identifiers.get_or_create(
scheme='wikidata',
identifier=wikidata_id,
)
if verbose_level > 0:
if created:
msg = u"Created new identifier for {name}: {identifier}"
else:
msg = u"Existing identifier found for {name}: {identifier}"
self.stdout.write(msg.format(name=person.name, identifier=identifier.identifier))
|
<commit_before><commit_msg>Add script to import Wikidata IDs from EveryPolitician<commit_after>
|
from everypolitician import EveryPolitician
from django.core.management.base import BaseCommand
from pombola.core.models import Person
class Command(BaseCommand):
help = "Sync Wikidata IDs from Everypolitician to Person's identifiers array"
def add_arguments(self, parser):
parser.add_argument('everypolitician_countries_json_git_ref',
default='master', nargs='?',
help="A git ref from the everypolitician-data repo")
def handle(self, **options):
verbose_level = options['verbosity']
url_template = ('https://cdn.rawgit.com/everypolitician/everypolitician-data/'
'{git_ref}/countries.json')
url = url_template.format(git_ref=options['everypolitician_countries_json_git_ref'])
ep = EveryPolitician(countries_json_url=url)
south_africa_assembly = ep.country('South-Africa').legislature('Assembly').popolo()
id_lookup = {}
for popolo_person in south_africa_assembly.persons:
id_lookup[popolo_person.identifier_value('peoples_assembly')] = popolo_person.identifier_value('wikidata')
error_msg = u"No EveryPolitician UUID found for {0.id} {0.name} https://www.pa.org.za/person/{0.slug}/\n"
for person in Person.objects.filter(hidden=False):
wikidata_id = id_lookup.get(str(person.id))
if wikidata_id is None:
verbose_level > 1 and self.stderr.write(error_msg.format(person))
continue
identifier, created = person.identifiers.get_or_create(
scheme='wikidata',
identifier=wikidata_id,
)
if verbose_level > 0:
if created:
msg = u"Created new identifier for {name}: {identifier}"
else:
msg = u"Existing identifier found for {name}: {identifier}"
self.stdout.write(msg.format(name=person.name, identifier=identifier.identifier))
|
Add script to import Wikidata IDs from EveryPoliticianfrom everypolitician import EveryPolitician
from django.core.management.base import BaseCommand
from pombola.core.models import Person
class Command(BaseCommand):
help = "Sync Wikidata IDs from Everypolitician to Person's identifiers array"
def add_arguments(self, parser):
parser.add_argument('everypolitician_countries_json_git_ref',
default='master', nargs='?',
help="A git ref from the everypolitician-data repo")
def handle(self, **options):
verbose_level = options['verbosity']
url_template = ('https://cdn.rawgit.com/everypolitician/everypolitician-data/'
'{git_ref}/countries.json')
url = url_template.format(git_ref=options['everypolitician_countries_json_git_ref'])
ep = EveryPolitician(countries_json_url=url)
south_africa_assembly = ep.country('South-Africa').legislature('Assembly').popolo()
id_lookup = {}
for popolo_person in south_africa_assembly.persons:
id_lookup[popolo_person.identifier_value('peoples_assembly')] = popolo_person.identifier_value('wikidata')
error_msg = u"No EveryPolitician UUID found for {0.id} {0.name} https://www.pa.org.za/person/{0.slug}/\n"
for person in Person.objects.filter(hidden=False):
wikidata_id = id_lookup.get(str(person.id))
if wikidata_id is None:
verbose_level > 1 and self.stderr.write(error_msg.format(person))
continue
identifier, created = person.identifiers.get_or_create(
scheme='wikidata',
identifier=wikidata_id,
)
if verbose_level > 0:
if created:
msg = u"Created new identifier for {name}: {identifier}"
else:
msg = u"Existing identifier found for {name}: {identifier}"
self.stdout.write(msg.format(name=person.name, identifier=identifier.identifier))
|
<commit_before><commit_msg>Add script to import Wikidata IDs from EveryPolitician<commit_after>from everypolitician import EveryPolitician
from django.core.management.base import BaseCommand
from pombola.core.models import Person
class Command(BaseCommand):
help = "Sync Wikidata IDs from Everypolitician to Person's identifiers array"
def add_arguments(self, parser):
parser.add_argument('everypolitician_countries_json_git_ref',
default='master', nargs='?',
help="A git ref from the everypolitician-data repo")
def handle(self, **options):
verbose_level = options['verbosity']
url_template = ('https://cdn.rawgit.com/everypolitician/everypolitician-data/'
'{git_ref}/countries.json')
url = url_template.format(git_ref=options['everypolitician_countries_json_git_ref'])
ep = EveryPolitician(countries_json_url=url)
south_africa_assembly = ep.country('South-Africa').legislature('Assembly').popolo()
id_lookup = {}
for popolo_person in south_africa_assembly.persons:
id_lookup[popolo_person.identifier_value('peoples_assembly')] = popolo_person.identifier_value('wikidata')
error_msg = u"No EveryPolitician UUID found for {0.id} {0.name} https://www.pa.org.za/person/{0.slug}/\n"
for person in Person.objects.filter(hidden=False):
wikidata_id = id_lookup.get(str(person.id))
if wikidata_id is None:
verbose_level > 1 and self.stderr.write(error_msg.format(person))
continue
identifier, created = person.identifiers.get_or_create(
scheme='wikidata',
identifier=wikidata_id,
)
if verbose_level > 0:
if created:
msg = u"Created new identifier for {name}: {identifier}"
else:
msg = u"Existing identifier found for {name}: {identifier}"
self.stdout.write(msg.format(name=person.name, identifier=identifier.identifier))
|
|
733d1e49a68217b81654d3955e0066c590fe988d
|
toolkit/util/management/commands/delete_non_members_who_dont_get_the_mailout.py
|
toolkit/util/management/commands/delete_non_members_who_dont_get_the_mailout.py
|
# https://docs.djangoproject.com/en/1.11/howto/custom-management-commands/
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from toolkit.members.models import Member
class Command(BaseCommand):
help = "Delete non-members who don't get the members mailout"
def handle(self, *args, **options):
dead_wood = (Member.objects.filter(email__isnull=False)
.exclude(email='')
.exclude(mailout_failed=True)
.exclude(mailout=True)
.exclude(is_member=True)
)
self.stdout.write('Deleting...')
for member in dead_wood:
self.stdout.write('%s <%s> joined %s' % (
member.name,
member.email,
member.created_at)
)
# member.delete()
self.stdout.write(self.style.SUCCESS(
'\nDeleted %d non-members\n' % len(dead_wood)))
|
Add manage.py utility to delete non members who dont get the mailout
|
Add manage.py utility to delete non members who dont get the mailout
|
Python
|
agpl-3.0
|
BenMotz/cubetoolkit,BenMotz/cubetoolkit,BenMotz/cubetoolkit,BenMotz/cubetoolkit
|
Add manage.py utility to delete non members who dont get the mailout
|
# https://docs.djangoproject.com/en/1.11/howto/custom-management-commands/
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from toolkit.members.models import Member
class Command(BaseCommand):
help = "Delete non-members who don't get the members mailout"
def handle(self, *args, **options):
dead_wood = (Member.objects.filter(email__isnull=False)
.exclude(email='')
.exclude(mailout_failed=True)
.exclude(mailout=True)
.exclude(is_member=True)
)
self.stdout.write('Deleting...')
for member in dead_wood:
self.stdout.write('%s <%s> joined %s' % (
member.name,
member.email,
member.created_at)
)
# member.delete()
self.stdout.write(self.style.SUCCESS(
'\nDeleted %d non-members\n' % len(dead_wood)))
|
<commit_before><commit_msg>Add manage.py utility to delete non members who dont get the mailout<commit_after>
|
# https://docs.djangoproject.com/en/1.11/howto/custom-management-commands/
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from toolkit.members.models import Member
class Command(BaseCommand):
help = "Delete non-members who don't get the members mailout"
def handle(self, *args, **options):
dead_wood = (Member.objects.filter(email__isnull=False)
.exclude(email='')
.exclude(mailout_failed=True)
.exclude(mailout=True)
.exclude(is_member=True)
)
self.stdout.write('Deleting...')
for member in dead_wood:
self.stdout.write('%s <%s> joined %s' % (
member.name,
member.email,
member.created_at)
)
# member.delete()
self.stdout.write(self.style.SUCCESS(
'\nDeleted %d non-members\n' % len(dead_wood)))
|
Add manage.py utility to delete non members who dont get the mailout# https://docs.djangoproject.com/en/1.11/howto/custom-management-commands/
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from toolkit.members.models import Member
class Command(BaseCommand):
help = "Delete non-members who don't get the members mailout"
def handle(self, *args, **options):
dead_wood = (Member.objects.filter(email__isnull=False)
.exclude(email='')
.exclude(mailout_failed=True)
.exclude(mailout=True)
.exclude(is_member=True)
)
self.stdout.write('Deleting...')
for member in dead_wood:
self.stdout.write('%s <%s> joined %s' % (
member.name,
member.email,
member.created_at)
)
# member.delete()
self.stdout.write(self.style.SUCCESS(
'\nDeleted %d non-members\n' % len(dead_wood)))
|
<commit_before><commit_msg>Add manage.py utility to delete non members who dont get the mailout<commit_after># https://docs.djangoproject.com/en/1.11/howto/custom-management-commands/
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from toolkit.members.models import Member
class Command(BaseCommand):
help = "Delete non-members who don't get the members mailout"
def handle(self, *args, **options):
dead_wood = (Member.objects.filter(email__isnull=False)
.exclude(email='')
.exclude(mailout_failed=True)
.exclude(mailout=True)
.exclude(is_member=True)
)
self.stdout.write('Deleting...')
for member in dead_wood:
self.stdout.write('%s <%s> joined %s' % (
member.name,
member.email,
member.created_at)
)
# member.delete()
self.stdout.write(self.style.SUCCESS(
'\nDeleted %d non-members\n' % len(dead_wood)))
|
|
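A hedged variant sketch of the command above with an opt-in --delete flag, since the record keeps member.delete() commented out; the flag name and messages are assumptions, while the queryset mirrors the original.
# Sketch: dry run by default, bulk delete only when --delete is passed explicitly.
from django.core.management.base import BaseCommand
from toolkit.members.models import Member
class Command(BaseCommand):
    help = "Delete non-members who don't get the members mailout (dry run by default)"
    def add_arguments(self, parser):
        parser.add_argument('--delete', action='store_true',
                            help='actually delete the matching members')
    def handle(self, *args, **options):
        dead_wood = (Member.objects.filter(email__isnull=False)
                     .exclude(email='')
                     .exclude(mailout_failed=True)
                     .exclude(mailout=True)
                     .exclude(is_member=True))
        if options['delete']:
            deleted, _ = dead_wood.delete()  # QuerySet.delete() returns (total, per-model counts)
            self.stdout.write(self.style.SUCCESS('Deleted %d non-members' % deleted))
        else:
            self.stdout.write('Dry run: %d non-members match' % dead_wood.count())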
d8ac6c5d9f4bf39eec590cd1f1b0d477a96318df
|
correct_ocr.py
|
correct_ocr.py
|
import glob
from count_labels import load_data
from collections import Counter
from bs4 import BeautifulSoup
import codecs
from fuzzywuzzy import process
# create word list
words = Counter()
data_dir = '/home/jvdzwaan/data/embem/txt/corpus_annotation/'
files = glob.glob('{}*.txt'.format(data_dir))
for i, file_ in enumerate(files):
#print i+1, file_
X_data, Y_data = load_data(file_)
for line in X_data:
words.update([unicode(w.lower()) for w in line.decode('utf-8').split()[1:] if len(w) > 1])
print len(words.keys())
print words.most_common(10)
# load text to be corrected
text = '/home/jvdzwaan/Downloads/zip/ticcl.xml'
with codecs.open(text, 'r', 'utf8') as f:
soup = BeautifulSoup(f, 'xml')
lines = soup.find_all('t')
for line in lines:
if line.text:
for w in line.text.split():
repl, score = process.extractOne(w.lower(), words.keys())
print w, repl, score
|
Add test script to replace ocr mistakes
|
Add test script to replace ocr mistakes
Added a script that finds matches for ocr mistakes using the fuzzywuzzy
library (based on Levenshtein distance) and a list of words in the
annotated texts. It seems only more noise is introduced.
|
Python
|
apache-2.0
|
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
|
Add test script to replace ocr mistakes
Added a script that finds matches for ocr mistakes using the fuzzywuzzy
library (based on Levenshtein distance) and a list of words in the
annotated texts. It seems only more noise is introduced.
|
import glob
from count_labels import load_data
from collections import Counter
from bs4 import BeautifulSoup
import codecs
from fuzzywuzzy import process
# create word list
words = Counter()
data_dir = '/home/jvdzwaan/data/embem/txt/corpus_annotation/'
files = glob.glob('{}*.txt'.format(data_dir))
for i, file_ in enumerate(files):
#print i+1, file_
X_data, Y_data = load_data(file_)
for line in X_data:
words.update([unicode(w.lower()) for w in line.decode('utf-8').split()[1:] if len(w) > 1])
print len(words.keys())
print words.most_common(10)
# load text to be corrected
text = '/home/jvdzwaan/Downloads/zip/ticcl.xml'
with codecs.open(text, 'r', 'utf8') as f:
soup = BeautifulSoup(f, 'xml')
lines = soup.find_all('t')
for line in lines:
if line.text:
for w in line.text.split():
repl, score = process.extractOne(w.lower(), words.keys())
print w, repl, score
|
<commit_before><commit_msg>Add test script to replace ocr mistakes
Added a script that finds matches for ocr mistakes using the fuzzywuzzy
library (based on Levenshtein distance) and a list of words in the
annotated texts. It seems only more noise is introduced.<commit_after>
|
import glob
from count_labels import load_data
from collections import Counter
from bs4 import BeautifulSoup
import codecs
from fuzzywuzzy import process
# create word list
words = Counter()
data_dir = '/home/jvdzwaan/data/embem/txt/corpus_annotation/'
files = glob.glob('{}*.txt'.format(data_dir))
for i, file_ in enumerate(files):
#print i+1, file_
X_data, Y_data = load_data(file_)
for line in X_data:
words.update([unicode(w.lower()) for w in line.decode('utf-8').split()[1:] if len(w) > 1])
print len(words.keys())
print words.most_common(10)
# load text to be corrected
text = '/home/jvdzwaan/Downloads/zip/ticcl.xml'
with codecs.open(text, 'r', 'utf8') as f:
soup = BeautifulSoup(f, 'xml')
lines = soup.find_all('t')
for line in lines:
if line.text:
for w in line.text.split():
repl, score = process.extractOne(w.lower(), words.keys())
print w, repl, score
|
Add test script to replace ocr mistakes
Added a script that finds matches for ocr mistakes using the fuzzywuzzy
library (based on Levenshtein distance) and a list of words in the
annotated texts. It seems only more noise is introduced.import glob
from count_labels import load_data
from collections import Counter
from bs4 import BeautifulSoup
import codecs
from fuzzywuzzy import process
# create word list
words = Counter()
data_dir = '/home/jvdzwaan/data/embem/txt/corpus_annotation/'
files = glob.glob('{}*.txt'.format(data_dir))
for i, file_ in enumerate(files):
#print i+1, file_
X_data, Y_data = load_data(file_)
for line in X_data:
words.update([unicode(w.lower()) for w in line.decode('utf-8').split()[1:] if len(w) > 1])
print len(words.keys())
print words.most_common(10)
# load text to be corrected
text = '/home/jvdzwaan/Downloads/zip/ticcl.xml'
with codecs.open(text, 'r', 'utf8') as f:
soup = BeautifulSoup(f, 'xml')
lines = soup.find_all('t')
for line in lines:
if line.text:
for w in line.text.split():
repl, score = process.extractOne(w.lower(), words.keys())
print w, repl, score
|
<commit_before><commit_msg>Add test script to replace ocr mistakes
Added a script that finds matches for ocr mistakes using the fuzzywuzzy
library (based on Levenshtein distance) and a list of words in the
annotated texts. It seems only more noise is introduced.<commit_after>import glob
from count_labels import load_data
from collections import Counter
from bs4 import BeautifulSoup
import codecs
from fuzzywuzzy import process
# create word list
words = Counter()
data_dir = '/home/jvdzwaan/data/embem/txt/corpus_annotation/'
files = glob.glob('{}*.txt'.format(data_dir))
for i, file_ in enumerate(files):
#print i+1, file_
X_data, Y_data = load_data(file_)
for line in X_data:
words.update([unicode(w.lower()) for w in line.decode('utf-8').split()[1:] if len(w) > 1])
print len(words.keys())
print words.most_common(10)
# load text to be corrected
text = '/home/jvdzwaan/Downloads/zip/ticcl.xml'
with codecs.open(text, 'r', 'utf8') as f:
soup = BeautifulSoup(f, 'xml')
lines = soup.find_all('t')
for line in lines:
if line.text:
for w in line.text.split():
repl, score = process.extractOne(w.lower(), words.keys())
print w, repl, score
|
|
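A hedged refinement sketch for the fuzzy replacement above: fuzzywuzzy's score_cutoff can skip low-confidence matches, which limits the extra noise the commit message mentions; the word list and threshold are illustrative assumptions.
# Sketch: only accept a replacement when the match score clears a threshold.
from fuzzywuzzy import process
vocabulary = ['liefde', 'vreugde', 'droefheid']  # hypothetical word list from the annotated texts
def correct(token, threshold=90):
    match = process.extractOne(token.lower(), vocabulary, score_cutoff=threshold)
    return match[0] if match else token  # keep the original token when no confident match exists
print(correct('liefdc'))  # close to 'liefde', so it is likely replaced
print(correct('xyzzy'))   # no confident match, left untouched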
c69166d98df2f7bebb3de629eb00328a5ef699d3
|
poradnia/keys/migrations/0003_auto_20150721_0232.py
|
poradnia/keys/migrations/0003_auto_20150721_0232.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('keys', '0002_auto_20150710_2133'),
]
operations = [
migrations.AlterUniqueTogether(
name='key',
unique_together=set([('user', 'password')]),
),
]
|
Add unique to keys.AcccessKey.password per user
|
Add unique to keys.AcccessKey.password per user
|
Python
|
mit
|
watchdogpolska/poradnia,rwakulszowa/poradnia,watchdogpolska/poradnia.siecobywatelska.pl,watchdogpolska/poradnia.siecobywatelska.pl,watchdogpolska/poradnia.siecobywatelska.pl,watchdogpolska/poradnia,watchdogpolska/poradnia,watchdogpolska/poradnia,rwakulszowa/poradnia,rwakulszowa/poradnia,rwakulszowa/poradnia
|
Add unique to keys.AcccessKey.password per user
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('keys', '0002_auto_20150710_2133'),
]
operations = [
migrations.AlterUniqueTogether(
name='key',
unique_together=set([('user', 'password')]),
),
]
|
<commit_before><commit_msg>Add unique to keys.AcccessKey.password per user<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('keys', '0002_auto_20150710_2133'),
]
operations = [
migrations.AlterUniqueTogether(
name='key',
unique_together=set([('user', 'password')]),
),
]
|
Add unique to keys.AcccessKey.password per user# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('keys', '0002_auto_20150710_2133'),
]
operations = [
migrations.AlterUniqueTogether(
name='key',
unique_together=set([('user', 'password')]),
),
]
|
<commit_before><commit_msg>Add unique to keys.AcccessKey.password per user<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('keys', '0002_auto_20150710_2133'),
]
operations = [
migrations.AlterUniqueTogether(
name='key',
unique_together=set([('user', 'password')]),
),
]
|
|
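A hedged model-side counterpart of the migration above: the field definitions are assumptions for illustration, while the Meta option mirrors the AlterUniqueTogether operation.
# Sketch: declaring the same per-user uniqueness on the model itself.
from django.conf import settings
from django.db import models
class Key(models.Model):
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    password = models.CharField(max_length=100)
    class Meta:
        unique_together = (('user', 'password'),)  # matches the migration's constraint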
16a7185447e28312d5e316b92b7d3d37cb25b079
|
contrib/svn-fe/svnrdump_sim.py
|
contrib/svn-fe/svnrdump_sim.py
|
#!/usr/bin/python
"""
Simulates svnrdump by replaying an existing dump from a file, taking care
of the specified revision range.
To simulate incremental imports the environment variable SVNRMAX can be set
to the highest revision that should be available.
"""
import sys, os
def getrevlimit():
var = 'SVNRMAX'
if os.environ.has_key(var):
return os.environ[var]
return None
def writedump(url, lower, upper):
if url.startswith('sim://'):
filename = url[6:]
if filename[-1] == '/': filename = filename[:-1] #remove terminating slash
else:
raise ValueError('sim:// url required')
f = open(filename, 'r');
state = 'header'
wroterev = False
while(True):
l = f.readline()
if l == '': break
if state == 'header' and l.startswith('Revision-number: '):
state = 'prefix'
if state == 'prefix' and l == 'Revision-number: %s\n' % lower:
state = 'selection'
if not upper == 'HEAD' and state == 'selection' and l == 'Revision-number: %s\n' % upper:
break;
if state == 'header' or state == 'selection':
if state == 'selection': wroterev = True
sys.stdout.write(l)
return wroterev
if __name__ == "__main__":
if not (len(sys.argv) in (3, 4, 5)):
print "usage: %s dump URL -rLOWER:UPPER"
sys.exit(1)
if not sys.argv[1] == 'dump': raise NotImplementedError('only "dump" is suppported.')
url = sys.argv[2]
r = ('0', 'HEAD')
if len(sys.argv) == 4 and sys.argv[3][0:2] == '-r':
r = sys.argv[3][2:].lstrip().split(':')
if not getrevlimit() is None: r[1] = getrevlimit()
if writedump(url, r[0], r[1]): ret = 0
else: ret = 1
sys.exit(ret)
|
Add a svnrdump-simulator replaying a dump file for testing
|
Add a svnrdump-simulator replaying a dump file for testing
To ease testing without depending on a reachable svn server, this
compact python script mimics parts of svnrdumps behaviour. It
requires the remote url to start with sim://.
Start and end revisions are evaluated. If the requested revision
doesn't exist, as it is the case with incremental imports, if no new
commit was added, it returns 1 (like svnrdump).
To allow using the same dump file for simulating multiple incremental
imports, the highest revision can be limited by setting the environment
variable SVNRMAX to that value. This simulates the situation where
higher revs don't exist yet.
Signed-off-by: Florian Achleitner <cded5fff9f7cb3ef9bc8c685776ce437facda4c3@gmail.com>
Acked-by: David Michael Barr <e9d71f5ee7c92d6dc9e92ffdad17b8bd49418f98@rr-dav.id.au>
Signed-off-by: Junio C Hamano <a6723cc3f76163bf7adb636a73ac3b0ceb3e6b9b@pobox.com>
|
Python
|
mit
|
destenson/git,destenson/git,destenson/git,destenson/git,destenson/git,destenson/git,destenson/git,destenson/git
|
Add a svnrdump-simulator replaying a dump file for testing
To ease testing without depending on a reachable svn server, this
compact python script mimics parts of svnrdumps behaviour. It
requires the remote url to start with sim://.
Start and end revisions are evaluated. If the requested revision
doesn't exist, as it is the case with incremental imports, if no new
commit was added, it returns 1 (like svnrdump).
To allow using the same dump file for simulating multiple incremental
imports, the highest revision can be limited by setting the environment
variable SVNRMAX to that value. This simulates the situation where
higher revs don't exist yet.
Signed-off-by: Florian Achleitner <cded5fff9f7cb3ef9bc8c685776ce437facda4c3@gmail.com>
Acked-by: David Michael Barr <e9d71f5ee7c92d6dc9e92ffdad17b8bd49418f98@rr-dav.id.au>
Signed-off-by: Junio C Hamano <a6723cc3f76163bf7adb636a73ac3b0ceb3e6b9b@pobox.com>
|
#!/usr/bin/python
"""
Simulates svnrdump by replaying an existing dump from a file, taking care
of the specified revision range.
To simulate incremental imports the environment variable SVNRMAX can be set
to the highest revision that should be available.
"""
import sys, os
def getrevlimit():
var = 'SVNRMAX'
if os.environ.has_key(var):
return os.environ[var]
return None
def writedump(url, lower, upper):
if url.startswith('sim://'):
filename = url[6:]
if filename[-1] == '/': filename = filename[:-1] #remove terminating slash
else:
raise ValueError('sim:// url required')
f = open(filename, 'r');
state = 'header'
wroterev = False
while(True):
l = f.readline()
if l == '': break
if state == 'header' and l.startswith('Revision-number: '):
state = 'prefix'
if state == 'prefix' and l == 'Revision-number: %s\n' % lower:
state = 'selection'
if not upper == 'HEAD' and state == 'selection' and l == 'Revision-number: %s\n' % upper:
break;
if state == 'header' or state == 'selection':
if state == 'selection': wroterev = True
sys.stdout.write(l)
return wroterev
if __name__ == "__main__":
if not (len(sys.argv) in (3, 4, 5)):
print "usage: %s dump URL -rLOWER:UPPER"
sys.exit(1)
if not sys.argv[1] == 'dump': raise NotImplementedError('only "dump" is suppported.')
url = sys.argv[2]
r = ('0', 'HEAD')
if len(sys.argv) == 4 and sys.argv[3][0:2] == '-r':
r = sys.argv[3][2:].lstrip().split(':')
if not getrevlimit() is None: r[1] = getrevlimit()
if writedump(url, r[0], r[1]): ret = 0
else: ret = 1
sys.exit(ret)
|
<commit_before><commit_msg>Add a svnrdump-simulator replaying a dump file for testing
To ease testing without depending on a reachable svn server, this
compact python script mimics parts of svnrdumps behaviour. It
requires the remote url to start with sim://.
Start and end revisions are evaluated. If the requested revision
doesn't exist, as it is the case with incremental imports, if no new
commit was added, it returns 1 (like svnrdump).
To allow using the same dump file for simulating multiple incremental
imports, the highest revision can be limited by setting the environment
variable SVNRMAX to that value. This simulates the situation where
higher revs don't exist yet.
Signed-off-by: Florian Achleitner <cded5fff9f7cb3ef9bc8c685776ce437facda4c3@gmail.com>
Acked-by: David Michael Barr <e9d71f5ee7c92d6dc9e92ffdad17b8bd49418f98@rr-dav.id.au>
Signed-off-by: Junio C Hamano <a6723cc3f76163bf7adb636a73ac3b0ceb3e6b9b@pobox.com><commit_after>
|
#!/usr/bin/python
"""
Simulates svnrdump by replaying an existing dump from a file, taking care
of the specified revision range.
To simulate incremental imports the environment variable SVNRMAX can be set
to the highest revision that should be available.
"""
import sys, os
def getrevlimit():
var = 'SVNRMAX'
if os.environ.has_key(var):
return os.environ[var]
return None
def writedump(url, lower, upper):
if url.startswith('sim://'):
filename = url[6:]
if filename[-1] == '/': filename = filename[:-1] #remove terminating slash
else:
raise ValueError('sim:// url required')
f = open(filename, 'r');
state = 'header'
wroterev = False
while(True):
l = f.readline()
if l == '': break
if state == 'header' and l.startswith('Revision-number: '):
state = 'prefix'
if state == 'prefix' and l == 'Revision-number: %s\n' % lower:
state = 'selection'
if not upper == 'HEAD' and state == 'selection' and l == 'Revision-number: %s\n' % upper:
break;
if state == 'header' or state == 'selection':
if state == 'selection': wroterev = True
sys.stdout.write(l)
return wroterev
if __name__ == "__main__":
if not (len(sys.argv) in (3, 4, 5)):
print "usage: %s dump URL -rLOWER:UPPER"
sys.exit(1)
if not sys.argv[1] == 'dump': raise NotImplementedError('only "dump" is suppported.')
url = sys.argv[2]
r = ('0', 'HEAD')
if len(sys.argv) == 4 and sys.argv[3][0:2] == '-r':
r = sys.argv[3][2:].lstrip().split(':')
if not getrevlimit() is None: r[1] = getrevlimit()
if writedump(url, r[0], r[1]): ret = 0
else: ret = 1
sys.exit(ret)
|
Add a svnrdump-simulator replaying a dump file for testing
To ease testing without depending on a reachable svn server, this
compact python script mimics parts of svnrdumps behaviour. It
requires the remote url to start with sim://.
Start and end revisions are evaluated. If the requested revision
doesn't exist, as it is the case with incremental imports, if no new
commit was added, it returns 1 (like svnrdump).
To allow using the same dump file for simulating multiple incremental
imports, the highest revision can be limited by setting the environment
variable SVNRMAX to that value. This simulates the situation where
higher revs don't exist yet.
Signed-off-by: Florian Achleitner <cded5fff9f7cb3ef9bc8c685776ce437facda4c3@gmail.com>
Acked-by: David Michael Barr <e9d71f5ee7c92d6dc9e92ffdad17b8bd49418f98@rr-dav.id.au>
Signed-off-by: Junio C Hamano <a6723cc3f76163bf7adb636a73ac3b0ceb3e6b9b@pobox.com>#!/usr/bin/python
"""
Simulates svnrdump by replaying an existing dump from a file, taking care
of the specified revision range.
To simulate incremental imports the environment variable SVNRMAX can be set
to the highest revision that should be available.
"""
import sys, os
def getrevlimit():
var = 'SVNRMAX'
if os.environ.has_key(var):
return os.environ[var]
return None
def writedump(url, lower, upper):
if url.startswith('sim://'):
filename = url[6:]
if filename[-1] == '/': filename = filename[:-1] #remove terminating slash
else:
raise ValueError('sim:// url required')
f = open(filename, 'r');
state = 'header'
wroterev = False
while(True):
l = f.readline()
if l == '': break
if state == 'header' and l.startswith('Revision-number: '):
state = 'prefix'
if state == 'prefix' and l == 'Revision-number: %s\n' % lower:
state = 'selection'
if not upper == 'HEAD' and state == 'selection' and l == 'Revision-number: %s\n' % upper:
break;
if state == 'header' or state == 'selection':
if state == 'selection': wroterev = True
sys.stdout.write(l)
return wroterev
if __name__ == "__main__":
if not (len(sys.argv) in (3, 4, 5)):
print "usage: %s dump URL -rLOWER:UPPER"
sys.exit(1)
if not sys.argv[1] == 'dump': raise NotImplementedError('only "dump" is suppported.')
url = sys.argv[2]
r = ('0', 'HEAD')
if len(sys.argv) == 4 and sys.argv[3][0:2] == '-r':
r = sys.argv[3][2:].lstrip().split(':')
if not getrevlimit() is None: r[1] = getrevlimit()
if writedump(url, r[0], r[1]): ret = 0
else: ret = 1
sys.exit(ret)
|
<commit_before><commit_msg>Add a svnrdump-simulator replaying a dump file for testing
To ease testing without depending on a reachable svn server, this
compact python script mimics parts of svnrdumps behaviour. It
requires the remote url to start with sim://.
Start and end revisions are evaluated. If the requested revision
doesn't exist, as it is the case with incremental imports, if no new
commit was added, it returns 1 (like svnrdump).
To allow using the same dump file for simulating multiple incremental
imports, the highest revision can be limited by setting the environment
variable SVNRMAX to that value. This simulates the situation where
higher revs don't exist yet.
Signed-off-by: Florian Achleitner <cded5fff9f7cb3ef9bc8c685776ce437facda4c3@gmail.com>
Acked-by: David Michael Barr <e9d71f5ee7c92d6dc9e92ffdad17b8bd49418f98@rr-dav.id.au>
Signed-off-by: Junio C Hamano <a6723cc3f76163bf7adb636a73ac3b0ceb3e6b9b@pobox.com><commit_after>#!/usr/bin/python
"""
Simulates svnrdump by replaying an existing dump from a file, taking care
of the specified revision range.
To simulate incremental imports the environment variable SVNRMAX can be set
to the highest revision that should be available.
"""
import sys, os
def getrevlimit():
var = 'SVNRMAX'
if os.environ.has_key(var):
return os.environ[var]
return None
def writedump(url, lower, upper):
if url.startswith('sim://'):
filename = url[6:]
if filename[-1] == '/': filename = filename[:-1] #remove terminating slash
else:
raise ValueError('sim:// url required')
f = open(filename, 'r');
state = 'header'
wroterev = False
while(True):
l = f.readline()
if l == '': break
if state == 'header' and l.startswith('Revision-number: '):
state = 'prefix'
if state == 'prefix' and l == 'Revision-number: %s\n' % lower:
state = 'selection'
if not upper == 'HEAD' and state == 'selection' and l == 'Revision-number: %s\n' % upper:
break;
if state == 'header' or state == 'selection':
if state == 'selection': wroterev = True
sys.stdout.write(l)
return wroterev
if __name__ == "__main__":
if not (len(sys.argv) in (3, 4, 5)):
print "usage: %s dump URL -rLOWER:UPPER"
sys.exit(1)
if not sys.argv[1] == 'dump': raise NotImplementedError('only "dump" is suppported.')
url = sys.argv[2]
r = ('0', 'HEAD')
if len(sys.argv) == 4 and sys.argv[3][0:2] == '-r':
r = sys.argv[3][2:].lstrip().split(':')
if not getrevlimit() is None: r[1] = getrevlimit()
if writedump(url, r[0], r[1]): ret = 0
else: ret = 1
sys.exit(ret)
|
|
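A hedged usage sketch for the simulator above: it replays a local dump through a sim:// URL and caps the visible history with SVNRMAX, as the commit message describes; the dump path and revision limit are examples, and the script itself targets Python 2.
# Sketch: invoke the simulator as an external process and interpret its exit code.
import os
import subprocess
env = dict(os.environ, SVNRMAX='5')  # pretend revisions above 5 do not exist yet
returncode = subprocess.call(
    ['python2', 'contrib/svn-fe/svnrdump_sim.py', 'dump', 'sim:///tmp/project.svndump', '-r0:HEAD'],
    env=env)
print('no new revisions' if returncode == 1 else 'dump written to stdout')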
2861531067e6aa8d0f25a9808d38b607b69f427f
|
utilities/unicode_to_ascii.py
|
utilities/unicode_to_ascii.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unicode to ASCII Utility
========================
"""
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import codecs
import os
import unicodedata
__copyright__ = 'Copyright (C) 2013-2018 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = ['SUBSTITUTIONS', 'unicode_to_ascii']
SUBSTITUTIONS = {
'–': '-',
'“': '"',
'”': '"',
'‘': "'",
'’': "'",
}
def unicode_to_ascii(root_directory):
"""
Recursively converts from unicode to ASCII *.py*, *.bib* and *.rst* files
in given directory.
Parameters
----------
root_directory : unicode
Directory to convert the files from unicode to ASCII.
"""
for root, dirnames, filenames in os.walk(root_directory):
for filename in filenames:
if (not filename.endswith('.py') and
not filename.endswith('.bib') and
not filename.endswith('.rst') and
filename != 'unicode_to_ascii.py'):
continue
filename = os.path.join(root, filename)
with codecs.open(filename, encoding='utf8') as file_handle:
content = file_handle.read()
with codecs.open(filename, 'w', encoding='utf8') as file_handle:
for key, value in SUBSTITUTIONS.items():
content = content.replace(key, value)
content = unicodedata.normalize('NFD', content).encode(
'ascii', 'ignore')
file_handle.write(content)
if __name__ == '__main__':
unicode_to_ascii(os.path.join('..', 'colour'))
|
Convert unicode characters to ASCII using Canonical Decomposition (NFD).
|
Convert unicode characters to ASCII using Canonical Decomposition (NFD).
|
Python
|
bsd-3-clause
|
colour-science/colour-hdri
|
Convert unicode characters to ASCII using Canonical Decomposition (NFD).
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unicode to ASCII Utility
========================
"""
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import codecs
import os
import unicodedata
__copyright__ = 'Copyright (C) 2013-2018 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = ['SUBSTITUTIONS', 'unicode_to_ascii']
SUBSTITUTIONS = {
'–': '-',
'“': '"',
'”': '"',
'‘': "'",
'’': "'",
}
def unicode_to_ascii(root_directory):
"""
Recursively converts from unicode to ASCII *.py*, *.bib* and *.rst* files
in given directory.
Parameters
----------
root_directory : unicode
Directory to convert the files from unicode to ASCII.
"""
for root, dirnames, filenames in os.walk(root_directory):
for filename in filenames:
if (not filename.endswith('.py') and
not filename.endswith('.bib') and
not filename.endswith('.rst') and
filename != 'unicode_to_ascii.py'):
continue
filename = os.path.join(root, filename)
with codecs.open(filename, encoding='utf8') as file_handle:
content = file_handle.read()
with codecs.open(filename, 'w', encoding='utf8') as file_handle:
for key, value in SUBSTITUTIONS.items():
content = content.replace(key, value)
content = unicodedata.normalize('NFD', content).encode(
'ascii', 'ignore')
file_handle.write(content)
if __name__ == '__main__':
unicode_to_ascii(os.path.join('..', 'colour'))
|
<commit_before><commit_msg>Convert unicode characters to ASCII using Canonical Decomposition (NFD).<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unicode to ASCII Utility
========================
"""
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import codecs
import os
import unicodedata
__copyright__ = 'Copyright (C) 2013-2018 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = ['SUBSTITUTIONS', 'unicode_to_ascii']
SUBSTITUTIONS = {
'–': '-',
'“': '"',
'”': '"',
'‘': "'",
'’': "'",
}
def unicode_to_ascii(root_directory):
"""
Recursively converts from unicode to ASCII *.py*, *.bib* and *.rst* files
in given directory.
Parameters
----------
root_directory : unicode
Directory to convert the files from unicode to ASCII.
"""
for root, dirnames, filenames in os.walk(root_directory):
for filename in filenames:
if (not filename.endswith('.py') and
not filename.endswith('.bib') and
not filename.endswith('.rst') and
filename != 'unicode_to_ascii.py'):
continue
filename = os.path.join(root, filename)
with codecs.open(filename, encoding='utf8') as file_handle:
content = file_handle.read()
with codecs.open(filename, 'w', encoding='utf8') as file_handle:
for key, value in SUBSTITUTIONS.items():
content = content.replace(key, value)
content = unicodedata.normalize('NFD', content).encode(
'ascii', 'ignore')
file_handle.write(content)
if __name__ == '__main__':
unicode_to_ascii(os.path.join('..', 'colour'))
|
Convert unicode characters to ASCII using Canonical Decomposition (NFD).#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unicode to ASCII Utility
========================
"""
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import codecs
import os
import unicodedata
__copyright__ = 'Copyright (C) 2013-2018 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = ['SUBSTITUTIONS', 'unicode_to_ascii']
SUBSTITUTIONS = {
'–': '-',
'“': '"',
'”': '"',
'‘': "'",
'’': "'",
}
def unicode_to_ascii(root_directory):
"""
Recursively converts from unicode to ASCII *.py*, *.bib* and *.rst* files
in given directory.
Parameters
----------
root_directory : unicode
Directory to convert the files from unicode to ASCII.
"""
for root, dirnames, filenames in os.walk(root_directory):
for filename in filenames:
if (not filename.endswith('.py') and
not filename.endswith('.bib') and
not filename.endswith('.rst') and
filename != 'unicode_to_ascii.py'):
continue
filename = os.path.join(root, filename)
with codecs.open(filename, encoding='utf8') as file_handle:
content = file_handle.read()
with codecs.open(filename, 'w', encoding='utf8') as file_handle:
for key, value in SUBSTITUTIONS.items():
content = content.replace(key, value)
content = unicodedata.normalize('NFD', content).encode(
'ascii', 'ignore')
file_handle.write(content)
if __name__ == '__main__':
unicode_to_ascii(os.path.join('..', 'colour'))
|
<commit_before><commit_msg>Convert unicode characters to ASCII using Canonical Decomposition (NFD).<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unicode to ASCII Utility
========================
"""
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import codecs
import os
import unicodedata
__copyright__ = 'Copyright (C) 2013-2018 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = ['SUBSTITUTIONS', 'unicode_to_ascii']
SUBSTITUTIONS = {
'–': '-',
'“': '"',
'”': '"',
'‘': "'",
'’': "'",
}
def unicode_to_ascii(root_directory):
"""
Recursively converts from unicode to ASCII *.py*, *.bib* and *.rst* files
in given directory.
Parameters
----------
root_directory : unicode
Directory to convert the files from unicode to ASCII.
"""
for root, dirnames, filenames in os.walk(root_directory):
for filename in filenames:
if (not filename.endswith('.py') and
not filename.endswith('.bib') and
not filename.endswith('.rst') and
filename != 'unicode_to_ascii.py'):
continue
filename = os.path.join(root, filename)
with codecs.open(filename, encoding='utf8') as file_handle:
content = file_handle.read()
with codecs.open(filename, 'w', encoding='utf8') as file_handle:
for key, value in SUBSTITUTIONS.items():
content = content.replace(key, value)
content = unicodedata.normalize('NFD', content).encode(
'ascii', 'ignore')
file_handle.write(content)
if __name__ == '__main__':
unicode_to_ascii(os.path.join('..', 'colour'))
|
|
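A minimal hedged sketch of the core transformation used above: punctuation substitution followed by NFD decomposition and an ASCII encode that drops the combining marks; the sample string is an assumption.
# Sketch: the same substitution table plus normalize/encode, applied to one string.
import unicodedata
SUBSTITUTIONS = {u'\u2013': '-', u'\u201c': '"', u'\u201d': '"', u'\u2018': "'", u'\u2019': "'"}
def to_ascii(text):
    for key, value in SUBSTITUTIONS.items():
        text = text.replace(key, value)
    return unicodedata.normalize('NFD', text).encode('ascii', 'ignore')
print(to_ascii(u'\u00c9cart \u2013 \u201cquoted\u201d'))  # -> b'Ecart - "quoted"'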
1b38f4f7bf3e485c319626ee631a391271cb4ec3
|
examples/imagenet/compute_mean.py
|
examples/imagenet/compute_mean.py
|
#!/usr/bin/env python
import argparse
import sys
import cv2
import numpy
import six.moves.cPickle as pickle
parser = argparse.ArgumentParser(description='Compute images mean array')
parser.add_argument('dataset', help='Path to training image-label list file')
parser.add_argument('--output', '-o', default='mean.npy',
help='path to output mean array')
args = parser.parse_args()
sum_image = None
count = 0
for line in open(args.dataset):
filepath = line.strip().split()[0]
image = cv2.imread(filepath)
image = image[:, :, [2, 1, 0]].transpose(2, 0, 1)
if sum_image is None:
sum_image = numpy.ndarray(image.shape, dtype=numpy.float32)
sum_image[:] = image
else:
sum_image += image
count += 1
sys.stderr.write('\r{}'.format(count))
sys.stderr.flush()
sys.stderr.write('\n')
mean = sum_image / count
pickle.dump(mean, open(args.output, 'wb'), -1)
|
#!/usr/bin/env python
import argparse
import sys
import cv2
import numpy
import six.moves.cPickle as pickle
parser = argparse.ArgumentParser(description='Compute images mean array')
parser.add_argument('dataset', help='Path to training image-label list file')
parser.add_argument('--output', '-o', default='mean.npy',
help='path to output mean array')
args = parser.parse_args()
sum_image = None
count = 0
for line in open(args.dataset):
filepath = line.strip().split()[0]
image = cv2.imread(filepath)
image = image[:, :, [2, 1, 0]].transpose(2, 0, 1)
if sum_image is None:
sum_image = numpy.ndarray(image.shape, dtype=numpy.float32)
sum_image[:] = image
else:
sum_image += image
count += 1
sys.stderr.write('\r{}'.format(count))
sys.stderr.flush()
sys.stderr.write('\n')
mean = sum_image / count
pickle.dump(mean, open(args.output, 'wb'), -1)
|
Add one more empty line to end of import to adjust to H306
|
Add one more empty line to end of import to adjust to H306
|
Python
|
mit
|
sinhrks/chainer,kikusu/chainer,ktnyt/chainer,hvy/chainer,muupan/chainer,laysakura/chainer,jnishi/chainer,1986ks/chainer,ktnyt/chainer,okuta/chainer,AlpacaDB/chainer,wkentaro/chainer,cupy/cupy,ysekky/chainer,keisuke-umezawa/chainer,cemoody/chainer,umitanuki/chainer,delta2323/chainer,chainer/chainer,kashif/chainer,jnishi/chainer,kuwa32/chainer,kikusu/chainer,tereka114/chainer,minhpqn/chainer,hvy/chainer,ktnyt/chainer,anaruse/chainer,wkentaro/chainer,woodshop/chainer,cupy/cupy,hvy/chainer,okuta/chainer,okuta/chainer,ktnyt/chainer,pfnet/chainer,ronekko/chainer,woodshop/complex-chainer,ikasumi/chainer,niboshi/chainer,niboshi/chainer,jnishi/chainer,niboshi/chainer,kiyukuta/chainer,wavelets/chainer,jfsantos/chainer,keisuke-umezawa/chainer,benob/chainer,hidenori-t/chainer,tscohen/chainer,aonotas/chainer,truongdq/chainer,yanweifu/chainer,hvy/chainer,muupan/chainer,benob/chainer,keisuke-umezawa/chainer,elviswf/chainer,cupy/cupy,cupy/cupy,masia02/chainer,AlpacaDB/chainer,niboshi/chainer,t-abe/chainer,Kaisuke5/chainer,rezoo/chainer,bayerj/chainer,tigerneil/chainer,wkentaro/chainer,wkentaro/chainer,chainer/chainer,ytoyama/yans_chainer_hackathon,sinhrks/chainer,t-abe/chainer,keisuke-umezawa/chainer,okuta/chainer,truongdq/chainer,jnishi/chainer,sou81821/chainer,chainer/chainer,tkerola/chainer,chainer/chainer
|
#!/usr/bin/env python
import argparse
import sys
import cv2
import numpy
import six.moves.cPickle as pickle
parser = argparse.ArgumentParser(description='Compute images mean array')
parser.add_argument('dataset', help='Path to training image-label list file')
parser.add_argument('--output', '-o', default='mean.npy',
help='path to output mean array')
args = parser.parse_args()
sum_image = None
count = 0
for line in open(args.dataset):
filepath = line.strip().split()[0]
image = cv2.imread(filepath)
image = image[:, :, [2, 1, 0]].transpose(2, 0, 1)
if sum_image is None:
sum_image = numpy.ndarray(image.shape, dtype=numpy.float32)
sum_image[:] = image
else:
sum_image += image
count += 1
sys.stderr.write('\r{}'.format(count))
sys.stderr.flush()
sys.stderr.write('\n')
mean = sum_image / count
pickle.dump(mean, open(args.output, 'wb'), -1)
Add one more empty line to end of import to adjust to H306
|
#!/usr/bin/env python
import argparse
import sys
import cv2
import numpy
import six.moves.cPickle as pickle
parser = argparse.ArgumentParser(description='Compute images mean array')
parser.add_argument('dataset', help='Path to training image-label list file')
parser.add_argument('--output', '-o', default='mean.npy',
help='path to output mean array')
args = parser.parse_args()
sum_image = None
count = 0
for line in open(args.dataset):
filepath = line.strip().split()[0]
image = cv2.imread(filepath)
image = image[:, :, [2, 1, 0]].transpose(2, 0, 1)
if sum_image is None:
sum_image = numpy.ndarray(image.shape, dtype=numpy.float32)
sum_image[:] = image
else:
sum_image += image
count += 1
sys.stderr.write('\r{}'.format(count))
sys.stderr.flush()
sys.stderr.write('\n')
mean = sum_image / count
pickle.dump(mean, open(args.output, 'wb'), -1)
|
<commit_before>#!/usr/bin/env python
import argparse
import sys
import cv2
import numpy
import six.moves.cPickle as pickle
parser = argparse.ArgumentParser(description='Compute images mean array')
parser.add_argument('dataset', help='Path to training image-label list file')
parser.add_argument('--output', '-o', default='mean.npy',
help='path to output mean array')
args = parser.parse_args()
sum_image = None
count = 0
for line in open(args.dataset):
filepath = line.strip().split()[0]
image = cv2.imread(filepath)
image = image[:, :, [2, 1, 0]].transpose(2, 0, 1)
if sum_image is None:
sum_image = numpy.ndarray(image.shape, dtype=numpy.float32)
sum_image[:] = image
else:
sum_image += image
count += 1
sys.stderr.write('\r{}'.format(count))
sys.stderr.flush()
sys.stderr.write('\n')
mean = sum_image / count
pickle.dump(mean, open(args.output, 'wb'), -1)
<commit_msg>Add one more empty line to end of import to adjust to H306<commit_after>
|
#!/usr/bin/env python
import argparse
import sys
import cv2
import numpy
import six.moves.cPickle as pickle
parser = argparse.ArgumentParser(description='Compute images mean array')
parser.add_argument('dataset', help='Path to training image-label list file')
parser.add_argument('--output', '-o', default='mean.npy',
help='path to output mean array')
args = parser.parse_args()
sum_image = None
count = 0
for line in open(args.dataset):
filepath = line.strip().split()[0]
image = cv2.imread(filepath)
image = image[:, :, [2, 1, 0]].transpose(2, 0, 1)
if sum_image is None:
sum_image = numpy.ndarray(image.shape, dtype=numpy.float32)
sum_image[:] = image
else:
sum_image += image
count += 1
sys.stderr.write('\r{}'.format(count))
sys.stderr.flush()
sys.stderr.write('\n')
mean = sum_image / count
pickle.dump(mean, open(args.output, 'wb'), -1)
|
#!/usr/bin/env python
import argparse
import sys
import cv2
import numpy
import six.moves.cPickle as pickle
parser = argparse.ArgumentParser(description='Compute images mean array')
parser.add_argument('dataset', help='Path to training image-label list file')
parser.add_argument('--output', '-o', default='mean.npy',
help='path to output mean array')
args = parser.parse_args()
sum_image = None
count = 0
for line in open(args.dataset):
filepath = line.strip().split()[0]
image = cv2.imread(filepath)
image = image[:, :, [2, 1, 0]].transpose(2, 0, 1)
if sum_image is None:
sum_image = numpy.ndarray(image.shape, dtype=numpy.float32)
sum_image[:] = image
else:
sum_image += image
count += 1
sys.stderr.write('\r{}'.format(count))
sys.stderr.flush()
sys.stderr.write('\n')
mean = sum_image / count
pickle.dump(mean, open(args.output, 'wb'), -1)
Add one more empty line to end of import to adjust to H306#!/usr/bin/env python
import argparse
import sys
import cv2
import numpy
import six.moves.cPickle as pickle
parser = argparse.ArgumentParser(description='Compute images mean array')
parser.add_argument('dataset', help='Path to training image-label list file')
parser.add_argument('--output', '-o', default='mean.npy',
help='path to output mean array')
args = parser.parse_args()
sum_image = None
count = 0
for line in open(args.dataset):
filepath = line.strip().split()[0]
image = cv2.imread(filepath)
image = image[:, :, [2, 1, 0]].transpose(2, 0, 1)
if sum_image is None:
sum_image = numpy.ndarray(image.shape, dtype=numpy.float32)
sum_image[:] = image
else:
sum_image += image
count += 1
sys.stderr.write('\r{}'.format(count))
sys.stderr.flush()
sys.stderr.write('\n')
mean = sum_image / count
pickle.dump(mean, open(args.output, 'wb'), -1)
|
<commit_before>#!/usr/bin/env python
import argparse
import sys
import cv2
import numpy
import six.moves.cPickle as pickle
parser = argparse.ArgumentParser(description='Compute images mean array')
parser.add_argument('dataset', help='Path to training image-label list file')
parser.add_argument('--output', '-o', default='mean.npy',
help='path to output mean array')
args = parser.parse_args()
sum_image = None
count = 0
for line in open(args.dataset):
filepath = line.strip().split()[0]
image = cv2.imread(filepath)
image = image[:, :, [2, 1, 0]].transpose(2, 0, 1)
if sum_image is None:
sum_image = numpy.ndarray(image.shape, dtype=numpy.float32)
sum_image[:] = image
else:
sum_image += image
count += 1
sys.stderr.write('\r{}'.format(count))
sys.stderr.flush()
sys.stderr.write('\n')
mean = sum_image / count
pickle.dump(mean, open(args.output, 'wb'), -1)
<commit_msg>Add one more empty line to end of import to adjust to H306<commit_after>#!/usr/bin/env python
import argparse
import sys
import cv2
import numpy
import six.moves.cPickle as pickle
parser = argparse.ArgumentParser(description='Compute images mean array')
parser.add_argument('dataset', help='Path to training image-label list file')
parser.add_argument('--output', '-o', default='mean.npy',
help='path to output mean array')
args = parser.parse_args()
sum_image = None
count = 0
for line in open(args.dataset):
filepath = line.strip().split()[0]
image = cv2.imread(filepath)
image = image[:, :, [2, 1, 0]].transpose(2, 0, 1)
if sum_image is None:
sum_image = numpy.ndarray(image.shape, dtype=numpy.float32)
sum_image[:] = image
else:
sum_image += image
count += 1
sys.stderr.write('\r{}'.format(count))
sys.stderr.flush()
sys.stderr.write('\n')
mean = sum_image / count
pickle.dump(mean, open(args.output, 'wb'), -1)
|
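The compute_mean.py record above pickles the per-pixel mean (a float32 array in channel-first order, because each image is transposed before accumulation) rather than saving it with numpy.save, so the natural way to consume the output is to unpickle it and subtract it from images prepared the same way. A minimal sketch of that preprocessing step, with a hypothetical image path and assuming the image has the same size as the training images:

import cv2
import numpy
import six.moves.cPickle as pickle

with open('mean.npy', 'rb') as f:
    mean = pickle.load(f)                 # float32 array, shape (3, height, width)

image = cv2.imread('example.jpg')         # hypothetical input path
image = image[:, :, [2, 1, 0]].transpose(2, 0, 1).astype(numpy.float32)
centered = image - mean                   # mean subtraction before feeding a model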
d3ac6ba3ecd754f6f996eaa9107ea1d28074e4a3
|
src/python/tests/generators/transaction_generators/test_customer_transaction_parameters_generator.py
|
src/python/tests/generators/transaction_generators/test_customer_transaction_parameters_generator.py
|
import unittest
import simulation_parameters as sim_param
from generators.transaction_generator.customer_transaction_parameters_generator import CustomerTransactionParameters
from generators.transaction_generator.customer_transaction_parameters_generator import CustomerTransactionParametersGenerator
class TestCustomerTransactionParametersGenerator(unittest.TestCase):
def test_generate(self):
generator = CustomerTransactionParametersGenerator()
trans_params = generator.generate()
self.assertIsInstance(trans_params, CustomerTransactionParameters)
self.assertIsInstance(trans_params.pet_counts, dict)
total_count = sum(trans_params.pet_counts.values())
self.assertTrue(total_count >= sim_param.MIN_PETS)
self.assertTrue(total_count <= sim_param.MAX_PETS)
self.assertTrue(trans_params.average_transaction_trigger_time >= sim_param.TRANSACTION_TRIGGER_TIME_MIN)
self.assertTrue(trans_params.average_transaction_trigger_time <= sim_param.TRANSACTION_TRIGGER_TIME_MAX)
self.assertTrue(trans_params.average_purchase_trigger_time >= sim_param.PURCHASE_TRIGGER_TIME_MIN)
self.assertTrue(trans_params.average_purchase_trigger_time <= sim_param.PURCHASE_TRIGGER_TIME_MAX)
|
Add unit tests for customer transaction parameters generator.py
|
Add unit tests for customer transaction parameters generator.py
|
Python
|
apache-2.0
|
rnowling/bigpetstore-data-generator,rnowling/bigpetstore-data-generator,rnowling/bigpetstore-data-generator
|
Add unit tests for customer transaction parameters generator.py
|
import unittest
import simulation_parameters as sim_param
from generators.transaction_generator.customer_transaction_parameters_generator import CustomerTransactionParameters
from generators.transaction_generator.customer_transaction_parameters_generator import CustomerTransactionParametersGenerator
class TestCustomerTransactionParametersGenerator(unittest.TestCase):
def test_generate(self):
generator = CustomerTransactionParametersGenerator()
trans_params = generator.generate()
self.assertIsInstance(trans_params, CustomerTransactionParameters)
self.assertIsInstance(trans_params.pet_counts, dict)
total_count = sum(trans_params.pet_counts.values())
self.assertTrue(total_count >= sim_param.MIN_PETS)
self.assertTrue(total_count <= sim_param.MAX_PETS)
self.assertTrue(trans_params.average_transaction_trigger_time >= sim_param.TRANSACTION_TRIGGER_TIME_MIN)
self.assertTrue(trans_params.average_transaction_trigger_time <= sim_param.TRANSACTION_TRIGGER_TIME_MAX)
self.assertTrue(trans_params.average_purchase_trigger_time >= sim_param.PURCHASE_TRIGGER_TIME_MIN)
self.assertTrue(trans_params.average_purchase_trigger_time <= sim_param.PURCHASE_TRIGGER_TIME_MAX)
|
<commit_before><commit_msg>Add unit tests for customer transaction parameters generator.py<commit_after>
|
import unittest
import simulation_parameters as sim_param
from generators.transaction_generator.customer_transaction_parameters_generator import CustomerTransactionParameters
from generators.transaction_generator.customer_transaction_parameters_generator import CustomerTransactionParametersGenerator
class TestCustomerTransactionParametersGenerator(unittest.TestCase):
def test_generate(self):
generator = CustomerTransactionParametersGenerator()
trans_params = generator.generate()
self.assertIsInstance(trans_params, CustomerTransactionParameters)
self.assertIsInstance(trans_params.pet_counts, dict)
total_count = sum(trans_params.pet_counts.values())
self.assertTrue(total_count >= sim_param.MIN_PETS)
self.assertTrue(total_count <= sim_param.MAX_PETS)
self.assertTrue(trans_params.average_transaction_trigger_time >= sim_param.TRANSACTION_TRIGGER_TIME_MIN)
self.assertTrue(trans_params.average_transaction_trigger_time <= sim_param.TRANSACTION_TRIGGER_TIME_MAX)
self.assertTrue(trans_params.average_purchase_trigger_time >= sim_param.PURCHASE_TRIGGER_TIME_MIN)
self.assertTrue(trans_params.average_purchase_trigger_time <= sim_param.PURCHASE_TRIGGER_TIME_MAX)
|
Add unit tests for customer transaction parameters generator.pyimport unittest
import simulation_parameters as sim_param
from generators.transaction_generator.customer_transaction_parameters_generator import CustomerTransactionParameters
from generators.transaction_generator.customer_transaction_parameters_generator import CustomerTransactionParametersGenerator
class TestCustomerTransactionParametersGenerator(unittest.TestCase):
def test_generate(self):
generator = CustomerTransactionParametersGenerator()
trans_params = generator.generate()
self.assertIsInstance(trans_params, CustomerTransactionParameters)
self.assertIsInstance(trans_params.pet_counts, dict)
total_count = sum(trans_params.pet_counts.values())
self.assertTrue(total_count >= sim_param.MIN_PETS)
self.assertTrue(total_count <= sim_param.MAX_PETS)
self.assertTrue(trans_params.average_transaction_trigger_time >= sim_param.TRANSACTION_TRIGGER_TIME_MIN)
self.assertTrue(trans_params.average_transaction_trigger_time <= sim_param.TRANSACTION_TRIGGER_TIME_MAX)
self.assertTrue(trans_params.average_purchase_trigger_time >= sim_param.PURCHASE_TRIGGER_TIME_MIN)
self.assertTrue(trans_params.average_purchase_trigger_time <= sim_param.PURCHASE_TRIGGER_TIME_MAX)
|
<commit_before><commit_msg>Add unit tests for customer transaction parameters generator.py<commit_after>import unittest
import simulation_parameters as sim_param
from generators.transaction_generator.customer_transaction_parameters_generator import CustomerTransactionParameters
from generators.transaction_generator.customer_transaction_parameters_generator import CustomerTransactionParametersGenerator
class TestCustomerTransactionParametersGenerator(unittest.TestCase):
def test_generate(self):
generator = CustomerTransactionParametersGenerator()
trans_params = generator.generate()
self.assertIsInstance(trans_params, CustomerTransactionParameters)
self.assertIsInstance(trans_params.pet_counts, dict)
total_count = sum(trans_params.pet_counts.values())
self.assertTrue(total_count >= sim_param.MIN_PETS)
self.assertTrue(total_count <= sim_param.MAX_PETS)
self.assertTrue(trans_params.average_transaction_trigger_time >= sim_param.TRANSACTION_TRIGGER_TIME_MIN)
self.assertTrue(trans_params.average_transaction_trigger_time <= sim_param.TRANSACTION_TRIGGER_TIME_MAX)
self.assertTrue(trans_params.average_purchase_trigger_time >= sim_param.PURCHASE_TRIGGER_TIME_MIN)
self.assertTrue(trans_params.average_purchase_trigger_time <= sim_param.PURCHASE_TRIGGER_TIME_MAX)
|
|
3b0ed9b42b23a18ead0f07f221cafe89f9c8463e
|
tools/refract-filter.py
|
tools/refract-filter.py
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
import json
import textwrap
from collections import OrderedDict
try:
import yaml
except ImportError:
yaml = None
VERSION = "0.1"
def yaml_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
class OrderedLoader(Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(stream, OrderedLoader)
def walk(node, cb):
if isinstance(node, dict):
for key, item in node.items():
cb(key, item)
walk(item, cb)
elif type(node) is list:
for item in iter(node):
cb(None, item)
walk(item, cb)
def print_body(key, item):
if type(item) is OrderedDict and \
'element' in item.keys() and \
item['element'] == 'asset':
print(item['content'])
def main():
parser = argparse.ArgumentParser(
description=textwrap.dedent('''\
Simple filter for refract, prints out the json and JSONSchema content
of the datastrucutres. Input is either stdin or given files.'''),
epilog=textwrap.dedent('''\
Example:
refract-filter.py -vj test/fixtures/schema/*.json
drafter blueprint.apib | refract-filter.py'''),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-j', '--json',
help='input is json format and not yaml',
action='store_true', default=False)
parser.add_argument('-V', '--version', help='print version info',
action='store_true', default=False)
parser.add_argument('-v', '--verbose', help='verbose (print file names)',
action='store_true', default=False)
parser.add_argument('file', type=argparse.FileType('r'), nargs='*')
args = parser.parse_args()
if yaml is None:
args.json = True
if args.verbose:
print("Pyaml not found, only json format supported, -j in effect",
file=sys.stderr)
if args.version:
print(VERSION + " refract-filter.py")
sys.exit(0)
if not args.file:
args.file.append(sys.stdin)
for f in args.file:
if args.verbose:
print(f.name, file=sys.stderr)
if args.json:
data = json.load(f, object_pairs_hook=OrderedDict)
else:
data = yaml_load(f)
walk(data, print_body)
if __name__ == '__main__':
main()
|
Add tool for json and JSONschema pretty printing
|
Add tool for json and JSONschema pretty printing
|
Python
|
mit
|
apiaryio/drafter,apiaryio/drafter,apiaryio/drafter,apiaryio/drafter,apiaryio/drafter
|
Add tool for json and JSONschema pretty printing
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
import json
import textwrap
from collections import OrderedDict
try:
import yaml
except ImportError:
yaml = None
VERSION = "0.1"
def yaml_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
class OrderedLoader(Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(stream, OrderedLoader)
def walk(node, cb):
if isinstance(node, dict):
for key, item in node.items():
cb(key, item)
walk(item, cb)
elif type(node) is list:
for item in iter(node):
cb(None, item)
walk(item, cb)
def print_body(key, item):
if type(item) is OrderedDict and \
'element' in item.keys() and \
item['element'] == 'asset':
print(item['content'])
def main():
parser = argparse.ArgumentParser(
description=textwrap.dedent('''\
Simple filter for refract, prints out the json and JSONSchema content
of the datastrucutres. Input is either stdin or given files.'''),
epilog=textwrap.dedent('''\
Example:
refract-filter.py -vj test/fixtures/schema/*.json
drafter blueprint.apib | refract-filter.py'''),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-j', '--json',
help='input is json format and not yaml',
action='store_true', default=False)
parser.add_argument('-V', '--version', help='print version info',
action='store_true', default=False)
parser.add_argument('-v', '--verbose', help='verbose (print file names)',
action='store_true', default=False)
parser.add_argument('file', type=argparse.FileType('r'), nargs='*')
args = parser.parse_args()
if yaml is None:
args.json = True
if args.verbose:
print("Pyaml not found, only json format supported, -j in effect",
file=sys.stderr)
if args.version:
print(VERSION + " refract-filter.py")
sys.exit(0)
if not args.file:
args.file.append(sys.stdin)
for f in args.file:
if args.verbose:
print(f.name, file=sys.stderr)
if args.json:
data = json.load(f, object_pairs_hook=OrderedDict)
else:
data = yaml_load(f)
walk(data, print_body)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add tool for json and JSONschema pretty printing<commit_after>
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
import json
import textwrap
from collections import OrderedDict
try:
import yaml
except ImportError:
yaml = None
VERSION = "0.1"
def yaml_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
class OrderedLoader(Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(stream, OrderedLoader)
def walk(node, cb):
if isinstance(node, dict):
for key, item in node.items():
cb(key, item)
walk(item, cb)
elif type(node) is list:
for item in iter(node):
cb(None, item)
walk(item, cb)
def print_body(key, item):
if type(item) is OrderedDict and \
'element' in item.keys() and \
item['element'] == 'asset':
print(item['content'])
def main():
parser = argparse.ArgumentParser(
description=textwrap.dedent('''\
Simple filter for refract, prints out the json and JSONSchema content
of the datastrucutres. Input is either stdin or given files.'''),
epilog=textwrap.dedent('''\
Example:
refract-filter.py -vj test/fixtures/schema/*.json
drafter blueprint.apib | refract-filter.py'''),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-j', '--json',
help='input is json format and not yaml',
action='store_true', default=False)
parser.add_argument('-V', '--version', help='print version info',
action='store_true', default=False)
parser.add_argument('-v', '--verbose', help='verbose (print file names)',
action='store_true', default=False)
parser.add_argument('file', type=argparse.FileType('r'), nargs='*')
args = parser.parse_args()
if yaml is None:
args.json = True
if args.verbose:
print("Pyaml not found, only json format supported, -j in effect",
file=sys.stderr)
if args.version:
print(VERSION + " refract-filter.py")
sys.exit(0)
if not args.file:
args.file.append(sys.stdin)
for f in args.file:
if args.verbose:
print(f.name, file=sys.stderr)
if args.json:
data = json.load(f, object_pairs_hook=OrderedDict)
else:
data = yaml_load(f)
walk(data, print_body)
if __name__ == '__main__':
main()
|
Add tool for json and JSONschema pretty printing#!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
import json
import textwrap
from collections import OrderedDict
try:
import yaml
except ImportError:
yaml = None
VERSION = "0.1"
def yaml_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
class OrderedLoader(Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(stream, OrderedLoader)
def walk(node, cb):
if isinstance(node, dict):
for key, item in node.items():
cb(key, item)
walk(item, cb)
elif type(node) is list:
for item in iter(node):
cb(None, item)
walk(item, cb)
def print_body(key, item):
if type(item) is OrderedDict and \
'element' in item.keys() and \
item['element'] == 'asset':
print(item['content'])
def main():
parser = argparse.ArgumentParser(
description=textwrap.dedent('''\
Simple filter for refract, prints out the json and JSONSchema content
of the datastrucutres. Input is either stdin or given files.'''),
epilog=textwrap.dedent('''\
Example:
refract-filter.py -vj test/fixtures/schema/*.json
drafter blueprint.apib | refract-filter.py'''),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-j', '--json',
help='input is json format and not yaml',
action='store_true', default=False)
parser.add_argument('-V', '--version', help='print version info',
action='store_true', default=False)
parser.add_argument('-v', '--verbose', help='verbose (print file names)',
action='store_true', default=False)
parser.add_argument('file', type=argparse.FileType('r'), nargs='*')
args = parser.parse_args()
if yaml is None:
args.json = True
if args.verbose:
print("Pyaml not found, only json format supported, -j in effect",
file=sys.stderr)
if args.version:
print(VERSION + " refract-filter.py")
sys.exit(0)
if not args.file:
args.file.append(sys.stdin)
for f in args.file:
if args.verbose:
print(f.name, file=sys.stderr)
if args.json:
data = json.load(f, object_pairs_hook=OrderedDict)
else:
data = yaml_load(f)
walk(data, print_body)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add tool for json and JSONschema pretty printing<commit_after>#!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
import json
import textwrap
from collections import OrderedDict
try:
import yaml
except ImportError:
yaml = None
VERSION = "0.1"
def yaml_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
class OrderedLoader(Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(stream, OrderedLoader)
def walk(node, cb):
if isinstance(node, dict):
for key, item in node.items():
cb(key, item)
walk(item, cb)
elif type(node) is list:
for item in iter(node):
cb(None, item)
walk(item, cb)
def print_body(key, item):
if type(item) is OrderedDict and \
'element' in item.keys() and \
item['element'] == 'asset':
print(item['content'])
def main():
parser = argparse.ArgumentParser(
description=textwrap.dedent('''\
Simple filter for refract, prints out the json and JSONSchema content
of the datastrucutres. Input is either stdin or given files.'''),
epilog=textwrap.dedent('''\
Example:
refract-filter.py -vj test/fixtures/schema/*.json
drafter blueprint.apib | refract-filter.py'''),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-j', '--json',
help='input is json format and not yaml',
action='store_true', default=False)
parser.add_argument('-V', '--version', help='print version info',
action='store_true', default=False)
parser.add_argument('-v', '--verbose', help='verbose (print file names)',
action='store_true', default=False)
parser.add_argument('file', type=argparse.FileType('r'), nargs='*')
args = parser.parse_args()
if yaml is None:
args.json = True
if args.verbose:
print("Pyaml not found, only json format supported, -j in effect",
file=sys.stderr)
if args.version:
print(VERSION + " refract-filter.py")
sys.exit(0)
if not args.file:
args.file.append(sys.stdin)
for f in args.file:
if args.verbose:
print(f.name, file=sys.stderr)
if args.json:
data = json.load(f, object_pairs_hook=OrderedDict)
else:
data = yaml_load(f)
walk(data, print_body)
if __name__ == '__main__':
main()
|
|
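The refract-filter.py record above prints the 'content' of every node whose 'element' is 'asset', wherever it appears in the parse tree. A small self-contained illustration of that behaviour on a made-up fragment (not a real drafter parse result), using the same walk/print_body pair in condensed form:

from collections import OrderedDict

doc = OrderedDict([
    ('element', 'category'),
    ('content', [
        OrderedDict([
            ('element', 'asset'),
            ('content', '{"name": "example"}'),
        ]),
    ]),
])

def print_body(key, item):
    # Print only asset bodies, exactly as the filter above does.
    if isinstance(item, OrderedDict) and item.get('element') == 'asset':
        print(item['content'])

def walk(node, cb):
    if isinstance(node, dict):
        for key, item in node.items():
            cb(key, item)
            walk(item, cb)
    elif isinstance(node, list):
        for item in node:
            cb(None, item)
            walk(item, cb)

walk(doc, print_body)  # prints: {"name": "example"}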
5a80583cdfb6f43cc31333eafcb544b7c7b49f08
|
tools/subset_symbols.py
|
tools/subset_symbols.py
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a curated subset of NotoSansSymbols."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import sys
import subset
def main(argv):
"""Subset the Noto Symbols font which is given as the argument."""
source_file_name = argv[1]
target_coverage = {
0x20BA, # TURKISH LIRA SIGN
0x20BC, # MANAT SIGN
0x20BD, # RUBLE SIGN
0x22EE, # VERTICAL ELLIPSIS
0x25AB, # WHITE SMALL SQUARE
0x25FB, # WHITE MEDIUM SQUARE
0x25FC, # BLACK MEDIUM SQUARE
0x25FD, # WHITE MEDIUM SMALL SQUARE
0x25FE, # BLACK MEDIUM SMALL SQUARE
0x2600, # BLACK SUN WITH RAYS
0x266B, # BEAMED EIGHTH NOTES
0x26AA, # MEDIUM WHITE CIRCLE
0x26AB, # MEDIUM BLACK CIRCLE
0x2757, # HEAVY EXCLAMATION MARK SYMBOL
0x2934, # ARROW POINTING RIGHTWARDS THEN CURVING UPWARDS
0x2935, # ARROW POINTING RIGHTWARDS THEN CURVING DOWNWARDS
0x2B05, # LEFTWARDS BLACK ARROW
0x2B06, # UPWARDS BLACK ARROW
0x2B07, # DOWNWARDS BLACK ARROW
0x2B1B, # BLACK LARGE SQUARE
0x2B1C, # WHITE LARGE SQUARE
0x2B50, # WHITE MEDIUM STAR
0x2B55, # HEAVY LARGE CIRCLE
}
target_coverage.update(range(0x2800, 0x28FF+1)) # Braille symbols
subset.subset_font(
source_file_name,
'NotoSansSymbols-Regular-Subsetted.ttf',
include=target_coverage)
if __name__ == '__main__':
main(sys.argv)
|
Add tool for subsetting symbols.
|
Add tool for subsetting symbols.
git-svn-id: ed1ad396822b1b6daf934b380f36d983ac38e44f@276 37e447bf-a746-2f79-0798-35135ca55e94
|
Python
|
apache-2.0
|
googlefonts/nototools,namemealrady/nototools,googlefonts/nototools,googlei18n/nototools,googlefonts/nototools,namemealrady/nototools,pathumego/nototools,pathumego/nototools,davelab6/nototools,dougfelt/nototools,dougfelt/nototools,davelab6/nototools,googlei18n/nototools,pahans/nototools,moyogo/nototools,moyogo/nototools,googlefonts/nototools,dougfelt/nototools,pathumego/nototools,googlefonts/nototools,davelab6/nototools,anthrotype/nototools,moyogo/nototools,googlei18n/nototools,namemealrady/nototools,pahans/nototools,anthrotype/nototools,anthrotype/nototools,pahans/nototools
|
Add tool for subsetting symbols.
git-svn-id: ed1ad396822b1b6daf934b380f36d983ac38e44f@276 37e447bf-a746-2f79-0798-35135ca55e94
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a curated subset of NotoSansSymbols."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import sys
import subset
def main(argv):
"""Subset the Noto Symbols font which is given as the argument."""
source_file_name = argv[1]
target_coverage = {
0x20BA, # TURKISH LIRA SIGN
0x20BC, # MANAT SIGN
0x20BD, # RUBLE SIGN
0x22EE, # VERTICAL ELLIPSIS
0x25AB, # WHITE SMALL SQUARE
0x25FB, # WHITE MEDIUM SQUARE
0x25FC, # BLACK MEDIUM SQUARE
0x25FD, # WHITE MEDIUM SMALL SQUARE
0x25FE, # BLACK MEDIUM SMALL SQUARE
0x2600, # BLACK SUN WITH RAYS
0x266B, # BEAMED EIGHTH NOTES
0x26AA, # MEDIUM WHITE CIRCLE
0x26AB, # MEDIUM BLACK CIRCLE
0x2757, # HEAVY EXCLAMATION MARK SYMBOL
0x2934, # ARROW POINTING RIGHTWARDS THEN CURVING UPWARDS
0x2935, # ARROW POINTING RIGHTWARDS THEN CURVING DOWNWARDS
0x2B05, # LEFTWARDS BLACK ARROW
0x2B06, # UPWARDS BLACK ARROW
0x2B07, # DOWNWARDS BLACK ARROW
0x2B1B, # BLACK LARGE SQUARE
0x2B1C, # WHITE LARGE SQUARE
0x2B50, # WHITE MEDIUM STAR
0x2B55, # HEAVY LARGE CIRCLE
}
target_coverage.update(range(0x2800, 0x28FF+1)) # Braille symbols
subset.subset_font(
source_file_name,
'NotoSansSymbols-Regular-Subsetted.ttf',
include=target_coverage)
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>Add tool for subsetting symbols.
git-svn-id: ed1ad396822b1b6daf934b380f36d983ac38e44f@276 37e447bf-a746-2f79-0798-35135ca55e94<commit_after>
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a curated subset of NotoSansSymbols."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import sys
import subset
def main(argv):
"""Subset the Noto Symbols font which is given as the argument."""
source_file_name = argv[1]
target_coverage = {
0x20BA, # TURKISH LIRA SIGN
0x20BC, # MANAT SIGN
0x20BD, # RUBLE SIGN
0x22EE, # VERTICAL ELLIPSIS
0x25AB, # WHITE SMALL SQUARE
0x25FB, # WHITE MEDIUM SQUARE
0x25FC, # BLACK MEDIUM SQUARE
0x25FD, # WHITE MEDIUM SMALL SQUARE
0x25FE, # BLACK MEDIUM SMALL SQUARE
0x2600, # BLACK SUN WITH RAYS
0x266B, # BEAMED EIGHTH NOTES
0x26AA, # MEDIUM WHITE CIRCLE
0x26AB, # MEDIUM BLACK CIRCLE
0x2757, # HEAVY EXCLAMATION MARK SYMBOL
0x2934, # ARROW POINTING RIGHTWARDS THEN CURVING UPWARDS
0x2935, # ARROW POINTING RIGHTWARDS THEN CURVING DOWNWARDS
0x2B05, # LEFTWARDS BLACK ARROW
0x2B06, # UPWARDS BLACK ARROW
0x2B07, # DOWNWARDS BLACK ARROW
0x2B1B, # BLACK LARGE SQUARE
0x2B1C, # WHITE LARGE SQUARE
0x2B50, # WHITE MEDIUM STAR
0x2B55, # HEAVY LARGE CIRCLE
}
target_coverage.update(range(0x2800, 0x28FF+1)) # Braille symbols
subset.subset_font(
source_file_name,
'NotoSansSymbols-Regular-Subsetted.ttf',
include=target_coverage)
if __name__ == '__main__':
main(sys.argv)
|
Add tool for subsetting symbols.
git-svn-id: ed1ad396822b1b6daf934b380f36d983ac38e44f@276 37e447bf-a746-2f79-0798-35135ca55e94#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a curated subset of NotoSansSymbols."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import sys
import subset
def main(argv):
"""Subset the Noto Symbols font which is given as the argument."""
source_file_name = argv[1]
target_coverage = {
0x20BA, # TURKISH LIRA SIGN
0x20BC, # MANAT SIGN
0x20BD, # RUBLE SIGN
0x22EE, # VERTICAL ELLIPSIS
0x25AB, # WHITE SMALL SQUARE
0x25FB, # WHITE MEDIUM SQUARE
0x25FC, # BLACK MEDIUM SQUARE
0x25FD, # WHITE MEDIUM SMALL SQUARE
0x25FE, # BLACK MEDIUM SMALL SQUARE
0x2600, # BLACK SUN WITH RAYS
0x266B, # BEAMED EIGHTH NOTES
0x26AA, # MEDIUM WHITE CIRCLE
0x26AB, # MEDIUM BLACK CIRCLE
0x2757, # HEAVY EXCLAMATION MARK SYMBOL
0x2934, # ARROW POINTING RIGHTWARDS THEN CURVING UPWARDS
0x2935, # ARROW POINTING RIGHTWARDS THEN CURVING DOWNWARDS
0x2B05, # LEFTWARDS BLACK ARROW
0x2B06, # UPWARDS BLACK ARROW
0x2B07, # DOWNWARDS BLACK ARROW
0x2B1B, # BLACK LARGE SQUARE
0x2B1C, # WHITE LARGE SQUARE
0x2B50, # WHITE MEDIUM STAR
0x2B55, # HEAVY LARGE CIRCLE
}
target_coverage.update(range(0x2800, 0x28FF+1)) # Braille symbols
subset.subset_font(
source_file_name,
'NotoSansSymbols-Regular-Subsetted.ttf',
include=target_coverage)
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>Add tool for subsetting symbols.
git-svn-id: ed1ad396822b1b6daf934b380f36d983ac38e44f@276 37e447bf-a746-2f79-0798-35135ca55e94<commit_after>#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a curated subset of NotoSansSymbols."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import sys
import subset
def main(argv):
"""Subset the Noto Symbols font which is given as the argument."""
source_file_name = argv[1]
target_coverage = {
0x20BA, # TURKISH LIRA SIGN
0x20BC, # MANAT SIGN
0x20BD, # RUBLE SIGN
0x22EE, # VERTICAL ELLIPSIS
0x25AB, # WHITE SMALL SQUARE
0x25FB, # WHITE MEDIUM SQUARE
0x25FC, # BLACK MEDIUM SQUARE
0x25FD, # WHITE MEDIUM SMALL SQUARE
0x25FE, # BLACK MEDIUM SMALL SQUARE
0x2600, # BLACK SUN WITH RAYS
0x266B, # BEAMED EIGHTH NOTES
0x26AA, # MEDIUM WHITE CIRCLE
0x26AB, # MEDIUM BLACK CIRCLE
0x2757, # HEAVY EXCLAMATION MARK SYMBOL
0x2934, # ARROW POINTING RIGHTWARDS THEN CURVING UPWARDS
0x2935, # ARROW POINTING RIGHTWARDS THEN CURVING DOWNWARDS
0x2B05, # LEFTWARDS BLACK ARROW
0x2B06, # UPWARDS BLACK ARROW
0x2B07, # DOWNWARDS BLACK ARROW
0x2B1B, # BLACK LARGE SQUARE
0x2B1C, # WHITE LARGE SQUARE
0x2B50, # WHITE MEDIUM STAR
0x2B55, # HEAVY LARGE CIRCLE
}
target_coverage.update(range(0x2800, 0x28FF+1)) # Braille symbols
subset.subset_font(
source_file_name,
'NotoSansSymbols-Regular-Subsetted.ttf',
include=target_coverage)
if __name__ == '__main__':
main(sys.argv)
|
|
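The subset.subset_font call in the record above is nototools' own wrapper, so the coverage set is all that transfers to other tooling. If the same selection has to be handed to a different subsetter, the codepoints can be flattened into the usual U+XXXX range notation; a generic sketch (the range merging below is not part of the script above):

def format_unicodes(codepoints):
    # Merge sorted codepoints into contiguous ranges such as "U+2800-28FF".
    points = sorted(codepoints)
    ranges = []
    start = prev = points[0]
    for cp in points[1:]:
        if cp == prev + 1:
            prev = cp
            continue
        ranges.append((start, prev))
        start = prev = cp
    ranges.append((start, prev))
    return ','.join('U+%04X' % a if a == b else 'U+%04X-%04X' % (a, b)
                    for a, b in ranges)

coverage = {0x2B50, 0x2B55} | set(range(0x2800, 0x28FF + 1))
print(format_unicodes(coverage))  # U+2800-28FF,U+2B50,U+2B55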
d547eaef2b81d1a66eeae6a135059ef98aee7713
|
tools/version_update.py
|
tools/version_update.py
|
import sys
from xml.dom.minidom import parse
pom = open("pom.xml")
dom = parse(pom)
pom.close()
mv = dom.getElementsByTagName('motech.version')
mv[0].childNodes[0].data = sys.argv[1]
f = open("pom.xml", 'w')
dom.writexml(f)
f.close()
|
Add a small script to update motech-version in the pom
|
Add a small script to update motech-version in the pom
Change-Id: I15f828394346dd8260e1f1c2e899a6adf6c27d5d
|
Python
|
bsd-3-clause
|
wstrzelczyk/modules,pgesek/modules,sebbrudzinski/modules,pgesek/modules,ngraczewski/modules,pmuchowski/modules,tstalka/modules,atish160384/modules,ScottKimball/modules,LukSkarDev/modules,justin-hayes/modules,ngraczewski/modules,martokarski/modules,tstalka/modules,justin-hayes/modules,atish160384/modules,frankhuster/modules,pmuchowski/modules,ngraczewski/modules,justin-hayes/modules,koshalt/modules,frankhuster/modules,pgesek/modules,1stmateusz/modules,1stmateusz/modules,tstalka/modules,mkwiatkowskisoldevelo/modules,LukSkarDev/modules,wstrzelczyk/modules,tstalka/modules,ScottKimball/modules,ngraczewski/modules,koshalt/modules,sebbrudzinski/modules,frankhuster/modules,pgesek/modules,frankhuster/modules,smalecki/modules,1stmateusz/modules,pmuchowski/modules,shubhambeehyv/modules,shubhambeehyv/modules,koshalt/modules,pmuchowski/modules,martokarski/modules,LukSkarDev/modules,martokarski/modules,justin-hayes/modules,mkwiatkowskisoldevelo/modules,wstrzelczyk/modules,sebbrudzinski/modules,sebbrudzinski/modules,martokarski/modules,mkwiatkowskisoldevelo/modules,mkwiatkowskisoldevelo/modules,shubhambeehyv/modules,smalecki/modules,smalecki/modules,atish160384/modules,ScottKimball/modules,atish160384/modules,LukSkarDev/modules,ScottKimball/modules,1stmateusz/modules,smalecki/modules,koshalt/modules,shubhambeehyv/modules,wstrzelczyk/modules
|
Add a small script to update motech-version in the pom
Change-Id: I15f828394346dd8260e1f1c2e899a6adf6c27d5d
|
import sys
from xml.dom.minidom import parse
pom = open("pom.xml")
dom = parse(pom)
pom.close()
mv = dom.getElementsByTagName('motech.version')
mv[0].childNodes[0].data = sys.argv[1]
f = open("pom.xml", 'w')
dom.writexml(f)
f.close()
|
<commit_before><commit_msg>Add a small script to update motech-version in the pom
Change-Id: I15f828394346dd8260e1f1c2e899a6adf6c27d5d<commit_after>
|
import sys
from xml.dom.minidom import parse
pom = open("pom.xml")
dom = parse(pom)
pom.close()
mv = dom.getElementsByTagName('motech.version')
mv[0].childNodes[0].data = sys.argv[1]
f = open("pom.xml", 'w')
dom.writexml(f)
f.close()
|
Add a small script to update motech-version in the pom
Change-Id: I15f828394346dd8260e1f1c2e899a6adf6c27d5dimport sys
from xml.dom.minidom import parse
pom = open("pom.xml")
dom = parse(pom)
pom.close()
mv = dom.getElementsByTagName('motech.version')
mv[0].childNodes[0].data = sys.argv[1]
f = open("pom.xml", 'w')
dom.writexml(f)
f.close()
|
<commit_before><commit_msg>Add a small script to update motech-version in the pom
Change-Id: I15f828394346dd8260e1f1c2e899a6adf6c27d5d<commit_after>import sys
from xml.dom.minidom import parse
pom = open("pom.xml")
dom = parse(pom)
pom.close()
mv = dom.getElementsByTagName('motech.version')
mv[0].childNodes[0].data = sys.argv[1]
f = open("pom.xml", 'w')
dom.writexml(f)
f.close()
|
|
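The version_update.py record above leans on minidom's getElementsByTagName, which ignores XML namespaces; a real Maven POM usually declares a default namespace, so an xml.etree.ElementTree version has to name it explicitly. A hedged equivalent sketch (assumes the standard Maven POM namespace; like the minidom version, rewriting the file may change its formatting slightly):

import sys
import xml.etree.ElementTree as ET

NS = 'http://maven.apache.org/POM/4.0.0'
ET.register_namespace('', NS)  # keep the default namespace unprefixed on output

tree = ET.parse('pom.xml')
version = tree.getroot().find('.//{%s}motech.version' % NS)
version.text = sys.argv[1]
tree.write('pom.xml', xml_declaration=True, encoding='utf-8')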
7671e3dfe93853f318cec953dcdbd0fbb2584912
|
Utilities/make_vm.py
|
Utilities/make_vm.py
|
"""
Generate the enum code for the VM header file.
"""
def getmin(k):
if ((k & 0x8000) != 0):
return (k & 0x7fff)
else:
return (k >> 8)
def getmax(k):
if ((k & 0x8000) != 0):
return (k & 0x7fff)
elif ((k & 0x80) == 0):
return (k & 0xff)
else:
return -1;
def getstep(k):
if ((k & 0x8000) != 0):
return 1
elif ((k & 0x80) == 0):
return 1
else:
return ((-k) & 0xff)
def printm(k):
n = getmin(k)
m = getmax(k)
s = getstep(k)
a = 'M'
a += str(n)
if (m > n or m < 0):
a += 'T'
if (s > 1):
a += str(s)
if (m > 0):
a += str(m)
else:
a += 'N'
return a
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
nlist = []
for i in sys.argv[1:]:
nlist.append(int(i, 16))
else:
nlist = [0x0000, 0x00FF, 0x7F7F, 0x8080, 0x80FF,
0x8100, 0x8200, 0x8300, 0x8400, 0x8800]
nrange = range(0,33) + [63, 64, 99, 100]
for i in nrange:
mrange = range(i,33) + [63, 64, 99, 100, 0xff]
for j in mrange:
if j < i:
continue
nlist.append((i << 8) | j)
if j == 0xff and i > 1:
nlist.append((i << 8) | (j - i + 1))
nlist.sort()
for i in nlist:
m = printm(i)
if len(m) <= 9:
print ' %-9.9s = 0x%4.4X,' % (m, i)
|
Add the utility script that generated vtkDICOMVM.h
|
Add the utility script that generated vtkDICOMVM.h
|
Python
|
bsd-3-clause
|
dgobbi/vtk-dicom,hendradarwin/vtk-dicom,dgobbi/vtk-dicom,hendradarwin/vtk-dicom,hendradarwin/vtk-dicom,dgobbi/vtk-dicom
|
Add the utility script that generated vtkDICOMVM.h
|
"""
Generate the enum code for the VM header file.
"""
def getmin(k):
if ((k & 0x8000) != 0):
return (k & 0x7fff)
else:
return (k >> 8)
def getmax(k):
if ((k & 0x8000) != 0):
return (k & 0x7fff)
elif ((k & 0x80) == 0):
return (k & 0xff)
else:
return -1;
def getstep(k):
if ((k & 0x8000) != 0):
return 1
elif ((k & 0x80) == 0):
return 1
else:
return ((-k) & 0xff)
def printm(k):
n = getmin(k)
m = getmax(k)
s = getstep(k)
a = 'M'
a += str(n)
if (m > n or m < 0):
a += 'T'
if (s > 1):
a += str(s)
if (m > 0):
a += str(m)
else:
a += 'N'
return a
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
nlist = []
for i in sys.argv[1:]:
nlist.append(int(i, 16))
else:
nlist = [0x0000, 0x00FF, 0x7F7F, 0x8080, 0x80FF,
0x8100, 0x8200, 0x8300, 0x8400, 0x8800]
nrange = range(0,33) + [63, 64, 99, 100]
for i in nrange:
mrange = range(i,33) + [63, 64, 99, 100, 0xff]
for j in mrange:
if j < i:
continue
nlist.append((i << 8) | j)
if j == 0xff and i > 1:
nlist.append((i << 8) | (j - i + 1))
nlist.sort()
for i in nlist:
m = printm(i)
if len(m) <= 9:
print ' %-9.9s = 0x%4.4X,' % (m, i)
|
<commit_before><commit_msg>Add the utility script that generated vtkDICOMVM.h<commit_after>
|
"""
Generate the enum code for the VM header file.
"""
def getmin(k):
if ((k & 0x8000) != 0):
return (k & 0x7fff)
else:
return (k >> 8)
def getmax(k):
if ((k & 0x8000) != 0):
return (k & 0x7fff)
elif ((k & 0x80) == 0):
return (k & 0xff)
else:
return -1;
def getstep(k):
if ((k & 0x8000) != 0):
return 1
elif ((k & 0x80) == 0):
return 1
else:
return ((-k) & 0xff)
def printm(k):
n = getmin(k)
m = getmax(k)
s = getstep(k)
a = 'M'
a += str(n)
if (m > n or m < 0):
a += 'T'
if (s > 1):
a += str(s)
if (m > 0):
a += str(m)
else:
a += 'N'
return a
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
nlist = []
for i in sys.argv[1:]:
nlist.append(int(i, 16))
else:
nlist = [0x0000, 0x00FF, 0x7F7F, 0x8080, 0x80FF,
0x8100, 0x8200, 0x8300, 0x8400, 0x8800]
nrange = range(0,33) + [63, 64, 99, 100]
for i in nrange:
mrange = range(i,33) + [63, 64, 99, 100, 0xff]
for j in mrange:
if j < i:
continue
nlist.append((i << 8) | j)
if j == 0xff and i > 1:
nlist.append((i << 8) | (j - i + 1))
nlist.sort()
for i in nlist:
m = printm(i)
if len(m) <= 9:
print ' %-9.9s = 0x%4.4X,' % (m, i)
|
Add the utility script that generated vtkDICOMVM.h"""
Generate the enum code for the VM header file.
"""
def getmin(k):
if ((k & 0x8000) != 0):
return (k & 0x7fff)
else:
return (k >> 8)
def getmax(k):
if ((k & 0x8000) != 0):
return (k & 0x7fff)
elif ((k & 0x80) == 0):
return (k & 0xff)
else:
return -1;
def getstep(k):
if ((k & 0x8000) != 0):
return 1
elif ((k & 0x80) == 0):
return 1
else:
return ((-k) & 0xff)
def printm(k):
n = getmin(k)
m = getmax(k)
s = getstep(k)
a = 'M'
a += str(n)
if (m > n or m < 0):
a += 'T'
if (s > 1):
a += str(s)
if (m > 0):
a += str(m)
else:
a += 'N'
return a
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
nlist = []
for i in sys.argv[1:]:
nlist.append(int(i, 16))
else:
nlist = [0x0000, 0x00FF, 0x7F7F, 0x8080, 0x80FF,
0x8100, 0x8200, 0x8300, 0x8400, 0x8800]
nrange = range(0,33) + [63, 64, 99, 100]
for i in nrange:
mrange = range(i,33) + [63, 64, 99, 100, 0xff]
for j in mrange:
if j < i:
continue
nlist.append((i << 8) | j)
if j == 0xff and i > 1:
nlist.append((i << 8) | (j - i + 1))
nlist.sort()
for i in nlist:
m = printm(i)
if len(m) <= 9:
print ' %-9.9s = 0x%4.4X,' % (m, i)
|
<commit_before><commit_msg>Add the utility script that generated vtkDICOMVM.h<commit_after>"""
Generate the enum code for the VM header file.
"""
def getmin(k):
if ((k & 0x8000) != 0):
return (k & 0x7fff)
else:
return (k >> 8)
def getmax(k):
if ((k & 0x8000) != 0):
return (k & 0x7fff)
elif ((k & 0x80) == 0):
return (k & 0xff)
else:
return -1;
def getstep(k):
if ((k & 0x8000) != 0):
return 1
elif ((k & 0x80) == 0):
return 1
else:
return ((-k) & 0xff)
def printm(k):
n = getmin(k)
m = getmax(k)
s = getstep(k)
a = 'M'
a += str(n)
if (m > n or m < 0):
a += 'T'
if (s > 1):
a += str(s)
if (m > 0):
a += str(m)
else:
a += 'N'
return a
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
nlist = []
for i in sys.argv[1:]:
nlist.append(int(i, 16))
else:
nlist = [0x0000, 0x00FF, 0x7F7F, 0x8080, 0x80FF,
0x8100, 0x8200, 0x8300, 0x8400, 0x8800]
nrange = range(0,33) + [63, 64, 99, 100]
for i in nrange:
mrange = range(i,33) + [63, 64, 99, 100, 0xff]
for j in mrange:
if j < i:
continue
nlist.append((i << 8) | j)
if j == 0xff and i > 1:
nlist.append((i << 8) | (j - i + 1))
nlist.sort()
for i in nlist:
m = printm(i)
if len(m) <= 9:
print ' %-9.9s = 0x%4.4X,' % (m, i)
|
|
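As a reading of the packing scheme in the make_vm.py record above: the high byte carries the minimum multiplicity, the low byte the maximum, the low byte's top bit marks an open-ended "n" form, and bit 15 marks a single fixed count stored in the low 15 bits. A condensed restatement with a few decoded examples (same rules as getmin/getmax/getstep/printm above):

def vm_name(k):
    # Condensed restatement of getmin/getmax/getstep/printm from the script above.
    if k & 0x8000:
        n = m = k & 0x7fff
        s = 1
    else:
        n = k >> 8
        if k & 0x80:
            m, s = -1, (-k) & 0xff
        else:
            m, s = k & 0xff, 1
    name = 'M%d' % n
    if m > n or m < 0:
        name += 'T'
        if s > 1:
            name += str(s)
        name += str(m) if m > 0 else 'N'
    return name

print(vm_name(0x0101))  # M1    -> exactly one value
print(vm_name(0x0103))  # M1T3  -> one to three values
print(vm_name(0x01FF))  # M1TN  -> one to n values
print(vm_name(0x02FE))  # M2T2N -> two to 2n values, in steps of two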
10e51f918c208c1b3c6f27b28b085ac49b33431f
|
towers/beginner/level_001.py
|
towers/beginner/level_001.py
|
# --------
# |@ >|
# --------
level.description("You see before yourself a long hallway with stairs at "
"the end. There is nothing in the way.")
level.tip("Call warrior.walk_ to walk forward in the Player "
"'play_turn' method.")
level.time_bonus(15)
level.ace_score(10)
level.size(8, 1)
level.stairs(7, 0)
def a_func(warrior):
warrior.add_abilities('walk')
level.warrior(0, 0, func=a_func)
|
Add an initial tower that doesnt parse yet
|
Add an initial tower that doesnt parse yet
|
Python
|
mit
|
arbylee/python-warrior
|
Add an initial tower that doesnt parse yet
|
# --------
# |@ >|
# --------
level.description("You see before yourself a long hallway with stairs at "
"the end. There is nothing in the way.")
level.tip("Call warrior.walk_ to walk forward in the Player "
"'play_turn' method.")
level.time_bonus(15)
level.ace_score(10)
level.size(8, 1)
level.stairs(7, 0)
def a_func(warrior):
warrior.add_abilities('walk')
level.warrior(0, 0, func=a_func)
|
<commit_before><commit_msg>Add an initial tower that doesnt parse yet<commit_after>
|
# --------
# |@ >|
# --------
level.description("You see before yourself a long hallway with stairs at "
"the end. There is nothing in the way.")
level.tip("Call warrior.walk_ to walk forward in the Player "
"'play_turn' method.")
level.time_bonus(15)
level.ace_score(10)
level.size(8, 1)
level.stairs(7, 0)
def a_func(warrior):
warrior.add_abilities('walk')
level.warrior(0, 0, func=a_func)
|
Add an initial tower that doesnt parse yet# --------
# |@ >|
# --------
level.description("You see before yourself a long hallway with stairs at "
"the end. There is nothing in the way.")
level.tip("Call warrior.walk_ to walk forward in the Player "
"'play_turn' method.")
level.time_bonus(15)
level.ace_score(10)
level.size(8, 1)
level.stairs(7, 0)
def a_func(warrior):
warrior.add_abilities('walk')
level.warrior(0, 0, func=a_func)
|
<commit_before><commit_msg>Add an initial tower that doesnt parse yet<commit_after># --------
# |@ >|
# --------
level.description("You see before yourself a long hallway with stairs at "
"the end. There is nothing in the way.")
level.tip("Call warrior.walk_ to walk forward in the Player "
"'play_turn' method.")
level.time_bonus(15)
level.ace_score(10)
level.size(8, 1)
level.stairs(7, 0)
def a_func(warrior):
warrior.add_abilities('walk')
level.warrior(0, 0, func=a_func)
|
|
563f4fd48840655c849fa620d03bddf82ea63b50
|
project/velkoja/management/commands/check_holvi_overdue.py
|
project/velkoja/management/commands/check_holvi_overdue.py
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from velkoja.holvichecker import HolviOverdueInvoicesHandler
class Command(BaseCommand):
help = 'Import transaction data from Holvi API'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
handler = HolviOverdueInvoicesHandler()
notified = handler.process_overdue()
if options['verbosity'] > 1:
for n,i in notified:
print("Notified %s about %s" % (n.email, i.subject))
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from velkoja.holvichecker import HolviOverdueInvoicesHandler
class Command(BaseCommand):
help = 'Import transaction data from Holvi API'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
handler = HolviOverdueInvoicesHandler()
notified = handler.process_overdue(send=True)
if options['verbosity'] > 1:
for n,i in notified:
print("Notified %s about %s" % (n.email, i.subject))
|
Make the command to actually send the overdues
|
Make the command to actually send the overdues
|
Python
|
mit
|
rambo/asylum,hacklab-fi/asylum,jautero/asylum,rambo/asylum,HelsinkiHacklab/asylum,hacklab-fi/asylum,HelsinkiHacklab/asylum,jautero/asylum,rambo/asylum,HelsinkiHacklab/asylum,rambo/asylum,HelsinkiHacklab/asylum,jautero/asylum,hacklab-fi/asylum,jautero/asylum,hacklab-fi/asylum
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from velkoja.holvichecker import HolviOverdueInvoicesHandler
class Command(BaseCommand):
help = 'Import transaction data from Holvi API'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
handler = HolviOverdueInvoicesHandler()
notified = handler.process_overdue()
if options['verbosity'] > 1:
for n,i in notified:
print("Notified %s about %s" % (n.email, i.subject))
Make the command to actually send the overdues
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from velkoja.holvichecker import HolviOverdueInvoicesHandler
class Command(BaseCommand):
help = 'Import transaction data from Holvi API'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
handler = HolviOverdueInvoicesHandler()
notified = handler.process_overdue(send=True)
if options['verbosity'] > 1:
for n,i in notified:
print("Notified %s about %s" % (n.email, i.subject))
|
<commit_before># -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from velkoja.holvichecker import HolviOverdueInvoicesHandler
class Command(BaseCommand):
help = 'Import transaction data from Holvi API'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
handler = HolviOverdueInvoicesHandler()
notified = handler.process_overdue()
if options['verbosity'] > 1:
for n,i in notified:
print("Notified %s about %s" % (n.email, i.subject))
<commit_msg>Make the command to actually send the overdues<commit_after>
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from velkoja.holvichecker import HolviOverdueInvoicesHandler
class Command(BaseCommand):
help = 'Import transaction data from Holvi API'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
handler = HolviOverdueInvoicesHandler()
notified = handler.process_overdue(send=True)
if options['verbosity'] > 1:
for n,i in notified:
print("Notified %s about %s" % (n.email, i.subject))
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from velkoja.holvichecker import HolviOverdueInvoicesHandler
class Command(BaseCommand):
help = 'Import transaction data from Holvi API'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
handler = HolviOverdueInvoicesHandler()
notified = handler.process_overdue()
if options['verbosity'] > 1:
for n,i in notified:
print("Notified %s about %s" % (n.email, i.subject))
Make the command to actually send the overdues# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from velkoja.holvichecker import HolviOverdueInvoicesHandler
class Command(BaseCommand):
help = 'Import transaction data from Holvi API'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
handler = HolviOverdueInvoicesHandler()
notified = handler.process_overdue(send=True)
if options['verbosity'] > 1:
for n,i in notified:
print("Notified %s about %s" % (n.email, i.subject))
|
<commit_before># -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from velkoja.holvichecker import HolviOverdueInvoicesHandler
class Command(BaseCommand):
help = 'Import transaction data from Holvi API'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
handler = HolviOverdueInvoicesHandler()
notified = handler.process_overdue()
if options['verbosity'] > 1:
for n,i in notified:
print("Notified %s about %s" % (n.email, i.subject))
<commit_msg>Make the command to actually send the overdues<commit_after># -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from velkoja.holvichecker import HolviOverdueInvoicesHandler
class Command(BaseCommand):
help = 'Import transaction data from Holvi API'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
handler = HolviOverdueInvoicesHandler()
notified = handler.process_overdue(send=True)
if options['verbosity'] > 1:
for n,i in notified:
print("Notified %s about %s" % (n.email, i.subject))
|
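Since check_holvi_overdue.py above is a standard Django management command, the send=True change can be exercised with "python manage.py check_holvi_overdue --verbosity 2" or driven programmatically; a minimal sketch of the latter (assumes Django settings are already configured, as in a test or an interactive shell):

from django.core.management import call_command

# A verbosity above 1 makes the command print who was notified about which invoice.
call_command('check_holvi_overdue', verbosity=2)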
dbd523861bfb9abad8a52b1de28de85c0f128807
|
plume/linalg.py
|
plume/linalg.py
|
# 一些基本的线性代数算法
import numpy as np
def lu(A):
# LU 分解,要求必须非奇异
A = A.copy()
U = np.zeros(A.shape)
L = np.eye(A.shape[1])
for i in range(A.shape[1] - 1):
U[:i + 1, i] = A[:i + 1, i]
L[i + 1:, i] = A[i + 1:, i] / U[i, i]
A[i + 1:] -= L[i + 1:, i].reshape(-1, 1) * A[i, :]
U[:, -1] = A[:, -1]
return L, U
def ldu(A):
# LDU 分解
L, U = lu(A)
D = U.diagonal()
U = U / D.reshape(-1, 1)
return L, D, U
def forward_substitution(A, b):
# 前向代换
x = np.zeros(b.shape)
for i in range(b.shape[0]):
x[i] = (b[i] - np.sum(x[0:i] * A[i, :i].reshape(-1, 1), axis=0))/A[i, i]
return x
def backward_substitution(A, b):
# 后向代换
x = np.zeros(b.shape)
for i in range(b.shape[0] - 1, -1, -1):
x[i] = (b[i] - np.sum(x[i + 1:] * A[i, i + 1:].reshape(-1, 1), axis=0))/A[i, i]
return x
def solve(A, b):
# 解方程组(A非奇异),b为多组向量
L, U = lu(A)
y = forward_substitution(L, b)
x = backward_substitution(U, y)
return x
def inv(A):
# 矩阵求逆
return solve(A, np.eye(A.shape[0]))
if __name__ == '__main__':
c = np.array([[2., 2., 3.], [4., 7., 7.], [-2., 4., 5.]])
print(np.linalg.inv(c))
print(inv(c))
|
Add some linear algebra algorithms.
|
Add some linear algebra algorithms.
|
Python
|
mit
|
WiseDoge/plume
|
Add some linear algebra algorithms.
|
# 一些基本的线性代数算法
import numpy as np
def lu(A):
# LU 分解,要求必须非奇异
A = A.copy()
U = np.zeros(A.shape)
L = np.eye(A.shape[1])
for i in range(A.shape[1] - 1):
U[:i + 1, i] = A[:i + 1, i]
L[i + 1:, i] = A[i + 1:, i] / U[i, i]
A[i + 1:] -= L[i + 1:, i].reshape(-1, 1) * A[i, :]
U[:, -1] = A[:, -1]
return L, U
def ldu(A):
    # LDU decomposition
L, U = lu(A)
D = U.diagonal()
U = U / D.reshape(-1, 1)
return L, D, U
def forward_substitution(A, b):
    # Forward substitution
x = np.zeros(b.shape)
for i in range(b.shape[0]):
x[i] = (b[i] - np.sum(x[0:i] * A[i, :i].reshape(-1, 1), axis=0))/A[i, i]
return x
def backward_substitution(A, b):
    # Backward substitution
x = np.zeros(b.shape)
for i in range(b.shape[0] - 1, -1, -1):
x[i] = (b[i] - np.sum(x[i + 1:] * A[i, i + 1:].reshape(-1, 1), axis=0))/A[i, i]
return x
def solve(A, b):
    # Solve the system (A non-singular); b may contain multiple right-hand-side vectors
L, U = lu(A)
y = forward_substitution(L, b)
x = backward_substitution(U, y)
return x
def inv(A):
    # Matrix inversion
return solve(A, np.eye(A.shape[0]))
if __name__ == '__main__':
c = np.array([[2., 2., 3.], [4., 7., 7.], [-2., 4., 5.]])
print(np.linalg.inv(c))
print(inv(c))
|
<commit_before><commit_msg>Add some linear algebra algorithms.<commit_after>
|
# Some basic linear algebra algorithms
import numpy as np
def lu(A):
    # LU decomposition; the matrix must be non-singular
A = A.copy()
U = np.zeros(A.shape)
L = np.eye(A.shape[1])
for i in range(A.shape[1] - 1):
U[:i + 1, i] = A[:i + 1, i]
L[i + 1:, i] = A[i + 1:, i] / U[i, i]
A[i + 1:] -= L[i + 1:, i].reshape(-1, 1) * A[i, :]
U[:, -1] = A[:, -1]
return L, U
def ldu(A):
    # LDU decomposition
L, U = lu(A)
D = U.diagonal()
U = U / D.reshape(-1, 1)
return L, D, U
def forward_substitution(A, b):
    # Forward substitution
x = np.zeros(b.shape)
for i in range(b.shape[0]):
x[i] = (b[i] - np.sum(x[0:i] * A[i, :i].reshape(-1, 1), axis=0))/A[i, i]
return x
def backward_substitution(A, b):
    # Backward substitution
x = np.zeros(b.shape)
for i in range(b.shape[0] - 1, -1, -1):
x[i] = (b[i] - np.sum(x[i + 1:] * A[i, i + 1:].reshape(-1, 1), axis=0))/A[i, i]
return x
def solve(A, b):
    # Solve the system (A non-singular); b may contain multiple right-hand-side vectors
L, U = lu(A)
y = forward_substitution(L, b)
x = backward_substitution(U, y)
return x
def inv(A):
    # Matrix inversion
return solve(A, np.eye(A.shape[0]))
if __name__ == '__main__':
c = np.array([[2., 2., 3.], [4., 7., 7.], [-2., 4., 5.]])
print(np.linalg.inv(c))
print(inv(c))
|
Add some linear algebra algorithms.# Some basic linear algebra algorithms
import numpy as np
def lu(A):
    # LU decomposition; the matrix must be non-singular
A = A.copy()
U = np.zeros(A.shape)
L = np.eye(A.shape[1])
for i in range(A.shape[1] - 1):
U[:i + 1, i] = A[:i + 1, i]
L[i + 1:, i] = A[i + 1:, i] / U[i, i]
A[i + 1:] -= L[i + 1:, i].reshape(-1, 1) * A[i, :]
U[:, -1] = A[:, -1]
return L, U
def ldu(A):
    # LDU decomposition
L, U = lu(A)
D = U.diagonal()
U = U / D.reshape(-1, 1)
return L, D, U
def forward_substitution(A, b):
    # Forward substitution
x = np.zeros(b.shape)
for i in range(b.shape[0]):
x[i] = (b[i] - np.sum(x[0:i] * A[i, :i].reshape(-1, 1), axis=0))/A[i, i]
return x
def backward_substitution(A, b):
    # Backward substitution
x = np.zeros(b.shape)
for i in range(b.shape[0] - 1, -1, -1):
x[i] = (b[i] - np.sum(x[i + 1:] * A[i, i + 1:].reshape(-1, 1), axis=0))/A[i, i]
return x
def solve(A, b):
    # Solve the system (A non-singular); b may contain multiple right-hand-side vectors
L, U = lu(A)
y = forward_substitution(L, b)
x = backward_substitution(U, y)
return x
def inv(A):
    # Matrix inversion
return solve(A, np.eye(A.shape[0]))
if __name__ == '__main__':
c = np.array([[2., 2., 3.], [4., 7., 7.], [-2., 4., 5.]])
print(np.linalg.inv(c))
print(inv(c))
|
<commit_before><commit_msg>Add some linear algebra algorithms.<commit_after># Some basic linear algebra algorithms
import numpy as np
def lu(A):
    # LU decomposition; the matrix must be non-singular
A = A.copy()
U = np.zeros(A.shape)
L = np.eye(A.shape[1])
for i in range(A.shape[1] - 1):
U[:i + 1, i] = A[:i + 1, i]
L[i + 1:, i] = A[i + 1:, i] / U[i, i]
A[i + 1:] -= L[i + 1:, i].reshape(-1, 1) * A[i, :]
U[:, -1] = A[:, -1]
return L, U
def ldu(A):
    # LDU decomposition
L, U = lu(A)
D = U.diagonal()
U = U / D.reshape(-1, 1)
return L, D, U
def forward_substitution(A, b):
    # Forward substitution
x = np.zeros(b.shape)
for i in range(b.shape[0]):
x[i] = (b[i] - np.sum(x[0:i] * A[i, :i].reshape(-1, 1), axis=0))/A[i, i]
return x
def backward_substitution(A, b):
    # Backward substitution
x = np.zeros(b.shape)
for i in range(b.shape[0] - 1, -1, -1):
x[i] = (b[i] - np.sum(x[i + 1:] * A[i, i + 1:].reshape(-1, 1), axis=0))/A[i, i]
return x
def solve(A, b):
    # Solve the system (A non-singular); b may contain multiple right-hand-side vectors
L, U = lu(A)
y = forward_substitution(L, b)
x = backward_substitution(U, y)
return x
def inv(A):
    # Matrix inversion
return solve(A, np.eye(A.shape[0]))
if __name__ == '__main__':
c = np.array([[2., 2., 3.], [4., 7., 7.], [-2., 4., 5.]])
print(np.linalg.inv(c))
print(inv(c))
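A quick sanity check for the routines above, assuming they are importable as written (for example from plume/linalg.py); there is no pivoting, so the example matrix is chosen with non-zero pivots.
import numpy as np
from plume.linalg import lu, solve   # assumes the module above is on the path

A = np.array([[2., 2., 3.], [4., 7., 7.], [-2., 4., 5.]])
b = np.array([[1.], [2.], [3.]])
L, U = lu(A)
assert np.allclose(L @ U, A)                             # A = L U
assert np.allclose(solve(A, b), np.linalg.solve(A, b))   # matches NumPy's solver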
|
|
a532da48349c645464b97fb618b83799269a6b4b
|
examples/terminal/powerline.py
|
examples/terminal/powerline.py
|
#!/usr/bin/env python
'''Powerline terminal prompt example.
'''
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from lib.core import Powerline, Segment
from lib.renderers import TerminalSegmentRenderer
powerline = Powerline([
Segment('⭤ SSH', 220, 166, attr=Segment.ATTR_BOLD),
Segment('username', 153, 31),
Segment('~', 248, 239),
Segment('projects', 248, 239),
Segment('powerline', 231, 239, attr=Segment.ATTR_BOLD),
Segment(filler=True),
])
print(powerline.render(TerminalSegmentRenderer()))
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
'''Powerline terminal prompt example.
'''
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from lib.core import Powerline, Segment
from lib.renderers import TerminalSegmentRenderer
powerline = Powerline([
Segment('⭤ SSH', 220, 166, attr=Segment.ATTR_BOLD),
Segment('username', 153, 31),
Segment('~', 248, 239),
Segment('projects', 248, 239),
Segment('powerline', 231, 239, attr=Segment.ATTR_BOLD),
Segment(filler=True),
])
print(powerline.render(TerminalSegmentRenderer()))
|
Fix issues with the terminal prompt example
|
Fix issues with the terminal prompt example
|
Python
|
mit
|
DoctorJellyface/powerline,junix/powerline,prvnkumar/powerline,lukw00/powerline,cyrixhero/powerline,DoctorJellyface/powerline,seanfisk/powerline,S0lll0s/powerline,areteix/powerline,magus424/powerline,wfscheper/powerline,xfumihiro/powerline,bartvm/powerline,firebitsbr/powerline,IvanAli/powerline,russellb/powerline,Luffin/powerline,bartvm/powerline,firebitsbr/powerline,EricSB/powerline,S0lll0s/powerline,dragon788/powerline,magus424/powerline,bezhermoso/powerline,seanfisk/powerline,cyrixhero/powerline,IvanAli/powerline,bezhermoso/powerline,dragon788/powerline,lukw00/powerline,prvnkumar/powerline,blindFS/powerline,IvanAli/powerline,prvnkumar/powerline,blindFS/powerline,keelerm84/powerline,wfscheper/powerline,firebitsbr/powerline,wfscheper/powerline,xfumihiro/powerline,darac/powerline,lukw00/powerline,DoctorJellyface/powerline,magus424/powerline,bezhermoso/powerline,EricSB/powerline,blindFS/powerline,darac/powerline,keelerm84/powerline,russellb/powerline,darac/powerline,EricSB/powerline,areteix/powerline,s0undt3ch/powerline,Liangjianghao/powerline,junix/powerline,s0undt3ch/powerline,Liangjianghao/powerline,kenrachynski/powerline,Luffin/powerline,Luffin/powerline,xfumihiro/powerline,xxxhycl2010/powerline,russellb/powerline,Liangjianghao/powerline,QuLogic/powerline,junix/powerline,kenrachynski/powerline,bartvm/powerline,seanfisk/powerline,dragon788/powerline,cyrixhero/powerline,kenrachynski/powerline,QuLogic/powerline,S0lll0s/powerline,areteix/powerline,xxxhycl2010/powerline,QuLogic/powerline,xxxhycl2010/powerline,s0undt3ch/powerline
|
#!/usr/bin/env python
'''Powerline terminal prompt example.
'''
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from lib.core import Powerline, Segment
from lib.renderers import TerminalSegmentRenderer
powerline = Powerline([
Segment('⭤ SSH', 220, 166, attr=Segment.ATTR_BOLD),
Segment('username', 153, 31),
Segment('~', 248, 239),
Segment('projects', 248, 239),
Segment('powerline', 231, 239, attr=Segment.ATTR_BOLD),
Segment(filler=True),
])
print(powerline.render(TerminalSegmentRenderer()))
Fix issues with the terminal prompt example
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
'''Powerline terminal prompt example.
'''
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from lib.core import Powerline, Segment
from lib.renderers import TerminalSegmentRenderer
powerline = Powerline([
Segment('⭤ SSH', 220, 166, attr=Segment.ATTR_BOLD),
Segment('username', 153, 31),
Segment('~', 248, 239),
Segment('projects', 248, 239),
Segment('powerline', 231, 239, attr=Segment.ATTR_BOLD),
Segment(filler=True),
])
print(powerline.render(TerminalSegmentRenderer()))
|
<commit_before>#!/usr/bin/env python
'''Powerline terminal prompt example.
'''
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from lib.core import Powerline, Segment
from lib.renderers import TerminalSegmentRenderer
powerline = Powerline([
Segment('⭤ SSH', 220, 166, attr=Segment.ATTR_BOLD),
Segment('username', 153, 31),
Segment('~', 248, 239),
Segment('projects', 248, 239),
Segment('powerline', 231, 239, attr=Segment.ATTR_BOLD),
Segment(filler=True),
])
print(powerline.render(TerminalSegmentRenderer()))
<commit_msg>Fix issues with the terminal prompt example<commit_after>
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
'''Powerline terminal prompt example.
'''
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from lib.core import Powerline, Segment
from lib.renderers import TerminalSegmentRenderer
powerline = Powerline([
Segment('⭤ SSH', 220, 166, attr=Segment.ATTR_BOLD),
Segment('username', 153, 31),
Segment('~', 248, 239),
Segment('projects', 248, 239),
Segment('powerline', 231, 239, attr=Segment.ATTR_BOLD),
Segment(filler=True),
])
print(powerline.render(TerminalSegmentRenderer()))
|
#!/usr/bin/env python
'''Powerline terminal prompt example.
'''
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from lib.core import Powerline, Segment
from lib.renderers import TerminalSegmentRenderer
powerline = Powerline([
Segment('⭤ SSH', 220, 166, attr=Segment.ATTR_BOLD),
Segment('username', 153, 31),
Segment('~', 248, 239),
Segment('projects', 248, 239),
Segment('powerline', 231, 239, attr=Segment.ATTR_BOLD),
Segment(filler=True),
])
print(powerline.render(TerminalSegmentRenderer()))
Fix issues with the terminal prompt example#!/usr/bin/env python2
# -*- coding: utf-8 -*-
'''Powerline terminal prompt example.
'''
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from lib.core import Powerline, Segment
from lib.renderers import TerminalSegmentRenderer
powerline = Powerline([
Segment('⭤ SSH', 220, 166, attr=Segment.ATTR_BOLD),
Segment('username', 153, 31),
Segment('~', 248, 239),
Segment('projects', 248, 239),
Segment('powerline', 231, 239, attr=Segment.ATTR_BOLD),
Segment(filler=True),
])
print(powerline.render(TerminalSegmentRenderer()))
|
<commit_before>#!/usr/bin/env python
'''Powerline terminal prompt example.
'''
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from lib.core import Powerline, Segment
from lib.renderers import TerminalSegmentRenderer
powerline = Powerline([
Segment('⭤ SSH', 220, 166, attr=Segment.ATTR_BOLD),
Segment('username', 153, 31),
Segment('~', 248, 239),
Segment('projects', 248, 239),
Segment('powerline', 231, 239, attr=Segment.ATTR_BOLD),
Segment(filler=True),
])
print(powerline.render(TerminalSegmentRenderer()))
<commit_msg>Fix issues with the terminal prompt example<commit_after>#!/usr/bin/env python2
# -*- coding: utf-8 -*-
'''Powerline terminal prompt example.
'''
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from lib.core import Powerline, Segment
from lib.renderers import TerminalSegmentRenderer
powerline = Powerline([
Segment('⭤ SSH', 220, 166, attr=Segment.ATTR_BOLD),
Segment('username', 153, 31),
Segment('~', 248, 239),
Segment('projects', 248, 239),
Segment('powerline', 231, 239, attr=Segment.ATTR_BOLD),
Segment(filler=True),
])
print(powerline.render(TerminalSegmentRenderer()))
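The rewritten sys.path line climbs three directory levels up from the script instead of appending a '../..' relative path. A small illustration with a made-up location; the actual checkout path will differ.
import os.path

script = "/home/user/powerline/examples/terminal/powerline.py"   # hypothetical path
repo_root = os.path.dirname(os.path.dirname(os.path.dirname(script)))
print(repo_root)   # /home/user/powerline, with no '..' components left in sys.path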
|
51a461032449ba10ed030f2d355817a2d2b65005
|
dec01/part1.py
|
dec01/part1.py
|
# Advent of Code
# Dec 1, Part 1
# @geekygirlsarah
import struct
inputFile = "input.txt"
# Tracking vars
x = 0
y = 0
facing = "N"
with open(inputFile) as f:
while True:
contents = f.readline(-1)
if not contents:
# print "End of file"
break
# print ("Contents: ", contents)
coords = contents.split(", ")
# print ("Split contents: ")
# print (coords)
for coord in coords:
dir = coord[0]
num = int(coord[1:])
if dir == "L":
print("Left " + str(num) + " blocks")
if facing == "N":
facing = "W"
x = x - num
elif facing == "E":
facing = "N"
y = y + num
elif facing == "S":
facing = "E"
x = x + num
elif facing == "W":
facing = "S"
y = y - num
elif dir == "R":
print("Right " + str(num) + " blocks")
if facing == "N":
facing = "E"
x = x + num
elif facing == "E":
facing = "S"
y = y - num
elif facing == "S":
facing = "W"
x = x - num
elif facing == "W":
facing = "N"
y = y + num
print("Now facing " + facing)
print(" x = " + str(x) + ", y = " + str(y))
print ("X = " + str(x))
print ("Y = " + str(y))
print ("Distance = " + str(abs(x) + abs(y)))
|
Add 12/1 part 1 solution
|
Add 12/1 part 1 solution
|
Python
|
mit
|
geekygirlsarah/adventofcode2016
|
Add 12/1 part 1 solution
|
# Advent of Code
# Dec 1, Part 1
# @geekygirlsarah
import struct
inputFile = "input.txt"
# Tracking vars
x = 0
y = 0
facing = "N"
with open(inputFile) as f:
while True:
contents = f.readline(-1)
if not contents:
# print "End of file"
break
# print ("Contents: ", contents)
coords = contents.split(", ")
# print ("Split contents: ")
# print (coords)
for coord in coords:
dir = coord[0]
num = int(coord[1:])
if dir == "L":
print("Left " + str(num) + " blocks")
if facing == "N":
facing = "W"
x = x - num
elif facing == "E":
facing = "N"
y = y + num
elif facing == "S":
facing = "E"
x = x + num
elif facing == "W":
facing = "S"
y = y - num
elif dir == "R":
print("Right " + str(num) + " blocks")
if facing == "N":
facing = "E"
x = x + num
elif facing == "E":
facing = "S"
y = y - num
elif facing == "S":
facing = "W"
x = x - num
elif facing == "W":
facing = "N"
y = y + num
print("Now facing " + facing)
print(" x = " + str(x) + ", y = " + str(y))
print ("X = " + str(x))
print ("Y = " + str(y))
print ("Distance = " + str(abs(x) + abs(y)))
|
<commit_before><commit_msg>Add 12/1 part 1 solution<commit_after>
|
# Advent of Code
# Dec 1, Part 1
# @geekygirlsarah
import struct
inputFile = "input.txt"
# Tracking vars
x = 0
y = 0
facing = "N"
with open(inputFile) as f:
while True:
contents = f.readline(-1)
if not contents:
# print "End of file"
break
# print ("Contents: ", contents)
coords = contents.split(", ")
# print ("Split contents: ")
# print (coords)
for coord in coords:
dir = coord[0]
num = int(coord[1:])
if dir == "L":
print("Left " + str(num) + " blocks")
if facing == "N":
facing = "W"
x = x - num
elif facing == "E":
facing = "N"
y = y + num
elif facing == "S":
facing = "E"
x = x + num
elif facing == "W":
facing = "S"
y = y - num
elif dir == "R":
print("Right " + str(num) + " blocks")
if facing == "N":
facing = "E"
x = x + num
elif facing == "E":
facing = "S"
y = y - num
elif facing == "S":
facing = "W"
x = x - num
elif facing == "W":
facing = "N"
y = y + num
print("Now facing " + facing)
print(" x = " + str(x) + ", y = " + str(y))
print ("X = " + str(x))
print ("Y = " + str(y))
print ("Distance = " + str(abs(x) + abs(y)))
|
Add 12/1 part 1 solution# Advent of Code
# Dec 1, Part 1
# @geekygirlsarah
import struct
inputFile = "input.txt"
# Tracking vars
x = 0
y = 0
facing = "N"
with open(inputFile) as f:
while True:
contents = f.readline(-1)
if not contents:
# print "End of file"
break
# print ("Contents: ", contents)
coords = contents.split(", ")
# print ("Split contents: ")
# print (coords)
for coord in coords:
dir = coord[0]
num = int(coord[1:])
if dir == "L":
print("Left " + str(num) + " blocks")
if facing == "N":
facing = "W"
x = x - num
elif facing == "E":
facing = "N"
y = y + num
elif facing == "S":
facing = "E"
x = x + num
elif facing == "W":
facing = "S"
y = y - num
elif dir == "R":
print("Right " + str(num) + " blocks")
if facing == "N":
facing = "E"
x = x + num
elif facing == "E":
facing = "S"
y = y - num
elif facing == "S":
facing = "W"
x = x - num
elif facing == "W":
facing = "N"
y = y + num
print("Now facing " + facing)
print(" x = " + str(x) + ", y = " + str(y))
print ("X = " + str(x))
print ("Y = " + str(y))
print ("Distance = " + str(abs(x) + abs(y)))
|
<commit_before><commit_msg>Add 12/1 part 1 solution<commit_after># Advent of Code
# Dec 1, Part 1
# @geekygirlsarah
import struct
inputFile = "input.txt"
# Tracking vars
x = 0
y = 0
facing = "N"
with open(inputFile) as f:
while True:
contents = f.readline(-1)
if not contents:
# print "End of file"
break
# print ("Contents: ", contents)
coords = contents.split(", ")
# print ("Split contents: ")
# print (coords)
for coord in coords:
dir = coord[0]
num = int(coord[1:])
if dir == "L":
print("Left " + str(num) + " blocks")
if facing == "N":
facing = "W"
x = x - num
elif facing == "E":
facing = "N"
y = y + num
elif facing == "S":
facing = "E"
x = x + num
elif facing == "W":
facing = "S"
y = y - num
elif dir == "R":
print("Right " + str(num) + " blocks")
if facing == "N":
facing = "E"
x = x + num
elif facing == "E":
facing = "S"
y = y - num
elif facing == "S":
facing = "W"
x = x - num
elif facing == "W":
facing = "N"
y = y + num
print("Now facing " + facing)
print(" x = " + str(x) + ", y = " + str(y))
print ("X = " + str(x))
print ("Y = " + str(y))
print ("Distance = " + str(abs(x) + abs(y)))
|
|
87b8b20f9d7e16d274a5f5130cd10beb1b69ff6c
|
test_microservice/toolkit/jsontools.py
|
test_microservice/toolkit/jsontools.py
|
# -*- coding: utf-8 -*-
# __ ___ ___ _ __ ___ ___ _ ___ ___ _
# (_ |\/| | __ | |_ (_ | __ /\ | | | / \ |\/| /\ | | / \ |\ |
# __) | | _|_ | |_ __) | /--\ |_| | \_/ | | /--\ | _|_ \_/ | \|
"""
JSON Toolkit
~~~~~~~~~~~~
Series of tools designed around JSON
Copyright (c) 2017 DELL Inc. or its subsidiaries. All Rights Reserved.
Created on June 26, 2017
"""
__title__ = 'jsontools'
__author__ = 'Akash Kwatra'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2017 DELL Inc.'
import logging
import json
LOG = logging.getLogger(__name__)
def load_test_data(directory, task):
"""Load test data from provided json file"""
with open(directory) as stream:
data = json.load(stream)
url = data["services"][task]["url"]
parameters = data["services"][task]["parameters"]
payload = data["services"][task]["payload"]
return url, parameters, payload
|
Add json tools to toolkit
|
Add json tools to toolkit
Create a new toolkit to interface with json files
|
Python
|
apache-2.0
|
akashkw/smi-test-automation,MichaelRegert/smi-test-automation
|
Add json tools to toolkit
Create a new toolkit to interface with json files
|
# -*- coding: utf-8 -*-
# __ ___ ___ _ __ ___ ___ _ ___ ___ _
# (_ |\/| | __ | |_ (_ | __ /\ | | | / \ |\/| /\ | | / \ |\ |
# __) | | _|_ | |_ __) | /--\ |_| | \_/ | | /--\ | _|_ \_/ | \|
"""
JSON Toolkit
~~~~~~~~~~~~
Series of tools designed around JSON
Copyright (c) 2017 DELL Inc. or its subsidiaries. All Rights Reserved.
Created on June 26, 2017
"""
__title__ = 'jsontools'
__author__ = 'Akash Kwatra'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2017 DELL Inc.'
import logging
import json
LOG = logging.getLogger(__name__)
def load_test_data(directory, task):
"""Load test data from provided json file"""
with open(directory) as stream:
data = json.load(stream)
url = data["services"][task]["url"]
parameters = data["services"][task]["parameters"]
payload = data["services"][task]["payload"]
return url, parameters, payload
|
<commit_before><commit_msg>Add json tools to toolkit
Create a new toolkit to interface with json files<commit_after>
|
# -*- coding: utf-8 -*-
# __ ___ ___ _ __ ___ ___ _ ___ ___ _
# (_ |\/| | __ | |_ (_ | __ /\ | | | / \ |\/| /\ | | / \ |\ |
# __) | | _|_ | |_ __) | /--\ |_| | \_/ | | /--\ | _|_ \_/ | \|
"""
JSON Toolkit
~~~~~~~~~~~~
Series of tools designed around JSON
Copyright (c) 2017 DELL Inc. or its subsidiaries. All Rights Reserved.
Created on June 26, 2017
"""
__title__ = 'jsontools'
__author__ = 'Akash Kwatra'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2017 DELL Inc.'
import logging
import json
LOG = logging.getLogger(__name__)
def load_test_data(directory, task):
"""Load test data from provided json file"""
with open(directory) as stream:
data = json.load(stream)
url = data["services"][task]["url"]
parameters = data["services"][task]["parameters"]
payload = data["services"][task]["payload"]
return url, parameters, payload
|
Add json tools to toolkit
Create a new toolkit to interface with json files# -*- coding: utf-8 -*-
# __ ___ ___ _ __ ___ ___ _ ___ ___ _
# (_ |\/| | __ | |_ (_ | __ /\ | | | / \ |\/| /\ | | / \ |\ |
# __) | | _|_ | |_ __) | /--\ |_| | \_/ | | /--\ | _|_ \_/ | \|
"""
JSON Toolkit
~~~~~~~~~~~~
Series of tools designed around JSON
Copyright (c) 2017 DELL Inc. or its subsidiaries. All Rights Reserved.
Created on June 26, 2017
"""
__title__ = 'jsontools'
__author__ = 'Akash Kwatra'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2017 DELL Inc.'
import logging
import json
LOG = logging.getLogger(__name__)
def load_test_data(directory, task):
"""Load test data from provided json file"""
with open(directory) as stream:
data = json.load(stream)
url = data["services"][task]["url"]
parameters = data["services"][task]["parameters"]
payload = data["services"][task]["payload"]
return url, parameters, payload
|
<commit_before><commit_msg>Add json tools to toolkit
Create a new toolkit to interface with json files<commit_after># -*- coding: utf-8 -*-
# __ ___ ___ _ __ ___ ___ _ ___ ___ _
# (_ |\/| | __ | |_ (_ | __ /\ | | | / \ |\/| /\ | | / \ |\ |
# __) | | _|_ | |_ __) | /--\ |_| | \_/ | | /--\ | _|_ \_/ | \|
"""
JSON Toolkit
~~~~~~~~~~~~
Series of tools designed around JSON
Copyright (c) 2017 DELL Inc. or its subsidiaries. All Rights Reserved.
Created on June 26, 2017
"""
__title__ = 'jsontools'
__author__ = 'Akash Kwatra'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2017 DELL Inc.'
import logging
import json
LOG = logging.getLogger(__name__)
def load_test_data(directory, task):
"""Load test data from provided json file"""
with open(directory) as stream:
data = json.load(stream)
url = data["services"][task]["url"]
parameters = data["services"][task]["parameters"]
payload = data["services"][task]["payload"]
return url, parameters, payload
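load_test_data assumes a specific layout for the JSON file; a hedged sketch of a matching data file and call, with an invented file name and task key.
# Hypothetical contents of "power.json":
# {
#   "services": {
#     "power_on": {
#       "url": "/api/1.0/servers/power",
#       "parameters": {"timeout": 30},
#       "payload": {"state": "on"}
#     }
#   }
# }
url, parameters, payload = load_test_data("power.json", "power_on")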
|
|
408788b42de9cdde08a069db7f5709398c0febd4
|
tests/adapter/mongo/test_rank_model.py
|
tests/adapter/mongo/test_rank_model.py
|
from scout.server.app import create_app
from scout.server.extensions import store
def test_rank_model_from_url_snv(adapter, case_obj):
"""Test parsing and saving a SNV rank model object from a remote config file"""
# GIVEN that the params to retrieve a SNV genetic model are available in the app context
RANK_MODEL_LINK_PREFIX = "https://raw.githubusercontent.com/Clinical-Genomics/reference-files/master/rare-disease/rank_model/rank_model_-v"
RANK_MODEL_LINK_POSTFIX = "-.ini"
# GIVEN a rank model version saved in the case document
rank_model_version = case_obj["rank_model_version"]
# WHEN model is retrieved from remote server
rank_model_dict = adapter.rank_model_from_url(
RANK_MODEL_LINK_PREFIX, rank_model_version, RANK_MODEL_LINK_POSTFIX
)
# THEN rank model should be retrieved
assert isinstance(rank_model_dict, dict)
# And should also be saved to database
assert adapter.rank_model_collection.find_one()
def test_rank_model_from_url_sv(adapter, case_obj):
"""Test parsing and saving a SV rank model object from a remote config file"""
# GIVEN that the params to retrieve a SV genetic model are available in the app context
SV_RANK_MODEL_LINK_PREFIX = "https://raw.githubusercontent.com/Clinical-Genomics/reference-files/master/rare-disease/rank_model/svrank_model_-v"
SV_RANK_MODEL_LINK_POSTFIX = "-.ini"
# GIVEN a rank model version saved in the case document
rank_model_version = case_obj["sv_rank_model_version"]
# WHEN model is retrieved from remote server
rank_model_dict = adapter.rank_model_from_url(
SV_RANK_MODEL_LINK_PREFIX, rank_model_version, SV_RANK_MODEL_LINK_POSTFIX
)
# THEN rank model should be retrieved
assert isinstance(rank_model_dict, dict)
# And should also be saved to database
assert adapter.rank_model_collection.find_one()
|
Add parsing and adding model to db tests
|
Add parsing and adding model to db tests
|
Python
|
bsd-3-clause
|
Clinical-Genomics/scout,Clinical-Genomics/scout,Clinical-Genomics/scout
|
Add parsing and adding model to db tests
|
from scout.server.app import create_app
from scout.server.extensions import store
def test_rank_model_from_url_snv(adapter, case_obj):
"""Test parsing and saving a SNV rank model object from a remote config file"""
# GIVEN that the params to retrieve a SNV genetic model are available in the app context
RANK_MODEL_LINK_PREFIX = "https://raw.githubusercontent.com/Clinical-Genomics/reference-files/master/rare-disease/rank_model/rank_model_-v"
RANK_MODEL_LINK_POSTFIX = "-.ini"
# GIVEN a rank model version saved in the case document
rank_model_version = case_obj["rank_model_version"]
# WHEN model is retrieved from remote server
rank_model_dict = adapter.rank_model_from_url(
RANK_MODEL_LINK_PREFIX, rank_model_version, RANK_MODEL_LINK_POSTFIX
)
# THEN rank model should be retrieved
assert isinstance(rank_model_dict, dict)
# And should also be saved to database
assert adapter.rank_model_collection.find_one()
def test_rank_model_from_url_sv(adapter, case_obj):
"""Test parsing and saving a SV rank model object from a remote config file"""
# GIVEN that the params to retrieve a SV genetic model are available in the app context
SV_RANK_MODEL_LINK_PREFIX = "https://raw.githubusercontent.com/Clinical-Genomics/reference-files/master/rare-disease/rank_model/svrank_model_-v"
SV_RANK_MODEL_LINK_POSTFIX = "-.ini"
# GIVEN a rank model version saved in the case document
rank_model_version = case_obj["sv_rank_model_version"]
# WHEN model is retrieved from remote server
rank_model_dict = adapter.rank_model_from_url(
SV_RANK_MODEL_LINK_PREFIX, rank_model_version, SV_RANK_MODEL_LINK_POSTFIX
)
# THEN rank model should be retrieved
assert isinstance(rank_model_dict, dict)
# And should also be saved to database
assert adapter.rank_model_collection.find_one()
|
<commit_before><commit_msg>Add parsing and adding model to db tests<commit_after>
|
from scout.server.app import create_app
from scout.server.extensions import store
def test_rank_model_from_url_snv(adapter, case_obj):
"""Test parsing and saving a SNV rank model object from a remote config file"""
# GIVEN that the params to retrieve a SNV genetic model are available in the app context
RANK_MODEL_LINK_PREFIX = "https://raw.githubusercontent.com/Clinical-Genomics/reference-files/master/rare-disease/rank_model/rank_model_-v"
RANK_MODEL_LINK_POSTFIX = "-.ini"
# GIVEN a rank model version saved in the case document
rank_model_version = case_obj["rank_model_version"]
# WHEN model is retrieved from remote server
rank_model_dict = adapter.rank_model_from_url(
RANK_MODEL_LINK_PREFIX, rank_model_version, RANK_MODEL_LINK_POSTFIX
)
# THEN rank model should be retrieved
assert isinstance(rank_model_dict, dict)
# And should also be saved to database
assert adapter.rank_model_collection.find_one()
def test_rank_model_from_url_sv(adapter, case_obj):
"""Test parsing and saving a SV rank model object from a remote config file"""
# GIVEN that the params to retrieve a SV genetic model are available in the app context
SV_RANK_MODEL_LINK_PREFIX = "https://raw.githubusercontent.com/Clinical-Genomics/reference-files/master/rare-disease/rank_model/svrank_model_-v"
SV_RANK_MODEL_LINK_POSTFIX = "-.ini"
# GIVEN a rank model version saved in the case document
rank_model_version = case_obj["sv_rank_model_version"]
# WHEN model is retrieved from remote server
rank_model_dict = adapter.rank_model_from_url(
SV_RANK_MODEL_LINK_PREFIX, rank_model_version, SV_RANK_MODEL_LINK_POSTFIX
)
# THEN rank model should be retrieved
assert isinstance(rank_model_dict, dict)
# And should also be saved to database
assert adapter.rank_model_collection.find_one()
|
Add parsing and adding model to db testsfrom scout.server.app import create_app
from scout.server.extensions import store
def test_rank_model_from_url_snv(adapter, case_obj):
"""Test parsing and saving a SNV rank model object from a remote config file"""
# GIVEN that the params to retrieve a SNV genetic model are available in the app context
RANK_MODEL_LINK_PREFIX = "https://raw.githubusercontent.com/Clinical-Genomics/reference-files/master/rare-disease/rank_model/rank_model_-v"
RANK_MODEL_LINK_POSTFIX = "-.ini"
# GIVEN a rank model version saved in the case document
rank_model_version = case_obj["rank_model_version"]
# WHEN model is retrieved from remote server
rank_model_dict = adapter.rank_model_from_url(
RANK_MODEL_LINK_PREFIX, rank_model_version, RANK_MODEL_LINK_POSTFIX
)
# THEN rank model should be retrieved
assert isinstance(rank_model_dict, dict)
# And should also be saved to database
assert adapter.rank_model_collection.find_one()
def test_rank_model_from_url_sv(adapter, case_obj):
"""Test parsing and saving a SV rank model object from a remote config file"""
# GIVEN that the params to retrieve a SV genetic model are available in the app context
SV_RANK_MODEL_LINK_PREFIX = "https://raw.githubusercontent.com/Clinical-Genomics/reference-files/master/rare-disease/rank_model/svrank_model_-v"
SV_RANK_MODEL_LINK_POSTFIX = "-.ini"
# GIVEN a rank model version saved in the case document
rank_model_version = case_obj["sv_rank_model_version"]
# WHEN model is retrieved from remote server
rank_model_dict = adapter.rank_model_from_url(
SV_RANK_MODEL_LINK_PREFIX, rank_model_version, SV_RANK_MODEL_LINK_POSTFIX
)
# THEN rank model should be retrieved
assert isinstance(rank_model_dict, dict)
# And should also be saved to database
assert adapter.rank_model_collection.find_one()
|
<commit_before><commit_msg>Add parsing and adding model to db tests<commit_after>from scout.server.app import create_app
from scout.server.extensions import store
def test_rank_model_from_url_snv(adapter, case_obj):
"""Test parsing and saving a SNV rank model object from a remote config file"""
# GIVEN that the params to retrieve a SNV genetic model are available in the app context
RANK_MODEL_LINK_PREFIX = "https://raw.githubusercontent.com/Clinical-Genomics/reference-files/master/rare-disease/rank_model/rank_model_-v"
RANK_MODEL_LINK_POSTFIX = "-.ini"
# GIVEN a rank model version saved in the case document
rank_model_version = case_obj["rank_model_version"]
# WHEN model is retrieved from remote server
rank_model_dict = adapter.rank_model_from_url(
RANK_MODEL_LINK_PREFIX, rank_model_version, RANK_MODEL_LINK_POSTFIX
)
# THEN rank model should be retrieved
assert isinstance(rank_model_dict, dict)
# And should also be saved to database
assert adapter.rank_model_collection.find_one()
def test_rank_model_from_url_sv(adapter, case_obj):
"""Test parsing and saving a SV rank model object from a remote config file"""
# GIVEN that the params to retrieve a SV genetic model are available in the app context
SV_RANK_MODEL_LINK_PREFIX = "https://raw.githubusercontent.com/Clinical-Genomics/reference-files/master/rare-disease/rank_model/svrank_model_-v"
SV_RANK_MODEL_LINK_POSTFIX = "-.ini"
# GIVEN a rank model version saved in the case document
rank_model_version = case_obj["sv_rank_model_version"]
# WHEN model is retrieved from remote server
rank_model_dict = adapter.rank_model_from_url(
SV_RANK_MODEL_LINK_PREFIX, rank_model_version, SV_RANK_MODEL_LINK_POSTFIX
)
# THEN rank model should be retrieved
assert isinstance(rank_model_dict, dict)
# And should also be saved to database
assert adapter.rank_model_collection.find_one()
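Both tests depend on the adapter joining prefix, version and postfix into one raw-file URL; a hedged illustration of the expected shape, using an invented version number (the actual joining happens inside rank_model_from_url).
prefix = "https://raw.githubusercontent.com/Clinical-Genomics/reference-files/master/rare-disease/rank_model/rank_model_-v"
postfix = "-.ini"
version = "1.25"                     # assumed value of case_obj["rank_model_version"]
url = f"{prefix}{version}{postfix}"  # ...rank_model_-v1.25-.ini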
|
|
07f7281089ef799c369655145fae00a530080d8b
|
tests/test_sparse_matrix_operations.py
|
tests/test_sparse_matrix_operations.py
|
from __future__ import division
import math
import numpy as np
import pyviennacl as p
import scipy.sparse.linalg as spspla
from _common import *
from itertools import product
size, sparsity = 20, 0.1
dtype_tolerances = [('float32', 1.0E-3), ('float64', 1.0E-8)]
dense_matrix_getters = [('matrix', 'get_matrix'),
('matrix_range', 'get_matrix_range'),
('matrix_slice', 'get_matrix_slice'),
('matrix_trans', 'get_matrix_trans'),
('matrix_range_trans', 'get_matrix_range_trans'),
('matrix_slice_trans', 'get_matrix_slice_trans')]
sparse_matrix_types = [('compressed_matrix', 'p.CompressedMatrix'),
('coordinate_matrix', 'p.CoordinateMatrix'),
('ell_matrix', 'p.ELLMatrix'),
('hyb_matrix', 'p.HybridMatrix')]
rhs_vector_getters = [('vector', 'get_vector')] #,
#('vector_range', 'get_vector_range'),
#('vector_slice', 'get_vector_slice')]
Ax_matrix_operations = [
('dgemv', 'dot', 'dot')
]
for op_, d_t_, sparse_type_, vector_getter_ in product(Ax_matrix_operations, dtype_tolerances, sparse_matrix_types, rhs_vector_getters):
dt_ = d_t_[0]
tol_ = d_t_[1]
def factory(dt, tol, sparse_type, vector_getter, numpy_op, vcl_op):
def _test():
vcl_A = get_sparse_matrix(size, sparsity, dt, sparse_type)
numpy_A = vcl_A.as_ndarray()
numpy_x, vcl_x = vector_getter(size, dt)
numpy_b = numpy_op(numpy_A, numpy_x)
vcl_b = vcl_op(vcl_A, vcl_x)
# compare with numpy_solution
act_diff = math.fabs(diff(numpy_b, vcl_b))
assert act_diff <= tol, "diff was {} > tolerance {}".format(act_diff, tol)
return _test
exec("test_%s_%s_A_%s_x_%s = factory(p.%s, %g, %s, %s, %s, %s)" % (op_[0], sparse_type_[0], vector_getter_[0], dt_, dt_, tol_, sparse_type_[1], vector_getter_[1], op_[1], op_[2]))
|
Add sparse matrix operations test
|
Add sparse matrix operations test
|
Python
|
mit
|
viennacl/pyviennacl-dev,opticode/pyviennacl-dev,viennacl/pyviennacl-dev,opticode/pyviennacl-dev
|
Add sparse matrix operations test
|
from __future__ import division
import math
import numpy as np
import pyviennacl as p
import scipy.sparse.linalg as spspla
from _common import *
from itertools import product
size, sparsity = 20, 0.1
dtype_tolerances = [('float32', 1.0E-3), ('float64', 1.0E-8)]
dense_matrix_getters = [('matrix', 'get_matrix'),
('matrix_range', 'get_matrix_range'),
('matrix_slice', 'get_matrix_slice'),
('matrix_trans', 'get_matrix_trans'),
('matrix_range_trans', 'get_matrix_range_trans'),
('matrix_slice_trans', 'get_matrix_slice_trans')]
sparse_matrix_types = [('compressed_matrix', 'p.CompressedMatrix'),
('coordinate_matrix', 'p.CoordinateMatrix'),
('ell_matrix', 'p.ELLMatrix'),
('hyb_matrix', 'p.HybridMatrix')]
rhs_vector_getters = [('vector', 'get_vector')] #,
#('vector_range', 'get_vector_range'),
#('vector_slice', 'get_vector_slice')]
Ax_matrix_operations = [
('dgemv', 'dot', 'dot')
]
for op_, d_t_, sparse_type_, vector_getter_ in product(Ax_matrix_operations, dtype_tolerances, sparse_matrix_types, rhs_vector_getters):
dt_ = d_t_[0]
tol_ = d_t_[1]
def factory(dt, tol, sparse_type, vector_getter, numpy_op, vcl_op):
def _test():
vcl_A = get_sparse_matrix(size, sparsity, dt, sparse_type)
numpy_A = vcl_A.as_ndarray()
numpy_x, vcl_x = vector_getter(size, dt)
numpy_b = numpy_op(numpy_A, numpy_x)
vcl_b = vcl_op(vcl_A, vcl_x)
# compare with numpy_solution
act_diff = math.fabs(diff(numpy_b, vcl_b))
assert act_diff <= tol, "diff was {} > tolerance {}".format(act_diff, tol)
return _test
exec("test_%s_%s_A_%s_x_%s = factory(p.%s, %g, %s, %s, %s, %s)" % (op_[0], sparse_type_[0], vector_getter_[0], dt_, dt_, tol_, sparse_type_[1], vector_getter_[1], op_[1], op_[2]))
|
<commit_before><commit_msg>Add sparse matrix operations test<commit_after>
|
from __future__ import division
import math
import numpy as np
import pyviennacl as p
import scipy.sparse.linalg as spspla
from _common import *
from itertools import product
size, sparsity = 20, 0.1
dtype_tolerances = [('float32', 1.0E-3), ('float64', 1.0E-8)]
dense_matrix_getters = [('matrix', 'get_matrix'),
('matrix_range', 'get_matrix_range'),
('matrix_slice', 'get_matrix_slice'),
('matrix_trans', 'get_matrix_trans'),
('matrix_range_trans', 'get_matrix_range_trans'),
('matrix_slice_trans', 'get_matrix_slice_trans')]
sparse_matrix_types = [('compressed_matrix', 'p.CompressedMatrix'),
('coordinate_matrix', 'p.CoordinateMatrix'),
('ell_matrix', 'p.ELLMatrix'),
('hyb_matrix', 'p.HybridMatrix')]
rhs_vector_getters = [('vector', 'get_vector')] #,
#('vector_range', 'get_vector_range'),
#('vector_slice', 'get_vector_slice')]
Ax_matrix_operations = [
('dgemv', 'dot', 'dot')
]
for op_, d_t_, sparse_type_, vector_getter_ in product(Ax_matrix_operations, dtype_tolerances, sparse_matrix_types, rhs_vector_getters):
dt_ = d_t_[0]
tol_ = d_t_[1]
def factory(dt, tol, sparse_type, vector_getter, numpy_op, vcl_op):
def _test():
vcl_A = get_sparse_matrix(size, sparsity, dt, sparse_type)
numpy_A = vcl_A.as_ndarray()
numpy_x, vcl_x = vector_getter(size, dt)
numpy_b = numpy_op(numpy_A, numpy_x)
vcl_b = vcl_op(vcl_A, vcl_x)
# compare with numpy_solution
act_diff = math.fabs(diff(numpy_b, vcl_b))
assert act_diff <= tol, "diff was {} > tolerance {}".format(act_diff, tol)
return _test
exec("test_%s_%s_A_%s_x_%s = factory(p.%s, %g, %s, %s, %s, %s)" % (op_[0], sparse_type_[0], vector_getter_[0], dt_, dt_, tol_, sparse_type_[1], vector_getter_[1], op_[1], op_[2]))
|
Add sparse matrix operations testfrom __future__ import division
import math
import numpy as np
import pyviennacl as p
import scipy.sparse.linalg as spspla
from _common import *
from itertools import product
size, sparsity = 20, 0.1
dtype_tolerances = [('float32', 1.0E-3), ('float64', 1.0E-8)]
dense_matrix_getters = [('matrix', 'get_matrix'),
('matrix_range', 'get_matrix_range'),
('matrix_slice', 'get_matrix_slice'),
('matrix_trans', 'get_matrix_trans'),
('matrix_range_trans', 'get_matrix_range_trans'),
('matrix_slice_trans', 'get_matrix_slice_trans')]
sparse_matrix_types = [('compressed_matrix', 'p.CompressedMatrix'),
('coordinate_matrix', 'p.CoordinateMatrix'),
('ell_matrix', 'p.ELLMatrix'),
('hyb_matrix', 'p.HybridMatrix')]
rhs_vector_getters = [('vector', 'get_vector')] #,
#('vector_range', 'get_vector_range'),
#('vector_slice', 'get_vector_slice')]
Ax_matrix_operations = [
('dgemv', 'dot', 'dot')
]
for op_, d_t_, sparse_type_, vector_getter_ in product(Ax_matrix_operations, dtype_tolerances, sparse_matrix_types, rhs_vector_getters):
dt_ = d_t_[0]
tol_ = d_t_[1]
def factory(dt, tol, sparse_type, vector_getter, numpy_op, vcl_op):
def _test():
vcl_A = get_sparse_matrix(size, sparsity, dt, sparse_type)
numpy_A = vcl_A.as_ndarray()
numpy_x, vcl_x = vector_getter(size, dt)
numpy_b = numpy_op(numpy_A, numpy_x)
vcl_b = vcl_op(vcl_A, vcl_x)
# compare with numpy_solution
act_diff = math.fabs(diff(numpy_b, vcl_b))
assert act_diff <= tol, "diff was {} > tolerance {}".format(act_diff, tol)
return _test
exec("test_%s_%s_A_%s_x_%s = factory(p.%s, %g, %s, %s, %s, %s)" % (op_[0], sparse_type_[0], vector_getter_[0], dt_, dt_, tol_, sparse_type_[1], vector_getter_[1], op_[1], op_[2]))
|
<commit_before><commit_msg>Add sparse matrix operations test<commit_after>from __future__ import division
import math
import numpy as np
import pyviennacl as p
import scipy.sparse.linalg as spspla
from _common import *
from itertools import product
size, sparsity = 20, 0.1
dtype_tolerances = [('float32', 1.0E-3), ('float64', 1.0E-8)]
dense_matrix_getters = [('matrix', 'get_matrix'),
('matrix_range', 'get_matrix_range'),
('matrix_slice', 'get_matrix_slice'),
('matrix_trans', 'get_matrix_trans'),
('matrix_range_trans', 'get_matrix_range_trans'),
('matrix_slice_trans', 'get_matrix_slice_trans')]
sparse_matrix_types = [('compressed_matrix', 'p.CompressedMatrix'),
('coordinate_matrix', 'p.CoordinateMatrix'),
('ell_matrix', 'p.ELLMatrix'),
('hyb_matrix', 'p.HybridMatrix')]
rhs_vector_getters = [('vector', 'get_vector')] #,
#('vector_range', 'get_vector_range'),
#('vector_slice', 'get_vector_slice')]
Ax_matrix_operations = [
('dgemv', 'dot', 'dot')
]
for op_, d_t_, sparse_type_, vector_getter_ in product(Ax_matrix_operations, dtype_tolerances, sparse_matrix_types, rhs_vector_getters):
dt_ = d_t_[0]
tol_ = d_t_[1]
def factory(dt, tol, sparse_type, vector_getter, numpy_op, vcl_op):
def _test():
vcl_A = get_sparse_matrix(size, sparsity, dt, sparse_type)
numpy_A = vcl_A.as_ndarray()
numpy_x, vcl_x = vector_getter(size, dt)
numpy_b = numpy_op(numpy_A, numpy_x)
vcl_b = vcl_op(vcl_A, vcl_x)
# compare with numpy_solution
act_diff = math.fabs(diff(numpy_b, vcl_b))
assert act_diff <= tol, "diff was {} > tolerance {}".format(act_diff, tol)
return _test
exec("test_%s_%s_A_%s_x_%s = factory(p.%s, %g, %s, %s, %s, %s)" % (op_[0], sparse_type_[0], vector_getter_[0], dt_, dt_, tol_, sparse_type_[1], vector_getter_[1], op_[1], op_[2]))
|
|
a020e8c35c35a3e0ec16b42abd62ee689351b7f6
|
tests/functional/test_truststore.py
|
tests/functional/test_truststore.py
|
import sys
from typing import Any, Callable
import pytest
from tests.lib import PipTestEnvironment, TestPipResult
PipRunner = Callable[..., TestPipResult]
@pytest.fixture()
def pip(script: PipTestEnvironment) -> PipRunner:
def pip(*args: str, **kwargs: Any) -> TestPipResult:
return script.pip(*args, "--use-feature=truststore", **kwargs)
return pip
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="3.10 can run truststore")
def test_truststore_error_on_old_python(pip: PipRunner) -> None:
result = pip(
"install",
"--no-index",
"does-not-matter",
expect_error=True,
)
assert "The truststore feature is only available for Python 3.10+" in result.stderr
@pytest.mark.skipif(sys.version_info < (3, 10), reason="3.10+ required for truststore")
def test_truststore_error_without_preinstalled(pip: PipRunner) -> None:
result = pip(
"install",
"--no-index",
"does-not-matter",
expect_error=True,
)
assert (
"To use the truststore feature, 'truststore' must be installed into "
"pip's current environment."
) in result.stderr
@pytest.mark.skipif(sys.version_info < (3, 10), reason="3.10+ required for truststore")
@pytest.mark.network
@pytest.mark.parametrize(
"package",
[
"INITools",
"https://github.com/pypa/pip-test-package/archive/refs/heads/master.zip",
],
ids=["PyPI", "GitHub"],
)
def test_trustore_can_install(
script: PipTestEnvironment,
pip: PipRunner,
package: str,
) -> None:
script.pip("install", "truststore")
result = pip("install", package)
assert "Successfully installed" in result.stdout
|
Add very simple tests to ensure feature is enabled
|
Add very simple tests to ensure feature is enabled
|
Python
|
mit
|
pfmoore/pip,pradyunsg/pip,pradyunsg/pip,sbidoul/pip,pfmoore/pip,sbidoul/pip,pypa/pip,pypa/pip
|
Add very simple tests to ensure feature is enabled
|
import sys
from typing import Any, Callable
import pytest
from tests.lib import PipTestEnvironment, TestPipResult
PipRunner = Callable[..., TestPipResult]
@pytest.fixture()
def pip(script: PipTestEnvironment) -> PipRunner:
def pip(*args: str, **kwargs: Any) -> TestPipResult:
return script.pip(*args, "--use-feature=truststore", **kwargs)
return pip
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="3.10 can run truststore")
def test_truststore_error_on_old_python(pip: PipRunner) -> None:
result = pip(
"install",
"--no-index",
"does-not-matter",
expect_error=True,
)
assert "The truststore feature is only available for Python 3.10+" in result.stderr
@pytest.mark.skipif(sys.version_info < (3, 10), reason="3.10+ required for truststore")
def test_truststore_error_without_preinstalled(pip: PipRunner) -> None:
result = pip(
"install",
"--no-index",
"does-not-matter",
expect_error=True,
)
assert (
"To use the truststore feature, 'truststore' must be installed into "
"pip's current environment."
) in result.stderr
@pytest.mark.skipif(sys.version_info < (3, 10), reason="3.10+ required for truststore")
@pytest.mark.network
@pytest.mark.parametrize(
"package",
[
"INITools",
"https://github.com/pypa/pip-test-package/archive/refs/heads/master.zip",
],
ids=["PyPI", "GitHub"],
)
def test_trustore_can_install(
script: PipTestEnvironment,
pip: PipRunner,
package: str,
) -> None:
script.pip("install", "truststore")
result = pip("install", package)
assert "Successfully installed" in result.stdout
|
<commit_before><commit_msg>Add very simple tests to ensure feature is enabled<commit_after>
|
import sys
from typing import Any, Callable
import pytest
from tests.lib import PipTestEnvironment, TestPipResult
PipRunner = Callable[..., TestPipResult]
@pytest.fixture()
def pip(script: PipTestEnvironment) -> PipRunner:
def pip(*args: str, **kwargs: Any) -> TestPipResult:
return script.pip(*args, "--use-feature=truststore", **kwargs)
return pip
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="3.10 can run truststore")
def test_truststore_error_on_old_python(pip: PipRunner) -> None:
result = pip(
"install",
"--no-index",
"does-not-matter",
expect_error=True,
)
assert "The truststore feature is only available for Python 3.10+" in result.stderr
@pytest.mark.skipif(sys.version_info < (3, 10), reason="3.10+ required for truststore")
def test_truststore_error_without_preinstalled(pip: PipRunner) -> None:
result = pip(
"install",
"--no-index",
"does-not-matter",
expect_error=True,
)
assert (
"To use the truststore feature, 'truststore' must be installed into "
"pip's current environment."
) in result.stderr
@pytest.mark.skipif(sys.version_info < (3, 10), reason="3.10+ required for truststore")
@pytest.mark.network
@pytest.mark.parametrize(
"package",
[
"INITools",
"https://github.com/pypa/pip-test-package/archive/refs/heads/master.zip",
],
ids=["PyPI", "GitHub"],
)
def test_trustore_can_install(
script: PipTestEnvironment,
pip: PipRunner,
package: str,
) -> None:
script.pip("install", "truststore")
result = pip("install", package)
assert "Successfully installed" in result.stdout
|
Add very simple tests to ensure feature is enabledimport sys
from typing import Any, Callable
import pytest
from tests.lib import PipTestEnvironment, TestPipResult
PipRunner = Callable[..., TestPipResult]
@pytest.fixture()
def pip(script: PipTestEnvironment) -> PipRunner:
def pip(*args: str, **kwargs: Any) -> TestPipResult:
return script.pip(*args, "--use-feature=truststore", **kwargs)
return pip
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="3.10 can run truststore")
def test_truststore_error_on_old_python(pip: PipRunner) -> None:
result = pip(
"install",
"--no-index",
"does-not-matter",
expect_error=True,
)
assert "The truststore feature is only available for Python 3.10+" in result.stderr
@pytest.mark.skipif(sys.version_info < (3, 10), reason="3.10+ required for truststore")
def test_truststore_error_without_preinstalled(pip: PipRunner) -> None:
result = pip(
"install",
"--no-index",
"does-not-matter",
expect_error=True,
)
assert (
"To use the truststore feature, 'truststore' must be installed into "
"pip's current environment."
) in result.stderr
@pytest.mark.skipif(sys.version_info < (3, 10), reason="3.10+ required for truststore")
@pytest.mark.network
@pytest.mark.parametrize(
"package",
[
"INITools",
"https://github.com/pypa/pip-test-package/archive/refs/heads/master.zip",
],
ids=["PyPI", "GitHub"],
)
def test_trustore_can_install(
script: PipTestEnvironment,
pip: PipRunner,
package: str,
) -> None:
script.pip("install", "truststore")
result = pip("install", package)
assert "Successfully installed" in result.stdout
|
<commit_before><commit_msg>Add very simple tests to ensure feature is enabled<commit_after>import sys
from typing import Any, Callable
import pytest
from tests.lib import PipTestEnvironment, TestPipResult
PipRunner = Callable[..., TestPipResult]
@pytest.fixture()
def pip(script: PipTestEnvironment) -> PipRunner:
def pip(*args: str, **kwargs: Any) -> TestPipResult:
return script.pip(*args, "--use-feature=truststore", **kwargs)
return pip
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="3.10 can run truststore")
def test_truststore_error_on_old_python(pip: PipRunner) -> None:
result = pip(
"install",
"--no-index",
"does-not-matter",
expect_error=True,
)
assert "The truststore feature is only available for Python 3.10+" in result.stderr
@pytest.mark.skipif(sys.version_info < (3, 10), reason="3.10+ required for truststore")
def test_truststore_error_without_preinstalled(pip: PipRunner) -> None:
result = pip(
"install",
"--no-index",
"does-not-matter",
expect_error=True,
)
assert (
"To use the truststore feature, 'truststore' must be installed into "
"pip's current environment."
) in result.stderr
@pytest.mark.skipif(sys.version_info < (3, 10), reason="3.10+ required for truststore")
@pytest.mark.network
@pytest.mark.parametrize(
"package",
[
"INITools",
"https://github.com/pypa/pip-test-package/archive/refs/heads/master.zip",
],
ids=["PyPI", "GitHub"],
)
def test_trustore_can_install(
script: PipTestEnvironment,
pip: PipRunner,
package: str,
) -> None:
script.pip("install", "truststore")
result = pip("install", package)
assert "Successfully installed" in result.stdout
|
|
8e9e5c7965d50113dda4e69c9ec0248cd3db0037
|
nova/db/sqlalchemy/migrate_repo/versions/140_drop_unused_postgresql_volume_sequences.py
|
nova/db/sqlalchemy/migrate_repo/versions/140_drop_unused_postgresql_volume_sequences.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# NOTE(dprince): Remove unused snapshots/volumes sequences.
# These are leftovers from the ID --> UUID conversion for these tables
# that occurred in Folsom.
if migrate_engine.name == "postgresql":
base_query = """SELECT COUNT(*) FROM pg_class c
WHERE c.relkind = 'S'
AND relname = '%s';"""
result = migrate_engine.execute(base_query % "snapshots_id_seq")
if result.scalar() > 0:
sql = "DROP SEQUENCE snapshots_id_seq CASCADE;"
migrate_engine.execute(sql)
result = migrate_engine.execute(base_query % "volumes_id_seq")
if result.scalar() > 0:
sql = "DROP SEQUENCE volumes_id_seq CASCADE;"
migrate_engine.execute(sql)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
if migrate_engine.name == "postgresql":
sql = """CREATE SEQUENCE snapshots_id_seq START WITH 1 INCREMENT BY 1
NO MINVALUE NO MAXVALUE CACHE 1;
ALTER SEQUENCE snapshots_id_seq OWNED BY snapshots.id;
SELECT pg_catalog.setval('snapshots_id_seq', 1, false);
ALTER TABLE ONLY snapshots ALTER COLUMN id SET DEFAULT
nextval('snapshots_id_seq'::regclass);"""
sql += """CREATE SEQUENCE volumes_id_seq START WITH 1 INCREMENT BY 1
NO MINVALUE NO MAXVALUE CACHE 1;
ALTER SEQUENCE volumes_id_seq OWNED BY volumes.id;
SELECT pg_catalog.setval('volumes_id_seq', 1, false);
ALTER TABLE ONLY volumes ALTER COLUMN id SET DEFAULT
nextval('volumes_id_seq'::regclass);"""
migrate_engine.execute(sql)
|
Drop unused PostgreSQL sequences from Folsom.
|
Drop unused PostgreSQL sequences from Folsom.
In Folsom the snapshots and volumes tables were converted to use
UUID's. When we performed this conversion in Folsom a couple of
unused PostgreSQL sequences were left behind.
This migration removes the snapshots_id_seq and volumes_id_seq
if they exist in the schema when using PostgreSQL.
Fixes LP Bug #1080786.
Change-Id: I075e2afebcb5236f96ab5d6ab13e249d078da86b
|
Python
|
apache-2.0
|
klmitch/nova,shahar-stratoscale/nova,cernops/nova,rajalokan/nova,aristanetworks/arista-ovs-nova,adelina-t/nova,jianghuaw/nova,berrange/nova,vmturbo/nova,eayunstack/nova,blueboxgroup/nova,eonpatapon/nova,watonyweng/nova,Francis-Liu/animated-broccoli,eonpatapon/nova,ruslanloman/nova,houshengbo/nova_vmware_compute_driver,Francis-Liu/animated-broccoli,MountainWei/nova,leilihh/novaha,mgagne/nova,sacharya/nova,vmturbo/nova,BeyondTheClouds/nova,TieWei/nova,gooddata/openstack-nova,dstroppa/openstack-smartos-nova-grizzly,fajoy/nova,alexandrucoman/vbox-nova-driver,maoy/zknova,tudorvio/nova,DirectXMan12/nova-hacking,rickerc/nova_audit,alexandrucoman/vbox-nova-driver,hanlind/nova,Stavitsky/nova,sridevikoushik31/nova,edulramirez/nova,isyippee/nova,badock/nova,maheshp/novatest,openstack/nova,ted-gould/nova,Juniper/nova,citrix-openstack-build/nova,Metaswitch/calico-nova,dims/nova,sridevikoushik31/nova,spring-week-topos/nova-week,kimjaejoong/nova,sacharya/nova,citrix-openstack-build/nova,openstack/nova,bclau/nova,JianyuWang/nova,ted-gould/nova,cyx1231st/nova,Juniper/nova,mikalstill/nova,imsplitbit/nova,BeyondTheClouds/nova,scripnichenko/nova,cloudbau/nova,apporc/nova,zzicewind/nova,ewindisch/nova,redhat-openstack/nova,mmnelemane/nova,affo/nova,spring-week-topos/nova-week,mahak/nova,bclau/nova,tanglei528/nova,imsplitbit/nova,leilihh/nova,Brocade-OpenSource/OpenStack-DNRM-Nova,petrutlucian94/nova,Brocade-OpenSource/OpenStack-DNRM-Nova,mikalstill/nova,qwefi/nova,cloudbase/nova-virtualbox,leilihh/nova,projectcalico/calico-nova,gspilio/nova,Juniper/nova,zhimin711/nova,mmnelemane/nova,houshengbo/nova_vmware_compute_driver,angdraug/nova,scripnichenko/nova,saleemjaveds/https-github.com-openstack-nova,virtualopensystems/nova,jianghuaw/nova,klmitch/nova,vmturbo/nova,OpenAcademy-OpenStack/nova-scheduler,openstack/nova,gooddata/openstack-nova,badock/nova,bigswitch/nova,luogangyi/bcec-nova,LoHChina/nova,zaina/nova,jianghuaw/nova,tealover/nova,tealover/nova,CiscoSystems/nova,tangfeixiong/nova,sridevikoushik31/openstack,aristanetworks/arista-ovs-nova,tudorvio/nova,sebrandon1/nova,luogangyi/bcec-nova,shootstar/novatest,thomasem/nova,JioCloud/nova_test_latest,klmitch/nova,bgxavier/nova,CloudServer/nova,rahulunair/nova,watonyweng/nova,Juniper/nova,devendermishrajio/nova_test_latest,yatinkumbhare/openstack-nova,saleemjaveds/https-github.com-openstack-nova,cernops/nova,hanlind/nova,double12gzh/nova,yosshy/nova,Tehsmash/nova,gooddata/openstack-nova,nikesh-mahalka/nova,yrobla/nova,shahar-stratoscale/nova,CEG-FYP-OpenStack/scheduler,LoHChina/nova,tianweizhang/nova,dstroppa/openstack-smartos-nova-grizzly,noironetworks/nova,Triv90/Nova,isyippee/nova,gspilio/nova,apporc/nova,eharney/nova,dawnpower/nova,NeCTAR-RC/nova,rickerc/nova_audit,takeshineshiro/nova,CiscoSystems/nova,projectcalico/calico-nova,maelnor/nova,petrutlucian94/nova,iuliat/nova,gspilio/nova,mikalstill/nova,rrader/nova-docker-plugin,thomasem/nova,CloudServer/nova,mgagne/nova,mandeepdhami/nova,zzicewind/nova,BeyondTheClouds/nova,qwefi/nova,TwinkleChawla/nova,sridevikoushik31/nova,Triv90/Nova,CCI-MOC/nova,Tehsmash/nova,rahulunair/nova,ewindisch/nova,eharney/nova,Yusuke1987/openstack_template,rahulunair/nova,silenceli/nova,tangfeixiong/nova,jeffrey4l/nova,devendermishrajio/nova_test_latest,DirectXMan12/nova-hacking,gooddata/openstack-nova,shail2810/nova,cyx1231st/nova,barnsnake351/nova,Yuriy-Leonov/nova,JianyuWang/nova,hanlind/nova,tanglei528/nova,JioCloud/nova,JioCloud/nova_test_latest,yrobla/nova,Yusuke1987/openstack_template,dims/nova,petrutlucian94/nova_dev,fajoy/nov
a,bigswitch/nova,mahak/nova,joker946/nova,devendermishrajio/nova,felixma/nova,TieWei/nova,vladikr/nova_drafts,jianghuaw/nova,viggates/nova,JioCloud/nova,viggates/nova,shootstar/novatest,affo/nova,phenoxim/nova,SUSE-Cloud/nova,alaski/nova,belmiromoreira/nova,j-carpentier/nova,sebrandon1/nova,rrader/nova-docker-plugin,OpenAcademy-OpenStack/nova-scheduler,vmturbo/nova,orbitfp7/nova,akash1808/nova_test_latest,houshengbo/nova_vmware_compute_driver,fajoy/nova,sridevikoushik31/nova,maheshp/novatest,alvarolopez/nova,silenceli/nova,aristanetworks/arista-ovs-nova,orbitfp7/nova,Triv90/Nova,devendermishrajio/nova,varunarya10/nova_test_latest,alvarolopez/nova,akash1808/nova,cloudbase/nova,takeshineshiro/nova,vladikr/nova_drafts,nikesh-mahalka/nova,plumgrid/plumgrid-nova,maheshp/novatest,belmiromoreira/nova,zaina/nova,Yuriy-Leonov/nova,ntt-sic/nova,maelnor/nova,petrutlucian94/nova_dev,akash1808/nova,cernops/nova,whitepages/nova,fnordahl/nova,cloudbase/nova,blueboxgroup/nova,cloudbase/nova-virtualbox,CCI-MOC/nova,phenoxim/nova,shail2810/nova,dawnpower/nova,barnsnake351/nova,yosshy/nova,MountainWei/nova,rajalokan/nova,SUSE-Cloud/nova,zhimin711/nova,virtualopensystems/nova,alaski/nova,iuliat/nova,maoy/zknova,redhat-openstack/nova,raildo/nova,Stavitsky/nova,kimjaejoong/nova,yatinkumbhare/openstack-nova,jeffrey4l/nova,cloudbau/nova,leilihh/novaha,devoid/nova,raildo/nova,Metaswitch/calico-nova,angdraug/nova,CEG-FYP-OpenStack/scheduler,whitepages/nova,tianweizhang/nova,yrobla/nova,noironetworks/nova,klmitch/nova,adelina-t/nova,edulramirez/nova,NeCTAR-RC/nova,plumgrid/plumgrid-nova,DirectXMan12/nova-hacking,felixma/nova,varunarya10/nova_test_latest,ruslanloman/nova,devoid/nova,joker946/nova,sridevikoushik31/openstack,double12gzh/nova,rajalokan/nova,bgxavier/nova,j-carpentier/nova,berrange/nova,eayunstack/nova,cloudbase/nova,sridevikoushik31/openstack,mahak/nova,fnordahl/nova,dstroppa/openstack-smartos-nova-grizzly,sebrandon1/nova,ntt-sic/nova,mandeepdhami/nova,maoy/zknova,rajalokan/nova,TwinkleChawla/nova,akash1808/nova_test_latest
|
Drop unused PostgreSQL sequences from Folsom.
In Folsom the snapshots and volumes tables were converted to use
UUID's. When we performed this conversion in Folsom a couple of
unused PostgreSQL sequences were left behind.
This migration removes the snapshots_id_seq and volumes_id_seq
if they exist in the schema when using PostgreSQL.
Fixes LP Bug #1080786.
Change-Id: I075e2afebcb5236f96ab5d6ab13e249d078da86b
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# NOTE(dprince): Remove unused snapshots/volumes sequences.
# These are leftovers from the ID --> UUID conversion for these tables
# that occurred in Folsom.
if migrate_engine.name == "postgresql":
base_query = """SELECT COUNT(*) FROM pg_class c
WHERE c.relkind = 'S'
AND relname = '%s';"""
result = migrate_engine.execute(base_query % "snapshots_id_seq")
if result.scalar() > 0:
sql = "DROP SEQUENCE snapshots_id_seq CASCADE;"
migrate_engine.execute(sql)
result = migrate_engine.execute(base_query % "volumes_id_seq")
if result.scalar() > 0:
sql = "DROP SEQUENCE volumes_id_seq CASCADE;"
migrate_engine.execute(sql)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
if migrate_engine.name == "postgresql":
sql = """CREATE SEQUENCE snapshots_id_seq START WITH 1 INCREMENT BY 1
NO MINVALUE NO MAXVALUE CACHE 1;
ALTER SEQUENCE snapshots_id_seq OWNED BY snapshots.id;
SELECT pg_catalog.setval('snapshots_id_seq', 1, false);
ALTER TABLE ONLY snapshots ALTER COLUMN id SET DEFAULT
nextval('snapshots_id_seq'::regclass);"""
sql += """CREATE SEQUENCE volumes_id_seq START WITH 1 INCREMENT BY 1
NO MINVALUE NO MAXVALUE CACHE 1;
ALTER SEQUENCE volumes_id_seq OWNED BY volumes.id;
SELECT pg_catalog.setval('volumes_id_seq', 1, false);
ALTER TABLE ONLY volumes ALTER COLUMN id SET DEFAULT
nextval('volumes_id_seq'::regclass);"""
migrate_engine.execute(sql)
|
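For readers who want to confirm the leftover Folsom sequences by hand before or after running the migration above, the snippet below is a minimal standalone sketch of the same pg_class query. It assumes SQLAlchemy with a psycopg2-backed PostgreSQL connection; the connection URL is a placeholder, and bound parameters are used here instead of the %-interpolation in the migration itself.
# Minimal sketch (assumed connection details): report whether the unused Folsom sequences still exist.
from sqlalchemy import create_engine, text
engine = create_engine('postgresql://user:password@localhost/nova')  # placeholder DSN
check = text("SELECT COUNT(*) FROM pg_class WHERE relkind = 'S' AND relname = :name")
with engine.connect() as conn:
    for seq in ('snapshots_id_seq', 'volumes_id_seq'):
        count = conn.execute(check, {'name': seq}).scalar()
        print('%s still present: %s' % (seq, count > 0))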
<commit_before><commit_msg>Drop unused PostgreSQL sequences from Folsom.
In Folsom the snapshots and volumes tables were converted to use
UUID's. When we performed this conversion in Folsom a couple of
unused PostgreSQL sequences were left behind.
This migration removes the snapshots_id_seq and volumes_id_seq
if they exist in the schema when using PostgreSQL.
Fixes LP Bug #1080786.
Change-Id: I075e2afebcb5236f96ab5d6ab13e249d078da86b<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# NOTE(dprince): Remove unused snapshots/volumes sequences.
# These are leftovers from the ID --> UUID conversion for these tables
# that occurred in Folsom.
if migrate_engine.name == "postgresql":
base_query = """SELECT COUNT(*) FROM pg_class c
WHERE c.relkind = 'S'
AND relname = '%s';"""
result = migrate_engine.execute(base_query % "snapshots_id_seq")
if result.scalar() > 0:
sql = "DROP SEQUENCE snapshots_id_seq CASCADE;"
migrate_engine.execute(sql)
result = migrate_engine.execute(base_query % "volumes_id_seq")
if result.scalar() > 0:
sql = "DROP SEQUENCE volumes_id_seq CASCADE;"
migrate_engine.execute(sql)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
if migrate_engine.name == "postgresql":
sql = """CREATE SEQUENCE snapshots_id_seq START WITH 1 INCREMENT BY 1
NO MINVALUE NO MAXVALUE CACHE 1;
ALTER SEQUENCE snapshots_id_seq OWNED BY snapshots.id;
SELECT pg_catalog.setval('snapshots_id_seq', 1, false);
ALTER TABLE ONLY snapshots ALTER COLUMN id SET DEFAULT
nextval('snapshots_id_seq'::regclass);"""
sql += """CREATE SEQUENCE volumes_id_seq START WITH 1 INCREMENT BY 1
NO MINVALUE NO MAXVALUE CACHE 1;
ALTER SEQUENCE volumes_id_seq OWNED BY volumes.id;
SELECT pg_catalog.setval('volumes_id_seq', 1, false);
ALTER TABLE ONLY volumes ALTER COLUMN id SET DEFAULT
nextval('volumes_id_seq'::regclass);"""
migrate_engine.execute(sql)
|
Drop unused PostgreSQL sequences from Folsom.
In Folsom the snapshots and volumes tables were converted to use
UUID's. When we performed this conversion in Folsom a couple of
unused PostgreSQL sequences were left behind.
This migration removes the snapshots_id_seq and volumes_id_seq
if they exist in the schema when using PostgreSQL.
Fixes LP Bug #1080786.
Change-Id: I075e2afebcb5236f96ab5d6ab13e249d078da86b# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# NOTE(dprince): Remove unused snapshots/volumes sequences.
# These are leftovers from the ID --> UUID conversion for these tables
# that occurred in Folsom.
if migrate_engine.name == "postgresql":
base_query = """SELECT COUNT(*) FROM pg_class c
WHERE c.relkind = 'S'
AND relname = '%s';"""
result = migrate_engine.execute(base_query % "snapshots_id_seq")
if result.scalar() > 0:
sql = "DROP SEQUENCE snapshots_id_seq CASCADE;"
migrate_engine.execute(sql)
result = migrate_engine.execute(base_query % "volumes_id_seq")
if result.scalar() > 0:
sql = "DROP SEQUENCE volumes_id_seq CASCADE;"
migrate_engine.execute(sql)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
if migrate_engine.name == "postgresql":
sql = """CREATE SEQUENCE snapshots_id_seq START WITH 1 INCREMENT BY 1
NO MINVALUE NO MAXVALUE CACHE 1;
ALTER SEQUENCE snapshots_id_seq OWNED BY snapshots.id;
SELECT pg_catalog.setval('snapshots_id_seq', 1, false);
ALTER TABLE ONLY snapshots ALTER COLUMN id SET DEFAULT
nextval('snapshots_id_seq'::regclass);"""
sql += """CREATE SEQUENCE volumes_id_seq START WITH 1 INCREMENT BY 1
NO MINVALUE NO MAXVALUE CACHE 1;
ALTER SEQUENCE volumes_id_seq OWNED BY volumes.id;
SELECT pg_catalog.setval('volumes_id_seq', 1, false);
ALTER TABLE ONLY volumes ALTER COLUMN id SET DEFAULT
nextval('volumes_id_seq'::regclass);"""
migrate_engine.execute(sql)
|
<commit_before><commit_msg>Drop unused PostgreSQL sequences from Folsom.
In Folsom the snapshots and volumes tables were converted to use
UUID's. When we performed this conversion in Folsom a couple of
unused PostgreSQL sequences were left behind.
This migration removes the snapshots_id_seq and volumes_id_seq
if they exist in the schema when using PostgreSQL.
Fixes LP Bug #1080786.
Change-Id: I075e2afebcb5236f96ab5d6ab13e249d078da86b<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# NOTE(dprince): Remove unused snapshots/volumes sequences.
# These are leftovers from the ID --> UUID conversion for these tables
# that occurred in Folsom.
if migrate_engine.name == "postgresql":
base_query = """SELECT COUNT(*) FROM pg_class c
WHERE c.relkind = 'S'
AND relname = '%s';"""
result = migrate_engine.execute(base_query % "snapshots_id_seq")
if result.scalar() > 0:
sql = "DROP SEQUENCE snapshots_id_seq CASCADE;"
migrate_engine.execute(sql)
result = migrate_engine.execute(base_query % "volumes_id_seq")
if result.scalar() > 0:
sql = "DROP SEQUENCE volumes_id_seq CASCADE;"
migrate_engine.execute(sql)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
if migrate_engine.name == "postgresql":
sql = """CREATE SEQUENCE snapshots_id_seq START WITH 1 INCREMENT BY 1
NO MINVALUE NO MAXVALUE CACHE 1;
ALTER SEQUENCE snapshots_id_seq OWNED BY snapshots.id;
SELECT pg_catalog.setval('snapshots_id_seq', 1, false);
ALTER TABLE ONLY snapshots ALTER COLUMN id SET DEFAULT
nextval('snapshots_id_seq'::regclass);"""
sql += """CREATE SEQUENCE volumes_id_seq START WITH 1 INCREMENT BY 1
NO MINVALUE NO MAXVALUE CACHE 1;
ALTER SEQUENCE volumes_id_seq OWNED BY volumes.id;
SELECT pg_catalog.setval('volumes_id_seq', 1, false);
ALTER TABLE ONLY volumes ALTER COLUMN id SET DEFAULT
nextval('volumes_id_seq'::regclass);"""
migrate_engine.execute(sql)
|
|
5e2000852933da680af03e002f094ff9a5e7bc25
|
txircd/modules/extra/conn_umodes.py
|
txircd/modules/extra/conn_umodes.py
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class AutoUserModes(ModuleData):
implements(IPlugin, IModuleData)
name = "AutoUserModes"
def hookIRCd(self, ircd):
self.ircd = ircd
def actions(self):
return [ ("welcome", 1, self.autoSetUserModes) ]
def autoSetUserModes(self, user):
try:
modes = self.ircd.config["client_umodes_on_connect"]
params = modes.split()
modes = params.pop(0)
user.setModes(self.ircd.serverID, modes, params)
except KeyError:
pass # No umodes defined. No action required.
autoUserModes = AutoUserModes()
|
Implement the usermodes on connect module
|
Implement the usermodes on connect module
|
Python
|
bsd-3-clause
|
Heufneutje/txircd,ElementalAlchemist/txircd
|
Implement the usermodes on connect module
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class AutoUserModes(ModuleData):
implements(IPlugin, IModuleData)
name = "AutoUserModes"
def hookIRCd(self, ircd):
self.ircd = ircd
def actions(self):
return [ ("welcome", 1, self.autoSetUserModes) ]
def autoSetUserModes(self, user):
try:
modes = self.ircd.config["client_umodes_on_connect"]
params = modes.split()
modes = params.pop(0)
user.setModes(self.ircd.serverID, modes, params)
except KeyError:
pass # No umodes defined. No action required.
autoUserModes = AutoUserModes()
|
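The module above reads a single whitespace-separated string from the config key client_umodes_on_connect, takes the first token as the user mode letters, and forwards any remaining tokens to setModes as parameters. A small hedged sketch of that parsing follows; the mode string is purely illustrative, and which letters are valid depends on the ircd configuration.
# Illustrative only: how a configured value would be split by autoSetUserModes.
config = {"client_umodes_on_connect": "iw"}   # hypothetical value: set modes i and w on connect
params = config["client_umodes_on_connect"].split()
modes = params.pop(0)
print(modes, params)   # 'iw' [] -- any further tokens would be passed on as mode parameters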
<commit_before><commit_msg>Implement the usermodes on connect module<commit_after>
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class AutoUserModes(ModuleData):
implements(IPlugin, IModuleData)
name = "AutoUserModes"
def hookIRCd(self, ircd):
self.ircd = ircd
def actions(self):
return [ ("welcome", 1, self.autoSetUserModes) ]
def autoSetUserModes(self, user):
try:
modes = self.ircd.config["client_umodes_on_connect"]
params = modes.split()
modes = params.pop(0)
user.setModes(self.ircd.serverID, modes, params)
except KeyError:
pass # No umodes defined. No action required.
autoUserModes = AutoUserModes()
|
Implement the usermodes on connect modulefrom twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class AutoUserModes(ModuleData):
implements(IPlugin, IModuleData)
name = "AutoUserModes"
def hookIRCd(self, ircd):
self.ircd = ircd
def actions(self):
return [ ("welcome", 1, self.autoSetUserModes) ]
def autoSetUserModes(self, user):
try:
modes = self.ircd.config["client_umodes_on_connect"]
params = modes.split()
modes = params.pop(0)
user.setModes(self.ircd.serverID, modes, params)
except KeyError:
pass # No umodes defined. No action required.
autoUserModes = AutoUserModes()
|
<commit_before><commit_msg>Implement the usermodes on connect module<commit_after>from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class AutoUserModes(ModuleData):
implements(IPlugin, IModuleData)
name = "AutoUserModes"
def hookIRCd(self, ircd):
self.ircd = ircd
def actions(self):
return [ ("welcome", 1, self.autoSetUserModes) ]
def autoSetUserModes(self, user):
try:
modes = self.ircd.config["client_umodes_on_connect"]
params = modes.split()
modes = params.pop(0)
user.setModes(self.ircd.serverID, modes, params)
except KeyError:
pass # No umodes defined. No action required.
autoUserModes = AutoUserModes()
|
|
94c7de8340f8cfb630c09d86a709e62436ce7d9e
|
packages/Python/lldbsuite/test/repl/type_lookup/TestREPLTypeLookup.py
|
packages/Python/lldbsuite/test/repl/type_lookup/TestREPLTypeLookup.py
|
# TestREPLTypeLookup.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""Test that type lookup chooses the right language"""
import lldbsuite.test.lldbrepl as lldbrepl
import lldbsuite.test.decorators as decorators
class REPLTypeLookupTestCase(lldbrepl.REPLTest):
mydir = lldbrepl.REPLTest.compute_mydir(__file__)
@decorators.swiftTest
@decorators.skipUnlessDarwin
@decorators.no_debug_info_test
def doTest(self):
self.command(
':type lookup NSArchiver',
patterns=['@interface NSArchiver']) # no Swift info, ObjC
self.command('import Foundation')
self.command(
':type lookup NSArchiver',
patterns=['class NSArchiver']) # Swift info, no ObjC
|
Add a test case to validate that 'type lookup' picks Swift vs. ObjC properly in the REPL
|
Add a test case to validate that 'type lookup' picks Swift vs. ObjC properly in the REPL
|
Python
|
apache-2.0
|
apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb
|
Add a test case to validate that 'type lookup' picks Swift vs. ObjC properly in the REPL
|
# TestREPLTypeLookup.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""Test that type lookup chooses the right language"""
import lldbsuite.test.lldbrepl as lldbrepl
import lldbsuite.test.decorators as decorators
class REPLTypeLookupTestCase(lldbrepl.REPLTest):
mydir = lldbrepl.REPLTest.compute_mydir(__file__)
@decorators.swiftTest
@decorators.skipUnlessDarwin
@decorators.no_debug_info_test
def doTest(self):
self.command(
':type lookup NSArchiver',
patterns=['@interface NSArchiver']) # no Swift info, ObjC
self.command('import Foundation')
self.command(
':type lookup NSArchiver',
patterns=['class NSArchiver']) # Swift info, no ObjC
|
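In the test above, each patterns entry is a string that must be found in the REPL output of the command; it is presumably applied as a regular expression search (an assumption about the lldbrepl helpers, not something this commit states). The standalone sketch below only illustrates that matching idea on hypothetical output strings.
# Hypothetical REPL output lines; the assertions mirror the two patterns checks above.
import re
objc_output = "@interface NSArchiver : NSCoder"   # assumed output before `import Foundation`
swift_output = "class NSArchiver : NSCoder"       # assumed output after `import Foundation`
assert re.search('@interface NSArchiver', objc_output)
assert re.search('class NSArchiver', swift_output)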
<commit_before><commit_msg>Add a test case to validate that 'type lookup' picks Swift vs. ObjC properly in the REPL<commit_after>
|
# TestREPLTypeLookup.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""Test that type lookup chooses the right language"""
import lldbsuite.test.lldbrepl as lldbrepl
import lldbsuite.test.decorators as decorators
class REPLTypeLookupTestCase(lldbrepl.REPLTest):
mydir = lldbrepl.REPLTest.compute_mydir(__file__)
@decorators.swiftTest
@decorators.skipUnlessDarwin
@decorators.no_debug_info_test
def doTest(self):
self.command(
':type lookup NSArchiver',
patterns=['@interface NSArchiver']) # no Swift info, ObjC
self.command('import Foundation')
self.command(
':type lookup NSArchiver',
patterns=['class NSArchiver']) # Swift info, no ObjC
|
Add a test case to validate that 'type lookup' picks Swift vs. ObjC properly in the REPL# TestREPLTypeLookup.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""Test that type lookup chooses the right language"""
import lldbsuite.test.lldbrepl as lldbrepl
import lldbsuite.test.decorators as decorators
class REPLTypeLookupTestCase(lldbrepl.REPLTest):
mydir = lldbrepl.REPLTest.compute_mydir(__file__)
@decorators.swiftTest
@decorators.skipUnlessDarwin
@decorators.no_debug_info_test
def doTest(self):
self.command(
':type lookup NSArchiver',
patterns=['@interface NSArchiver']) # no Swift info, ObjC
self.command('import Foundation')
self.command(
':type lookup NSArchiver',
patterns=['class NSArchiver']) # Swift info, no ObjC
|
<commit_before><commit_msg>Add a test case to validate that 'type lookup' picks Swift vs. ObjC properly in the REPL<commit_after># TestREPLTypeLookup.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""Test that type lookup chooses the right language"""
import lldbsuite.test.lldbrepl as lldbrepl
import lldbsuite.test.decorators as decorators
class REPLTypeLookupTestCase(lldbrepl.REPLTest):
mydir = lldbrepl.REPLTest.compute_mydir(__file__)
@decorators.swiftTest
@decorators.skipUnlessDarwin
@decorators.no_debug_info_test
def doTest(self):
self.command(
':type lookup NSArchiver',
patterns=['@interface NSArchiver']) # no Swift info, ObjC
self.command('import Foundation')
self.command(
':type lookup NSArchiver',
patterns=['class NSArchiver']) # Swift info, no ObjC
|
|
8745aaa16568ff4e0954c7e0351665d5c1eb9e01
|
test/test_rmf3_dump.py
|
test/test_rmf3_dump.py
|
import unittest
import RMF
import subprocess
class Tests(unittest.TestCase):
def test_help(self):
"""Test rmf3_dump --help"""
p = subprocess.Popen(['rmf3_dump', '--help'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
self.assertEqual(out, "")
self.assertIn("Dump frames from an rmf3 file", err)
self.assertEqual(p.returncode, 1)
def test_version(self):
"""Test rmf3_dump --version"""
p = subprocess.Popen(['rmf3_dump', '--version'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
self.assertEqual(out, "")
self.assertIn("RMF version", err)
self.assertEqual(p.returncode, 0)
if __name__ == '__main__':
unittest.main()
|
Add a basic test for rmf3_dump
|
Add a basic test for rmf3_dump
|
Python
|
apache-2.0
|
salilab/rmf,salilab/rmf,salilab/rmf,salilab/rmf
|
Add a basic test for rmf3_dump
|
import unittest
import RMF
import subprocess
class Tests(unittest.TestCase):
def test_help(self):
"""Test rmf3_dump --help"""
p = subprocess.Popen(['rmf3_dump', '--help'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
self.assertEqual(out, "")
self.assertIn("Dump frames from an rmf3 file", err)
self.assertEqual(p.returncode, 1)
def test_version(self):
"""Test rmf3_dump --version"""
p = subprocess.Popen(['rmf3_dump', '--version'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
self.assertEqual(out, "")
self.assertIn("RMF version", err)
self.assertEqual(p.returncode, 0)
if __name__ == '__main__':
unittest.main()
|
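The tests above drive the rmf3_dump command line through subprocess.Popen and assert on its stderr and exit status. On Python 3.7 or newer the same check can be written more compactly with subprocess.run; a hedged equivalent sketch (not part of the commit) is shown below.
# Equivalent --version check using subprocess.run (requires Python 3.7+ for capture_output).
import subprocess
result = subprocess.run(['rmf3_dump', '--version'],
                        capture_output=True, universal_newlines=True)
assert 'RMF version' in result.stderr
assert result.returncode == 0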
<commit_before><commit_msg>Add a basic test for rmf3_dump<commit_after>
|
import unittest
import RMF
import subprocess
class Tests(unittest.TestCase):
def test_help(self):
"""Test rmf3_dump --help"""
p = subprocess.Popen(['rmf3_dump', '--help'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
self.assertEqual(out, "")
self.assertIn("Dump frames from an rmf3 file", err)
self.assertEqual(p.returncode, 1)
def test_version(self):
"""Test rmf3_dump --version"""
p = subprocess.Popen(['rmf3_dump', '--version'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
self.assertEqual(out, "")
self.assertIn("RMF version", err)
self.assertEqual(p.returncode, 0)
if __name__ == '__main__':
unittest.main()
|
Add a basic test for rmf3_dumpimport unittest
import RMF
import subprocess
class Tests(unittest.TestCase):
def test_help(self):
"""Test rmf3_dump --help"""
p = subprocess.Popen(['rmf3_dump', '--help'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
self.assertEqual(out, "")
self.assertIn("Dump frames from an rmf3 file", err)
self.assertEqual(p.returncode, 1)
def test_version(self):
"""Test rmf3_dump --version"""
p = subprocess.Popen(['rmf3_dump', '--version'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
self.assertEqual(out, "")
self.assertIn("RMF version", err)
self.assertEqual(p.returncode, 0)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a basic test for rmf3_dump<commit_after>import unittest
import RMF
import subprocess
class Tests(unittest.TestCase):
def test_help(self):
"""Test rmf3_dump --help"""
p = subprocess.Popen(['rmf3_dump', '--help'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
self.assertEqual(out, "")
self.assertIn("Dump frames from an rmf3 file", err)
self.assertEqual(p.returncode, 1)
def test_version(self):
"""Test rmf3_dump --version"""
p = subprocess.Popen(['rmf3_dump', '--version'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
self.assertEqual(out, "")
self.assertIn("RMF version", err)
self.assertEqual(p.returncode, 0)
if __name__ == '__main__':
unittest.main()
|
|
a3664120d3606e0257b79da141313752cf347953
|
write_serial.py
|
write_serial.py
|
#!/usr/bin/env python
from __future__ import print_function
import serial
import time
import json
def main():
with open('data.json') as fh:
values = json.load(fh)
with serial.Serial('/dev/tty.usbmodem1411', 9600) as port:
start_time = time.time()
for value in values:
while time.time() - start_time < value['time_offset']:
pass
port.write('%d\n' % value['value'])
print('Wrote: %d' % (value['value'],))
if __name__ == '__main__':
main()
|
Add script to write serial capture
|
Add script to write serial capture
|
Python
|
mit
|
thusoy/hhi-experiments
|
Add script to write serial capture
|
#!/usr/bin/env python
from __future__ import print_function
import serial
import time
import json
def main():
with open('data.json') as fh:
values = json.load(fh)
with serial.Serial('/dev/tty.usbmodem1411', 9600) as port:
start_time = time.time()
for value in values:
while time.time() - start_time < value['time_offset']:
pass
port.write('%d\n' % value['value'])
print('Wrote: %d' % (value['value'],))
if __name__ == '__main__':
main()
|
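The script above documents its input only implicitly: data.json is expected to hold a list of objects with a time_offset (seconds since the start of playback) and an integer value, and each value is written to the serial port once its offset has elapsed. A hedged sketch of producing a file in that shape (the sample numbers are made up):
# Illustrative only: generate a data.json in the shape write_serial.py reads.
import json
sample = [
    {"time_offset": 0.0, "value": 512},
    {"time_offset": 0.5, "value": 600},
    {"time_offset": 1.0, "value": 480},
]
with open('data.json', 'w') as fh:
    json.dump(sample, fh)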
<commit_before><commit_msg>Add script to write serial capture<commit_after>
|
#!/usr/bin/env python
from __future__ import print_function
import serial
import time
import json
def main():
with open('data.json') as fh:
values = json.load(fh)
with serial.Serial('/dev/tty.usbmodem1411', 9600) as port:
start_time = time.time()
for value in values:
while time.time() - start_time < value['time_offset']:
pass
port.write('%d\n' % value['value'])
print('Wrote: %d' % (value['value'],))
if __name__ == '__main__':
main()
|
Add script to write serial capture#!/usr/bin/env python
from __future__ import print_function
import serial
import time
import json
def main():
with open('data.json') as fh:
values = json.load(fh)
with serial.Serial('/dev/tty.usbmodem1411', 9600) as port:
start_time = time.time()
for value in values:
while time.time() - start_time < value['time_offset']:
pass
port.write('%d\n' % value['value'])
print('Wrote: %d' % (value['value'],))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to write serial capture<commit_after>#!/usr/bin/env python
from __future__ import print_function
import serial
import time
import json
def main():
with open('data.json') as fh:
values = json.load(fh)
with serial.Serial('/dev/tty.usbmodem1411', 9600) as port:
start_time = time.time()
for value in values:
while time.time() - start_time < value['time_offset']:
pass
port.write('%d\n' % value['value'])
print('Wrote: %d' % (value['value'],))
if __name__ == '__main__':
main()
|
|
6994ba541f755abe427e032f9dd4991bf13f1f49
|
pykmer/exset.py
|
pykmer/exset.py
|
"""
This module provides an API for reading and writing sets (sorted lists)
of *k*-mers in an uncompressed form using the builtin Python array type.
It is good for small sets of *k*-mers, where the compression/decompression
overhead outweighs the space benefits.
"""
__docformat__ = 'restructuredtext'
import array
import struct
def write(k, xs, nm):
"""
Write the sorted array of *k*-mers `xs` to the file named `nm`.
"""
with open(nm, 'wb') as f:
s = struct.pack('QQ', k, len(xs))
f.write(s)
xs.tofile(f)
def read(nm):
"""
Open the file `nm` and read from it a sorted array of *k*-mers,
and return the metadata (i.e. K), and the array of *k*-mers.
"""
with open(nm, 'rb') as f:
s = f.read(16)
(k, n) = struct.unpack('QQ', s)
meta = {'K':k}
a = array.array('L', [])
a.fromfile(f, n)
return (meta, a)
|
Add an expanded k-mer set.
|
Add an expanded k-mer set.
|
Python
|
apache-2.0
|
drtconway/pykmer
|
Add an expanded k-mer set.
|
"""
This module provides an API for reading and writing sets (sorted lists)
of *k*-mers in an uncompressed form using the builtin Python array type.
It is good for small sets of *k*-mers, where the compression/decompression
overhead outweighs the space benefits.
"""
__docformat__ = 'restructuredtext'
import array
import struct
def write(k, xs, nm):
"""
Write the sorted array of *k*-mers `xs` to the file named `nm`.
"""
with open(nm, 'wb') as f:
s = struct.pack('QQ', k, len(xs))
f.write(s)
xs.tofile(f)
def read(nm):
"""
Open the file `nm` and read from it a sorted array of *k*-mers,
and return the metadata (i.e. K), and the array of *k*-mers.
"""
with open(nm, 'rb') as f:
s = f.read(16)
(k, n) = struct.unpack('QQ', s)
meta = {'K':k}
a = array.array('L', [])
a.fromfile(f, n)
return (meta, a)
|
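The docstring above describes a small read/write API for uncompressed k-mer sets; a hedged round-trip sketch is shown below. It assumes the module is importable as pykmer.exset and uses a toy array of already-encoded k-mer integers, since encoding itself is outside this module.
# Round-trip sketch: write a tiny sorted k-mer set, read it back, and compare.
import array
from pykmer import exset
k = 25
kmers = array.array('L', sorted([3, 17, 42, 1023]))   # toy encoded k-mers, not real sequence data
exset.write(k, kmers, 'toy.kmers')
meta, loaded = exset.read('toy.kmers')
assert meta['K'] == k
assert loaded == kmers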
<commit_before><commit_msg>Add an expanded k-mer set.<commit_after>
|
"""
This module provides an API for reading and writing sets (sorted lists)
of *k*-mers in an uncompressed form using the builtin Python array type.
It is good for small sets of *k*-mers, where the compression/decompression
overhead outweighs the space benefits.
"""
__docformat__ = 'restructuredtext'
import array
import struct
def write(k, xs, nm):
"""
Write the sorted array of *k*-mers `xs` to the file named `nm`.
"""
with open(nm, 'wb') as f:
s = struct.pack('QQ', k, len(xs))
f.write(s)
xs.tofile(f)
def read(nm):
"""
Open the file `nm` and read from it a sorted array of *k*-mers,
and return the metadata (i.e. K), and the array of *k*-mers.
"""
with open(nm, 'rb') as f:
s = f.read(16)
(k, n) = struct.unpack('QQ', s)
meta = {'K':k}
a = array.array('L', [])
a.fromfile(f, n)
return (meta, a)
|
Add an expanded k-mer set."""
This module provides an API for reading and writing sets (sorted lists)
of *k*-mers in an uncompressed form using the builtin Python array type.
It is good for small sets of *k*-mers, where the compression/decompression
overhead outweighs the space benefits.
"""
__docformat__ = 'restructuredtext'
import array
import struct
def write(k, xs, nm):
"""
Write the sorted array of *k*-mers `xs` to the file named `nm`.
"""
with open(nm, 'wb') as f:
s = struct.pack('QQ', k, len(xs))
f.write(s)
xs.tofile(f)
def read(nm):
"""
Open the file `nm` and read from it a sorted array of *k*-mers,
and return the metadata (i.e. K), and the array of *k*-mers.
"""
with open(nm, 'rb') as f:
s = f.read(16)
(k, n) = struct.unpack('QQ', s)
meta = {'K':k}
a = array.array('L', [])
a.fromfile(f, n)
return (meta, a)
|
<commit_before><commit_msg>Add an expanded k-mer set.<commit_after>"""
This module provides an API for reading and writing sets (sorted lists)
of *k*-mers in an uncompressed form using the builtin Python array type.
It is good for small sets of *k*-mers, where the compression/decompression
overhead outweighs the space benefits.
"""
__docformat__ = 'restructuredtext'
import array
import struct
def write(k, xs, nm):
"""
Write the sorted array of *k*-mers `xs` to the file named `nm`.
"""
with open(nm, 'wb') as f:
s = struct.pack('QQ', k, len(xs))
f.write(s)
xs.tofile(f)
def read(nm):
"""
Open the file `nm` and read from it a sorted array of *k*-mers,
and return the metadata (i.e. K), and the array of *k*-mers.
"""
with open(nm, 'rb') as f:
s = f.read(16)
(k, n) = struct.unpack('QQ', s)
meta = {'K':k}
a = array.array('L', [])
a.fromfile(f, n)
return (meta, a)
|
|
dba74cdd2fb2a8e5be1b56bba3fdcadc40827f73
|
links/utils/testing_helpers.py
|
links/utils/testing_helpers.py
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from rest_framework.test import APIClient
class APITestCase(TestCase):
def setUp(self):
self.client = APIClient()
class AuthenticatedAPITestCase(APITestCase):
def setUp(self):
super(AuthenticatedAPITestCase, self).setUp()
response = self.client.post(reverse('registration'), {
'email': 'test@test.com',
'password': 'something secret',
'first_name': 'Testy',
'last_name': 'McTesterson'
}, format='json')
token = response.data['token']
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
|
Create some testing helper classes
|
Create some testing helper classes
|
Python
|
mit
|
projectweekend/Links-API,projectweekend/Links-API
|
Create some testing helper classes
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from rest_framework.test import APIClient
class APITestCase(TestCase):
def setUp(self):
self.client = APIClient()
class AuthenticatedAPITestCase(APITestCase):
def setUp(self):
super(AuthenticatedAPITestCase, self).setUp()
response = self.client.post(reverse('registration'), {
'email': 'test@test.com',
'password': 'something secret',
'first_name': 'Testy',
'last_name': 'McTesterson'
}, format='json')
token = response.data['token']
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
|
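AuthenticatedAPITestCase above registers a throwaway user and attaches its token to self.client, so tests that inherit from it can call protected endpoints without any further auth setup. A hedged usage sketch follows; the import path mirrors the file added in this commit, but the 'links' URL name is purely illustrative and not defined here.
# Usage sketch: a test relying on the pre-authenticated client prepared in setUp().
from django.core.urlresolvers import reverse
from links.utils.testing_helpers import AuthenticatedAPITestCase
class LinkListTest(AuthenticatedAPITestCase):
    def test_list_links_with_token(self):
        response = self.client.get(reverse('links'), format='json')   # hypothetical endpoint name
        self.assertEqual(response.status_code, 200)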
<commit_before><commit_msg>Create some testing helper classes<commit_after>
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from rest_framework.test import APIClient
class APITestCase(TestCase):
def setUp(self):
self.client = APIClient()
class AuthenticatedAPITestCase(APITestCase):
def setUp(self):
super(AuthenticatedAPITestCase, self).setUp()
response = self.client.post(reverse('registration'), {
'email': 'test@test.com',
'password': 'something secret',
'first_name': 'Testy',
'last_name': 'McTesterson'
}, format='json')
token = response.data['token']
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
|
Create some testing helper classesfrom django.test import TestCase
from django.core.urlresolvers import reverse
from rest_framework.test import APIClient
class APITestCase(TestCase):
def setUp(self):
self.client = APIClient()
class AuthenticatedAPITestCase(APITestCase):
def setUp(self):
super(AuthenticatedAPITestCase, self).setUp()
response = self.client.post(reverse('registration'), {
'email': 'test@test.com',
'password': 'something secret',
'first_name': 'Testy',
'last_name': 'McTesterson'
}, format='json')
token = response.data['token']
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
|
<commit_before><commit_msg>Create some testing helper classes<commit_after>from django.test import TestCase
from django.core.urlresolvers import reverse
from rest_framework.test import APIClient
class APITestCase(TestCase):
def setUp(self):
self.client = APIClient()
class AuthenticatedAPITestCase(APITestCase):
def setUp(self):
super(AuthenticatedAPITestCase, self).setUp()
response = self.client.post(reverse('registration'), {
'email': 'test@test.com',
'password': 'something secret',
'first_name': 'Testy',
'last_name': 'McTesterson'
}, format='json')
token = response.data['token']
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
|
|
d1e07f55fd05bdd77ec323b814607deaab03de57
|
tests/lib/test_files.py
|
tests/lib/test_files.py
|
# -*- coding: utf-8 -*-
import pytest
from skylines.lib import files
from skylines.lib.types import is_unicode
@pytest.mark.parametrize(
"input,expected",
[
(b"foo/bar/baz.igc", u"baz.igc"),
(u"HERR.müller@123.igc", u"herr.m_ller_123.igc"),
(u"abc/...1234.igc", u"1234.igc"),
(u"", u"empty"),
],
)
def test_sanitise_filename(input, expected):
output = files.sanitise_filename(input)
assert is_unicode(output)
assert output == expected
|
Add tests for `sanitise_filename()` function
|
lib/files: Add tests for `sanitise_filename()` function
|
Python
|
agpl-3.0
|
skylines-project/skylines,skylines-project/skylines,skylines-project/skylines,skylines-project/skylines
|
lib/files: Add tests for `sanitise_filename()` function
|
# -*- coding: utf-8 -*-
import pytest
from skylines.lib import files
from skylines.lib.types import is_unicode
@pytest.mark.parametrize(
"input,expected",
[
(b"foo/bar/baz.igc", u"baz.igc"),
(u"HERR.müller@123.igc", u"herr.m_ller_123.igc"),
(u"abc/...1234.igc", u"1234.igc"),
(u"", u"empty"),
],
)
def test_sanitise_filename(input, expected):
output = files.sanitise_filename(input)
assert is_unicode(output)
assert output == expected
|
<commit_before><commit_msg>lib/files: Add tests for `sanitise_filename()` function<commit_after>
|
# -*- coding: utf-8 -*-
import pytest
from skylines.lib import files
from skylines.lib.types import is_unicode
@pytest.mark.parametrize(
"input,expected",
[
(b"foo/bar/baz.igc", u"baz.igc"),
(u"HERR.müller@123.igc", u"herr.m_ller_123.igc"),
(u"abc/...1234.igc", u"1234.igc"),
(u"", u"empty"),
],
)
def test_sanitise_filename(input, expected):
output = files.sanitise_filename(input)
assert is_unicode(output)
assert output == expected
|
lib/files: Add tests for `sanitise_filename()` function# -*- coding: utf-8 -*-
import pytest
from skylines.lib import files
from skylines.lib.types import is_unicode
@pytest.mark.parametrize(
"input,expected",
[
(b"foo/bar/baz.igc", u"baz.igc"),
(u"HERR.müller@123.igc", u"herr.m_ller_123.igc"),
(u"abc/...1234.igc", u"1234.igc"),
(u"", u"empty"),
],
)
def test_sanitise_filename(input, expected):
output = files.sanitise_filename(input)
assert is_unicode(output)
assert output == expected
|
<commit_before><commit_msg>lib/files: Add tests for `sanitise_filename()` function<commit_after># -*- coding: utf-8 -*-
import pytest
from skylines.lib import files
from skylines.lib.types import is_unicode
@pytest.mark.parametrize(
"input,expected",
[
(b"foo/bar/baz.igc", u"baz.igc"),
(u"HERR.müller@123.igc", u"herr.m_ller_123.igc"),
(u"abc/...1234.igc", u"1234.igc"),
(u"", u"empty"),
],
)
def test_sanitise_filename(input, expected):
output = files.sanitise_filename(input)
assert is_unicode(output)
assert output == expected
|
|
fd47b1744567dfdfe9e3787bbc4638ddc30b3ff6
|
tests/preflight_test.py
|
tests/preflight_test.py
|
from unittest import TestCase
from dusty.preflight import _assert_executable_exists, PreflightException
class PreflightTest(TestCase):
def test_assert_executable_exists(self):
_assert_executable_exists('python')
def test_assert_executable_exists_fails(self):
with self.assertRaises(PreflightException):
_assert_executable_exists('somecrazythingwhichforsuredoesnotexist')
|
Add tests for assert executable
|
Add tests for assert executable
|
Python
|
mit
|
gamechanger/dusty,gamechanger/dusty,gamechanger/dusty,gamechanger/dusty,gamechanger/dusty
|
Add tests for assert executable
|
from unittest import TestCase
from dusty.preflight import _assert_executable_exists, PreflightException
class PreflightTest(TestCase):
def test_assert_executable_exists(self):
_assert_executable_exists('python')
def test_assert_executable_exists_fails(self):
with self.assertRaises(PreflightException):
_assert_executable_exists('somecrazythingwhichforsuredoesnotexist')
|
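The tests above pin down the expected behaviour: _assert_executable_exists returns silently for an executable that is on PATH and raises PreflightException otherwise. One common way to implement such a check is sketched below; this is an assumption-laden illustration, not dusty's actual implementation.
# Hedged sketch of a PATH-based preflight check (illustrative; not dusty's code).
import shutil
class PreflightException(Exception):
    pass
def assert_executable_exists(name):
    if shutil.which(name) is None:
        raise PreflightException('required executable %r not found on PATH' % name)
assert_executable_exists('python')   # passes silently on systems with Python on PATH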
<commit_before><commit_msg>Add tests for assert executable<commit_after>
|
from unittest import TestCase
from dusty.preflight import _assert_executable_exists, PreflightException
class PreflightTest(TestCase):
def test_assert_executable_exists(self):
_assert_executable_exists('python')
def test_assert_executable_exists_fails(self):
with self.assertRaises(PreflightException):
_assert_executable_exists('somecrazythingwhichforsuredoesnotexist')
|
Add tests for assert executablefrom unittest import TestCase
from dusty.preflight import _assert_executable_exists, PreflightException
class PreflightTest(TestCase):
def test_assert_executable_exists(self):
_assert_executable_exists('python')
def test_assert_executable_exists_fails(self):
with self.assertRaises(PreflightException):
_assert_executable_exists('somecrazythingwhichforsuredoesnotexist')
|
<commit_before><commit_msg>Add tests for assert executable<commit_after>from unittest import TestCase
from dusty.preflight import _assert_executable_exists, PreflightException
class PreflightTest(TestCase):
def test_assert_executable_exists(self):
_assert_executable_exists('python')
def test_assert_executable_exists_fails(self):
with self.assertRaises(PreflightException):
_assert_executable_exists('somecrazythingwhichforsuredoesnotexist')
|
|
ceb624e34de4165a75874786b34ca509fe865a43
|
tools/telemetry/telemetry/page/actions/action_runner_unittest.py
|
tools/telemetry/telemetry/page/actions/action_runner_unittest.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core.backends.chrome import tracing_backend
from telemetry.core.timeline import model
from telemetry.page.actions import action_runner as action_runner_module
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
from telemetry.unittest import tab_test_case
from telemetry.web_perf import timeline_interaction_record as tir_module
class ActionRunnerTest(tab_test_case.TabTestCase):
def testIssuingInteractionRecord(self):
action_runner = action_runner_module.ActionRunner(self._tab)
self.Navigate('interaction_enabled_page.html')
action_runner.RunAction(WaitAction({'seconds': 1}))
self._browser.StartTracing(tracing_backend.DEFAULT_TRACE_CATEGORIES)
action_runner.BeginInteraction('TestInteraction', [tir_module.IS_SMOOTH])
action_runner.EndInteraction('TestInteraction', [tir_module.IS_SMOOTH])
trace_data = self._browser.StopTracing()
timeline_model = model.TimelineModel(trace_data)
records = []
renderer_thread = timeline_model.GetRendererThreadFromTab(self._tab)
for event in renderer_thread.async_slices:
if not tir_module.IsTimelineInteractionRecord(event.name):
continue
records.append(tir_module.TimelineInteractionRecord.FromEvent(event))
self.assertEqual(1, len(records),
'Fail to issue the interaction record on tracing timeline.'
' Trace data:\n%s' % repr(trace_data.EventData()))
self.assertEqual('TestInteraction', records[0].logical_name)
self.assertTrue(records[0].is_smooth)
|
Add unittests for action_runner.BeginInteraction and action_runner.EndInteraction.
|
Add unittests for action_runner.BeginInteraction and action_runner.EndInteraction.
This is a reland of https://codereview.chromium.org/294943006 after it's reverted in
https://codereview.chromium.org/284183014/.
BUG=368767
Review URL: https://codereview.chromium.org/299443017
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@272782 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,Chilledheart/chromium,M4sse/chromium.src,M4sse/chromium.src,Pluto-tv/chromium-crosswalk,dednal/chromium.src,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,ltilve/chromium,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,ltilve/chromium,bright-sparks/chromium-spacewalk,bright-sparks/chromium-spacewalk,Pluto-tv/chromium-crosswalk,littlstar/chromium.src,ltilve/chromium,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,Jonekee/chromium.src,Just-D/chromium-1,markYoungH/chromium.src,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,dednal/chromium.src,Jonekee/chromium.src,M4sse/chromium.src,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,dushu1203/chromium.src,Jonekee/chromium.src,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,markYoungH/chromium.src,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,ondra-novak/chromium.src,jaruba/chromium.src,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,M4sse/chromium.src,bright-sparks/chromium-spacewalk,TheTypoMaster/chromium-crosswalk,littlstar/chromium.src,Fireblend/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,Jonekee/chromium.src,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,dednal/chromium.src,Just-D/chromium-1,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,Jonekee/chromium.src,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,markYoungH/chromium.src,axinging/chromium-crosswalk,ondra-novak/chromium.src,dednal/chromium.src,markYoungH/chromium.src,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,hgl888/chromium-crosswalk-efl,ltilve/chromium,M4sse/chromium.src,markYoungH/chromium.src,ondra-novak/chromium.src,markYoungH/chromium.src,dushu1203/chromium.src,dushu1203/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,dednal/chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,littlstar/chromium.src,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,dednal/chromium.src,ltilve/chromium,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Jonekee/chromium.src,markYoungH/chromium.src,jaruba/chromium.src,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,Just-D/chromium-1,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,krieger-od/nwjs_chromium.src,Just-D/chromium-1,M4sse/chromium.src,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,fujunwei/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,ondra-novak/chromium.src,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,hgl888/chromium-crosswalk-efl,littlstar/chromium.src,M4sse/chromium.src,ondra-novak/chromium.src,Just-D/c
hromium-1,dushu1203/chromium.src,dednal/chromium.src,chuan9/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,ondra-novak/chromium.src,Chilledheart/chromium,hgl888/chromium-crosswalk,jaruba/chromium.src,littlstar/chromium.src,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,ltilve/chromium,Chilledheart/chromium,littlstar/chromium.src,dushu1203/chromium.src,M4sse/chromium.src,jaruba/chromium.src,dushu1203/chromium.src,Chilledheart/chromium,chuan9/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,dushu1203/chromium.src,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,M4sse/chromium.src,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,PeterWangIntel/chromium-crosswalk,fujunwei/chromium-crosswalk,Just-D/chromium-1,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,Jonekee/chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,jaruba/chromium.src,fujunwei/chromium-crosswalk,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,ltilve/chromium,Jonekee/chromium.src,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,ltilve/chromium,Chilledheart/chromium,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,jaruba/chromium.src,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,axinging/chromium-crosswalk,dushu1203/chromium.src,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,jaruba/chromium.src,Chilledheart/chromium,ltilve/chromium,fujunwei/chromium-crosswalk,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,ondra-novak/chromium.src
|
Add unittests for action_runner.BeginInteraction and action_runner.EndInteraction.
This is a reland of https://codereview.chromium.org/294943006 after it's reverted in
https://codereview.chromium.org/284183014/.
BUG=368767
Review URL: https://codereview.chromium.org/299443017
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@272782 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core.backends.chrome import tracing_backend
from telemetry.core.timeline import model
from telemetry.page.actions import action_runner as action_runner_module
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
from telemetry.unittest import tab_test_case
from telemetry.web_perf import timeline_interaction_record as tir_module
class ActionRunnerTest(tab_test_case.TabTestCase):
def testIssuingInteractionRecord(self):
action_runner = action_runner_module.ActionRunner(self._tab)
self.Navigate('interaction_enabled_page.html')
action_runner.RunAction(WaitAction({'seconds': 1}))
self._browser.StartTracing(tracing_backend.DEFAULT_TRACE_CATEGORIES)
action_runner.BeginInteraction('TestInteraction', [tir_module.IS_SMOOTH])
action_runner.EndInteraction('TestInteraction', [tir_module.IS_SMOOTH])
trace_data = self._browser.StopTracing()
timeline_model = model.TimelineModel(trace_data)
records = []
renderer_thread = timeline_model.GetRendererThreadFromTab(self._tab)
for event in renderer_thread.async_slices:
if not tir_module.IsTimelineInteractionRecord(event.name):
continue
records.append(tir_module.TimelineInteractionRecord.FromEvent(event))
self.assertEqual(1, len(records),
'Fail to issue the interaction record on tracing timeline.'
' Trace data:\n%s' % repr(trace_data.EventData()))
self.assertEqual('TestInteraction', records[0].logical_name)
self.assertTrue(records[0].is_smooth)
|
<commit_before><commit_msg>Add unittests for action_runner.BeginInteraction and action_runner.EndInteraction.
This is a reland of https://codereview.chromium.org/294943006 after it's reverted in
https://codereview.chromium.org/284183014/.
BUG=368767
Review URL: https://codereview.chromium.org/299443017
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@272782 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core.backends.chrome import tracing_backend
from telemetry.core.timeline import model
from telemetry.page.actions import action_runner as action_runner_module
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
from telemetry.unittest import tab_test_case
from telemetry.web_perf import timeline_interaction_record as tir_module
class ActionRunnerTest(tab_test_case.TabTestCase):
def testIssuingInteractionRecord(self):
action_runner = action_runner_module.ActionRunner(self._tab)
self.Navigate('interaction_enabled_page.html')
action_runner.RunAction(WaitAction({'seconds': 1}))
self._browser.StartTracing(tracing_backend.DEFAULT_TRACE_CATEGORIES)
action_runner.BeginInteraction('TestInteraction', [tir_module.IS_SMOOTH])
action_runner.EndInteraction('TestInteraction', [tir_module.IS_SMOOTH])
trace_data = self._browser.StopTracing()
timeline_model = model.TimelineModel(trace_data)
records = []
renderer_thread = timeline_model.GetRendererThreadFromTab(self._tab)
for event in renderer_thread.async_slices:
if not tir_module.IsTimelineInteractionRecord(event.name):
continue
records.append(tir_module.TimelineInteractionRecord.FromEvent(event))
self.assertEqual(1, len(records),
'Fail to issue the interaction record on tracing timeline.'
' Trace data:\n%s' % repr(trace_data.EventData()))
self.assertEqual('TestInteraction', records[0].logical_name)
self.assertTrue(records[0].is_smooth)
|
Add unittests for action_runner.BeginInteraction and action_runner.EndInteraction.
This is a reland of https://codereview.chromium.org/294943006 after it's reverted in
https://codereview.chromium.org/284183014/.
BUG=368767
Review URL: https://codereview.chromium.org/299443017
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@272782 0039d316-1c4b-4281-b951-d872f2087c98# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core.backends.chrome import tracing_backend
from telemetry.core.timeline import model
from telemetry.page.actions import action_runner as action_runner_module
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
from telemetry.unittest import tab_test_case
from telemetry.web_perf import timeline_interaction_record as tir_module
class ActionRunnerTest(tab_test_case.TabTestCase):
def testIssuingInteractionRecord(self):
action_runner = action_runner_module.ActionRunner(self._tab)
self.Navigate('interaction_enabled_page.html')
action_runner.RunAction(WaitAction({'seconds': 1}))
self._browser.StartTracing(tracing_backend.DEFAULT_TRACE_CATEGORIES)
action_runner.BeginInteraction('TestInteraction', [tir_module.IS_SMOOTH])
action_runner.EndInteraction('TestInteraction', [tir_module.IS_SMOOTH])
trace_data = self._browser.StopTracing()
timeline_model = model.TimelineModel(trace_data)
records = []
renderer_thread = timeline_model.GetRendererThreadFromTab(self._tab)
for event in renderer_thread.async_slices:
if not tir_module.IsTimelineInteractionRecord(event.name):
continue
records.append(tir_module.TimelineInteractionRecord.FromEvent(event))
self.assertEqual(1, len(records),
'Fail to issue the interaction record on tracing timeline.'
' Trace data:\n%s' % repr(trace_data.EventData()))
self.assertEqual('TestInteraction', records[0].logical_name)
self.assertTrue(records[0].is_smooth)
|
<commit_before><commit_msg>Add unittests for action_runner.BeginInteraction and action_runner.EndInteraction.
This is a reland of https://codereview.chromium.org/294943006 after it's reverted in
https://codereview.chromium.org/284183014/.
BUG=368767
Review URL: https://codereview.chromium.org/299443017
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@272782 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core.backends.chrome import tracing_backend
from telemetry.core.timeline import model
from telemetry.page.actions import action_runner as action_runner_module
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
from telemetry.unittest import tab_test_case
from telemetry.web_perf import timeline_interaction_record as tir_module
class ActionRunnerTest(tab_test_case.TabTestCase):
def testIssuingInteractionRecord(self):
action_runner = action_runner_module.ActionRunner(self._tab)
self.Navigate('interaction_enabled_page.html')
action_runner.RunAction(WaitAction({'seconds': 1}))
self._browser.StartTracing(tracing_backend.DEFAULT_TRACE_CATEGORIES)
action_runner.BeginInteraction('TestInteraction', [tir_module.IS_SMOOTH])
action_runner.EndInteraction('TestInteraction', [tir_module.IS_SMOOTH])
trace_data = self._browser.StopTracing()
timeline_model = model.TimelineModel(trace_data)
records = []
renderer_thread = timeline_model.GetRendererThreadFromTab(self._tab)
for event in renderer_thread.async_slices:
if not tir_module.IsTimelineInteractionRecord(event.name):
continue
records.append(tir_module.TimelineInteractionRecord.FromEvent(event))
self.assertEqual(1, len(records),
'Fail to issue the interaction record on tracing timeline.'
' Trace data:\n%s' % repr(trace_data.EventData()))
self.assertEqual('TestInteraction', records[0].logical_name)
self.assertTrue(records[0].is_smooth)
|
|
5240a982723eba0df09f4c360f1a1603b25fd3fe
|
py/continuous-subarray-sum.py
|
py/continuous-subarray-sum.py
|
from collections import Counter
class Solution(object):
def checkSubarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
if len(nums) < 2:
return False
k = abs(k)
if k == 0:
t1, t2 = iter(nums), iter(nums)
t2.next()
return any(x == y == 0 for x, y in zip(t1, t2))
if len(nums) > (k - 1) * 2:
return True
visited = Counter()
prev = None
subsum = 0
for n in nums:
subsum = (subsum + n) % k
if subsum == 0 and prev is not None:
return True
if visited[subsum] >= 2:
return True
elif visited[subsum] == 1 and prev != n:
return True
visited[subsum] += 1
prev = n
return False
|
Add py solution for 523. Continuous Subarray Sum
|
Add py solution for 523. Continuous Subarray Sum
523. Continuous Subarray Sum: https://leetcode.com/problems/continuous-subarray-sum/
Approach:
Keep a running prefix sum modulo k in a Counter; a repeated remainder with a
window of length >= 2 in between means that window sums to a multiple of k.
len(nums) > (k - 1) * 2 is an immediate True by pigeonhole, and k == 0 is
handled by scanning for two adjacent zeros.
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 523. Continuous Subarray Sum
523. Continuous Subarray Sum: https://leetcode.com/problems/continuous-subarray-sum/
Approach:
Keep a running prefix sum modulo k in a Counter; a repeated remainder with a
window of length >= 2 in between means that window sums to a multiple of k.
len(nums) > (k - 1) * 2 is an immediate True by pigeonhole, and k == 0 is
handled by scanning for two adjacent zeros.
|
from collections import Counter
class Solution(object):
def checkSubarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
if len(nums) < 2:
return False
k = abs(k)
if k == 0:
t1, t2 = iter(nums), iter(nums)
t2.next()
return any(x == y == 0 for x, y in zip(t1, t2))
if len(nums) > (k - 1) * 2:
return True
visited = Counter()
prev = None
subsum = 0
for n in nums:
subsum = (subsum + n) % k
if subsum == 0 and prev is not None:
return True
if visited[subsum] >= 2:
return True
elif visited[subsum] == 1 and prev != n:
return True
visited[subsum] += 1
prev = n
return False
|
<commit_before><commit_msg>Add py solution for 523. Continuous Subarray Sum
523. Continuous Subarray Sum: https://leetcode.com/problems/continuous-subarray-sum/
Approach:
Keep a running prefix sum modulo k in a Counter; a repeated remainder with a
window of length >= 2 in between means that window sums to a multiple of k.
len(nums) > (k - 1) * 2 is an immediate True by pigeonhole, and k == 0 is
handled by scanning for two adjacent zeros.<commit_after>
|
from collections import Counter
class Solution(object):
def checkSubarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
if len(nums) < 2:
return False
k = abs(k)
if k == 0:
t1, t2 = iter(nums), iter(nums)
t2.next()
return any(x == y == 0 for x, y in zip(t1, t2))
if len(nums) > (k - 1) * 2:
return True
visited = Counter()
prev = None
subsum = 0
for n in nums:
subsum = (subsum + n) % k
if subsum == 0 and prev is not None:
return True
if visited[subsum] >= 2:
return True
elif visited[subsum] == 1 and prev != n:
return True
visited[subsum] += 1
prev = n
return False
|
Add py solution for 523. Continuous Subarray Sum
523. Continuous Subarray Sum: https://leetcode.com/problems/continuous-subarray-sum/
Approach:
Keep a running prefix sum modulo k in a Counter; a repeated remainder with a
window of length >= 2 in between means that window sums to a multiple of k.
len(nums) > (k - 1) * 2 is an immediate True by pigeonhole, and k == 0 is
handled by scanning for two adjacent zeros.from collections import Counter
class Solution(object):
def checkSubarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
if len(nums) < 2:
return False
k = abs(k)
if k == 0:
t1, t2 = iter(nums), iter(nums)
t2.next()
return any(x == y == 0 for x, y in zip(t1, t2))
if len(nums) > (k - 1) * 2:
return True
visited = Counter()
prev = None
subsum = 0
for n in nums:
subsum = (subsum + n) % k
if subsum == 0 and prev is not None:
return True
if visited[subsum] >= 2:
return True
elif visited[subsum] == 1 and prev != n:
return True
visited[subsum] += 1
prev = n
return False
|
<commit_before><commit_msg>Add py solution for 523. Continuous Subarray Sum
523. Continuous Subarray Sum: https://leetcode.com/problems/continuous-subarray-sum/
Approach:
Keep a running prefix sum modulo k in a Counter; a repeated remainder with a
window of length >= 2 in between means that window sums to a multiple of k.
len(nums) > (k - 1) * 2 is an immediate True by pigeonhole, and k == 0 is
handled by scanning for two adjacent zeros.<commit_after>from collections import Counter
class Solution(object):
def checkSubarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
if len(nums) < 2:
return False
k = abs(k)
if k == 0:
t1, t2 = iter(nums), iter(nums)
t2.next()
return any(x == y == 0 for x, y in zip(t1, t2))
if len(nums) > (k - 1) * 2:
return True
visited = Counter()
prev = None
subsum = 0
for n in nums:
subsum = (subsum + n) % k
if subsum == 0 and prev is not None:
return True
if visited[subsum] >= 2:
return True
elif visited[subsum] == 1 and prev != n:
return True
visited[subsum] += 1
prev = n
return False
|
|
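As a quick sanity check of the prefix-sum-modulo-k idea the solution relies on, here is a standalone sketch; the helper name and the sample inputs are illustrative and not part of the repository:

def has_subarray_sum_multiple(nums, k):
    # A subarray nums[i+1..j] sums to a multiple of k exactly when the running
    # sums at i and j leave the same remainder modulo k.
    seen = {0: -1}                # remainder -> earliest index it was seen at
    prefix = 0
    for j, n in enumerate(nums):
        prefix = (prefix + n) % k if k else prefix + n
        i = seen.setdefault(prefix, j)
        if j - i >= 2:            # repeated remainder with a window of length >= 2
            return True
    return False

if __name__ == "__main__":
    assert has_subarray_sum_multiple([23, 2, 4, 6, 7], 6)        # [2, 4] sums to 6
    assert not has_subarray_sum_multiple([23, 2, 6, 4, 7], 13)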
fcb2913bd5cb4e25119ae65dd2ff681fe9dfc7eb
|
Spider/spider.py
|
Spider/spider.py
|
from worker import SpiderWorker
from bloomset import BloomSet
from queue import SpiderQueue
import utilities
class Spider:
def __init__(self , baseUrl, numthreads=1, depth=1):
self.baseUrl = baseUrl
self.numthreads = numthreads
self.depth = depth
if not utilities.validateURL(baseUrl):
raise ValueError("ayuhi ")
def start(self):
spiderQueue = SpiderQueue(self.baseUrl, utilities.getDomain(self.baseUrl))
bloomSet = BloomSet(utilities.getDomain(self.baseUrl))
for i in range(self.numthreads):
SpiderWorker(spiderQueue, bloomSet, self.depth)
spiderQueue.join()
spiderQueue.close()
bloomSet.close()
|
Update names and change headers
|
Update names and change headers
|
Python
|
mit
|
goelyash/Spider,goelyash/Spider
|
Update names and change headers
|
from worker import SpiderWorker
from bloomset import BloomSet
from queue import SpiderQueue
import utilities
class Spider:
def __init__(self , baseUrl, numthreads=1, depth=1):
self.baseUrl = baseUrl
self.numthreads = numthreads
self.depth = depth
if not utilities.validateURL(baseUrl):
raise ValueError("ayuhi ")
def start(self):
spiderQueue = SpiderQueue(self.baseUrl, utilities.getDomain(self.baseUrl))
bloomSet = BloomSet(utilities.getDomain(self.baseUrl))
for i in range(self.numthreads):
SpiderWorker(spiderQueue, bloomSet, self.depth)
spiderQueue.join()
spiderQueue.close()
bloomSet.close()
|
<commit_before><commit_msg>Update names and change headers<commit_after>
|
from worker import SpiderWorker
from bloomset import BloomSet
from queue import SpiderQueue
import utilities
class Spider:
def __init__(self , baseUrl, numthreads=1, depth=1):
self.baseUrl = baseUrl
self.numthreads = numthreads
self.depth = depth
if not utilities.validateURL(baseUrl):
raise ValueError("ayuhi ")
def start(self):
spiderQueue = SpiderQueue(self.baseUrl, utilities.getDomain(self.baseUrl))
bloomSet = BloomSet(utilities.getDomain(self.baseUrl))
for i in range(self.numthreads):
SpiderWorker(spiderQueue, bloomSet, self.depth)
spiderQueue.join()
spiderQueue.close()
bloomSet.close()
|
Update names and change headersfrom worker import SpiderWorker
from bloomset import BloomSet
from queue import SpiderQueue
import utilities
class Spider:
def __init__(self , baseUrl, numthreads=1, depth=1):
self.baseUrl = baseUrl
self.numthreads = numthreads
self.depth = depth
if not utilities.validateURL(baseUrl):
raise ValueError("ayuhi ")
def start(self):
spiderQueue = SpiderQueue(self.baseUrl, utilities.getDomain(self.baseUrl))
bloomSet = BloomSet(utilities.getDomain(self.baseUrl))
for i in range(self.numthreads):
SpiderWorker(spiderQueue, bloomSet, self.depth)
spiderQueue.join()
spiderQueue.close()
bloomSet.close()
|
<commit_before><commit_msg>Update names and change headers<commit_after>from worker import SpiderWorker
from bloomset import BloomSet
from queue import SpiderQueue
import utilities
class Spider:
def __init__(self , baseUrl, numthreads=1, depth=1):
self.baseUrl = baseUrl
self.numthreads = numthreads
self.depth = depth
if not utilities.validateURL(baseUrl):
raise ValueError("ayuhi ")
def start(self):
spiderQueue = SpiderQueue(self.baseUrl, utilities.getDomain(self.baseUrl))
bloomSet = BloomSet(utilities.getDomain(self.baseUrl))
for i in range(self.numthreads):
SpiderWorker(spiderQueue, bloomSet, self.depth)
spiderQueue.join()
spiderQueue.close()
bloomSet.close()
|
|
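A hypothetical driver for the class above; the import path, URL and thread count are placeholders, and the worker, bloomset, queue and utilities modules from the project are assumed to be importable:

from spider import Spider

if __name__ == "__main__":
    crawler = Spider("http://example.com", numthreads=4, depth=2)
    crawler.start()   # blocks until the queue drains, then closes the queue and bloom filter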
20e02587df6d8c776fa4b045e7004c546f531548
|
tvrenamr/tests/base.py
|
tvrenamr/tests/base.py
|
from os import mkdir
from os.path import abspath, dirname, exists, join
from shutil import rmtree
from tvrenamr.config import Config
from tvrenamr.main import TvRenamr
from tvrenamr.tests import mock_requests
# make pyflakes STFU
assert mock_requests
class BaseTest(object):
files = 'tests/files'
organised = 'tests/data/organised'
renamed = 'tests/data/renamed'
def setup(self):
# if `file` isn't there, make it
if not exists(self.files):
mkdir(self.files)
# absolute path to the file is pretty useful
self.path = abspath(dirname(__file__))
# build the file list
with open(join(self.path, 'file_list'), 'r') as f:
for fn in f.readlines():
with open(abspath(join(self.files, fn.strip())), 'w') as f:
f.write('')
# instantiate tvr
self.config = Config(join(self.path, 'config.yml'))
self.tv = TvRenamr(self.files, self.config)
def teardown(self):
rmtree(self.files)
|
from os import mkdir
from os.path import abspath, dirname, exists, join
from shutil import rmtree
from tvrenamr.config import Config
from tvrenamr.main import TvRenamr
from tvrenamr.tests import mock_requests
# make pyflakes STFU
assert mock_requests
class BaseTest(object):
files = 'tests/files'
organised = 'tests/data/organised'
renamed = 'tests/data/renamed'
def setup(self):
# absolute path to the file is pretty useful
self.path = abspath(dirname(__file__))
# if `file` isn't there, make it
if not exists(join(self.path, self.files)):
mkdir(join(self.path, self.files))
# build the file list
with open(join(self.path, 'file_list'), 'r') as f:
for fn in f.readlines():
with open(abspath(join(self.files, fn.strip())), 'w') as f:
f.write('')
# instantiate tvr
self.config = Config(join(self.path, 'config.yml'))
self.tv = TvRenamr(self.files, self.config)
def teardown(self):
rmtree(self.files)
|
Check and create the tests file folder using an absolute path
|
Check and create the tests file folder using an absolute path
|
Python
|
mit
|
wintersandroid/tvrenamr,ghickman/tvrenamr
|
from os import mkdir
from os.path import abspath, dirname, exists, join
from shutil import rmtree
from tvrenamr.config import Config
from tvrenamr.main import TvRenamr
from tvrenamr.tests import mock_requests
# make pyflakes STFU
assert mock_requests
class BaseTest(object):
files = 'tests/files'
organised = 'tests/data/organised'
renamed = 'tests/data/renamed'
def setup(self):
# if `file` isn't there, make it
if not exists(self.files):
mkdir(self.files)
# absolute path to the file is pretty useful
self.path = abspath(dirname(__file__))
# build the file list
with open(join(self.path, 'file_list'), 'r') as f:
for fn in f.readlines():
with open(abspath(join(self.files, fn.strip())), 'w') as f:
f.write('')
# instantiate tvr
self.config = Config(join(self.path, 'config.yml'))
self.tv = TvRenamr(self.files, self.config)
def teardown(self):
rmtree(self.files)
Check and create the tests file folder using an absolute path
|
from os import mkdir
from os.path import abspath, dirname, exists, join
from shutil import rmtree
from tvrenamr.config import Config
from tvrenamr.main import TvRenamr
from tvrenamr.tests import mock_requests
# make pyflakes STFU
assert mock_requests
class BaseTest(object):
files = 'tests/files'
organised = 'tests/data/organised'
renamed = 'tests/data/renamed'
def setup(self):
# absolute path to the file is pretty useful
self.path = abspath(dirname(__file__))
# if `file` isn't there, make it
if not exists(join(self.path, self.files)):
mkdir(join(self.path, self.files))
# build the file list
with open(join(self.path, 'file_list'), 'r') as f:
for fn in f.readlines():
with open(abspath(join(self.files, fn.strip())), 'w') as f:
f.write('')
# instantiate tvr
self.config = Config(join(self.path, 'config.yml'))
self.tv = TvRenamr(self.files, self.config)
def teardown(self):
rmtree(self.files)
|
<commit_before>from os import mkdir
from os.path import abspath, dirname, exists, join
from shutil import rmtree
from tvrenamr.config import Config
from tvrenamr.main import TvRenamr
from tvrenamr.tests import mock_requests
# make pyflakes STFU
assert mock_requests
class BaseTest(object):
files = 'tests/files'
organised = 'tests/data/organised'
renamed = 'tests/data/renamed'
def setup(self):
# if `file` isn't there, make it
if not exists(self.files):
mkdir(self.files)
# absolute path to the file is pretty useful
self.path = abspath(dirname(__file__))
# build the file list
with open(join(self.path, 'file_list'), 'r') as f:
for fn in f.readlines():
with open(abspath(join(self.files, fn.strip())), 'w') as f:
f.write('')
# instantiate tvr
self.config = Config(join(self.path, 'config.yml'))
self.tv = TvRenamr(self.files, self.config)
def teardown(self):
rmtree(self.files)
<commit_msg>Check and create the tests file folder using an absolute path<commit_after>
|
from os import mkdir
from os.path import abspath, dirname, exists, join
from shutil import rmtree
from tvrenamr.config import Config
from tvrenamr.main import TvRenamr
from tvrenamr.tests import mock_requests
# make pyflakes STFU
assert mock_requests
class BaseTest(object):
files = 'tests/files'
organised = 'tests/data/organised'
renamed = 'tests/data/renamed'
def setup(self):
# absolute path to the file is pretty useful
self.path = abspath(dirname(__file__))
# if `file` isn't there, make it
if not exists(join(self.path, self.files)):
mkdir(join(self.path, self.files))
# build the file list
with open(join(self.path, 'file_list'), 'r') as f:
for fn in f.readlines():
with open(abspath(join(self.files, fn.strip())), 'w') as f:
f.write('')
# instantiate tvr
self.config = Config(join(self.path, 'config.yml'))
self.tv = TvRenamr(self.files, self.config)
def teardown(self):
rmtree(self.files)
|
from os import mkdir
from os.path import abspath, dirname, exists, join
from shutil import rmtree
from tvrenamr.config import Config
from tvrenamr.main import TvRenamr
from tvrenamr.tests import mock_requests
# make pyflakes STFU
assert mock_requests
class BaseTest(object):
files = 'tests/files'
organised = 'tests/data/organised'
renamed = 'tests/data/renamed'
def setup(self):
# if `file` isn't there, make it
if not exists(self.files):
mkdir(self.files)
# absolute path to the file is pretty useful
self.path = abspath(dirname(__file__))
# build the file list
with open(join(self.path, 'file_list'), 'r') as f:
for fn in f.readlines():
with open(abspath(join(self.files, fn.strip())), 'w') as f:
f.write('')
# instantiate tvr
self.config = Config(join(self.path, 'config.yml'))
self.tv = TvRenamr(self.files, self.config)
def teardown(self):
rmtree(self.files)
Check and create the tests file folder using an absolute pathfrom os import mkdir
from os.path import abspath, dirname, exists, join
from shutil import rmtree
from tvrenamr.config import Config
from tvrenamr.main import TvRenamr
from tvrenamr.tests import mock_requests
# make pyflakes STFU
assert mock_requests
class BaseTest(object):
files = 'tests/files'
organised = 'tests/data/organised'
renamed = 'tests/data/renamed'
def setup(self):
# absolute path to the file is pretty useful
self.path = abspath(dirname(__file__))
# if `file` isn't there, make it
if not exists(join(self.path, self.files)):
mkdir(join(self.path, self.files))
# build the file list
with open(join(self.path, 'file_list'), 'r') as f:
for fn in f.readlines():
with open(abspath(join(self.files, fn.strip())), 'w') as f:
f.write('')
# instantiate tvr
self.config = Config(join(self.path, 'config.yml'))
self.tv = TvRenamr(self.files, self.config)
def teardown(self):
rmtree(self.files)
|
<commit_before>from os import mkdir
from os.path import abspath, dirname, exists, join
from shutil import rmtree
from tvrenamr.config import Config
from tvrenamr.main import TvRenamr
from tvrenamr.tests import mock_requests
# make pyflakes STFU
assert mock_requests
class BaseTest(object):
files = 'tests/files'
organised = 'tests/data/organised'
renamed = 'tests/data/renamed'
def setup(self):
# if `file` isn't there, make it
if not exists(self.files):
mkdir(self.files)
# absolute path to the file is pretty useful
self.path = abspath(dirname(__file__))
# build the file list
with open(join(self.path, 'file_list'), 'r') as f:
for fn in f.readlines():
with open(abspath(join(self.files, fn.strip())), 'w') as f:
f.write('')
# instantiate tvr
self.config = Config(join(self.path, 'config.yml'))
self.tv = TvRenamr(self.files, self.config)
def teardown(self):
rmtree(self.files)
<commit_msg>Check and create the tests file folder using an absolute path<commit_after>from os import mkdir
from os.path import abspath, dirname, exists, join
from shutil import rmtree
from tvrenamr.config import Config
from tvrenamr.main import TvRenamr
from tvrenamr.tests import mock_requests
# make pyflakes STFU
assert mock_requests
class BaseTest(object):
files = 'tests/files'
organised = 'tests/data/organised'
renamed = 'tests/data/renamed'
def setup(self):
# absolute path to the file is pretty useful
self.path = abspath(dirname(__file__))
# if `file` isn't there, make it
if not exists(join(self.path, self.files)):
mkdir(join(self.path, self.files))
# build the file list
with open(join(self.path, 'file_list'), 'r') as f:
for fn in f.readlines():
with open(abspath(join(self.files, fn.strip())), 'w') as f:
f.write('')
# instantiate tvr
self.config = Config(join(self.path, 'config.yml'))
self.tv = TvRenamr(self.files, self.config)
def teardown(self):
rmtree(self.files)
|
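A standalone illustration (not part of tvrenamr) of why the setup above anchors the relative `tests/files` path to the test module's directory: a bare relative path is resolved against the current working directory, which differs between test runners.

import os

relative = 'tests/files'
anchored = os.path.join(os.path.dirname(os.path.abspath(__file__)), relative)

print(os.path.abspath(relative))   # depends on where the tests were launched from
print(anchored)                    # always rooted next to this source file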
db15972cb8efc93e4725967415ca3327473d88bf
|
dronesym-python/flask-api/src/node.py
|
dronesym-python/flask-api/src/node.py
|
import requests
import json
apiUrl = 'http://localhost:3000/dronesym/api/node'
def update_drone(id, status):
response = requests.post(apiUrl + '/update/' + str(id), json=status, headers={ 'Content-Type' : 'application/json' })
return response.json()
def get_drone_by_id(id):
response = requests.get(apiUrl + '/get/' + id)
return response.json()
def get_drones():
response = requests.get(apiUrl + '/get')
return response.json()
|
Implement python interface for Node API
|
Implement python interface for Node API
|
Python
|
apache-2.0
|
scorelab/DroneSym,scorelab/DroneSym,scorelab/DroneSym,scorelab/DroneSym,scorelab/DroneSym
|
Implement python interface for Node API
|
import requests
import json
apiUrl = 'http://localhost:3000/dronesym/api/node'
def update_drone(id, status):
response = requests.post(apiUrl + '/update/' + str(id), json=status, headers={ 'Content-Type' : 'application/json' })
return response.json()
def get_drone_by_id(id):
response = requests.get(apiUrl + '/get/' + id)
return response.json()
def get_drones():
response = requests.get(apiUrl + '/get')
return response.json()
|
<commit_before><commit_msg>Implement python interface for Node API<commit_after>
|
import requests
import json
apiUrl = 'http://localhost:3000/dronesym/api/node'
def update_drone(id, status):
response = requests.post(apiUrl + '/update/' + str(id), json=status, headers={ 'Content-Type' : 'application/json' })
return response.json()
def get_drone_by_id(id):
response = requests.get(apiUrl + '/get/' + id)
return response.json()
def get_drones():
response = requests.get(apiUrl + '/get')
return response.json()
|
Implement python interface for Node APIimport requests
import json
apiUrl = 'http://localhost:3000/dronesym/api/node'
def update_drone(id, status):
response = requests.post(apiUrl + '/update/' + str(id), json=status, headers={ 'Content-Type' : 'application/json' })
return response.json()
def get_drone_by_id(id):
response = requests.get(apiUrl + '/get/' + id)
return response.json()
def get_drones():
response = requests.get(apiUrl + '/get')
return response.json()
|
<commit_before><commit_msg>Implement python interface for Node API<commit_after>import requests
import json
apiUrl = 'http://localhost:3000/dronesym/api/node'
def update_drone(id, status):
response = requests.post(apiUrl + '/update/' + str(id), json=status, headers={ 'Content-Type' : 'application/json' })
return response.json()
def get_drone_by_id(id):
response = requests.get(apiUrl + '/get/' + id)
return response.json()
def get_drones():
response = requests.get(apiUrl + '/get')
return response.json()
|
|
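Hypothetical usage of the interface above; the drone id and status payload are placeholders, and a Node API listening on localhost:3000 is assumed.

import node

status = {'location': {'lat': 6.9271, 'lon': 79.8612}, 'battery': 87}
print(node.update_drone(1, status))     # POST /dronesym/api/node/update/1
print(node.get_drone_by_id('1'))        # GET  /dronesym/api/node/get/1
print(node.get_drones())                # GET  /dronesym/api/node/get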
3075051d66ed599c4a23c8ddd9f06427b294f461
|
simulator/stream_positions.py
|
simulator/stream_positions.py
|
#!/usr/bin/env python3
#
# Copyright 2015 Secure Systems Group, Aalto University https://se-sy.org/.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A script that prints the positions of the uploads in the stream.
Usage:
./stream_positions.py <hash>
Output:
Integer indexes separated with newlines.
"""
import sys
import utils
def main():
hsh = int(sys.argv[-1], 16)
for (i, (upload, size)) in enumerate(utils.read_upload_stream()):
if upload == hsh:
print(i)
if __name__ == "__main__":
main()
|
Add a script for printing file upload moments in the stream
|
Add a script for printing file upload moments in the stream
|
Python
|
apache-2.0
|
sjakthol/dedup-simulator,sjakthol/dedup-simulator
|
Add a script for printing file upload moments in the stream
|
#!/usr/bin/env python3
#
# Copyright 2015 Secure Systems Group, Aalto University https://se-sy.org/.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A script that prints the positions of the uploads in the stream.
Usage:
./stream_positions.py <hash>
Output:
Integer indexes separated with newlines.
"""
import sys
import utils
def main():
hsh = int(sys.argv[-1], 16)
for (i, (upload, size)) in enumerate(utils.read_upload_stream()):
if upload == hsh:
print(i)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a script for printing file upload moments in the stream<commit_after>
|
#!/usr/bin/env python3
#
# Copyright 2015 Secure Systems Group, Aalto University https://se-sy.org/.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A script that prints the positions of the uploads in the stream.
Usage:
./stream_positions.py <hash>
Output:
Integer indexes separated with newlines.
"""
import sys
import utils
def main():
hsh = int(sys.argv[-1], 16)
for (i, (upload, size)) in enumerate(utils.read_upload_stream()):
if upload == hsh:
print(i)
if __name__ == "__main__":
main()
|
Add a script for printing file upload moments in the stream#!/usr/bin/env python3
#
# Copyright 2015 Secure Systems Group, Aalto University https://se-sy.org/.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A script that prints the positions of the uploads in the stream.
Usage:
./stream_positions.py <hash>
Output:
Integer indexes separated with newlines.
"""
import sys
import utils
def main():
hsh = int(sys.argv[-1], 16)
for (i, (upload, size)) in enumerate(utils.read_upload_stream()):
if upload == hsh:
print(i)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a script for printing file upload moments in the stream<commit_after>#!/usr/bin/env python3
#
# Copyright 2015 Secure Systems Group, Aalto University https://se-sy.org/.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A script that prints the positions of the uploads in the stream.
Usage:
./stream_positions.py <hash>
Output:
Integer indexes separated with newlines.
"""
import sys
import utils
def main():
hsh = int(sys.argv[-1], 16)
for (i, (upload, size)) in enumerate(utils.read_upload_stream()):
if upload == hsh:
print(i)
if __name__ == "__main__":
main()
|
|
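The filtering pattern in main() is easy to check in isolation; in this sketch a plain list of (hash, size) pairs stands in for utils.read_upload_stream():

def positions(stream, target):
    # Indexes at which `target` appears as the upload hash in (hash, size) pairs.
    return [i for i, (upload, _size) in enumerate(stream) if upload == target]

print(positions([(0xAB, 10), (0xCD, 5), (0xAB, 10)], 0xAB))   # [0, 2]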
ac9ac690e9ae584333eaf960703926c1b6cfb531
|
framework/archiver/__init__.py
|
framework/archiver/__init__.py
|
from website.project import signals as project_signals
class StatResult(object):
def __init__(self, target, problems=[], num_files=0, disk_usage=0, owners=[]):
self.target = target
self.num_files = num_files
self.disk_usage = disk_usage
self.owners = owners
self.problems = []
class AggregateStatResult(object):
def __init__(self, targets=[]):
self.targets = {
item.target: item
for item in targets
}
@property
def problems(self):
return reduce(lambda accum, target: (accum or []) + target.problems, self.targets.values())
@property
def num_files(self):
return reduce(lambda accum, target: (accum or 0) + target.num_files, self.targets.values())
@property
def disk_usage(self):
return reduce(lambda accum, target: (accum or 0) + target.disk_usage, self.targets.values())
class NodeArchiveError(Exception):
pass
class NodeArchiver(object):
def __init__(self):
pass
def stat_addons(self, node):
return AggregateStatResult([node_addon.stat() for node_addon in node.get_addons()])
def stat_node(self, node):
addon_result = self.stat_addons(node)
return addon_result
def _archive(self, src, dst):
# TODO
pass
@project_signals.after_create_registration.connect
def archive(self, src, dst, user):
src.archiving = True
result = self.stat_node(src)
if result.problems:
raise NodeArchiveError
else:
self._archive(src, dst)
|
Add preliminary class interface for Archiver
|
Add preliminary class interface for Archiver
|
Python
|
apache-2.0
|
DanielSBrown/osf.io,RomanZWang/osf.io,sloria/osf.io,zachjanicki/osf.io,sbt9uc/osf.io,TomHeatwole/osf.io,reinaH/osf.io,dplorimer/osf,jnayak1/osf.io,HalcyonChimera/osf.io,samchrisinger/osf.io,baylee-d/osf.io,dplorimer/osf,kch8qx/osf.io,CenterForOpenScience/osf.io,cosenal/osf.io,mluo613/osf.io,jmcarp/osf.io,alexschiller/osf.io,ckc6cz/osf.io,lyndsysimon/osf.io,danielneis/osf.io,wearpants/osf.io,crcresearch/osf.io,cslzchen/osf.io,aaxelb/osf.io,abought/osf.io,lyndsysimon/osf.io,haoyuchen1992/osf.io,arpitar/osf.io,RomanZWang/osf.io,GageGaskins/osf.io,cldershem/osf.io,petermalcolm/osf.io,asanfilippo7/osf.io,cosenal/osf.io,HarryRybacki/osf.io,brandonPurvis/osf.io,saradbowman/osf.io,fabianvf/osf.io,mluke93/osf.io,zamattiac/osf.io,sloria/osf.io,pattisdr/osf.io,acshi/osf.io,njantrania/osf.io,mattclark/osf.io,fabianvf/osf.io,jeffreyliu3230/osf.io,danielneis/osf.io,CenterForOpenScience/osf.io,jeffreyliu3230/osf.io,sbt9uc/osf.io,emetsger/osf.io,KAsante95/osf.io,ZobairAlijan/osf.io,billyhunt/osf.io,TomHeatwole/osf.io,chrisseto/osf.io,SSJohns/osf.io,danielneis/osf.io,HalcyonChimera/osf.io,fabianvf/osf.io,asanfilippo7/osf.io,mfraezz/osf.io,wearpants/osf.io,jolene-esposito/osf.io,samanehsan/osf.io,lyndsysimon/osf.io,haoyuchen1992/osf.io,hmoco/osf.io,icereval/osf.io,cslzchen/osf.io,jolene-esposito/osf.io,chrisseto/osf.io,caseyrygt/osf.io,petermalcolm/osf.io,KAsante95/osf.io,jolene-esposito/osf.io,kch8qx/osf.io,cwisecarver/osf.io,adlius/osf.io,monikagrabowska/osf.io,njantrania/osf.io,sbt9uc/osf.io,arpitar/osf.io,cosenal/osf.io,pattisdr/osf.io,felliott/osf.io,billyhunt/osf.io,monikagrabowska/osf.io,bdyetton/prettychart,ZobairAlijan/osf.io,leb2dg/osf.io,doublebits/osf.io,caseyrollins/osf.io,Johnetordoff/osf.io,crcresearch/osf.io,amyshi188/osf.io,Ghalko/osf.io,monikagrabowska/osf.io,Ghalko/osf.io,brianjgeiger/osf.io,GageGaskins/osf.io,bdyetton/prettychart,monikagrabowska/osf.io,TomBaxter/osf.io,rdhyee/osf.io,pattisdr/osf.io,lyndsysimon/osf.io,caseyrollins/osf.io,adlius/osf.io,TomBaxter/osf.io,KAsante95/osf.io,Ghalko/osf.io,binoculars/osf.io,CenterForOpenScience/osf.io,doublebits/osf.io,abought/osf.io,asanfilippo7/osf.io,billyhunt/osf.io,felliott/osf.io,mluo613/osf.io,acshi/osf.io,baylee-d/osf.io,arpitar/osf.io,KAsante95/osf.io,icereval/osf.io,TomHeatwole/osf.io,GageGaskins/osf.io,SSJohns/osf.io,aaxelb/osf.io,samchrisinger/osf.io,brandonPurvis/osf.io,chrisseto/osf.io,RomanZWang/osf.io,dplorimer/osf,RomanZWang/osf.io,jnayak1/osf.io,crcresearch/osf.io,acshi/osf.io,KAsante95/osf.io,ckc6cz/osf.io,Nesiehr/osf.io,emetsger/osf.io,jmcarp/osf.io,mluo613/osf.io,samanehsan/osf.io,jmcarp/osf.io,cosenal/osf.io,wearpants/osf.io,hmoco/osf.io,jeffreyliu3230/osf.io,GageGaskins/osf.io,MerlinZhang/osf.io,jolene-esposito/osf.io,cslzchen/osf.io,RomanZWang/osf.io,arpitar/osf.io,laurenrevere/osf.io,doublebits/osf.io,kch8qx/osf.io,emetsger/osf.io,mluo613/osf.io,cwisecarver/osf.io,cwisecarver/osf.io,mfraezz/osf.io,cwisecarver/osf.io,danielneis/osf.io,MerlinZhang/osf.io,cldershem/osf.io,rdhyee/osf.io,brandonPurvis/osf.io,reinaH/osf.io,kwierman/osf.io,billyhunt/osf.io,jeffreyliu3230/osf.io,brianjgeiger/osf.io,adlius/osf.io,erinspace/osf.io,saradbowman/osf.io,ckc6cz/osf.io,cslzchen/osf.io,caseyrygt/osf.io,CenterForOpenScience/osf.io,DanielSBrown/osf.io,alexschiller/osf.io,zachjanicki/osf.io,petermalcolm/osf.io,MerlinZhang/osf.io,zachjanicki/osf.io,hmoco/osf.io,SSJohns/osf.io,HalcyonChimera/osf.io,bdyetton/prettychart,HarryRybacki/osf.io,cldershem/osf.io,acshi/osf.io,mfraezz/osf.io,samanehsan/osf.io,fabianvf/osf.io,DanielSBrown/osf.io,mfraezz/
osf.io,MerlinZhang/osf.io,petermalcolm/osf.io,chennan47/osf.io,laurenrevere/osf.io,asanfilippo7/osf.io,reinaH/osf.io,brandonPurvis/osf.io,haoyuchen1992/osf.io,mluke93/osf.io,leb2dg/osf.io,jmcarp/osf.io,brianjgeiger/osf.io,Nesiehr/osf.io,doublebits/osf.io,mluke93/osf.io,jinluyuan/osf.io,billyhunt/osf.io,samchrisinger/osf.io,hmoco/osf.io,HarryRybacki/osf.io,felliott/osf.io,SSJohns/osf.io,caneruguz/osf.io,jinluyuan/osf.io,njantrania/osf.io,abought/osf.io,jnayak1/osf.io,rdhyee/osf.io,abought/osf.io,kwierman/osf.io,dplorimer/osf,Nesiehr/osf.io,bdyetton/prettychart,kch8qx/osf.io,chennan47/osf.io,mattclark/osf.io,adlius/osf.io,ticklemepierce/osf.io,Nesiehr/osf.io,chennan47/osf.io,jinluyuan/osf.io,cldershem/osf.io,samanehsan/osf.io,zachjanicki/osf.io,samchrisinger/osf.io,kch8qx/osf.io,mluke93/osf.io,DanielSBrown/osf.io,chrisseto/osf.io,alexschiller/osf.io,monikagrabowska/osf.io,Ghalko/osf.io,acshi/osf.io,haoyuchen1992/osf.io,icereval/osf.io,brianjgeiger/osf.io,binoculars/osf.io,ZobairAlijan/osf.io,erinspace/osf.io,ticklemepierce/osf.io,alexschiller/osf.io,jnayak1/osf.io,leb2dg/osf.io,baylee-d/osf.io,leb2dg/osf.io,ckc6cz/osf.io,amyshi188/osf.io,Johnetordoff/osf.io,erinspace/osf.io,alexschiller/osf.io,kwierman/osf.io,caneruguz/osf.io,Johnetordoff/osf.io,GageGaskins/osf.io,mluo613/osf.io,rdhyee/osf.io,HarryRybacki/osf.io,zamattiac/osf.io,wearpants/osf.io,TomBaxter/osf.io,ticklemepierce/osf.io,doublebits/osf.io,caseyrygt/osf.io,njantrania/osf.io,HalcyonChimera/osf.io,caneruguz/osf.io,emetsger/osf.io,ticklemepierce/osf.io,caseyrygt/osf.io,ZobairAlijan/osf.io,amyshi188/osf.io,Johnetordoff/osf.io,jinluyuan/osf.io,amyshi188/osf.io,zamattiac/osf.io,brandonPurvis/osf.io,TomHeatwole/osf.io,reinaH/osf.io,caneruguz/osf.io,aaxelb/osf.io,aaxelb/osf.io,sbt9uc/osf.io,felliott/osf.io,mattclark/osf.io,kwierman/osf.io,caseyrollins/osf.io,laurenrevere/osf.io,zamattiac/osf.io,binoculars/osf.io,sloria/osf.io
|
Add preliminary class interface for Archiver
|
from website.project import signals as project_signals
class StatResult(object):
def __init__(self, target, problems=[], num_files=0, disk_usage=0, owners=[]):
self.target = target
self.num_files = num_files
self.disk_usage = disk_usage
self.owners = owners
self.problems = []
class AggregateStatResult(object):
def __init__(self, targets=[]):
self.targets = {
item.target: item
for item in targets
}
@property
def problems(self):
return reduce(lambda accum, target: (accum or []) + target.problems, self.targets.values())
@property
def num_files(self):
return reduce(lambda accum, target: (accum or 0) + target.num_files, self.targets.values())
@property
def disk_usage(self):
return reduce(lambda accum, target: (accum or 0) + target.disk_usage, self.targets.values())
class NodeArchiveError(Exception):
pass
class NodeArchiver(object):
def __init__(self):
pass
def stat_addons(self, node):
return AggregateStatResult([node_addon.stat() for node_addon in node.get_addons()])
def stat_node(self, node):
addon_result = self.stat_addons(node)
return addon_result
def _archive(self, src, dst):
# TODO
pass
@project_signals.after_create_registration.connect
def archive(self, src, dst, user):
src.archiving = True
result = self.stat_node(src)
if result.problems:
raise NodeArchiveError
else:
self._archive(src, dst)
|
<commit_before><commit_msg>Add preliminary class interface for Archiver<commit_after>
|
from website.project import signals as project_signals
class StatResult(object):
def __init__(self, target, problems=[], num_files=0, disk_usage=0, owners=[]):
self.target = target
self.num_files = num_files
self.disk_usage = disk_usage
self.owners = owners
self.problems = []
class AggregateStatResult(object):
def __init__(self, targets=[]):
self.targets = {
item.target: item
for item in targets
}
@property
def problems(self):
return reduce(lambda accum, target: (accum or []) + target.problems, self.targets.values())
@property
def num_files(self):
return reduce(lambda accum, target: (accum or 0) + target.num_files, self.targets.values())
@property
def disk_usage(self):
return reduce(lambda accum, target: (accum or 0) + target.disk_usage, self.targets.values())
class NodeArchiveError(Exception):
pass
class NodeArchiver(object):
def __init__(self):
pass
def stat_addons(self, node):
return AggregateStatResult([node_addon.stat() for node_addon in node.get_addons()])
def stat_node(self, node):
addon_result = self.stat_addons(node)
return addon_result
def _archive(self, src, dst):
# TODO
pass
@project_signals.after_create_registration.connect
def archive(self, src, dst, user):
src.archiving = True
result = self.stat_node(src)
if result.problems:
raise NodeArchiveError
else:
self._archive(src, dst)
|
Add preliminary class interface for Archiverfrom website.project import signals as project_signals
class StatResult(object):
def __init__(self, target, problems=[], num_files=0, disk_usage=0, owners=[]):
self.target = target
self.num_files = num_files
self.disk_usage = disk_usage
self.owners = owners
self.problems = []
class AggregateStatResult(object):
def __init__(self, targets=[]):
self.targets = {
item.target: item
for item in targets
}
@property
def problems(self):
return reduce(lambda accum, target: (accum or []) + target.problems, self.targets.values())
@property
def num_files(self):
return reduce(lambda accum, target: (accum or 0) + target.num_files, self.targets.values())
@property
def disk_usage(self):
return reduce(lambda accum, target: (accum or 0) + target.disk_usage, self.targets.values())
class NodeArchiveError(Exception):
pass
class NodeArchiver(object):
def __init__(self):
pass
def stat_addons(self, node):
return AggregateStatResult([node_addon.stat() for node_addon in node.get_addons()])
def stat_node(self, node):
addon_result = self.stat_addons(node)
return addon_result
def _archive(self, src, dst):
# TODO
pass
@project_signals.after_create_registration.connect
def archive(self, src, dst, user):
src.archiving = True
result = self.stat_node(src)
if result.problems:
raise NodeArchiveError
else:
self._archive(src, dst)
|
<commit_before><commit_msg>Add preliminary class interface for Archiver<commit_after>from website.project import signals as project_signals
class StatResult(object):
def __init__(self, target, problems=[], num_files=0, disk_usage=0, owners=[]):
self.target = target
self.num_files = num_files
self.disk_usage = disk_usage
self.owners = owners
self.problems = []
class AggregateStatResult(object):
def __init__(self, targets=[]):
self.targets = {
item.target: item
for item in targets
}
@property
def problems(self):
return reduce(lambda accum, target: (accum or []) + target.problems, self.targets.values())
@property
def num_files(self):
return reduce(lambda accum, target: (accum or 0) + target.num_files, self.targets.values())
@property
def disk_usage(self):
return reduce(lambda accum, target: (accum or 0) + target.disk_usage, self.targets.values())
class NodeArchiveError(Exception):
pass
class NodeArchiver(object):
def __init__(self):
pass
def stat_addons(self, node):
return AggregateStatResult([node_addon.stat() for node_addon in node.get_addons()])
def stat_node(self, node):
addon_result = self.stat_addons(node)
return addon_result
def _archive(self, src, dst):
# TODO
pass
@project_signals.after_create_registration.connect
def archive(self, src, dst, user):
src.archiving = True
result = self.stat_node(src)
if result.problems:
raise NodeArchiveError
else:
self._archive(src, dst)
|
|
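Note that the aggregate properties above call reduce() over StatResult objects without an initial value, so the accumulator starts out as a StatResult rather than a number. A standalone sketch (not the OSF code) of the intended roll-up with an explicit initializer; the addon names and figures are made up:

from functools import reduce

per_addon = [('osfstorage', 12, 2048), ('github', 3, 512)]   # (target, num_files, disk_usage)
num_files = reduce(lambda acc, item: acc + item[1], per_addon, 0)
disk_usage = reduce(lambda acc, item: acc + item[2], per_addon, 0)
print(num_files, disk_usage)   # 15 2560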
70c8b573e0d1ff84da0b3449563a9d7bea508843
|
morph_proxy.py
|
morph_proxy.py
|
# Run this with mitmdump -q -s morph_proxy.py
def request(context, flow):
# print out all the basic information to determine what request is being made
# coming from which container
# print flow.request.method
# print flow.request.host
# print flow.request.path
# print flow.request.scheme
# print flow.request.client_conn.address[0]
# print "***"
#text = flow.request.method + " " + flow.request.scheme + "://" + flow.request.host + flow.request.path + " FROM " + flow.request.client_conn
text = flow.request.method + " " + flow.request.scheme + "://" + flow.request.host + flow.request.path + " FROM " + flow.request.client_conn.address[0]
print text
# print "***"
|
Use mitmproxy to get basic information
|
Use mitmproxy to get basic information
|
Python
|
agpl-3.0
|
otherchirps/morph,OpenAddressesUK/morph,otherchirps/morph,openaustralia/morph,openaustralia/morph,otherchirps/morph,OpenAddressesUK/morph,otherchirps/morph,otherchirps/morph,otherchirps/morph,openaustralia/morph,openaustralia/morph,OpenAddressesUK/morph,OpenAddressesUK/morph,openaustralia/morph,openaustralia/morph,openaustralia/morph,otherchirps/morph
|
Use mitmproxy to get basic information
|
# Run this with mitmdump -q -s morph_proxy.py
def request(context, flow):
# print out all the basic information to determine what request is being made
# coming from which container
# print flow.request.method
# print flow.request.host
# print flow.request.path
# print flow.request.scheme
# print flow.request.client_conn.address[0]
# print "***"
#text = flow.request.method + " " + flow.request.scheme + "://" + flow.request.host + flow.request.path + " FROM " + flow.request.client_conn
text = flow.request.method + " " + flow.request.scheme + "://" + flow.request.host + flow.request.path + " FROM " + flow.request.client_conn.address[0]
print text
# print "***"
|
<commit_before><commit_msg>Use mitmproxy to get basic information<commit_after>
|
# Run this with mitmdump -q -s morph_proxy.py
def request(context, flow):
# print out all the basic information to determine what request is being made
# coming from which container
# print flow.request.method
# print flow.request.host
# print flow.request.path
# print flow.request.scheme
# print flow.request.client_conn.address[0]
# print "***"
#text = flow.request.method + " " + flow.request.scheme + "://" + flow.request.host + flow.request.path + " FROM " + flow.request.client_conn
text = flow.request.method + " " + flow.request.scheme + "://" + flow.request.host + flow.request.path + " FROM " + flow.request.client_conn.address[0]
print text
# print "***"
|
Use mitmproxy to get basic information# Run this with mitmdump -q -s morph_proxy.py
def request(context, flow):
# print out all the basic information to determine what request is being made
# coming from which container
# print flow.request.method
# print flow.request.host
# print flow.request.path
# print flow.request.scheme
# print flow.request.client_conn.address[0]
# print "***"
#text = flow.request.method + " " + flow.request.scheme + "://" + flow.request.host + flow.request.path + " FROM " + flow.request.client_conn
text = flow.request.method + " " + flow.request.scheme + "://" + flow.request.host + flow.request.path + " FROM " + flow.request.client_conn.address[0]
print text
# print "***"
|
<commit_before><commit_msg>Use mitmproxy to get basic information<commit_after># Run this with mitmdump -q -s morph_proxy.py
def request(context, flow):
# print out all the basic information to determine what request is being made
# coming from which container
# print flow.request.method
# print flow.request.host
# print flow.request.path
# print flow.request.scheme
# print flow.request.client_conn.address[0]
# print "***"
#text = flow.request.method + " " + flow.request.scheme + "://" + flow.request.host + flow.request.path + " FROM " + flow.request.client_conn
text = flow.request.method + " " + flow.request.scheme + "://" + flow.request.host + flow.request.path + " FROM " + flow.request.client_conn.address[0]
print text
# print "***"
|
|
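The log line above is built by string concatenation; a standalone sketch of the same line with str.format(), using plain placeholder values in place of the mitmproxy flow object:

method, scheme, host, path, client = 'GET', 'http', 'example.org', '/robots.txt', '172.17.0.2'
print('{0} {1}://{2}{3} FROM {4}'.format(method, scheme, host, path, client))
# GET http://example.org/robots.txt FROM 172.17.0.2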
b9d8406282c4017feca039a291699ae90f459f17
|
trove/guestagent/common/timeutils.py
|
trove/guestagent/common/timeutils.py
|
from datetime import datetime
from oslo_utils import timeutils
def float_utcnow():
return float(datetime.strftime(timeutils.utcnow(), "%s.%f"))
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from oslo_utils import timeutils
def float_utcnow():
return float(datetime.strftime(timeutils.utcnow(), "%s.%f"))
|
Add Apache 2.0 license to source file
|
Add Apache 2.0 license to source file
As per OpenStack licensing guide lines [1]:
[H102 H103] Newly contributed Source Code should be licensed under
the Apache 2.0 license.
[H104] Files with no code shouldn't contain any license header nor
comments, and must be left completely empty.
[1] http://docs.openstack.org/developer/hacking/#openstack-licensing
Change-Id: I38f2f1b1429ea49fb02ae1c5cb3e68d547bc1783
|
Python
|
apache-2.0
|
openstack/trove,zhangg/trove,hplustree/trove,openstack/trove,hplustree/trove,zhangg/trove
|
from datetime import datetime
from oslo_utils import timeutils
def float_utcnow():
return float(datetime.strftime(timeutils.utcnow(), "%s.%f"))
Add Apache 2.0 license to source file
As per OpenStack licensing guide lines [1]:
[H102 H103] Newly contributed Source Code should be licensed under
the Apache 2.0 license.
[H104] Files with no code shouldn't contain any license header nor
comments, and must be left completely empty.
[1] http://docs.openstack.org/developer/hacking/#openstack-licensing
Change-Id: I38f2f1b1429ea49fb02ae1c5cb3e68d547bc1783
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from oslo_utils import timeutils
def float_utcnow():
return float(datetime.strftime(timeutils.utcnow(), "%s.%f"))
|
<commit_before>from datetime import datetime
from oslo_utils import timeutils
def float_utcnow():
return float(datetime.strftime(timeutils.utcnow(), "%s.%f"))
<commit_msg>Add Apache 2.0 license to source file
As per OpenStack licensing guide lines [1]:
[H102 H103] Newly contributed Source Code should be licensed under
the Apache 2.0 license.
[H104] Files with no code shouldn't contain any license header nor
comments, and must be left completely empty.
[1] http://docs.openstack.org/developer/hacking/#openstack-licensing
Change-Id: I38f2f1b1429ea49fb02ae1c5cb3e68d547bc1783<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from oslo_utils import timeutils
def float_utcnow():
return float(datetime.strftime(timeutils.utcnow(), "%s.%f"))
|
from datetime import datetime
from oslo_utils import timeutils
def float_utcnow():
return float(datetime.strftime(timeutils.utcnow(), "%s.%f"))
Add Apache 2.0 license to source file
As per OpenStack licensing guide lines [1]:
[H102 H103] Newly contributed Source Code should be licensed under
the Apache 2.0 license.
[H104] Files with no code shouldn't contain any license header nor
comments, and must be left completely empty.
[1] http://docs.openstack.org/developer/hacking/#openstack-licensing
Change-Id: I38f2f1b1429ea49fb02ae1c5cb3e68d547bc1783# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from oslo_utils import timeutils
def float_utcnow():
return float(datetime.strftime(timeutils.utcnow(), "%s.%f"))
|
<commit_before>from datetime import datetime
from oslo_utils import timeutils
def float_utcnow():
return float(datetime.strftime(timeutils.utcnow(), "%s.%f"))
<commit_msg>Add Apache 2.0 license to source file
As per OpenStack licensing guide lines [1]:
[H102 H103] Newly contributed Source Code should be licensed under
the Apache 2.0 license.
[H104] Files with no code shouldn't contain any license header nor
comments, and must be left completely empty.
[1] http://docs.openstack.org/developer/hacking/#openstack-licensing
Change-Id: I38f2f1b1429ea49fb02ae1c5cb3e68d547bc1783<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from oslo_utils import timeutils
def float_utcnow():
return float(datetime.strftime(timeutils.utcnow(), "%s.%f"))
|
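The "%s" directive used above is a platform strftime extension rather than a documented Python format code, so its availability varies. A standalone sketch of an equivalent float-seconds timestamp that avoids it, shown as an alternative rather than the Trove implementation:

import calendar
from datetime import datetime


def float_utcnow():
    now = datetime.utcnow()
    # Seconds since the epoch for a UTC datetime, plus the fractional part.
    return calendar.timegm(now.timetuple()) + now.microsecond / 1e6


print(float_utcnow())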
fa6a9e58a1ebd30dc202d41d9bf3e795b0043c9b
|
usingnamespace/api/views/v1/entry.py
|
usingnamespace/api/views/v1/entry.py
|
from pyramid.view import (
view_config,
view_defaults,
)
from ....views.finalisecontext import FinaliseContext
@view_defaults(context='...traversal.v1.Entry',
route_name='api',
renderer='json',
)
class Site(FinaliseContext):
@view_config()
def main(self):
entry = self.context.entry
return {
'id': str(entry.id),
'slug': entry.slug,
#'created': entry.created, # Convert to string
#'modified': entry.modified, # Convert to string
'title': entry.title,
'entry': entry.current_revision.entry,
'tags': [tag.tag for tag in entry.tags],
'published': {
'year': entry.year,
'month': entry.month,
'day': entry.day,
'time': entry.time,
} if entry.pubdate else {},
}
|
Add view for a single Entry
|
Add view for a single Entry
|
Python
|
isc
|
usingnamespace/usingnamespace
|
Add view for a single Entry
|
from pyramid.view import (
view_config,
view_defaults,
)
from ....views.finalisecontext import FinaliseContext
@view_defaults(context='...traversal.v1.Entry',
route_name='api',
renderer='json',
)
class Site(FinaliseContext):
@view_config()
def main(self):
entry = self.context.entry
return {
'id': str(entry.id),
'slug': entry.slug,
#'created': entry.created, # Convert to string
#'modified': entry.modified, # Convert to string
'title': entry.title,
'entry': entry.current_revision.entry,
'tags': [tag.tag for tag in entry.tags],
'published': {
'year': entry.year,
'month': entry.month,
'day': entry.day,
'time': entry.time,
} if entry.pubdate else {},
}
|
<commit_before><commit_msg>Add view for a single Entry<commit_after>
|
from pyramid.view import (
view_config,
view_defaults,
)
from ....views.finalisecontext import FinaliseContext
@view_defaults(context='...traversal.v1.Entry',
route_name='api',
renderer='json',
)
class Site(FinaliseContext):
@view_config()
def main(self):
entry = self.context.entry
return {
'id': str(entry.id),
'slug': entry.slug,
#'created': entry.created, # Convert to string
#'modified': entry.modified, # Convert to string
'title': entry.title,
'entry': entry.current_revision.entry,
'tags': [tag.tag for tag in entry.tags],
'published': {
'year': entry.year,
'month': entry.month,
'day': entry.day,
'time': entry.time,
} if entry.pubdate else {},
}
|
Add view for a single Entryfrom pyramid.view import (
view_config,
view_defaults,
)
from ....views.finalisecontext import FinaliseContext
@view_defaults(context='...traversal.v1.Entry',
route_name='api',
renderer='json',
)
class Site(FinaliseContext):
@view_config()
def main(self):
entry = self.context.entry
return {
'id': str(entry.id),
'slug': entry.slug,
#'created': entry.created, # Convert to string
#'modified': entry.modified, # Convert to string
'title': entry.title,
'entry': entry.current_revision.entry,
'tags': [tag.tag for tag in entry.tags],
'published': {
'year': entry.year,
'month': entry.month,
'day': entry.day,
'time': entry.time,
} if entry.pubdate else {},
}
|
<commit_before><commit_msg>Add view for a single Entry<commit_after>from pyramid.view import (
view_config,
view_defaults,
)
from ....views.finalisecontext import FinaliseContext
@view_defaults(context='...traversal.v1.Entry',
route_name='api',
renderer='json',
)
class Site(FinaliseContext):
@view_config()
def main(self):
entry = self.context.entry
return {
'id': str(entry.id),
'slug': entry.slug,
#'created': entry.created, # Convert to string
#'modified': entry.modified, # Convert to string
'title': entry.title,
'entry': entry.current_revision.entry,
'tags': [tag.tag for tag in entry.tags],
'published': {
'year': entry.year,
'month': entry.month,
'day': entry.day,
'time': entry.time,
} if entry.pubdate else {},
}
|
|
55ea281c96228e46b8520f2aa7305116c21c6b20
|
mailchimp3/entities/campaignfolder.py
|
mailchimp3/entities/campaignfolder.py
|
# coding=utf-8
"""
The Campaign Folders API endpoints
Documentation: http://developer.mailchimp.com/documentation/mailchimp/reference/campaign-folders/
Schema: https://api.mailchimp.com/schema/3.0/CampaignFolders/Instance.json
"""
from __future__ import unicode_literals
from mailchimp3.baseapi import BaseApi
class CampaignFolder(BaseApi):
"""
Organize your campaigns using folders.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(CampaignFolder, self).__init__(*args, **kwargs)
self.endpoint = 'campaign-folders'
self.folder_id = None
def create(self, data):
"""
Create a new campaign folder.
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"name": string*
}
"""
response = self._mc_client._post(url=self._build_path(), data=data)
self.folder_id = response['id']
return response
def all(self, get_all=False, **queryparams):
"""
Get all folders used to organize campaigns.
:param get_all: Should the query get all results
:type get_all: :py:class:`bool`
:param queryparams: The query string parameters
queryparams['fields'] = []
queryparams['exclude_fields'] = []
queryparams['count'] = integer
queryparams['offset'] = integer
"""
self.folder_id = None
if get_all:
return self._iterate(url=self._build_path(), **queryparams)
else:
return self._mc_client._get(url=self._build_path(), **queryparams)
def get(self, folder_id, **queryparams):
"""
Get information about a specific folder used to organize campaigns.
:param folder_id: The unique id for the campaign folder.
:type folder_id: :py:class:`str`
:param queryparams: The query string parameters
queryparams['fields'] = []
queryparams['exclude_fields'] = []
"""
self.folder_id = folder_id
return self._mc_client._get(url=self._build_path(folder_id), **queryparams)
def update(self, folder_id, data):
"""
Update a specific folder used to organize campaigns.
:param folder_id: The unique id for the campaign folder.
:type folder_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"name": string*
}
"""
self.folder_id = folder_id
return self._mc_client._patch(url=self._build_path(folder_id), data=data)
def delete(self, folder_id):
"""
Delete a specific campaign folder, and mark all the campaigns in the
folder as ‘unfiled’.
:param folder_id: The unique id for the campaign folder.
:type folder_id: :py:class:`str`
"""
self.folder_id = folder_id
return self._mc_client._delete(url=self._build_path(folder_id))
|
Implement the Campaign Folders endpoint
|
Implement the Campaign Folders endpoint
|
Python
|
mit
|
charlesthk/python-mailchimp
|
Implement the Campaign Folders endpoint
|
# coding=utf-8
"""
The Campaign Folders API endpoints
Documentation: http://developer.mailchimp.com/documentation/mailchimp/reference/campaign-folders/
Schema: https://api.mailchimp.com/schema/3.0/CampaignFolders/Instance.json
"""
from __future__ import unicode_literals
from mailchimp3.baseapi import BaseApi
class CampaignFolder(BaseApi):
"""
Organize your campaigns using folders.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(CampaignFolder, self).__init__(*args, **kwargs)
self.endpoint = 'campaign-folders'
self.folder_id = None
def create(self, data):
"""
Create a new campaign folder.
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"name": string*
}
"""
response = self._mc_client._post(url=self._build_path(), data=data)
self.folder_id = response['id']
return response
def all(self, get_all=False, **queryparams):
"""
Get all folders used to organize campaigns.
:param get_all: Should the query get all results
:type get_all: :py:class:`bool`
:param queryparams: The query string parameters
queryparams['fields'] = []
queryparams['exclude_fields'] = []
queryparams['count'] = integer
queryparams['offset'] = integer
"""
self.folder_id = None
if get_all:
return self._iterate(url=self._build_path(), **queryparams)
else:
return self._mc_client._get(url=self._build_path(), **queryparams)
def get(self, folder_id, **queryparams):
"""
Get information about a specific folder used to organize campaigns.
:param folder_id: The unique id for the campaign folder.
:type folder_id: :py:class:`str`
:param queryparams: The query string parameters
queryparams['fields'] = []
queryparams['exclude_fields'] = []
"""
self.folder_id = folder_id
return self._mc_client._get(url=self._build_path(folder_id), **queryparams)
def update(self, folder_id, data):
"""
Update a specific folder used to organize campaigns.
:param folder_id: The unique id for the campaign folder.
:type folder_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"name": string*
}
"""
self.folder_id = folder_id
return self._mc_client._patch(url=self._build_path(folder_id), data=data)
def delete(self, folder_id):
"""
Delete a specific campaign folder, and mark all the campaigns in the
folder as ‘unfiled’.
:param folder_id: The unique id for the campaign folder.
:type folder_id: :py:class:`str`
"""
self.folder_id = folder_id
return self._mc_client._delete(url=self._build_path(folder_id))
|
<commit_before><commit_msg>Implement the Campaign Folders endpoint<commit_after>
|
# coding=utf-8
"""
The Campaign Folders API endpoints
Documentation: http://developer.mailchimp.com/documentation/mailchimp/reference/campaign-folders/
Schema: https://api.mailchimp.com/schema/3.0/CampaignFolders/Instance.json
"""
from __future__ import unicode_literals
from mailchimp3.baseapi import BaseApi
class CampaignFolder(BaseApi):
"""
Organize your campaigns using folders.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(CampaignFolder, self).__init__(*args, **kwargs)
self.endpoint = 'campaign-folders'
self.folder_id = None
def create(self, data):
"""
Create a new campaign folder.
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"name": string*
}
"""
response = self._mc_client._post(url=self._build_path(), data=data)
self.folder_id = response['id']
return response
def all(self, get_all=False, **queryparams):
"""
Get all folders used to organize campaigns.
:param get_all: Should the query get all results
:type get_all: :py:class:`bool`
:param queryparams: The query string parameters
queryparams['fields'] = []
queryparams['exclude_fields'] = []
queryparams['count'] = integer
queryparams['offset'] = integer
"""
self.folder_id = None
if get_all:
return self._iterate(url=self._build_path(), **queryparams)
else:
return self._mc_client._get(url=self._build_path(), **queryparams)
def get(self, folder_id, **queryparams):
"""
Get information about a specific folder used to organize campaigns.
:param folder_id: The unique id for the campaign folder.
:type folder_id: :py:class:`str`
:param queryparams: The query string parameters
queryparams['fields'] = []
queryparams['exclude_fields'] = []
"""
self.folder_id = folder_id
return self._mc_client._get(url=self._build_path(folder_id), **queryparams)
def update(self, folder_id, data):
"""
Update a specific folder used to organize campaigns.
:param folder_id: The unique id for the campaign folder.
:type folder_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"name": string*
}
"""
self.folder_id = folder_id
return self._mc_client._patch(url=self._build_path(folder_id), data=data)
def delete(self, folder_id):
"""
Delete a specific campaign folder, and mark all the campaigns in the
folder as ‘unfiled’.
:param folder_id: The unique id for the campaign folder.
:type folder_id: :py:class:`str`
"""
self.folder_id = folder_id
return self._mc_client._delete(url=self._build_path(folder_id))
|
Implement the Campaign Folders endpoint# coding=utf-8
"""
The Campaign Folders API endpoints
Documentation: http://developer.mailchimp.com/documentation/mailchimp/reference/campaign-folders/
Schema: https://api.mailchimp.com/schema/3.0/CampaignFolders/Instance.json
"""
from __future__ import unicode_literals
from mailchimp3.baseapi import BaseApi
class CampaignFolder(BaseApi):
"""
Organize your campaigns using folders.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(CampaignFolder, self).__init__(*args, **kwargs)
self.endpoint = 'campaign-folders'
self.folder_id = None
def create(self, data):
"""
Create a new campaign folder.
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"name": string*
}
"""
response = self._mc_client._post(url=self._build_path(), data=data)
self.folder_id = response['id']
return response
def all(self, get_all=False, **queryparams):
"""
Get all folders used to organize campaigns.
:param get_all: Should the query get all results
:type get_all: :py:class:`bool`
:param queryparams: The query string parameters
queryparams['fields'] = []
queryparams['exclude_fields'] = []
queryparams['count'] = integer
queryparams['offset'] = integer
"""
self.folder_id = None
if get_all:
return self._iterate(url=self._build_path(), **queryparams)
else:
return self._mc_client._get(url=self._build_path(), **queryparams)
def get(self, folder_id, **queryparams):
"""
Get information about a specific folder used to organize campaigns.
:param folder_id: The unique id for the campaign folder.
:type folder_id: :py:class:`str`
:param queryparams: The query string parameters
queryparams['fields'] = []
queryparams['exclude_fields'] = []
"""
self.folder_id = folder_id
return self._mc_client._get(url=self._build_path(folder_id), **queryparams)
def update(self, folder_id, data):
"""
Update a specific folder used to organize campaigns.
:param folder_id: The unique id for the campaign folder.
:type folder_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"name": string*
}
"""
self.folder_id = folder_id
return self._mc_client._patch(url=self._build_path(folder_id), data=data)
def delete(self, folder_id):
"""
Delete a specific campaign folder, and mark all the campaigns in the
folder as ‘unfiled’.
:param folder_id: The unique id for the campaign folder.
:type folder_id: :py:class:`str`
"""
self.folder_id = folder_id
return self._mc_client._delete(url=self._build_path(folder_id))
|
<commit_before><commit_msg>Implement the Campaign Folders endpoint<commit_after># coding=utf-8
"""
The Campaign Folders API endpoints
Documentation: http://developer.mailchimp.com/documentation/mailchimp/reference/campaign-folders/
Schema: https://api.mailchimp.com/schema/3.0/CampaignFolders/Instance.json
"""
from __future__ import unicode_literals
from mailchimp3.baseapi import BaseApi
class CampaignFolder(BaseApi):
"""
Organize your campaigns using folders.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(CampaignFolder, self).__init__(*args, **kwargs)
self.endpoint = 'campaign-folders'
self.folder_id = None
def create(self, data):
"""
Create a new campaign folder.
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"name": string*
}
"""
response = self._mc_client._post(url=self._build_path(), data=data)
self.folder_id = response['id']
return response
def all(self, get_all=False, **queryparams):
"""
Get all folders used to organize campaigns.
:param get_all: Should the query get all results
:type get_all: :py:class:`bool`
:param queryparams: The query string parameters
queryparams['fields'] = []
queryparams['exclude_fields'] = []
queryparams['count'] = integer
queryparams['offset'] = integer
"""
self.folder_id = None
if get_all:
return self._iterate(url=self._build_path(), **queryparams)
else:
return self._mc_client._get(url=self._build_path(), **queryparams)
def get(self, folder_id, **queryparams):
"""
Get information about a specific folder used to organize campaigns.
:param folder_id: The unique id for the campaign folder.
:type folder_id: :py:class:`str`
:param queryparams: The query string parameters
queryparams['fields'] = []
queryparams['exclude_fields'] = []
"""
self.folder_id = folder_id
return self._mc_client._get(url=self._build_path(folder_id), **queryparams)
def update(self, folder_id, data):
"""
Update a specific folder used to organize campaigns.
:param folder_id: The unique id for the campaign folder.
:type folder_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"name": string*
}
"""
self.folder_id = folder_id
return self._mc_client._patch(url=self._build_path(folder_id), data=data)
def delete(self, folder_id):
"""
Delete a specific campaign folder, and mark all the campaigns in the
folder as ‘unfiled’.
:param folder_id: The unique id for the campaign folder.
:type folder_id: :py:class:`str`
"""
self.folder_id = folder_id
return self._mc_client._delete(url=self._build_path(folder_id))
|
|
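A minimal usage sketch for the CampaignFolder endpoint above. Only the create/all/get/update/delete signatures are taken from the code itself; the client construction and the attribute name used to reach the endpoint are assumptions about the mailchimp3 wrapper.

from mailchimp3 import MailChimp

client = MailChimp('your-api-key', 'your-username')   # constructor arguments are illustrative
folders = client.campaign_folders                     # attribute name is an assumption

new_folder = folders.create(data={'name': 'Newsletters'})   # "name" is the one required field
folder_id = new_folder['id']

folders.all(get_all=True)                                   # page through every folder
folders.get(folder_id=folder_id)
folders.update(folder_id=folder_id, data={'name': 'Weekly newsletters'})
folders.delete(folder_id=folder_id)                         # campaigns inside become 'unfiled'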
ede8c653722d8f316a368ec490023cff3abdeed0
|
dashboard/helpers.py
|
dashboard/helpers.py
|
import json
import requests
def get_coin_price(coin_name):
url = "https://bittrex.com/api/v1.1/public/getticker?market=USDT-{}".format(coin_name)
data = requests.get(url).json()
last_price = data.get("result").get("Last")
data = json.dumps({
coin_name: last_price
})
return data
|
Add helper to get the coin price
|
Add helper to get the coin price
|
Python
|
mit
|
alessandroHenrique/coinpricemonitor,alessandroHenrique/coinpricemonitor,alessandroHenrique/coinpricemonitor
|
Add helper to get the coin price
|
import json
import requests
def get_coin_price(coin_name):
url = "https://bittrex.com/api/v1.1/public/getticker?market=USDT-{}".format(coin_name)
data = requests.get(url).json()
last_price = data.get("result").get("Last")
data = json.dumps({
coin_name: last_price
})
return data
|
<commit_before><commit_msg>Add helper to get the coin price<commit_after>
|
import json
import requests
def get_coin_price(coin_name):
url = "https://bittrex.com/api/v1.1/public/getticker?market=USDT-{}".format(coin_name)
data = requests.get(url).json()
last_price = data.get("result").get("Last")
data = json.dumps({
coin_name: last_price
})
return data
|
Add helper to get the coin priceimport json
import requests
def get_coin_price(coin_name):
url = "https://bittrex.com/api/v1.1/public/getticker?market=USDT-{}".format(coin_name)
data = requests.get(url).json()
last_price = data.get("result").get("Last")
data = json.dumps({
coin_name: last_price
})
return data
|
<commit_before><commit_msg>Add helper to get the coin price<commit_after>import json
import requests
def get_coin_price(coin_name):
url = "https://bittrex.com/api/v1.1/public/getticker?market=USDT-{}".format(coin_name)
data = requests.get(url).json()
last_price = data.get("result").get("Last")
data = json.dumps({
coin_name: last_price
})
return data
|
|
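A short usage sketch for get_coin_price above. It assumes network access to the Bittrex public API and that a USDT market exists for the given symbol; note the helper returns a JSON string, not a dict.

import json

price_json = get_coin_price('BTC')        # e.g. '{"BTC": 39584.123}' (value is illustrative)
last_price = json.loads(price_json)['BTC']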
5ea598d820997526bcde6e83b01852cc69f22d9b
|
scripts/ensure_log_backrefs.py
|
scripts/ensure_log_backrefs.py
|
# -*- coding: utf-8 -*-
from website.app import init_app
from website import models
from modularodm.storedobject import ensure_backrefs
def main():
init_app(routes=False)
for record in models.Node.find():
ensure_backrefs(record, ['logs'])
print('Done.')
if __name__ == "__main__":
main()
|
Add script for ensuring log backrefs are correct
|
Add script for ensuring log backrefs are correct
|
Python
|
apache-2.0
|
wearpants/osf.io,caneruguz/osf.io,RomanZWang/osf.io,baylee-d/osf.io,TomBaxter/osf.io,CenterForOpenScience/osf.io,RomanZWang/osf.io,binoculars/osf.io,acshi/osf.io,jnayak1/osf.io,mluo613/osf.io,zamattiac/osf.io,alexschiller/osf.io,mfraezz/osf.io,amyshi188/osf.io,samchrisinger/osf.io,emetsger/osf.io,emetsger/osf.io,alexschiller/osf.io,kch8qx/osf.io,caneruguz/osf.io,HalcyonChimera/osf.io,pattisdr/osf.io,aaxelb/osf.io,hmoco/osf.io,chennan47/osf.io,mluke93/osf.io,mluo613/osf.io,leb2dg/osf.io,CenterForOpenScience/osf.io,mluke93/osf.io,kwierman/osf.io,alexschiller/osf.io,Johnetordoff/osf.io,caseyrollins/osf.io,icereval/osf.io,crcresearch/osf.io,DanielSBrown/osf.io,TomBaxter/osf.io,rdhyee/osf.io,caneruguz/osf.io,Nesiehr/osf.io,mluo613/osf.io,erinspace/osf.io,mfraezz/osf.io,mfraezz/osf.io,jnayak1/osf.io,jnayak1/osf.io,monikagrabowska/osf.io,cslzchen/osf.io,mluo613/osf.io,cslzchen/osf.io,leb2dg/osf.io,TomBaxter/osf.io,asanfilippo7/osf.io,SSJohns/osf.io,caseyrollins/osf.io,kch8qx/osf.io,leb2dg/osf.io,RomanZWang/osf.io,acshi/osf.io,icereval/osf.io,zachjanicki/osf.io,adlius/osf.io,saradbowman/osf.io,Johnetordoff/osf.io,asanfilippo7/osf.io,monikagrabowska/osf.io,chrisseto/osf.io,pattisdr/osf.io,zamattiac/osf.io,kch8qx/osf.io,mattclark/osf.io,cwisecarver/osf.io,mluo613/osf.io,TomHeatwole/osf.io,TomHeatwole/osf.io,caseyrollins/osf.io,pattisdr/osf.io,brianjgeiger/osf.io,Nesiehr/osf.io,baylee-d/osf.io,doublebits/osf.io,acshi/osf.io,abought/osf.io,crcresearch/osf.io,saradbowman/osf.io,brianjgeiger/osf.io,monikagrabowska/osf.io,cwisecarver/osf.io,emetsger/osf.io,wearpants/osf.io,zamattiac/osf.io,abought/osf.io,Johnetordoff/osf.io,felliott/osf.io,sloria/osf.io,doublebits/osf.io,felliott/osf.io,laurenrevere/osf.io,chrisseto/osf.io,asanfilippo7/osf.io,RomanZWang/osf.io,amyshi188/osf.io,aaxelb/osf.io,abought/osf.io,hmoco/osf.io,rdhyee/osf.io,alexschiller/osf.io,erinspace/osf.io,laurenrevere/osf.io,chrisseto/osf.io,TomHeatwole/osf.io,doublebits/osf.io,SSJohns/osf.io,samchrisinger/osf.io,DanielSBrown/osf.io,mattclark/osf.io,chennan47/osf.io,DanielSBrown/osf.io,kch8qx/osf.io,HalcyonChimera/osf.io,asanfilippo7/osf.io,wearpants/osf.io,abought/osf.io,laurenrevere/osf.io,hmoco/osf.io,acshi/osf.io,emetsger/osf.io,rdhyee/osf.io,brianjgeiger/osf.io,brianjgeiger/osf.io,adlius/osf.io,amyshi188/osf.io,monikagrabowska/osf.io,SSJohns/osf.io,wearpants/osf.io,sloria/osf.io,SSJohns/osf.io,binoculars/osf.io,aaxelb/osf.io,Nesiehr/osf.io,kwierman/osf.io,aaxelb/osf.io,mluke93/osf.io,cslzchen/osf.io,DanielSBrown/osf.io,Nesiehr/osf.io,leb2dg/osf.io,hmoco/osf.io,CenterForOpenScience/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,baylee-d/osf.io,cwisecarver/osf.io,binoculars/osf.io,RomanZWang/osf.io,kwierman/osf.io,samchrisinger/osf.io,HalcyonChimera/osf.io,adlius/osf.io,chennan47/osf.io,adlius/osf.io,monikagrabowska/osf.io,amyshi188/osf.io,sloria/osf.io,mfraezz/osf.io,cslzchen/osf.io,cwisecarver/osf.io,caneruguz/osf.io,mluke93/osf.io,kwierman/osf.io,erinspace/osf.io,icereval/osf.io,chrisseto/osf.io,kch8qx/osf.io,acshi/osf.io,alexschiller/osf.io,TomHeatwole/osf.io,zachjanicki/osf.io,jnayak1/osf.io,CenterForOpenScience/osf.io,mattclark/osf.io,zachjanicki/osf.io,zachjanicki/osf.io,doublebits/osf.io,doublebits/osf.io,crcresearch/osf.io,samchrisinger/osf.io,felliott/osf.io,felliott/osf.io,rdhyee/osf.io,zamattiac/osf.io
|
Add script for ensuring log backrefs are correct
|
# -*- coding: utf-8 -*-
from website.app import init_app
from website import models
from modularodm.storedobject import ensure_backrefs
def main():
init_app(routes=False)
for record in models.Node.find():
ensure_backrefs(record, ['logs'])
print('Done.')
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script for ensuring log backrefs are correct<commit_after>
|
# -*- coding: utf-8 -*-
from website.app import init_app
from website import models
from modularodm.storedobject import ensure_backrefs
def main():
init_app(routes=False)
for record in models.Node.find():
ensure_backrefs(record, ['logs'])
print('Done.')
if __name__ == "__main__":
main()
|
Add script for ensuring log backrefs are correct# -*- coding: utf-8 -*-
from website.app import init_app
from website import models
from modularodm.storedobject import ensure_backrefs
def main():
init_app(routes=False)
for record in models.Node.find():
ensure_backrefs(record, ['logs'])
print('Done.')
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script for ensuring log backrefs are correct<commit_after># -*- coding: utf-8 -*-
from website.app import init_app
from website import models
from modularodm.storedobject import ensure_backrefs
def main():
init_app(routes=False)
for record in models.Node.find():
ensure_backrefs(record, ['logs'])
print('Done.')
if __name__ == "__main__":
main()
|
|
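A hedged sketch of the same repair applied to a single node, useful for spot checks. It reuses the imports from the script above; using Node.load as the lookup accessor is an assumption about the modular-odm API.

from website.app import init_app
from website import models
from modularodm.storedobject import ensure_backrefs

def fix_one(node_id):
    """Repair the 'logs' back-references for one node only."""
    init_app(routes=False)
    node = models.Node.load(node_id)   # lookup accessor is an assumption
    ensure_backrefs(node, ['logs'])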
b8ec0533e671651a72e3bd06295a56913b9e9e64
|
test/multiple_invocations_test.py
|
test/multiple_invocations_test.py
|
# Copyright (c) 2012 - 2014 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from jenkinsflow.flow import serial
from .framework import mock_api
def test_multiple_invocations_immediate():
with mock_api.api(__file__) as api:
api.flow_job()
_params = (('password', '', 'Some password'), ('s1', '', 'Some string argument'))
api.job('j1', exec_time=0.01, max_fails=0, expect_invocations=2, expect_order=1, params=_params)
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=1) as ctrl1:
ctrl1.invoke('j1', password='a', s1='b')
ctrl1.invoke('j1', password='something else', s1='asdasdasdasdad')
|
Test multiple invocations of same job in one flow
|
Test multiple invocations of same job in one flow
|
Python
|
bsd-3-clause
|
lhupfeldt/jenkinsflow,lechat/jenkinsflow,lechat/jenkinsflow,lhupfeldt/jenkinsflow,lechat/jenkinsflow,lhupfeldt/jenkinsflow,lechat/jenkinsflow,lhupfeldt/jenkinsflow
|
Test multiple invocations of same job in one flow
|
# Copyright (c) 2012 - 2014 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from jenkinsflow.flow import serial
from .framework import mock_api
def test_multiple_invocations_immediate():
with mock_api.api(__file__) as api:
api.flow_job()
_params = (('password', '', 'Some password'), ('s1', '', 'Some string argument'))
api.job('j1', exec_time=0.01, max_fails=0, expect_invocations=2, expect_order=1, params=_params)
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=1) as ctrl1:
ctrl1.invoke('j1', password='a', s1='b')
ctrl1.invoke('j1', password='something else', s1='asdasdasdasdad')
|
<commit_before><commit_msg>Test multiple invocations of same job in one flow<commit_after>
|
# Copyright (c) 2012 - 2014 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from jenkinsflow.flow import serial
from .framework import mock_api
def test_multiple_invocations_immediate():
with mock_api.api(__file__) as api:
api.flow_job()
_params = (('password', '', 'Some password'), ('s1', '', 'Some string argument'))
api.job('j1', exec_time=0.01, max_fails=0, expect_invocations=2, expect_order=1, params=_params)
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=1) as ctrl1:
ctrl1.invoke('j1', password='a', s1='b')
ctrl1.invoke('j1', password='something else', s1='asdasdasdasdad')
|
Test multiple invocations of same job in one flow# Copyright (c) 2012 - 2014 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from jenkinsflow.flow import serial
from .framework import mock_api
def test_multiple_invocations_immediate():
with mock_api.api(__file__) as api:
api.flow_job()
_params = (('password', '', 'Some password'), ('s1', '', 'Some string argument'))
api.job('j1', exec_time=0.01, max_fails=0, expect_invocations=2, expect_order=1, params=_params)
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=1) as ctrl1:
ctrl1.invoke('j1', password='a', s1='b')
ctrl1.invoke('j1', password='something else', s1='asdasdasdasdad')
|
<commit_before><commit_msg>Test multiple invocations of same job in one flow<commit_after># Copyright (c) 2012 - 2014 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from jenkinsflow.flow import serial
from .framework import mock_api
def test_multiple_invocations_immediate():
with mock_api.api(__file__) as api:
api.flow_job()
_params = (('password', '', 'Some password'), ('s1', '', 'Some string argument'))
api.job('j1', exec_time=0.01, max_fails=0, expect_invocations=2, expect_order=1, params=_params)
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=1) as ctrl1:
ctrl1.invoke('j1', password='a', s1='b')
ctrl1.invoke('j1', password='something else', s1='asdasdasdasdad')
|
|
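The pattern the test exercises, written as it would look against a live Jenkins rather than the mock framework. How the api object is obtained depends on the jenkinsflow wrapper in use, so it is left as a parameter; the serial/invoke calls mirror the test above.

from jenkinsflow.flow import serial

def run_flow(api, prefix=''):
    # Two invocations of the same job in one flow, each with its own parameters.
    with serial(api, timeout=70, job_name_prefix=prefix, report_interval=1) as ctrl:
        ctrl.invoke('j1', password='a', s1='b')
        ctrl.invoke('j1', password='something else', s1='asdasdasdasdad')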
7f143126a490e361a3495ecb8111572db23e0a56
|
serfnode/handler/file_utils.py
|
serfnode/handler/file_utils.py
|
import os
from tempfile import mkstemp
import time
class atomic_write(object):
"""Perform an atomic write to a file.
Use as::
with atomic_write('/my_file') as f:
f.write('foo')
"""
def __init__(self, filepath):
"""
:type filepath: str
"""
self.filepath = filepath
def __enter__(self):
"""
:rtype: File
"""
_, self.temp = mkstemp(dir=os.getcwd())
self.f = open(self.temp, 'w')
return self.f
def __exit__(self, exc_type, exc_val, exc_tb):
self.f.close()
if exc_type is None:
os.rename(self.temp, self.filepath)
def wait_for_file(filepath, sleep_interval=0.1):
"""Wait for the existence of a file.
Warning: use ``atomic_write`` to write the file, since this function
doesn't check that the file is complete.
:type filepath: str
:type sleep_interval: float
:rtype: None
"""
while not os.path.exists(filepath):
time.sleep(sleep_interval)
|
Add module for atomic file writes and waits
|
Add module for atomic file writes and waits
|
Python
|
mit
|
waltermoreira/serfnode,waltermoreira/serfnode,waltermoreira/serfnode
|
Add module for atomic file writes and waits
|
import os
from tempfile import mkstemp
import time
class atomic_write(object):
"""Perform an atomic write to a file.
Use as::
with atomic_write('/my_file') as f:
f.write('foo')
"""
def __init__(self, filepath):
"""
:type filepath: str
"""
self.filepath = filepath
def __enter__(self):
"""
:rtype: File
"""
_, self.temp = mkstemp(dir=os.getcwd())
self.f = open(self.temp, 'w')
return self.f
def __exit__(self, exc_type, exc_val, exc_tb):
self.f.close()
if exc_type is None:
os.rename(self.temp, self.filepath)
def wait_for_file(filepath, sleep_interval=0.1):
"""Wait for the existence of a file.
Warning: use ``atomic_write`` to write the file, since this function
doesn't check that the file is complete.
:type filepath: str
:type sleep_interval: float
:rtype: None
"""
while not os.path.exists(filepath):
time.sleep(sleep_interval)
|
<commit_before><commit_msg>Add module for atomic file writes and waits<commit_after>
|
import os
from tempfile import mkstemp
import time
class atomic_write(object):
"""Perform an atomic write to a file.
Use as::
with atomic_write('/my_file') as f:
f.write('foo')
"""
def __init__(self, filepath):
"""
:type filepath: str
"""
self.filepath = filepath
def __enter__(self):
"""
:rtype: File
"""
_, self.temp = mkstemp(dir=os.getcwd())
self.f = open(self.temp, 'w')
return self.f
def __exit__(self, exc_type, exc_val, exc_tb):
self.f.close()
if exc_type is None:
os.rename(self.temp, self.filepath)
def wait_for_file(filepath, sleep_interval=0.1):
"""Wait for the existence of a file.
Warning: use ``atomic_write`` to write the file, since this function
doesn't check that the file is complete.
:type filepath: str
:type sleep_interval: float
:rtype: None
"""
while not os.path.exists(filepath):
time.sleep(sleep_interval)
|
Add module for atomic file writes and waitsimport os
from tempfile import mkstemp
import time
class atomic_write(object):
"""Perform an atomic write to a file.
Use as::
with atomic_write('/my_file') as f:
f.write('foo')
"""
def __init__(self, filepath):
"""
:type filepath: str
"""
self.filepath = filepath
def __enter__(self):
"""
:rtype: File
"""
_, self.temp = mkstemp(dir=os.getcwd())
self.f = open(self.temp, 'w')
return self.f
def __exit__(self, exc_type, exc_val, exc_tb):
self.f.close()
if exc_type is None:
os.rename(self.temp, self.filepath)
def wait_for_file(filepath, sleep_interval=0.1):
"""Wait for the existence of a file.
Warning: use ``atomic_write`` to write the file, since this function
doesn't check that the file is complete.
:type filepath: str
:type sleep_interval: float
:rtype: None
"""
while not os.path.exists(filepath):
time.sleep(sleep_interval)
|
<commit_before><commit_msg>Add module for atomic file writes and waits<commit_after>import os
from tempfile import mkstemp
import time
class atomic_write(object):
"""Perform an atomic write to a file.
Use as::
with atomic_write('/my_file') as f:
f.write('foo')
"""
def __init__(self, filepath):
"""
:type filepath: str
"""
self.filepath = filepath
def __enter__(self):
"""
:rtype: File
"""
_, self.temp = mkstemp(dir=os.getcwd())
self.f = open(self.temp, 'w')
return self.f
def __exit__(self, exc_type, exc_val, exc_tb):
self.f.close()
if exc_type is None:
os.rename(self.temp, self.filepath)
def wait_for_file(filepath, sleep_interval=0.1):
"""Wait for the existence of a file.
Warning: use ``atomic_write`` to write the file, since this function
doesn't check that the file is complete.
:type filepath: str
:type sleep_interval: float
:rtype: None
"""
while not os.path.exists(filepath):
time.sleep(sleep_interval)
|
|
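A producer/consumer sketch using the two helpers above; the import path and file name are illustrative. The target path stays in the working directory so the final os.rename never crosses filesystems.

from file_utils import atomic_write, wait_for_file   # import path is illustrative

# Writer side: the file only appears under its final name once the write has finished.
with atomic_write('ready.json') as f:
    f.write('{"status": "done"}')

# Reader side (typically another process): blocks until the writer has renamed the file.
wait_for_file('ready.json')
with open('ready.json') as f:
    print(f.read())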
2bb4d373554cc62f3c55840cc0b4ecaf93fe1961
|
server/accounts/serializers.py
|
server/accounts/serializers.py
|
import HTMLParser
import random
import re
from django.contrib.auth.models import User
from rest_framework import serializers
from . import models
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'password', 'first_name',
'last_name', 'email')
read_only_fields = ('id',)
write_only_fields = ('password',)
def restore_object(self, attrs, instance=None):
user = super(UserSerializer, self).restore_object(attrs, instance)
user.set_password(attrs['password'])
return user
|
Add the serialization functions for the User objects.
|
Add the serialization functions for the User objects.
|
Python
|
agpl-3.0
|
TomDataworks/angular-inventory,TomDataworks/angular-inventory
|
Add the serialization functions for the User objects.
|
import HTMLParser
import random
import re
from django.contrib.auth.models import User
from rest_framework import serializers
from . import models
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'password', 'first_name',
'last_name', 'email')
read_only_fields = ('id',)
write_only_fields = ('password',)
def restore_object(self, attrs, instance=None):
user = super(UserSerializer, self).restore_object(attrs, instance)
user.set_password(attrs['password'])
return user
|
<commit_before><commit_msg>Add the serialization functions for the User objects.<commit_after>
|
import HTMLParser
import random
import re
from django.contrib.auth.models import User
from rest_framework import serializers
from . import models
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'password', 'first_name',
'last_name', 'email')
read_only_fields = ('id',)
write_only_fields = ('password',)
def restore_object(self, attrs, instance=None):
user = super(UserSerializer, self).restore_object(attrs, instance)
user.set_password(attrs['password'])
return user
|
Add the serialization functions for the User objects.import HTMLParser
import random
import re
from django.contrib.auth.models import User
from rest_framework import serializers
from . import models
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'password', 'first_name',
'last_name', 'email')
read_only_fields = ('id',)
write_only_fields = ('password',)
def restore_object(self, attrs, instance=None):
user = super(UserSerializer, self).restore_object(attrs, instance)
user.set_password(attrs['password'])
return user
|
<commit_before><commit_msg>Add the serialization functions for the User objects.<commit_after>import HTMLParser
import random
import re
from django.contrib.auth.models import User
from rest_framework import serializers
from . import models
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'password', 'first_name',
'last_name', 'email')
read_only_fields = ('id',)
write_only_fields = ('password',)
def restore_object(self, attrs, instance=None):
user = super(UserSerializer, self).restore_object(attrs, instance)
user.set_password(attrs['password'])
return user
|
|
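A sketch of creating a user through the serializer above. restore_object is the Django REST framework 2.x API, so the flow below follows that version; the field values are placeholders.

serializer = UserSerializer(data={
    'username': 'alice',
    'password': 's3cret',
    'first_name': 'Alice',
    'last_name': 'Doe',
    'email': 'alice@example.com',
})
if serializer.is_valid():
    user = serializer.save()   # persists the instance built by restore_object, password already hashed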
7d7d50aa23b2694c47d82c77043cb81e1255fd04
|
bluebottle/projects/migrations/0043_remove_payout_status_sourcing_projects.py
|
bluebottle/projects/migrations/0043_remove_payout_status_sourcing_projects.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-10-30 21:49
from __future__ import unicode_literals
from django.db import migrations
def forward(apps, schema_editor):
Project = apps.get_model('projects', 'Project')
Project.objects.filter(amount_asked=0, payout_status__isnull=False).update(payout_status=None)
def backward(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('projects', '0042_merge_20170920_1332'),
]
operations = [
migrations.RunPython(forward, backward)
]
|
Add migration to remove payout status from sourcing projects
|
Add migration to remove payout status from sourcing projects
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
Add migration to remove payout status from sourcing projects
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-10-30 21:49
from __future__ import unicode_literals
from django.db import migrations
def forward(apps, schema_editor):
Project = apps.get_model('projects', 'Project')
Project.objects.filter(amount_asked=0, payout_status__isnull=False).update(payout_status=None)
def backward(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('projects', '0042_merge_20170920_1332'),
]
operations = [
migrations.RunPython(forward, backward)
]
|
<commit_before><commit_msg>Add migration to remove payout status from sourcing projects<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-10-30 21:49
from __future__ import unicode_literals
from django.db import migrations
def forward(apps, schema_editor):
Project = apps.get_model('projects', 'Project')
Project.objects.filter(amount_asked=0, payout_status__isnull=False).update(payout_status=None)
def backward(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('projects', '0042_merge_20170920_1332'),
]
operations = [
migrations.RunPython(forward, backward)
]
|
Add migration to remove payout status from sourcing projects# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-10-30 21:49
from __future__ import unicode_literals
from django.db import migrations
def forward(apps, schema_editor):
Project = apps.get_model('projects', 'Project')
Project.objects.filter(amount_asked=0, payout_status__isnull=False).update(payout_status=None)
def backward(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('projects', '0042_merge_20170920_1332'),
]
operations = [
migrations.RunPython(forward, backward)
]
|
<commit_before><commit_msg>Add migration to remove payout status from sourcing projects<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-10-30 21:49
from __future__ import unicode_literals
from django.db import migrations
def forward(apps, schema_editor):
Project = apps.get_model('projects', 'Project')
Project.objects.filter(amount_asked=0, payout_status__isnull=False).update(payout_status=None)
def backward(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('projects', '0042_merge_20170920_1332'),
]
operations = [
migrations.RunPython(forward, backward)
]
|
|
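A quick sanity check that could be run in a Django shell after migrating. The filter mirrors the forward migration above; the model import path is an assumption.

from bluebottle.projects.models import Project   # import path is an assumption

leftover = Project.objects.filter(amount_asked=0, payout_status__isnull=False).count()
assert leftover == 0, '{} sourcing projects still carry a payout status'.format(leftover)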
ca90897d3e31fa7bab4d79c3d3ab61eb0fc53be9
|
tests/unit/tspapi/api_test.py
|
tests/unit/tspapi/api_test.py
|
#!/usr/bin/env python
#
# Copyright 2016 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase
from tspapi import API
from datetime import datetime
class ApiTest(TestCase):
def test_parse_timestamp_date_string_yymmddhhmm(self):
s = '2016-01-27 3:38AM'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 1453894680)
def test_parse_timestamp_date_string_yymmddhhmmss(self):
s = '2016-01-27 3:38:25AM'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 1453894705)
def test_parse_timestamp_date_string_yymmddHHMM(self):
s = '2003-08-16 20:06:01'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 1061089561)
def test_parse_timestamp_date_string_yymmddHHMMSS(self):
s = '2001-03-27 19:07:32'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 985748852)
def test_parse_timestamp_date_string_epoch_time(self):
s = '1466704787'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 1466704787)
|
Add unit tests to check parsing of timestamps
|
Add unit tests to check parsing of timestamps
|
Python
|
apache-2.0
|
jdgwartney/pulse-api-python
|
Add unit tests to check parsing of timestamps
|
#!/usr/bin/env python
#
# Copyright 2016 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase
from tspapi import API
from datetime import datetime
class ApiTest(TestCase):
def test_parse_timestamp_date_string_yymmddhhmm(self):
s = '2016-01-27 3:38AM'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 1453894680)
def test_parse_timestamp_date_string_yymmddhhmmss(self):
s = '2016-01-27 3:38:25AM'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 1453894705)
def test_parse_timestamp_date_string_yymmddHHMM(self):
s = '2003-08-16 20:06:01'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 1061089561)
def test_parse_timestamp_date_string_yymmddHHMMSS(self):
s = '2001-03-27 19:07:32'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 985748852)
def test_parse_timestamp_date_string_epoch_time(self):
s = '1466704787'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 1466704787)
|
<commit_before><commit_msg>Add unit tests to check parsing of timestamps<commit_after>
|
#!/usr/bin/env python
#
# Copyright 2016 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase
from tspapi import API
from datetime import datetime
class ApiTest(TestCase):
def test_parse_timestamp_date_string_yymmddhhmm(self):
s = '2016-01-27 3:38AM'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 1453894680)
def test_parse_timestamp_date_string_yymmddhhmmss(self):
s = '2016-01-27 3:38:25AM'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 1453894705)
def test_parse_timestamp_date_string_yymmddHHMM(self):
s = '2003-08-16 20:06:01'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 1061089561)
def test_parse_timestamp_date_string_yymmddHHMMSS(self):
s = '2001-03-27 19:07:32'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 985748852)
def test_parse_timestamp_date_string_epoch_time(self):
s = '1466704787'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 1466704787)
|
Add unit tests to check parsing of timestamps#!/usr/bin/env python
#
# Copyright 2016 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase
from tspapi import API
from datetime import datetime
class ApiTest(TestCase):
def test_parse_timestamp_date_string_yymmddhhmm(self):
s = '2016-01-27 3:38AM'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 1453894680)
def test_parse_timestamp_date_string_yymmddhhmmss(self):
s = '2016-01-27 3:38:25AM'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 1453894705)
def test_parse_timestamp_date_string_yymmddHHMM(self):
s = '2003-08-16 20:06:01'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 1061089561)
def test_parse_timestamp_date_string_yymmddHHMMSS(self):
s = '2001-03-27 19:07:32'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 985748852)
def test_parse_timestamp_date_string_epoch_time(self):
s = '1466704787'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 1466704787)
|
<commit_before><commit_msg>Add unit tests to check parsing of timestamps<commit_after>#!/usr/bin/env python
#
# Copyright 2016 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase
from tspapi import API
from datetime import datetime
class ApiTest(TestCase):
def test_parse_timestamp_date_string_yymmddhhmm(self):
s = '2016-01-27 3:38AM'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 1453894680)
def test_parse_timestamp_date_string_yymmddhhmmss(self):
s = '2016-01-27 3:38:25AM'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 1453894705)
def test_parse_timestamp_date_string_yymmddHHMM(self):
s = '2003-08-16 20:06:01'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 1061089561)
def test_parse_timestamp_date_string_yymmddHHMMSS(self):
s = '2001-03-27 19:07:32'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 985748852)
def test_parse_timestamp_date_string_epoch_time(self):
s = '1466704787'
d = API._parse_time_date(s)
self.assertEqual(type(d), int)
self.assertEqual(d, 1466704787)
|
|
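The parser the tests exercise, called directly. _parse_time_date is private tspapi API, and the expected epoch values depend on the same timezone handling the assertions above rely on.

from tspapi import API

print(API._parse_time_date('2016-01-27 3:38AM'))   # 1453894680 in the tests' environment
print(API._parse_time_date('1466704787'))          # epoch strings pass through unchanged: 1466704787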
9eaf22a0fb928bb51090a25302d04219df3cdce6
|
serial-selector-test.py
|
serial-selector-test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, serial, wx
from serial.tools import list_ports
class COMPort:
def __init__(self, id, name):
self.id = id
self.name = name
class COMPortSelectForm(wx.Frame):
def __init__(self):
# Create non-resizable frame
wx.Frame.__init__(self, None, title="Select Arduino COM port", style=wx.DEFAULT_FRAME_STYLE^wx.RESIZE_BORDER, size=(200, 135))
self.Centre()
panel = wx.Panel(self, wx.ID_ANY)
# Create a drop down menu with all available COM ports
comPortChoices = []
combobox = wx.ComboBox(panel, size=wx.DefaultSize, choices=comPortChoices, style=wx.CB_READONLY)
self.combobox = combobox
for comport in list_serial_ports():
combobox.Append(comport.name, comport)
combobox.SetSelection(0)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(wx.StaticText(panel, label="Select Arduino COM port"), flag=wx.LEFT|wx.RIGHT|wx.TOP|wx.ALIGN_CENTER_HORIZONTAL, border=10)
sizer.Add(combobox, 0, flag=wx.LEFT|wx.RIGHT|wx.TOP|wx.EXPAND, border=10)
selectButton = wx.Button(panel, label="Select", size=(-1, 30))
sizer.Add(selectButton, 0, flag=wx.ALL|wx.EXPAND, border=10)
selectButton.Bind(wx.EVT_BUTTON, self.onClick)
panel.SetSizer(sizer)
def onClick(self, event):
print "You selected: " + self.combobox.GetStringSelection()
# Source: http://stackoverflow.com/a/14224477
def list_serial_ports():
# Windows
if os.name == "nt":
# Scan for available ports
available = []
for i in range(256):
try:
s = serial.Serial(i)
available.append(COMPort(i, "COM"+str(i + 1)))
s.close()
except serial.SerialException:
pass
return available
else:
# Mac / Linux
return [COMPort(port[0], port[0]) for port in list_ports.comports()]
if __name__ == "__main__":
app = wx.App(False)
frame = COMPortSelectForm()
frame.Show()
app.MainLoop()
|
Create COM port selection GUI.
|
Create COM port selection GUI.
|
Python
|
bsd-3-clause
|
vegarbg/ksp-telemetry
|
Create COM port selection GUI.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, serial, wx
from serial.tools import list_ports
class COMPort:
def __init__(self, id, name):
self.id = id
self.name = name
class COMPortSelectForm(wx.Frame):
def __init__(self):
# Create non-resizable frame
wx.Frame.__init__(self, None, title="Select Arduino COM port", style=wx.DEFAULT_FRAME_STYLE^wx.RESIZE_BORDER, size=(200, 135))
self.Centre()
panel = wx.Panel(self, wx.ID_ANY)
# Create a drop down menu with all available COM ports
comPortChoices = []
combobox = wx.ComboBox(panel, size=wx.DefaultSize, choices=comPortChoices, style=wx.CB_READONLY)
self.combobox = combobox
for comport in list_serial_ports():
combobox.Append(comport.name, comport)
combobox.SetSelection(0)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(wx.StaticText(panel, label="Select Arduino COM port"), flag=wx.LEFT|wx.RIGHT|wx.TOP|wx.ALIGN_CENTER_HORIZONTAL, border=10)
sizer.Add(combobox, 0, flag=wx.LEFT|wx.RIGHT|wx.TOP|wx.EXPAND, border=10)
selectButton = wx.Button(panel, label="Select", size=(-1, 30))
sizer.Add(selectButton, 0, flag=wx.ALL|wx.EXPAND, border=10)
selectButton.Bind(wx.EVT_BUTTON, self.onClick)
panel.SetSizer(sizer)
def onClick(self, event):
print "You selected: " + self.combobox.GetStringSelection()
# Source: http://stackoverflow.com/a/14224477
def list_serial_ports():
# Windows
if os.name == "nt":
# Scan for available ports
available = []
for i in range(256):
try:
s = serial.Serial(i)
available.append(COMPort(i, "COM"+str(i + 1)))
s.close()
except serial.SerialException:
pass
return available
else:
# Mac / Linux
return [COMPort(port[0], port[0]) for port in list_ports.comports()]
if __name__ == "__main__":
app = wx.App(False)
frame = COMPortSelectForm()
frame.Show()
app.MainLoop()
|
<commit_before><commit_msg>Create COM port selection GUI.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, serial, wx
from serial.tools import list_ports
class COMPort:
def __init__(self, id, name):
self.id = id
self.name = name
class COMPortSelectForm(wx.Frame):
def __init__(self):
# Create non-resizable frame
wx.Frame.__init__(self, None, title="Select Arduino COM port", style=wx.DEFAULT_FRAME_STYLE^wx.RESIZE_BORDER, size=(200, 135))
self.Centre()
panel = wx.Panel(self, wx.ID_ANY)
# Create a drop down menu with all available COM ports
comPortChoices = []
combobox = wx.ComboBox(panel, size=wx.DefaultSize, choices=comPortChoices, style=wx.CB_READONLY)
self.combobox = combobox
for comport in list_serial_ports():
combobox.Append(comport.name, comport)
combobox.SetSelection(0)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(wx.StaticText(panel, label="Select Arduino COM port"), flag=wx.LEFT|wx.RIGHT|wx.TOP|wx.ALIGN_CENTER_HORIZONTAL, border=10)
sizer.Add(combobox, 0, flag=wx.LEFT|wx.RIGHT|wx.TOP|wx.EXPAND, border=10)
selectButton = wx.Button(panel, label="Select", size=(-1, 30))
sizer.Add(selectButton, 0, flag=wx.ALL|wx.EXPAND, border=10)
selectButton.Bind(wx.EVT_BUTTON, self.onClick)
panel.SetSizer(sizer)
def onClick(self, event):
print "You selected: " + self.combobox.GetStringSelection()
# Source: http://stackoverflow.com/a/14224477
def list_serial_ports():
# Windows
if os.name == "nt":
# Scan for available ports
available = []
for i in range(256):
try:
s = serial.Serial(i)
available.append(COMPort(i, "COM"+str(i + 1)))
s.close()
except serial.SerialException:
pass
return available
else:
# Mac / Linux
return [COMPort(port[0], port[0]) for port in list_ports.comports()]
if __name__ == "__main__":
app = wx.App(False)
frame = COMPortSelectForm()
frame.Show()
app.MainLoop()
|
Create COM port selection GUI.#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, serial, wx
from serial.tools import list_ports
class COMPort:
def __init__(self, id, name):
self.id = id
self.name = name
class COMPortSelectForm(wx.Frame):
def __init__(self):
# Create non-resizable frame
wx.Frame.__init__(self, None, title="Select Arduino COM port", style=wx.DEFAULT_FRAME_STYLE^wx.RESIZE_BORDER, size=(200, 135))
self.Centre()
panel = wx.Panel(self, wx.ID_ANY)
# Create a drop down menu with all available COM ports
comPortChoices = []
combobox = wx.ComboBox(panel, size=wx.DefaultSize, choices=comPortChoices, style=wx.CB_READONLY)
self.combobox = combobox
for comport in list_serial_ports():
combobox.Append(comport.name, comport)
combobox.SetSelection(0)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(wx.StaticText(panel, label="Select Arduino COM port"), flag=wx.LEFT|wx.RIGHT|wx.TOP|wx.ALIGN_CENTER_HORIZONTAL, border=10)
sizer.Add(combobox, 0, flag=wx.LEFT|wx.RIGHT|wx.TOP|wx.EXPAND, border=10)
selectButton = wx.Button(panel, label="Select", size=(-1, 30))
sizer.Add(selectButton, 0, flag=wx.ALL|wx.EXPAND, border=10)
selectButton.Bind(wx.EVT_BUTTON, self.onClick)
panel.SetSizer(sizer)
def onClick(self, event):
print "You selected: " + self.combobox.GetStringSelection()
# Source: http://stackoverflow.com/a/14224477
def list_serial_ports():
# Windows
if os.name == "nt":
# Scan for available ports
available = []
for i in range(256):
try:
s = serial.Serial(i)
available.append(COMPort(i, "COM"+str(i + 1)))
s.close()
except serial.SerialException:
pass
return available
else:
# Mac / Linux
return [COMPort(port[0], port[0]) for port in list_ports.comports()]
if __name__ == "__main__":
app = wx.App(False)
frame = COMPortSelectForm()
frame.Show()
app.MainLoop()
|
<commit_before><commit_msg>Create COM port selection GUI.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, serial, wx
from serial.tools import list_ports
class COMPort:
def __init__(self, id, name):
self.id = id
self.name = name
class COMPortSelectForm(wx.Frame):
def __init__(self):
# Create non-resizable frame
wx.Frame.__init__(self, None, title="Select Arduino COM port", style=wx.DEFAULT_FRAME_STYLE^wx.RESIZE_BORDER, size=(200, 135))
self.Centre()
panel = wx.Panel(self, wx.ID_ANY)
# Create a drop down menu with all available COM ports
comPortChoices = []
combobox = wx.ComboBox(panel, size=wx.DefaultSize, choices=comPortChoices, style=wx.CB_READONLY)
self.combobox = combobox
for comport in list_serial_ports():
combobox.Append(comport.name, comport)
combobox.SetSelection(0)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(wx.StaticText(panel, label="Select Arduino COM port"), flag=wx.LEFT|wx.RIGHT|wx.TOP|wx.ALIGN_CENTER_HORIZONTAL, border=10)
sizer.Add(combobox, 0, flag=wx.LEFT|wx.RIGHT|wx.TOP|wx.EXPAND, border=10)
selectButton = wx.Button(panel, label="Select", size=(-1, 30))
sizer.Add(selectButton, 0, flag=wx.ALL|wx.EXPAND, border=10)
selectButton.Bind(wx.EVT_BUTTON, self.onClick)
panel.SetSizer(sizer)
def onClick(self, event):
print "You selected: " + self.combobox.GetStringSelection()
# Source: http://stackoverflow.com/a/14224477
def list_serial_ports():
# Windows
if os.name == "nt":
# Scan for available ports
available = []
for i in range(256):
try:
s = serial.Serial(i)
available.append(COMPort(i, "COM"+str(i + 1)))
s.close()
except serial.SerialException:
pass
return available
else:
# Mac / Linux
return [COMPort(port[0], port[0]) for port in list_ports.comports()]
if __name__ == "__main__":
app = wx.App(False)
frame = COMPortSelectForm()
frame.Show()
app.MainLoop()
|
|
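A hedged sketch of doing something with the selection: opening the chosen port with pyserial from inside a click handler. It keeps the script's style; the baud rate and the use of the stored COMPort.id as the pyserial port argument are assumptions.

import serial   # already imported at the top of the script

def open_selected_port(combobox):
    comport = combobox.GetClientData(combobox.GetSelection())   # COMPort stored via Append()
    return serial.Serial(comport.id, baudrate=9600, timeout=1)  # baud rate is illustrative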
c7ac8b29ac433efeed99d121f182fe24248ff568
|
data_analysis/plot_generator.py
|
data_analysis/plot_generator.py
|
import sys
import pandas as pd
import matplotlib.pyplot as plt
colors = ["b", "g", "r", "c", "m", "y", "k"]
weeks = ["42", "43", "44", "45", "46", "47", "48", "49",
"50", "51", "52", "01", "02", "03",
"04", "05", "06", "07", "09", "10", "11", "12", "13", "14", "15", "16", "17"]
def plot_data(_range, data, color, label_name):
#plt.plot(_range, data, color+"o")
plt.plot(_range, data, color+"-", label=label_name)
plt.ylabel("Incidenza su 1000 persone")
plt.xlabel("Settimane")
_range = range(1, 29);
for f in range(1, len(sys.argv)):
_file = sys.argv[f]
document = pd.read_csv(_file)
labels = document["Settimana"]
data = document["Incidenza Totale"]
plot_data(_range, data, colors[f], labels[1][0:4])
plt.legend()
plt.xticks(_range, weeks, rotation="vertical")
plt.margins(0.2)
plt.subplots_adjust(bottom=0.15)
plt.show()
|
Add a simple script to plot data.
|
Add a simple script to plot data.
|
Python
|
mit
|
geektoni/Influenza-Like-Illness-Predictor,geektoni/Influenza-Like-Illness-Predictor
|
Add a simple script to plot data.
|
import sys
import pandas as pd
import matplotlib.pyplot as plt
colors = ["b", "g", "r", "c", "m", "y", "k"]
weeks = ["42", "43", "44", "45", "46", "47", "48", "49",
"50", "51", "52", "01", "02", "03",
"04", "05", "06", "07", "09", "10", "11", "12", "13", "14", "15", "16", "17"]
def plot_data(_range, data, color, label_name):
#plt.plot(_range, data, color+"o")
plt.plot(_range, data, color+"-", label=label_name)
plt.ylabel("Incidenza su 1000 persone")
plt.xlabel("Settimane")
_range = range(1, 29);
for f in range(1, len(sys.argv)):
_file = sys.argv[f]
document = pd.read_csv(_file)
labels = document["Settimana"]
data = document["Incidenza Totale"]
plot_data(_range, data, colors[f], labels[1][0:4])
plt.legend()
plt.xticks(_range, weeks, rotation="vertical")
plt.margins(0.2)
plt.subplots_adjust(bottom=0.15)
plt.show()
|
<commit_before><commit_msg>Add a simple script to plot data.<commit_after>
|
import sys
import pandas as pd
import matplotlib.pyplot as plt
colors = ["b", "g", "r", "c", "m", "y", "k"]
weeks = ["42", "43", "44", "45", "46", "47", "48", "49",
"50", "51", "52", "01", "02", "03",
"04", "05", "06", "07", "09", "10", "11", "12", "13", "14", "15", "16", "17"]
def plot_data(_range, data, color, label_name):
#plt.plot(_range, data, color+"o")
plt.plot(_range, data, color+"-", label=label_name)
plt.ylabel("Incidenza su 1000 persone")
plt.xlabel("Settimane")
_range = range(1, 29);
for f in range(1, len(sys.argv)):
_file = sys.argv[f]
document = pd.read_csv(_file)
labels = document["Settimana"]
data = document["Incidenza Totale"]
plot_data(_range, data, colors[f], labels[1][0:4])
plt.legend()
plt.xticks(_range, weeks, rotation="vertical")
plt.margins(0.2)
plt.subplots_adjust(bottom=0.15)
plt.show()
|
Add a simple script to plot data.import sys
import pandas as pd
import matplotlib.pyplot as plt
colors = ["b", "g", "r", "c", "m", "y", "k"]
weeks = ["42", "43", "44", "45", "46", "47", "48", "49",
"50", "51", "52", "01", "02", "03",
"04", "05", "06", "07", "09", "10", "11", "12", "13", "14", "15", "16", "17"]
def plot_data(_range, data, color, label_name):
#plt.plot(_range, data, color+"o")
plt.plot(_range, data, color+"-", label=label_name)
plt.ylabel("Incidenza su 1000 persone")
plt.xlabel("Settimane")
_range = range(1, 29);
for f in range(1, len(sys.argv)):
_file = sys.argv[f]
document = pd.read_csv(_file)
labels = document["Settimana"]
data = document["Incidenza Totale"]
plot_data(_range, data, colors[f], labels[1][0:4])
plt.legend()
plt.xticks(_range, weeks, rotation="vertical")
plt.margins(0.2)
plt.subplots_adjust(bottom=0.15)
plt.show()
|
<commit_before><commit_msg>Add a simple script to plot data.<commit_after>import sys
import pandas as pd
import matplotlib.pyplot as plt
colors = ["b", "g", "r", "c", "m", "y", "k"]
weeks = ["42", "43", "44", "45", "46", "47", "48", "49",
"50", "51", "52", "01", "02", "03",
"04", "05", "06", "07", "09", "10", "11", "12", "13", "14", "15", "16", "17"]
def plot_data(_range, data, color, label_name):
#plt.plot(_range, data, color+"o")
plt.plot(_range, data, color+"-", label=label_name)
plt.ylabel("Incidenza su 1000 persone")
plt.xlabel("Settimane")
_range = range(1, 29);
for f in range(1, len(sys.argv)):
_file = sys.argv[f]
document = pd.read_csv(_file)
labels = document["Settimana"]
data = document["Incidenza Totale"]
plot_data(_range, data, colors[f], labels[1][0:4])
plt.legend()
plt.xticks(_range, weeks, rotation="vertical")
plt.margins(0.2)
plt.subplots_adjust(bottom=0.15)
plt.show()
|
|
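An invocation sketch for the script above. Each CSV passed on the command line needs the two columns the script reads and one row per surveillance week, since the script plots against a fixed week axis; the two-row frame below only illustrates the expected column names, and the file names are placeholders.

# python plot_generator.py season-2016-17.csv season-2017-18.csv
import pandas as pd

demo = pd.DataFrame({
    'Settimana': ['2016-42', '2016-43'],   # week labels; the first four characters become the legend entry
    'Incidenza Totale': [1.2, 1.8],        # incidence per 1000 people
})
demo.to_csv('demo_season.csv', index=False)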
7919a8dfed814c4855a1b27290ab213cba03255e
|
py/base-7.py
|
py/base-7.py
|
class Solution(object):
def convertToBase7(self, num):
"""
:type num: int
:rtype: str
"""
if num == 0:
return "0"
neg = "-" if num < 0 else ""
num = abs(num)
ans = []
while num > 0:
ans.append(num % 7)
num /= 7
return neg + ''.join(map(str, ans[::-1]))
|
Add py solution for 504. Base 7
|
Add py solution for 504. Base 7
504. Base 7: https://leetcode.com/problems/base-7/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 504. Base 7
504. Base 7: https://leetcode.com/problems/base-7/
|
class Solution(object):
def convertToBase7(self, num):
"""
:type num: int
:rtype: str
"""
if num == 0:
return "0"
neg = "-" if num < 0 else ""
num = abs(num)
ans = []
while num > 0:
ans.append(num % 7)
num /= 7
return neg + ''.join(map(str, ans[::-1]))
|
<commit_before><commit_msg>Add py solution for 504. Base 7
504. Base 7: https://leetcode.com/problems/base-7/<commit_after>
|
class Solution(object):
def convertToBase7(self, num):
"""
:type num: int
:rtype: str
"""
if num == 0:
return "0"
neg = "-" if num < 0 else ""
num = abs(num)
ans = []
while num > 0:
ans.append(num % 7)
num /= 7
return neg + ''.join(map(str, ans[::-1]))
|
Add py solution for 504. Base 7
504. Base 7: https://leetcode.com/problems/base-7/class Solution(object):
def convertToBase7(self, num):
"""
:type num: int
:rtype: str
"""
if num == 0:
return "0"
neg = "-" if num < 0 else ""
num = abs(num)
ans = []
while num > 0:
ans.append(num % 7)
num /= 7
return neg + ''.join(map(str, ans[::-1]))
|
<commit_before><commit_msg>Add py solution for 504. Base 7
504. Base 7: https://leetcode.com/problems/base-7/<commit_after>class Solution(object):
def convertToBase7(self, num):
"""
:type num: int
:rtype: str
"""
if num == 0:
return "0"
neg = "-" if num < 0 else ""
num = abs(num)
ans = []
while num > 0:
ans.append(num % 7)
num /= 7
return neg + ''.join(map(str, ans[::-1]))
|
|
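Example calls for the solution above. The loop relies on Python 2 integer division (num /= 7); under Python 3 the same method would need num //= 7 to keep producing digits.

sol = Solution()
print(sol.convertToBase7(100))   # '202'
print(sol.convertToBase7(-7))    # '-10'
print(sol.convertToBase7(0))     # '0'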
f251f18df4a3a236e1f090d11931b5bd86f6bcc7
|
tests/test_file_handlers.py
|
tests/test_file_handlers.py
|
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import unittest
from unittest import mock
from web_scraper.core import file_handlers
def mocked_random_randint(*args, **kwargs):
"""this method will be used by the mock to replace random.randint"""
return 1
class TestSaveDataToFileFunction(unittest.TestCase):
def setUp(self):
self.data = ['hello world']
@mock.patch('web_scraper.core.file_handlers.random.randint', side_effect=mocked_random_randint)
def test_creates_a_file(self, mock_random):
"""save_data_to_file creates a file"""
file_handlers.save_data_to_file(self.data)
self.assertTrue(os.path.isfile('./data_1.csv'))
@mock.patch('web_scraper.core.file_handlers.random.randint', side_effect=mocked_random_randint)
def test_saves_data_to_file(self, mock_random):
"""save_data_to_file saves data to created file"""
file_handlers.save_data_to_file(self.data)
with open('./data_1.csv', 'r') as test_file:
data_from_file = test_file.read().replace('\n', '')
self.assertEqual('hello world', data_from_file)
@mock.patch('web_scraper.core.file_handlers.random.randint', side_effect=mocked_random_randint)
def test_returns_location_of_created_file(self, mock_random):
"""save_data_to_file returns the location of the created file"""
path = file_handlers.save_data_to_file(self.data)
self.assertEqual(os.path.abspath('./data_1.csv'), path)
@mock.patch('web_scraper.core.file_handlers.random.randint', side_effect=mocked_random_randint)
def test_takes_user_inputted_filename(self, mock_random):
"""save_data_to_file uses the filename given by the user when creating the file"""
filename = 'my_data'
file_handlers.save_data_to_file(self.data, filename=filename)
self.assertTrue(os.path.isfile('./my_data_1.csv'))
def tearDown(self):
if os.path.isfile('./my_data_1.csv'):
file = './my_data_1.csv'
elif os.path.isfile('./data_1.csv'):
file = './data_1.csv'
os.remove(file)
if __name__ == '__main__':
unittest.main()
|
Add tests for save_data_to_file function
|
Add tests for save_data_to_file function
|
Python
|
mit
|
Samuel-L/cli-ws,Samuel-L/cli-ws
|
Add tests for save_data_to_file function
|
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import unittest
from unittest import mock
from web_scraper.core import file_handlers
def mocked_random_randint(*args, **kwargs):
"""this method will be used by the mock to replace random.randint"""
return 1
class TestSaveDataToFileFunction(unittest.TestCase):
def setUp(self):
self.data = ['hello world']
@mock.patch('web_scraper.core.file_handlers.random.randint', side_effect=mocked_random_randint)
def test_creates_a_file(self, mock_random):
"""save_data_to_file creates a file"""
file_handlers.save_data_to_file(self.data)
self.assertTrue(os.path.isfile('./data_1.csv'))
@mock.patch('web_scraper.core.file_handlers.random.randint', side_effect=mocked_random_randint)
def test_saves_data_to_file(self, mock_random):
"""save_data_to_file saves data to created file"""
file_handlers.save_data_to_file(self.data)
with open('./data_1.csv', 'r') as test_file:
data_from_file = test_file.read().replace('\n', '')
self.assertEqual('hello world', data_from_file)
@mock.patch('web_scraper.core.file_handlers.random.randint', side_effect=mocked_random_randint)
def test_returns_location_of_created_file(self, mock_random):
"""save_data_to_file returns the location of the created file"""
path = file_handlers.save_data_to_file(self.data)
self.assertEqual(os.path.abspath('./data_1.csv'), path)
@mock.patch('web_scraper.core.file_handlers.random.randint', side_effect=mocked_random_randint)
def test_takes_user_inputted_filename(self, mock_random):
"""save_data_to_file uses the filename given by the user when creating the file"""
filename = 'my_data'
file_handlers.save_data_to_file(self.data, filename=filename)
self.assertTrue(os.path.isfile('./my_data_1.csv'))
def tearDown(self):
if os.path.isfile('./my_data_1.csv'):
file = './my_data_1.csv'
elif os.path.isfile('./data_1.csv'):
file = './data_1.csv'
os.remove(file)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for save_data_to_file function<commit_after>
|
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import unittest
from unittest import mock
from web_scraper.core import file_handlers
def mocked_random_randint(*args, **kwargs):
"""this method will be used by the mock to replace random.randint"""
return 1
class TestSaveDataToFileFunction(unittest.TestCase):
def setUp(self):
self.data = ['hello world']
@mock.patch('web_scraper.core.file_handlers.random.randint', side_effect=mocked_random_randint)
def test_creates_a_file(self, mock_random):
"""save_data_to_file creates a file"""
file_handlers.save_data_to_file(self.data)
self.assertTrue(os.path.isfile('./data_1.csv'))
@mock.patch('web_scraper.core.file_handlers.random.randint', side_effect=mocked_random_randint)
def test_saves_data_to_file(self, mock_random):
"""save_data_to_file saves data to created file"""
file_handlers.save_data_to_file(self.data)
with open('./data_1.csv', 'r') as test_file:
data_from_file = test_file.read().replace('\n', '')
self.assertEqual('hello world', data_from_file)
@mock.patch('web_scraper.core.file_handlers.random.randint', side_effect=mocked_random_randint)
def test_returns_location_of_created_file(self, mock_random):
"""save_data_to_file returns the location of the created file"""
path = file_handlers.save_data_to_file(self.data)
self.assertEqual(os.path.abspath('./data_1.csv'), path)
@mock.patch('web_scraper.core.file_handlers.random.randint', side_effect=mocked_random_randint)
def test_takes_user_inputted_filename(self, mock_random):
"""save_data_to_file uses the filename given by the user when creating the file"""
filename = 'my_data'
file_handlers.save_data_to_file(self.data, filename=filename)
self.assertTrue(os.path.isfile('./my_data_1.csv'))
def tearDown(self):
if os.path.isfile('./my_data_1.csv'):
file = './my_data_1.csv'
elif os.path.isfile('./data_1.csv'):
file = './data_1.csv'
os.remove(file)
if __name__ == '__main__':
unittest.main()
|
Add tests for save_data_to_file functionimport os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import unittest
from unittest import mock
from web_scraper.core import file_handlers
def mocked_random_randint(*args, **kwargs):
"""this method will be used by the mock to replace random.randint"""
return 1
class TestSaveDataToFileFunction(unittest.TestCase):
def setUp(self):
self.data = ['hello world']
@mock.patch('web_scraper.core.file_handlers.random.randint', side_effect=mocked_random_randint)
def test_creates_a_file(self, mock_random):
"""save_data_to_file creates a file"""
file_handlers.save_data_to_file(self.data)
self.assertTrue(os.path.isfile('./data_1.csv'))
@mock.patch('web_scraper.core.file_handlers.random.randint', side_effect=mocked_random_randint)
def test_saves_data_to_file(self, mock_random):
"""save_data_to_file saves data to created file"""
file_handlers.save_data_to_file(self.data)
with open('./data_1.csv', 'r') as test_file:
data_from_file = test_file.read().replace('\n', '')
self.assertEqual('hello world', data_from_file)
@mock.patch('web_scraper.core.file_handlers.random.randint', side_effect=mocked_random_randint)
def test_returns_location_of_created_file(self, mock_random):
"""save_data_to_file returns the location of the created file"""
path = file_handlers.save_data_to_file(self.data)
self.assertEqual(os.path.abspath('./data_1.csv'), path)
@mock.patch('web_scraper.core.file_handlers.random.randint', side_effect=mocked_random_randint)
def test_takes_user_inputted_filename(self, mock_random):
"""save_data_to_file uses the filename given by the user when creating the file"""
filename = 'my_data'
file_handlers.save_data_to_file(self.data, filename=filename)
self.assertTrue(os.path.isfile('./my_data_1.csv'))
def tearDown(self):
if os.path.isfile('./my_data_1.csv'):
file = './my_data_1.csv'
elif os.path.isfile('./data_1.csv'):
file = './data_1.csv'
os.remove(file)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for save_data_to_file function<commit_after>import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import unittest
from unittest import mock
from web_scraper.core import file_handlers
def mocked_random_randint(*args, **kwargs):
"""this method will be used by the mock to replace random.randint"""
return 1
class TestSaveDataToFileFunction(unittest.TestCase):
def setUp(self):
self.data = ['hello world']
@mock.patch('web_scraper.core.file_handlers.random.randint', side_effect=mocked_random_randint)
def test_creates_a_file(self, mock_random):
"""save_data_to_file creates a file"""
file_handlers.save_data_to_file(self.data)
self.assertTrue(os.path.isfile('./data_1.csv'))
@mock.patch('web_scraper.core.file_handlers.random.randint', side_effect=mocked_random_randint)
def test_saves_data_to_file(self, mock_random):
"""save_data_to_file saves data to created file"""
file_handlers.save_data_to_file(self.data)
with open('./data_1.csv', 'r') as test_file:
data_from_file = test_file.read().replace('\n', '')
self.assertEqual('hello world', data_from_file)
@mock.patch('web_scraper.core.file_handlers.random.randint', side_effect=mocked_random_randint)
def test_returns_location_of_created_file(self, mock_random):
"""save_data_to_file returns the location of the created file"""
path = file_handlers.save_data_to_file(self.data)
self.assertEqual(os.path.abspath('./data_1.csv'), path)
@mock.patch('web_scraper.core.file_handlers.random.randint', side_effect=mocked_random_randint)
def test_takes_user_inputted_filename(self, mock_random):
"""save_data_to_file uses the filename given by the user when creating the file"""
filename = 'my_data'
file_handlers.save_data_to_file(self.data, filename=filename)
self.assertTrue(os.path.isfile('./my_data_1.csv'))
def tearDown(self):
if os.path.isfile('./my_data_1.csv'):
file = './my_data_1.csv'
elif os.path.isfile('./data_1.csv'):
file = './data_1.csv'
os.remove(file)
if __name__ == '__main__':
unittest.main()
|
|
75498e899f017aff60a3d6dfe8ed4e39aa199f51
|
tests/su2_test.py
|
tests/su2_test.py
|
"""Test for the SU2 drudge."""
from sympy import Rational, I
from drudge import SU2LatticeDrudge
def test_su2_without_symbolic_index(spark_ctx):
"""Test SU2 lattice drudge without abstract symbolic lattice index."""
dr = SU2LatticeDrudge(spark_ctx)
p = dr.names
half = Rational(1, 2)
half_i = half / I
# Test the basic commutation rules without explicit site or on the same
# site.
for ops in [
(p.J_, p.J_p, p.J_m),
(p.J_[0], p.J_p[0], p.J_m[0])
]:
j_z, j_p, j_m = [dr.sum(i) for i in ops]
assert (j_z | j_p).simplify() == j_p
assert (j_z | j_m).simplify() == -1 * j_m
assert (j_p | j_m).simplify() == 2 * j_z
j_x = (j_p + j_m) * half
j_y = (j_p - j_m) * half_i
assert (j_x | j_y).simplify() == I * j_z
assert (j_y | j_z).simplify() == I * j_x
assert (j_z | j_x).simplify() == I * j_y
j_sq = dr.sum(
j_z * j_z + half * j_p * j_m + half * j_m * j_p
)
for i in [j_x, j_y, j_z]:
assert (j_sq | i).simplify() == 0
continue
|
Add tests for SU2 drudge without abstract lattice
|
Add tests for SU2 drudge without abstract lattice
|
Python
|
mit
|
tschijnmo/drudge,tschijnmo/drudge,tschijnmo/drudge
|
Add tests for SU2 drudge without abstract lattice
|
"""Test for the SU2 drudge."""
from sympy import Rational, I
from drudge import SU2LatticeDrudge
def test_su2_without_symbolic_index(spark_ctx):
"""Test SU2 lattice drudge without abstract symbolic lattice index."""
dr = SU2LatticeDrudge(spark_ctx)
p = dr.names
half = Rational(1, 2)
half_i = half / I
# Test the basic commutation rules without explicit site or on the same
# site.
for ops in [
(p.J_, p.J_p, p.J_m),
(p.J_[0], p.J_p[0], p.J_m[0])
]:
j_z, j_p, j_m = [dr.sum(i) for i in ops]
assert (j_z | j_p).simplify() == j_p
assert (j_z | j_m).simplify() == -1 * j_m
assert (j_p | j_m).simplify() == 2 * j_z
j_x = (j_p + j_m) * half
j_y = (j_p - j_m) * half_i
assert (j_x | j_y).simplify() == I * j_z
assert (j_y | j_z).simplify() == I * j_x
assert (j_z | j_x).simplify() == I * j_y
j_sq = dr.sum(
j_z * j_z + half * j_p * j_m + half * j_m * j_p
)
for i in [j_x, j_y, j_z]:
assert (j_sq | i).simplify() == 0
continue
|
<commit_before><commit_msg>Add tests for SU2 drudge without abstract lattice<commit_after>
|
"""Test for the SU2 drudge."""
from sympy import Rational, I
from drudge import SU2LatticeDrudge
def test_su2_without_symbolic_index(spark_ctx):
"""Test SU2 lattice drudge without abstract symbolic lattice index."""
dr = SU2LatticeDrudge(spark_ctx)
p = dr.names
half = Rational(1, 2)
half_i = half / I
# Test the basic commutation rules without explicit site or on the same
# site.
for ops in [
(p.J_, p.J_p, p.J_m),
(p.J_[0], p.J_p[0], p.J_m[0])
]:
j_z, j_p, j_m = [dr.sum(i) for i in ops]
assert (j_z | j_p).simplify() == j_p
assert (j_z | j_m).simplify() == -1 * j_m
assert (j_p | j_m).simplify() == 2 * j_z
j_x = (j_p + j_m) * half
j_y = (j_p - j_m) * half_i
assert (j_x | j_y).simplify() == I * j_z
assert (j_y | j_z).simplify() == I * j_x
assert (j_z | j_x).simplify() == I * j_y
j_sq = dr.sum(
j_z * j_z + half * j_p * j_m + half * j_m * j_p
)
for i in [j_x, j_y, j_z]:
assert (j_sq | i).simplify() == 0
continue
|
Add tests for SU2 drudge without abstract lattice"""Test for the SU2 drudge."""
from sympy import Rational, I
from drudge import SU2LatticeDrudge
def test_su2_without_symbolic_index(spark_ctx):
"""Test SU2 lattice drudge without abstract symbolic lattice index."""
dr = SU2LatticeDrudge(spark_ctx)
p = dr.names
half = Rational(1, 2)
half_i = half / I
# Test the basic commutation rules without explicit site or on the same
# site.
for ops in [
(p.J_, p.J_p, p.J_m),
(p.J_[0], p.J_p[0], p.J_m[0])
]:
j_z, j_p, j_m = [dr.sum(i) for i in ops]
assert (j_z | j_p).simplify() == j_p
assert (j_z | j_m).simplify() == -1 * j_m
assert (j_p | j_m).simplify() == 2 * j_z
j_x = (j_p + j_m) * half
j_y = (j_p - j_m) * half_i
assert (j_x | j_y).simplify() == I * j_z
assert (j_y | j_z).simplify() == I * j_x
assert (j_z | j_x).simplify() == I * j_y
j_sq = dr.sum(
j_z * j_z + half * j_p * j_m + half * j_m * j_p
)
for i in [j_x, j_y, j_z]:
assert (j_sq | i).simplify() == 0
continue
|
<commit_before><commit_msg>Add tests for SU2 drudge without abstract lattice<commit_after>"""Test for the SU2 drudge."""
from sympy import Rational, I
from drudge import SU2LatticeDrudge
def test_su2_without_symbolic_index(spark_ctx):
"""Test SU2 lattice drudge without abstract symbolic lattice index."""
dr = SU2LatticeDrudge(spark_ctx)
p = dr.names
half = Rational(1, 2)
half_i = half / I
# Test the basic commutation rules without explicit site or on the same
# site.
for ops in [
(p.J_, p.J_p, p.J_m),
(p.J_[0], p.J_p[0], p.J_m[0])
]:
j_z, j_p, j_m = [dr.sum(i) for i in ops]
assert (j_z | j_p).simplify() == j_p
assert (j_z | j_m).simplify() == -1 * j_m
assert (j_p | j_m).simplify() == 2 * j_z
j_x = (j_p + j_m) * half
j_y = (j_p - j_m) * half_i
assert (j_x | j_y).simplify() == I * j_z
assert (j_y | j_z).simplify() == I * j_x
assert (j_z | j_x).simplify() == I * j_y
j_sq = dr.sum(
j_z * j_z + half * j_p * j_m + half * j_m * j_p
)
for i in [j_x, j_y, j_z]:
assert (j_sq | i).simplify() == 0
continue
|
|
1dfe51ab87f090d57452937ce5c52d4f5541eac3
|
sfstation.py
|
sfstation.py
|
#!/usr/bin/env python3
import requests
import time
from bs4 import BeautifulSoup
day = "02-01-2020"
last_day_string = "03-01-2020"
page = requests.get("https://www.sfstation.com/calendar/{}".format(day))
soup = BeautifulSoup(page.content)
events_seen = set()
last_day = False
# loop through every day until the next month
while not last_day:
print(day)
last_page = False
while not last_page:
events = soup.find_all('div', {'class': 'ev_in ev_mobile_c'})
todays_events = set()
for event in events:
title = event.find('span', {'itemprop': 'name'}).get_text()
location = event.find('span', {'itemprop': 'location'}).find('span', {'itemprop': 'name'}).get_text()
address = event.find('span', {'itemprop': 'streetAddress'}).get_text()
url = event.find('span', {'itemprop': 'url'}).get_text()
todays_events.add("{} {} @ {} ({}) {}".format(('-' if title not in events_seen else '+'), title, location, address, url))
events_seen.add(title)
for event in sorted(todays_events):
print(event)
if soup.find('a', {'rel': 'next'}) is not None:
time.sleep(5)
next_page = 'https://www.sfstation.com/' + soup.find('a', {'rel': 'next'})['href']
page = requests.get(next_page)
soup = BeautifulSoup(page.content)
else:
last_page = True
next_day = soup.find('a', text='next day >')
if next_day['href'][-10:] != last_day_string:
time.sleep(5)
day = next_day['href'][-10:]
next_page = 'https://www.sfstation.com' + next_day['href']
page = requests.get(next_page)
soup = BeautifulSoup(page.content)
else:
last_day = True
|
Add thrown together date script
|
Add thrown together date script
|
Python
|
mit
|
feilen/dotfiles,feilen/dotfiles,feilen/dotfiles
|
Add thrown together date script
|
#!/usr/bin/env python3
import requests
import time
from bs4 import BeautifulSoup
day = "02-01-2020"
last_day_string = "03-01-2020"
page = requests.get("https://www.sfstation.com/calendar/{}".format(day))
soup = BeautifulSoup(page.content)
events_seen = set()
last_day = False
# loop through every day until the next month
while not last_day:
print(day)
last_page = False
while not last_page:
events = soup.find_all('div', {'class': 'ev_in ev_mobile_c'})
todays_events = set()
for event in events:
title = event.find('span', {'itemprop': 'name'}).get_text()
location = event.find('span', {'itemprop': 'location'}).find('span', {'itemprop': 'name'}).get_text()
address = event.find('span', {'itemprop': 'streetAddress'}).get_text()
url = event.find('span', {'itemprop': 'url'}).get_text()
todays_events.add("{} {} @ {} ({}) {}".format(('-' if title not in events_seen else '+'), title, location, address, url))
events_seen.add(title)
for event in sorted(todays_events):
print(event)
if soup.find('a', {'rel': 'next'}) is not None:
time.sleep(5)
next_page = 'https://www.sfstation.com/' + soup.find('a', {'rel': 'next'})['href']
page = requests.get(next_page)
soup = BeautifulSoup(page.content)
else:
last_page = True
next_day = soup.find('a', text='next day >')
if next_day['href'][-10:] != last_day_string:
time.sleep(5)
day = next_day['href'][-10:]
next_page = 'https://www.sfstation.com' + next_day['href']
page = requests.get(next_page)
soup = BeautifulSoup(page.content)
else:
last_day = True
|
<commit_before><commit_msg>Add thrown together date script<commit_after>
|
#!/usr/bin/env python3
import requests
import time
from bs4 import BeautifulSoup
day = "02-01-2020"
last_day_string = "03-01-2020"
page = requests.get("https://www.sfstation.com/calendar/{}".format(day))
soup = BeautifulSoup(page.content)
events_seen = set()
last_day = False
# loop through every day until the next month
while not last_day:
print(day)
last_page = False
while not last_page:
events = soup.find_all('div', {'class': 'ev_in ev_mobile_c'})
todays_events = set()
for event in events:
title = event.find('span', {'itemprop': 'name'}).get_text()
location = event.find('span', {'itemprop': 'location'}).find('span', {'itemprop': 'name'}).get_text()
address = event.find('span', {'itemprop': 'streetAddress'}).get_text()
url = event.find('span', {'itemprop': 'url'}).get_text()
todays_events.add("{} {} @ {} ({}) {}".format(('-' if title not in events_seen else '+'), title, location, address, url))
events_seen.add(title)
for event in sorted(todays_events):
print(event)
if soup.find('a', {'rel': 'next'}) is not None:
time.sleep(5)
next_page = 'https://www.sfstation.com/' + soup.find('a', {'rel': 'next'})['href']
page = requests.get(next_page)
soup = BeautifulSoup(page.content)
else:
last_page = True
next_day = soup.find('a', text='next day >')
if next_day['href'][-10:] != last_day_string:
time.sleep(5)
day = next_day['href'][-10:]
next_page = 'https://www.sfstation.com' + next_day['href']
page = requests.get(next_page)
soup = BeautifulSoup(page.content)
else:
last_day = True
|
Add thrown together date script#!/usr/bin/env python3
import requests
import time
from bs4 import BeautifulSoup
day = "02-01-2020"
last_day_string = "03-01-2020"
page = requests.get("https://www.sfstation.com/calendar/{}".format(day))
soup = BeautifulSoup(page.content)
events_seen = set()
last_day = False
# loop through every day until the next month
while not last_day:
print(day)
last_page = False
while not last_page:
events = soup.find_all('div', {'class': 'ev_in ev_mobile_c'})
todays_events = set()
for event in events:
title = event.find('span', {'itemprop': 'name'}).get_text()
location = event.find('span', {'itemprop': 'location'}).find('span', {'itemprop': 'name'}).get_text()
address = event.find('span', {'itemprop': 'streetAddress'}).get_text()
url = event.find('span', {'itemprop': 'url'}).get_text()
todays_events.add("{} {} @ {} ({}) {}".format(('-' if title not in events_seen else '+'), title, location, address, url))
events_seen.add(title)
for event in sorted(todays_events):
print(event)
if soup.find('a', {'rel': 'next'}) is not None:
time.sleep(5)
next_page = 'https://www.sfstation.com/' + soup.find('a', {'rel': 'next'})['href']
page = requests.get(next_page)
soup = BeautifulSoup(page.content)
else:
last_page = True
next_day = soup.find('a', text='next day >')
if next_day['href'][-10:] != last_day_string:
time.sleep(5)
day = next_day['href'][-10:]
next_page = 'https://www.sfstation.com' + next_day['href']
page = requests.get(next_page)
soup = BeautifulSoup(page.content)
else:
last_day = True
|
<commit_before><commit_msg>Add thrown together date script<commit_after>#!/usr/bin/env python3
import requests
import time
from bs4 import BeautifulSoup
day = "02-01-2020"
last_day_string = "03-01-2020"
page = requests.get("https://www.sfstation.com/calendar/{}".format(day))
soup = BeautifulSoup(page.content)
events_seen = set()
last_day = False
# loop through every day until the next month
while not last_day:
print(day)
last_page = False
while not last_page:
events = soup.find_all('div', {'class': 'ev_in ev_mobile_c'})
todays_events = set()
for event in events:
title = event.find('span', {'itemprop': 'name'}).get_text()
location = event.find('span', {'itemprop': 'location'}).find('span', {'itemprop': 'name'}).get_text()
address = event.find('span', {'itemprop': 'streetAddress'}).get_text()
url = event.find('span', {'itemprop': 'url'}).get_text()
todays_events.add("{} {} @ {} ({}) {}".format(('-' if title not in events_seen else '+'), title, location, address, url))
events_seen.add(title)
for event in sorted(todays_events):
print(event)
if soup.find('a', {'rel': 'next'}) is not None:
time.sleep(5)
next_page = 'https://www.sfstation.com/' + soup.find('a', {'rel': 'next'})['href']
page = requests.get(next_page)
soup = BeautifulSoup(page.content)
else:
last_page = True
next_day = soup.find('a', text='next day >')
if next_day['href'][-10:] != last_day_string:
time.sleep(5)
day = next_day['href'][-10:]
next_page = 'https://www.sfstation.com' + next_day['href']
page = requests.get(next_page)
soup = BeautifulSoup(page.content)
else:
last_day = True
|
|
2ee1ef825bb6aa6ff74228c90d4656da6c3ddde6
|
201507/ledpwm.py
|
201507/ledpwm.py
|
#!/usr/bin/python
led0 = open('/dev/rtled0', 'a')
n = 0
while True :
print >> led0, "%d" % (0 if n%5 else 1)
n = n + 1
|
Add pwm Python code for LED
|
Add pwm Python code for LED
|
Python
|
mit
|
ryuichiueda/RPiM,ryuichiueda/RPiM
|
Add pwm Python code for LED
|
#!/usr/bin/python
led0 = open('/dev/rtled0', 'a')
n = 0
while True :
print >> led0, "%d" % (0 if n%5 else 1)
n = n + 1
|
<commit_before><commit_msg>Add pwm Python code for LED<commit_after>
|
#!/usr/bin/python
led0 = open('/dev/rtled0', 'a')
n = 0
while True :
print >> led0, "%d" % (0 if n%5 else 1)
n = n + 1
|
Add pwm Python code for LED#!/usr/bin/python
led0 = open('/dev/rtled0', 'a')
n = 0
while True :
print >> led0, "%d" % (0 if n%5 else 1)
n = n + 1
|
<commit_before><commit_msg>Add pwm Python code for LED<commit_after>#!/usr/bin/python
led0 = open('/dev/rtled0', 'a')
n = 0
while True :
print >> led0, "%d" % (0 if n%5 else 1)
n = n + 1
|
|
f2472202af524887ae18943be368db647e4c3db8
|
plugins/words.py
|
plugins/words.py
|
from difflib import get_close_matches
import discord
from pcbot import Annotate
import plugins
client = plugins.client
def load_wordlist(filename: str):
with open("plugins/wordlib/SimpleWordlists/Thesaurus-" + filename + ".txt") as f:
return {k: v.split(",") for k, v in [line.split("|") for line in f.readlines()]}
antonyms = load_wordlist("Antonyms-All")
synonyms = load_wordlist("Synonyms-All")
@plugins.command()
async def antonym(message: discord.Message, phrase: Annotate.CleanContent):
phrase = phrase.lower()
if phrase not in antonyms:
matches = get_close_matches(phrase, antonyms.keys(), n=5, cutoff=0.6)
await client.say(message, "Found no antonyms for {}. Did you mean {}".format(phrase, ", ".join("`" + match + "`" for match in matches)))
return
await client.say(message, ", ".join(s.strip(" \n") for s in antonyms[phrase]))
@plugins.command()
async def synonym(message: discord.Message, phrase: Annotate.CleanContent):
phrase = phrase.lower()
if phrase not in synonyms:
matches = get_close_matches(phrase, synonyms.keys(), n=5, cutoff=0.6)
await client.say(message, "Found no synonym for {}. Did you mean {}".format(phrase, ", ".join("`" + match + "`" for match in matches)))
return
await client.say(message, ", ".join(s.strip(" \n") for s in synonyms[phrase]))
@plugins.command()
async def homonym(message: discord.Message, phrase: Annotate.CleanContent):
await client.say(message, phrase)
|
Add antonym and synonym commands
|
Add antonym and synonym commands
|
Python
|
mit
|
PcBoy111/PCBOT,PcBoy111/PC-BOT-V2,pckv/pcbot
|
Add antonym and synonym commands
|
from difflib import get_close_matches
import discord
from pcbot import Annotate
import plugins
client = plugins.client
def load_wordlist(filename: str):
with open("plugins/wordlib/SimpleWordlists/Thesaurus-" + filename + ".txt") as f:
return {k: v.split(",") for k, v in [line.split("|") for line in f.readlines()]}
antonyms = load_wordlist("Antonyms-All")
synonyms = load_wordlist("Synonyms-All")
@plugins.command()
async def antonym(message: discord.Message, phrase: Annotate.CleanContent):
phrase = phrase.lower()
if phrase not in antonyms:
matches = get_close_matches(phrase, antonyms.keys(), n=5, cutoff=0.6)
await client.say(message, "Found no antonyms for {}. Did you mean {}".format(phrase, ", ".join("`" + match + "`" for match in matches)))
return
await client.say(message, ", ".join(s.strip(" \n") for s in antonyms[phrase]))
@plugins.command()
async def synonym(message: discord.Message, phrase: Annotate.CleanContent):
phrase = phrase.lower()
if phrase not in synonyms:
matches = get_close_matches(phrase, synonyms.keys(), n=5, cutoff=0.6)
await client.say(message, "Found no synonym for {}. Did you mean {}".format(phrase, ", ".join("`" + match + "`" for match in matches)))
return
await client.say(message, ", ".join(s.strip(" \n") for s in synonyms[phrase]))
@plugins.command()
async def homonym(message: discord.Message, phrase: Annotate.CleanContent):
await client.say(message, phrase)
|
<commit_before><commit_msg>Add antonym and synonym commands<commit_after>
|
from difflib import get_close_matches
import discord
from pcbot import Annotate
import plugins
client = plugins.client
def load_wordlist(filename: str):
with open("plugins/wordlib/SimpleWordlists/Thesaurus-" + filename + ".txt") as f:
return {k: v.split(",") for k, v in [line.split("|") for line in f.readlines()]}
antonyms = load_wordlist("Antonyms-All")
synonyms = load_wordlist("Synonyms-All")
@plugins.command()
async def antonym(message: discord.Message, phrase: Annotate.CleanContent):
phrase = phrase.lower()
if phrase not in antonyms:
matches = get_close_matches(phrase, antonyms.keys(), n=5, cutoff=0.6)
await client.say(message, "Found no antonyms for {}. Did you mean {}".format(phrase, ", ".join("`" + match + "`" for match in matches)))
return
await client.say(message, ", ".join(s.strip(" \n") for s in antonyms[phrase]))
@plugins.command()
async def synonym(message: discord.Message, phrase: Annotate.CleanContent):
phrase = phrase.lower()
if phrase not in synonyms:
matches = get_close_matches(phrase, synonyms.keys(), n=5, cutoff=0.6)
await client.say(message, "Found no synonym for {}. Did you mean {}".format(phrase, ", ".join("`" + match + "`" for match in matches)))
return
await client.say(message, ", ".join(s.strip(" \n") for s in synonyms[phrase]))
@plugins.command()
async def homonym(message: discord.Message, phrase: Annotate.CleanContent):
await client.say(message, phrase)
|
Add antonym and synonym commandsfrom difflib import get_close_matches
import discord
from pcbot import Annotate
import plugins
client = plugins.client
def load_wordlist(filename: str):
with open("plugins/wordlib/SimpleWordlists/Thesaurus-" + filename + ".txt") as f:
return {k: v.split(",") for k, v in [line.split("|") for line in f.readlines()]}
antonyms = load_wordlist("Antonyms-All")
synonyms = load_wordlist("Synonyms-All")
@plugins.command()
async def antonym(message: discord.Message, phrase: Annotate.CleanContent):
phrase = phrase.lower()
if phrase not in antonyms:
matches = get_close_matches(phrase, antonyms.keys(), n=5, cutoff=0.6)
await client.say(message, "Found no antonyms for {}. Did you mean {}".format(phrase, ", ".join("`" + match + "`" for match in matches)))
return
await client.say(message, ", ".join(s.strip(" \n") for s in antonyms[phrase]))
@plugins.command()
async def synonym(message: discord.Message, phrase: Annotate.CleanContent):
phrase = phrase.lower()
if phrase not in synonyms:
matches = get_close_matches(phrase, synonyms.keys(), n=5, cutoff=0.6)
await client.say(message, "Found no synonym for {}. Did you mean {}".format(phrase, ", ".join("`" + match + "`" for match in matches)))
return
await client.say(message, ", ".join(s.strip(" \n") for s in synonyms[phrase]))
@plugins.command()
async def homonym(message: discord.Message, phrase: Annotate.CleanContent):
await client.say(message, phrase)
|
<commit_before><commit_msg>Add antonym and synonym commands<commit_after>from difflib import get_close_matches
import discord
from pcbot import Annotate
import plugins
client = plugins.client
def load_wordlist(filename: str):
with open("plugins/wordlib/SimpleWordlists/Thesaurus-" + filename + ".txt") as f:
return {k: v.split(",") for k, v in [line.split("|") for line in f.readlines()]}
antonyms = load_wordlist("Antonyms-All")
synonyms = load_wordlist("Synonyms-All")
@plugins.command()
async def antonym(message: discord.Message, phrase: Annotate.CleanContent):
phrase = phrase.lower()
if phrase not in antonyms:
matches = get_close_matches(phrase, antonyms.keys(), n=5, cutoff=0.6)
await client.say(message, "Found no antonyms for {}. Did you mean {}".format(phrase, ", ".join("`" + match + "`" for match in matches)))
return
await client.say(message, ", ".join(s.strip(" \n") for s in antonyms[phrase]))
@plugins.command()
async def synonym(message: discord.Message, phrase: Annotate.CleanContent):
phrase = phrase.lower()
if phrase not in synonyms:
matches = get_close_matches(phrase, synonyms.keys(), n=5, cutoff=0.6)
await client.say(message, "Found no synonym for {}. Did you mean {}".format(phrase, ", ".join("`" + match + "`" for match in matches)))
return
await client.say(message, ", ".join(s.strip(" \n") for s in synonyms[phrase]))
@plugins.command()
async def homonym(message: discord.Message, phrase: Annotate.CleanContent):
await client.say(message, phrase)
|
|
7d45fd15f9d2fa4e0d830e7f404fb77d531adc29
|
examples/test-combo-box.py
|
examples/test-combo-box.py
|
"""
This test is adopted form nbtk, but since it's summer it uses
Munich's most famous Beergarden instead of places in London ;)
"""
import clutter
import nbtk
def title_changed_cb(box, pspec):
print 'title now:', box.get_title()
def index_changed_cb(box, pspec):
print 'index now:', box.get_index()
def stage_key_press_cb(actor, event, box):
from clutter import keysyms
if event.keyval == keysyms.r:
box.set_title('Munich')
elif event.keyval >= ord('0') and event.keyval <= ord('9'):
box.set_index(event.keyval - 48)
if __name__ == '__main__':
stage = clutter.Stage()
stage.connect('destroy', clutter.main_quit)
combo = nbtk.ComboBox()
stage.add(combo)
combo.set_title('Munich')
combo.append_text('Augustinerkeller')
combo.append_text('Hirschgarten')
combo.append_text('Nockherberg')
combo.append_text('Seehaus')
combo.append_text('Chinesischer Turm')
combo.append_text('Zum Flaucher')
combo.connect('notify::title', title_changed_cb)
combo.connect('notify::index', index_changed_cb)
stage.connect('key-press-event', stage_key_press_cb, combo)
stage.show()
clutter.main()
|
Add a simple test for ComboBox
|
Add a simple test for ComboBox
|
Python
|
lgpl-2.1
|
buztard/mxpy,buztard/mxpy,buztard/mxpy
|
Add a simple test for ComboBox
|
"""
This test is adopted from nbtk, but since it's summer it uses
Munich's most famous Beergarden instead of places in London ;)
"""
import clutter
import nbtk
def title_changed_cb(box, pspec):
print 'title now:', box.get_title()
def index_changed_cb(box, pspec):
print 'index now:', box.get_index()
def stage_key_press_cb(actor, event, box):
from clutter import keysyms
if event.keyval == keysyms.r:
box.set_title('Munich')
elif event.keyval >= ord('0') and event.keyval <= ord('9'):
box.set_index(event.keyval - 48)
if __name__ == '__main__':
stage = clutter.Stage()
stage.connect('destroy', clutter.main_quit)
combo = nbtk.ComboBox()
stage.add(combo)
combo.set_title('Munich')
combo.append_text('Augustinerkeller')
combo.append_text('Hirschgarten')
combo.append_text('Nockherberg')
combo.append_text('Seehaus')
combo.append_text('Chinesischer Turm')
combo.append_text('Zum Flaucher')
combo.connect('notify::title', title_changed_cb)
combo.connect('notify::index', index_changed_cb)
stage.connect('key-press-event', stage_key_press_cb, combo)
stage.show()
clutter.main()
|
<commit_before><commit_msg>Add a simple test for ComboBox<commit_after>
|
"""
This test is adopted from nbtk, but since it's summer it uses
Munich's most famous Beergarden instead of places in London ;)
"""
import clutter
import nbtk
def title_changed_cb(box, pspec):
print 'title now:', box.get_title()
def index_changed_cb(box, pspec):
print 'index now:', box.get_index()
def stage_key_press_cb(actor, event, box):
from clutter import keysyms
if event.keyval == keysyms.r:
box.set_title('Munich')
elif event.keyval >= ord('0') and event.keyval <= ord('9'):
box.set_index(event.keyval - 48)
if __name__ == '__main__':
stage = clutter.Stage()
stage.connect('destroy', clutter.main_quit)
combo = nbtk.ComboBox()
stage.add(combo)
combo.set_title('Munich')
combo.append_text('Augustinerkeller')
combo.append_text('Hirschgarten')
combo.append_text('Nockherberg')
combo.append_text('Seehaus')
combo.append_text('Chinesischer Turm')
combo.append_text('Zum Flaucher')
combo.connect('notify::title', title_changed_cb)
combo.connect('notify::index', index_changed_cb)
stage.connect('key-press-event', stage_key_press_cb, combo)
stage.show()
clutter.main()
|
Add a simple test for ComboBox"""
This test is adopted from nbtk, but since it's summer it uses
Munich's most famous Beergarden instead of places in London ;)
"""
import clutter
import nbtk
def title_changed_cb(box, pspec):
print 'title now:', box.get_title()
def index_changed_cb(box, pspec):
print 'index now:', box.get_index()
def stage_key_press_cb(actor, event, box):
from clutter import keysyms
if event.keyval == keysyms.r:
box.set_title('Munich')
elif event.keyval >= ord('0') and event.keyval <= ord('9'):
box.set_index(event.keyval - 48)
if __name__ == '__main__':
stage = clutter.Stage()
stage.connect('destroy', clutter.main_quit)
combo = nbtk.ComboBox()
stage.add(combo)
combo.set_title('Munich')
combo.append_text('Augustinerkeller')
combo.append_text('Hirschgarten')
combo.append_text('Nockherberg')
combo.append_text('Seehaus')
combo.append_text('Chinesischer Turm')
combo.append_text('Zum Flaucher')
combo.connect('notify::title', title_changed_cb)
combo.connect('notify::index', index_changed_cb)
stage.connect('key-press-event', stage_key_press_cb, combo)
stage.show()
clutter.main()
|
<commit_before><commit_msg>Add a simple test for ComboBox<commit_after>"""
This test is adopted from nbtk, but since it's summer it uses
Munich's most famous Beergarden instead of places in London ;)
"""
import clutter
import nbtk
def title_changed_cb(box, pspec):
print 'title now:', box.get_title()
def index_changed_cb(box, pspec):
print 'index now:', box.get_index()
def stage_key_press_cb(actor, event, box):
from clutter import keysyms
if event.keyval == keysyms.r:
box.set_title('Munich')
elif event.keyval >= ord('0') and event.keyval <= ord('9'):
box.set_index(event.keyval - 48)
if __name__ == '__main__':
stage = clutter.Stage()
stage.connect('destroy', clutter.main_quit)
combo = nbtk.ComboBox()
stage.add(combo)
combo.set_title('Munich')
combo.append_text('Augustinerkeller')
combo.append_text('Hirschgarten')
combo.append_text('Nockherberg')
combo.append_text('Seehaus')
combo.append_text('Chinesischer Turm')
combo.append_text('Zum Flaucher')
combo.connect('notify::title', title_changed_cb)
combo.connect('notify::index', index_changed_cb)
stage.connect('key-press-event', stage_key_press_cb, combo)
stage.show()
clutter.main()
|
|
4fb1a760489d30dcc93bdac8209bc6eeceb81bd8
|
piperlearn/visual/statsplot.py
|
piperlearn/visual/statsplot.py
|
import seaborn as sns
import matplotlib.pyplot as plt
from ..utility.validation import check_dataframe, check_cols
def compute_correlation(data=None, cols=None, method='pearson'):
datasets = check_dataframe(data, cols)
return datasets.corr(method)
class correlations(object):
def __init__(self, data, cols=None):
self.corr = compute_correlation(data, cols)
def plot_heatmap(self, figsize, annot=False, fmt='.2g'):
plt.figure(figsize=figsize)
sns.heatmap(self.corr, vmin=-1, vmax=1, annot=annot, fmt=fmt, square=True)
def plot_individual(self, figsize, target):
        self.plot_single(self.corr, target, figsize, sorted=True, abs=True, horizontal=True)
def plot_single(self, data, target, figsize, column=None, sorted=False, abs=False, horizontal=True):
dataset = check_dataframe(data, column)
check_cols(dataset, target)
if sorted:
if abs:
dataset['sorted'] = dataset[target].abs()
else:
dataset['sorted'] = dataset[target]
dataset.sort(columns='sorted', inplace=True)
dataset.drop('sorted', axis=1)
if horizontal:
ax = dataset[target].plot.barh(figsize=figsize)
for p in ax.patches:
if p.get_width() > 0:
ax.text(p.get_width() + 0.01, p.get_y() + 0.15, str(round((p.get_width()), 4)), color='dimgrey')
else:
ax.text(p.get_width() - 0.01, p.get_y() + 0.15, str(round((p.get_width()), 4)), color='dimgrey')
else:
ax = dataset[target].plot.bar(figsize=figsize)
for p in ax.patches:
if p.get_height() > 0:
ax.text(p.get_x() - 0.01, p.get_height() + 0.15, str(round((p.get_height()), 4)), color='dimgrey')
else:
ax.text(p.get_x() - 0.01, p.get_height() + 0.15, str(round((p.get_height()), 4)), color='dimgrey')
|
Add utility for validation and plot for heatmap
|
Add utility for validation and plot for heatmap
|
Python
|
mit
|
dolremi/PiperLearn
|
Add utility for validation and plot for heatmap
|
import seaborn as sns
import matplotlib.pyplot as plt
from ..utility.validation import check_dataframe, check_cols
def compute_correlation(data=None, cols=None, method='pearson'):
datasets = check_dataframe(data, cols)
return datasets.corr(method)
class correlations(object):
def __init__(self, data, cols=None):
self.corr = compute_correlation(data, cols)
def plot_heatmap(self, figsize, annot=False, fmt='.2g'):
plt.figure(figsize=figsize)
sns.heatmap(self.corr, vmin=-1, vmax=1, annot=annot, fmt=fmt, square=True)
def plot_individual(self, figsize, target):
        self.plot_single(self.corr, target, figsize, sorted=True, abs=True, horizontal=True)
def plot_single(self, data, target, figsize, column=None, sorted=False, abs=False, horizontal=True):
dataset = check_dataframe(data, column)
check_cols(dataset, target)
if sorted:
if abs:
dataset['sorted'] = dataset[target].abs()
else:
dataset['sorted'] = dataset[target]
dataset.sort(columns='sorted', inplace=True)
dataset.drop('sorted', axis=1)
if horizontal:
ax = dataset[target].plot.barh(figsize=figsize)
for p in ax.patches:
if p.get_width() > 0:
ax.text(p.get_width() + 0.01, p.get_y() + 0.15, str(round((p.get_width()), 4)), color='dimgrey')
else:
ax.text(p.get_width() - 0.01, p.get_y() + 0.15, str(round((p.get_width()), 4)), color='dimgrey')
else:
ax = dataset[target].plot.bar(figsize=figsize)
for p in ax.patches:
if p.get_height() > 0:
ax.text(p.get_x() - 0.01, p.get_height() + 0.15, str(round((p.get_height()), 4)), color='dimgrey')
else:
ax.text(p.get_x() - 0.01, p.get_height() + 0.15, str(round((p.get_height()), 4)), color='dimgrey')
|
<commit_before><commit_msg>Add utility for validation and plot for heatmap<commit_after>
|
import seaborn as sns
import matplotlib.pyplot as plt
from ..utility.validation import check_dataframe, check_cols
def compute_correlation(data=None, cols=None, method='pearson'):
datasets = check_dataframe(data, cols)
return datasets.corr(method)
class correlations(object):
def __init__(self, data, cols=None):
self.corr = compute_correlation(data, cols)
def plot_heatmap(self, figsize, annot=False, fmt='.2g'):
plt.figure(figsize=figsize)
sns.heatmap(self.corr, vmin=-1, vmax=1, annot=annot, fmt=fmt, square=True)
def plot_individual(self, figsize, target):
        self.plot_single(self.corr, target, figsize, sorted=True, abs=True, horizontal=True)
def plot_single(self, data, target, figsize, column=None, sorted=False, abs=False, horizontal=True):
dataset = check_dataframe(data, column)
check_cols(dataset, target)
if sorted:
if abs:
dataset['sorted'] = dataset[target].abs()
else:
dataset['sorted'] = dataset[target]
dataset.sort(columns='sorted', inplace=True)
dataset.drop('sorted', axis=1)
if horizontal:
ax = dataset[target].plot.barh(figsize=figsize)
for p in ax.patches:
if p.get_width() > 0:
ax.text(p.get_width() + 0.01, p.get_y() + 0.15, str(round((p.get_width()), 4)), color='dimgrey')
else:
ax.text(p.get_width() - 0.01, p.get_y() + 0.15, str(round((p.get_width()), 4)), color='dimgrey')
else:
ax = dataset[target].plot.bar(figsize=figsize)
for p in ax.patches:
if p.get_height() > 0:
ax.text(p.get_x() - 0.01, p.get_height() + 0.15, str(round((p.get_height()), 4)), color='dimgrey')
else:
ax.text(p.get_x() - 0.01, p.get_height() + 0.15, str(round((p.get_height()), 4)), color='dimgrey')
|
Add utility for validation and plot for heatmapimport seaborn as sns
import matplotlib.pyplot as plt
from ..utility.validation import check_dataframe, check_cols
def compute_correlation(data=None, cols=None, method='pearson'):
datasets = check_dataframe(data, cols)
return datasets.corr(method)
class correlations(object):
def __init__(self, data, cols=None):
self.corr = compute_correlation(data, cols)
def plot_heatmap(self, figsize, annot=False, fmt='.2g'):
plt.figure(figsize=figsize)
sns.heatmap(self.corr, vmin=-1, vmax=1, annot=annot, fmt=fmt, square=True)
def plot_individual(self, figsize, target):
        self.plot_single(self.corr, target, figsize, sorted=True, abs=True, horizontal=True)
def plot_single(self, data, target, figsize, column=None, sorted=False, abs=False, horizontal=True):
dataset = check_dataframe(data, column)
check_cols(dataset, target)
if sorted:
if abs:
dataset['sorted'] = dataset[target].abs()
else:
dataset['sorted'] = dataset[target]
dataset.sort(columns='sorted', inplace=True)
dataset.drop('sorted', axis=1)
if horizontal:
ax = dataset[target].plot.barh(figsize=figsize)
for p in ax.patches:
if p.get_width() > 0:
ax.text(p.get_width() + 0.01, p.get_y() + 0.15, str(round((p.get_width()), 4)), color='dimgrey')
else:
ax.text(p.get_width() - 0.01, p.get_y() + 0.15, str(round((p.get_width()), 4)), color='dimgrey')
else:
ax = dataset[target].plot.bar(figsize=figsize)
for p in ax.patches:
if p.get_height() > 0:
ax.text(p.get_x() - 0.01, p.get_height() + 0.15, str(round((p.get_height()), 4)), color='dimgrey')
else:
ax.text(p.get_x() - 0.01, p.get_height() + 0.15, str(round((p.get_height()), 4)), color='dimgrey')
|
<commit_before><commit_msg>Add utility for validation and plot for heatmap<commit_after>import seaborn as sns
import matplotlib.pyplot as plt
from ..utility.validation import check_dataframe, check_cols
def compute_correlation(data=None, cols=None, method='pearson'):
datasets = check_dataframe(data, cols)
return datasets.corr(method)
class correlations(object):
def __init__(self, data, cols=None):
self.corr = compute_correlation(data, cols)
def plot_heatmap(self, figsize, annot=False, fmt='.2g'):
plt.figure(figsize=figsize)
sns.heatmap(self.corr, vmin=-1, vmax=1, annot=annot, fmt=fmt, square=True)
def plot_individual(self, figsize, target):
        self.plot_single(self.corr, target, figsize, sorted=True, abs=True, horizontal=True)
def plot_single(self, data, target, figsize, column=None, sorted=False, abs=False, horizontal=True):
dataset = check_dataframe(data, column)
check_cols(dataset, target)
if sorted:
if abs:
dataset['sorted'] = dataset[target].abs()
else:
dataset['sorted'] = dataset[target]
dataset.sort(columns='sorted', inplace=True)
dataset.drop('sorted', axis=1)
if horizontal:
ax = dataset[target].plot.barh(figsize=figsize)
for p in ax.patches:
if p.get_width() > 0:
ax.text(p.get_width() + 0.01, p.get_y() + 0.15, str(round((p.get_width()), 4)), color='dimgrey')
else:
ax.text(p.get_width() - 0.01, p.get_y() + 0.15, str(round((p.get_width()), 4)), color='dimgrey')
else:
ax = dataset[target].plot.bar(figsize=figsize)
for p in ax.patches:
if p.get_height() > 0:
ax.text(p.get_x() - 0.01, p.get_height() + 0.15, str(round((p.get_height()), 4)), color='dimgrey')
else:
ax.text(p.get_x() - 0.01, p.get_height() + 0.15, str(round((p.get_height()), 4)), color='dimgrey')
|
|
e5e023cdbaf6996a6f26726ee63f551d5b73b313
|
examples/voc/download_models.py
|
examples/voc/download_models.py
|
#!/usr/bin/env python
import os.path as osp
import fcn
def main():
# models converted from caffe
path = fcn.data.download_vgg16_chainermodel()
print('==> downloaded to: %s' % path)
path = fcn.data.download_fcn32s_chainermodel()
print('==> downloaded to: %s' % path)
path = fcn.data.download_fcn16s_chainermodel()
print('==> downloaded to: %s' % path)
path = fcn.data.download_fcn8s_chainermodel()
print('==> downloaded to: %s' % path)
if __name__ == '__main__':
main()
|
Add script to download pre-trained models
|
Add script to download pre-trained models
|
Python
|
mit
|
wkentaro/fcn
|
Add script to download pre-trained models
|
#!/usr/bin/env python
import os.path as osp
import fcn
def main():
# models converted from caffe
path = fcn.data.download_vgg16_chainermodel()
print('==> downloaded to: %s' % path)
path = fcn.data.download_fcn32s_chainermodel()
print('==> downloaded to: %s' % path)
path = fcn.data.download_fcn16s_chainermodel()
print('==> downloaded to: %s' % path)
path = fcn.data.download_fcn8s_chainermodel()
print('==> downloaded to: %s' % path)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to download pre-trained models<commit_after>
|
#!/usr/bin/env python
import os.path as osp
import fcn
def main():
# models converted from caffe
path = fcn.data.download_vgg16_chainermodel()
print('==> downloaded to: %s' % path)
path = fcn.data.download_fcn32s_chainermodel()
print('==> downloaded to: %s' % path)
path = fcn.data.download_fcn16s_chainermodel()
print('==> downloaded to: %s' % path)
path = fcn.data.download_fcn8s_chainermodel()
print('==> downloaded to: %s' % path)
if __name__ == '__main__':
main()
|
Add script to download pre-trained models#!/usr/bin/env python
import os.path as osp
import fcn
def main():
# models converted from caffe
path = fcn.data.download_vgg16_chainermodel()
print('==> downloaded to: %s' % path)
path = fcn.data.download_fcn32s_chainermodel()
print('==> downloaded to: %s' % path)
path = fcn.data.download_fcn16s_chainermodel()
print('==> downloaded to: %s' % path)
path = fcn.data.download_fcn8s_chainermodel()
print('==> downloaded to: %s' % path)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to download pre-trained models<commit_after>#!/usr/bin/env python
import os.path as osp
import fcn
def main():
# models converted from caffe
path = fcn.data.download_vgg16_chainermodel()
print('==> downloaded to: %s' % path)
path = fcn.data.download_fcn32s_chainermodel()
print('==> downloaded to: %s' % path)
path = fcn.data.download_fcn16s_chainermodel()
print('==> downloaded to: %s' % path)
path = fcn.data.download_fcn8s_chainermodel()
print('==> downloaded to: %s' % path)
if __name__ == '__main__':
main()
|
|
ffae26e0a2f9ffa9aca1786d2ef4928e7eb2b401
|
pycket/test/test_envext.py
|
pycket/test/test_envext.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Testing environmental and external behavior
#
import pytest
from pycket.test.test_basic import run_fix, run
from pycket.targetpycket import main
from rpython.rlib import jit
def pytest_funcarg__json(request):
tmpdir = request.getfuncargvalue('tmpdir')
assert request.function.__doc__ is not None
json = tmpdir / "ast.json"
json.write(request.function.__doc__)
return str(json)
def pytest_funcarg__empty_json(request):
def make_filename():
import inspect, py
module_file = inspect.getmodule(request.function).__file__
return str(py.path.local(module_file).dirpath("empty.json"))
return request.cached_setup(setup=make_filename, scope="session")
class TestCommandline(object):
def test_no_argv(self):
assert main(['arg0']) == 3
def test_one_arg(self, empty_json):
assert main(['arg0', empty_json]) == 0
def test_jitarg_fail(self, empty_json):
with pytest.raises(ValueError):
main(['arg0', '--jit', empty_json])
assert main(['arg0', empty_json, '--jit']) == 2
def test_jitarg_works(self, empty_json):
assert main(['arg0', '--jit', 'trace_limit=30000',empty_json]) == 0
assert main(['arg0', empty_json, '--jit', 'trace_limit=30000']) == 0
|
Add test for external and environmental stuff
|
Add test for external and environmental stuff
Here: Commandline
|
Python
|
mit
|
krono/pycket,magnusmorton/pycket,vishesh/pycket,krono/pycket,cderici/pycket,cderici/pycket,magnusmorton/pycket,vishesh/pycket,samth/pycket,vishesh/pycket,samth/pycket,krono/pycket,pycket/pycket,samth/pycket,magnusmorton/pycket,cderici/pycket,pycket/pycket,pycket/pycket
|
Add test for external and environmental stuff
Here: Commandline
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Testing environmental and external behavior
#
import pytest
from pycket.test.test_basic import run_fix, run
from pycket.targetpycket import main
from rpython.rlib import jit
def pytest_funcarg__json(request):
tmpdir = request.getfuncargvalue('tmpdir')
assert request.function.__doc__ is not None
json = tmpdir / "ast.json"
json.write(request.function.__doc__)
return str(json)
def pytest_funcarg__empty_json(request):
def make_filename():
import inspect, py
module_file = inspect.getmodule(request.function).__file__
return str(py.path.local(module_file).dirpath("empty.json"))
return request.cached_setup(setup=make_filename, scope="session")
class TestCommandline(object):
def test_no_argv(self):
assert main(['arg0']) == 3
def test_one_arg(self, empty_json):
assert main(['arg0', empty_json]) == 0
def test_jitarg_fail(self, empty_json):
with pytest.raises(ValueError):
main(['arg0', '--jit', empty_json])
assert main(['arg0', empty_json, '--jit']) == 2
def test_jitarg_works(self, empty_json):
assert main(['arg0', '--jit', 'trace_limit=30000',empty_json]) == 0
assert main(['arg0', empty_json, '--jit', 'trace_limit=30000']) == 0
|
<commit_before><commit_msg>Add test for external and environmental stuff
Here: Commandline<commit_after>
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Testing environmental and external behavior
#
import pytest
from pycket.test.test_basic import run_fix, run
from pycket.targetpycket import main
from rpython.rlib import jit
def pytest_funcarg__json(request):
tmpdir = request.getfuncargvalue('tmpdir')
assert request.function.__doc__ is not None
json = tmpdir / "ast.json"
json.write(request.function.__doc__)
return str(json)
def pytest_funcarg__empty_json(request):
def make_filename():
import inspect, py
module_file = inspect.getmodule(request.function).__file__
return str(py.path.local(module_file).dirpath("empty.json"))
return request.cached_setup(setup=make_filename, scope="session")
class TestCommandline(object):
def test_no_argv(self):
assert main(['arg0']) == 3
def test_one_arg(self, empty_json):
assert main(['arg0', empty_json]) == 0
def test_jitarg_fail(self, empty_json):
with pytest.raises(ValueError):
main(['arg0', '--jit', empty_json])
assert main(['arg0', empty_json, '--jit']) == 2
def test_jitarg_works(self, empty_json):
assert main(['arg0', '--jit', 'trace_limit=30000',empty_json]) == 0
assert main(['arg0', empty_json, '--jit', 'trace_limit=30000']) == 0
|
Add test for external and environmental stuff
Here: Commandline#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Testing environmental and external behavior
#
import pytest
from pycket.test.test_basic import run_fix, run
from pycket.targetpycket import main
from rpython.rlib import jit
def pytest_funcarg__json(request):
tmpdir = request.getfuncargvalue('tmpdir')
assert request.function.__doc__ is not None
json = tmpdir / "ast.json"
json.write(request.function.__doc__)
return str(json)
def pytest_funcarg__empty_json(request):
def make_filename():
import inspect, py
module_file = inspect.getmodule(request.function).__file__
return str(py.path.local(module_file).dirpath("empty.json"))
return request.cached_setup(setup=make_filename, scope="session")
class TestCommandline(object):
def test_no_argv(self):
assert main(['arg0']) == 3
def test_one_arg(self, empty_json):
assert main(['arg0', empty_json]) == 0
def test_jitarg_fail(self, empty_json):
with pytest.raises(ValueError):
main(['arg0', '--jit', empty_json])
assert main(['arg0', empty_json, '--jit']) == 2
def test_jitarg_works(self, empty_json):
assert main(['arg0', '--jit', 'trace_limit=30000',empty_json]) == 0
assert main(['arg0', empty_json, '--jit', 'trace_limit=30000']) == 0
|
<commit_before><commit_msg>Add test for external and environmental stuff
Here: Commandline<commit_after>#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Testing environmental and external behavior
#
import pytest
from pycket.test.test_basic import run_fix, run
from pycket.targetpycket import main
from rpython.rlib import jit
def pytest_funcarg__json(request):
tmpdir = request.getfuncargvalue('tmpdir')
assert request.function.__doc__ is not None
json = tmpdir / "ast.json"
json.write(request.function.__doc__)
return str(json)
def pytest_funcarg__empty_json(request):
def make_filename():
import inspect, py
module_file = inspect.getmodule(request.function).__file__
return str(py.path.local(module_file).dirpath("empty.json"))
return request.cached_setup(setup=make_filename, scope="session")
class TestCommandline(object):
def test_no_argv(self):
assert main(['arg0']) == 3
def test_one_arg(self, empty_json):
assert main(['arg0', empty_json]) == 0
def test_jitarg_fail(self, empty_json):
with pytest.raises(ValueError):
main(['arg0', '--jit', empty_json])
assert main(['arg0', empty_json, '--jit']) == 2
def test_jitarg_works(self, empty_json):
assert main(['arg0', '--jit', 'trace_limit=30000',empty_json]) == 0
assert main(['arg0', empty_json, '--jit', 'trace_limit=30000']) == 0
|
|
ff8cf64313e629b688d370768acac90273700a31
|
python/function_sigtest.py
|
python/function_sigtest.py
|
# makes total sense
# syntax error in Python 2
# works in Python 3
def func(positional, *optional, defaulted='missing'):
print(positional, optional, defaulted)
func('first', 'second', 'third')
|
Add function signature / argument specification oddity
|
Add function signature / argument specification oddity
|
Python
|
mit
|
chbrown/sandbox,chbrown/sandbox,chbrown/sandbox,chbrown/sandbox,chbrown/sandbox,chbrown/sandbox,chbrown/sandbox,chbrown/sandbox,chbrown/sandbox,chbrown/sandbox
|
Add function signature / argument specification oddity
|
# makes total sense
# syntax error in Python 2
# works in Python 3
def func(positional, *optional, defaulted='missing'):
print(positional, optional, defaulted)
func('first', 'second', 'third')
|
<commit_before><commit_msg>Add function signature / argument specification oddity<commit_after>
|
# makes total sense
# syntax error in Python 2
# works in Python 3
def func(positional, *optional, defaulted='missing'):
print(positional, optional, defaulted)
func('first', 'second', 'third')
|
Add function signature / argument specification oddity# makes total sense
# syntax error in Python 2
# works in Python 3
def func(positional, *optional, defaulted='missing'):
print(positional, optional, defaulted)
func('first', 'second', 'third')
|
<commit_before><commit_msg>Add function signature / argument specification oddity<commit_after># makes total sense
# syntax error in Python 2
# works in Python 3
def func(positional, *optional, defaulted='missing'):
print(positional, optional, defaulted)
func('first', 'second', 'third')
|
|
fd96efd8c4ec457d326a37a3b1a3f8f026c90bf6
|
sara_flexbe_states/src/sara_flexbe_states/Wonderland_Add_Human.py
|
sara_flexbe_states/src/sara_flexbe_states/Wonderland_Add_Human.py
|
#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
class Wonderland_Add_Human(EventState):
'''
    Add a human to Wonderland. For the room, enter only ID or Name, not both.
Return the ID of the added human.
># name string name of the human
># roomID string ID on the BDD or name of the room
># x_pos int Position on X
># y_pos int Position on Y
># z_pos int Position on Z
># gender string Gender of the person
># is_operator boolean Say if it is the operator
#> id int ID on the BDD of the human
<= done data sent correctly
    <= error error while reading data
'''
def __init__(self):
super(Wonderland_Add_Human, self).__init__(outcomes=['done', 'error'],
output_keys=['id'],
input_keys=['name', 'roomID', 'x_pos', 'y_pos', 'z_pos', 'gender',
'is_operator'])
# generate post key for authentication
self._header = {'api-key': 'asdf'}
def execute(self, userdata):
# Generate URL to contact
if isinstance(userdata.roomID, (int, long)):
dataPost = {'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,
'gender': userdata.gender, 'operator': userdata.is_operator, 'roomID': userdata.roomID}
else:
dataPost = {'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,
'gender': userdata.gender, 'operator': userdata.is_operator, 'roomName': userdata.roomID}
# try the request
try:
response = requests.post("http://192.168.0.46:8000/api/human/", headers=self._header, data=dataPost)
except requests.exceptions.RequestException as e:
print e
return 'error'
#read response
data_response = json.loads(response.content)
#have a response
if not data_response:
return 'error'
#have an id to read
if 'id' not in data_response:
# continue to Error
return 'error'
#return the ID
userdata.id = data_response['id']
return 'done'
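A minimal standalone sketch (not part of the commit) of the request this state ends up sending; the endpoint and api-key come from the code above, while the name, room and coordinates are placeholder values:
import requests
r = requests.post('http://192.168.0.46:8000/api/human/',
                  headers={'api-key': 'asdf'},
                  data={'name': 'alice', 'x': 1.0, 'y': 2.0, 'z': 0.0,
                        'gender': 'female', 'operator': False, 'roomName': 'kitchen'})
print(r.json().get('id'))  # the ID the state would store in userdata.id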
|
Create a state for add an human
|
Create a state for add an human
|
Python
|
bsd-3-clause
|
WalkingMachine/sara_behaviors,WalkingMachine/sara_behaviors
|
Create a state for add an human
|
#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
class Wonderland_Add_Human(EventState):
'''
    Add a human to Wonderland. For the room, enter only the ID or the name, not both.
Return the ID of the added human.
># name string name of the human
># roomID string ID on the BDD or name of the room
># x_pos int Position on X
># y_pos int Position on Y
># z_pos int Position on Z
># gender string Gender of the person
># is_operator boolean Say if it is the operator
#> id int ID on the BDD of the human
<= done data sent correctly
    <= error      error while reading the data
'''
def __init__(self):
super(Wonderland_Add_Human, self).__init__(outcomes=['done', 'error'],
output_keys=['id'],
input_keys=['name', 'roomID', 'x_pos', 'y_pos', 'z_pos', 'gender',
'is_operator'])
# generate post key for authentication
self._header = {'api-key': 'asdf'}
def execute(self, userdata):
# Generate URL to contact
if isinstance(userdata.roomID, (int, long)):
dataPost = {'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,
'gender': userdata.gender, 'operator': userdata.is_operator, 'roomID': userdata.roomID}
else:
dataPost = {'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,
'gender': userdata.gender, 'operator': userdata.is_operator, 'roomName': userdata.roomID}
# try the request
try:
response = requests.post("http://192.168.0.46:8000/api/human/", headers=self._header, data=dataPost)
except requests.exceptions.RequestException as e:
print e
return 'error'
#read response
data_response = json.loads(response.content)
#have a response
if not data_response:
return 'error'
#have an id to read
if 'id' not in data_response:
# continue to Error
return 'error'
#return the ID
userdata.id = data_response['id']
return 'done'
|
<commit_before><commit_msg>Create a state for add an human<commit_after>
|
#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
class Wonderland_Add_Human(EventState):
'''
    Add a human to Wonderland. For the room, enter only the ID or the name, not both.
Return the ID of the added human.
># name string name of the human
># roomID string ID on the BDD or name of the room
># x_pos int Position on X
># y_pos int Position on Y
># z_pos int Position on Z
># gender string Gender of the person
># is_operator boolean Say if it is the operator
#> id int ID on the BDD of the human
<= done data sent correctly
    <= error      error while reading the data
'''
def __init__(self):
super(Wonderland_Add_Human, self).__init__(outcomes=['done', 'error'],
output_keys=['id'],
input_keys=['name', 'roomID', 'x_pos', 'y_pos', 'z_pos', 'gender',
'is_operator'])
# generate post key for authentication
self._header = {'api-key': 'asdf'}
def execute(self, userdata):
# Generate URL to contact
if isinstance(userdata.roomID, (int, long)):
dataPost = {'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,
'gender': userdata.gender, 'operator': userdata.is_operator, 'roomID': userdata.roomID}
else:
dataPost = {'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,
'gender': userdata.gender, 'operator': userdata.is_operator, 'roomName': userdata.roomID}
# try the request
try:
response = requests.post("http://192.168.0.46:8000/api/human/", headers=self._header, data=dataPost)
except requests.exceptions.RequestException as e:
print e
return 'error'
#read response
data_response = json.loads(response.content)
#have a response
if not data_response:
return 'error'
#have an id to read
if 'id' not in data_response:
# continue to Error
return 'error'
#return the ID
userdata.id = data_response['id']
return 'done'
|
Create a state for add an human#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
class Wonderland_Add_Human(EventState):
'''
    Add a human to Wonderland. For the room, enter only the ID or the name, not both.
Return the ID of the added human.
># name string name of the human
># roomID string ID on the BDD or name of the room
># x_pos int Position on X
># y_pos int Position on Y
># z_pos int Position on Z
># gender string Gender of the person
># is_operator boolean Say if it is the operator
#> id int ID on the BDD of the human
<= done data sent correctly
    <= error      error while reading the data
'''
def __init__(self):
super(Wonderland_Add_Human, self).__init__(outcomes=['done', 'error'],
output_keys=['id'],
input_keys=['name', 'roomID', 'x_pos', 'y_pos', 'z_pos', 'gender',
'is_operator'])
# generate post key for authentication
self._header = {'api-key': 'asdf'}
def execute(self, userdata):
# Generate URL to contact
if isinstance(userdata.roomID, (int, long)):
dataPost = {'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,
'gender': userdata.gender, 'operator': userdata.is_operator, 'roomID': userdata.roomID}
else:
dataPost = {'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,
'gender': userdata.gender, 'operator': userdata.is_operator, 'roomName': userdata.roomID}
# try the request
try:
response = requests.post("http://192.168.0.46:8000/api/human/", headers=self._header, data=dataPost)
except requests.exceptions.RequestException as e:
print e
return 'error'
#read response
data_response = json.loads(response.content)
#have a response
if not data_response:
return 'error'
#have an id to read
if 'id' not in data_response:
# continue to Error
return 'error'
#return the ID
userdata.id = data_response['id']
return 'done'
|
<commit_before><commit_msg>Create a state for add an human<commit_after>#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
class Wonderland_Add_Human(EventState):
'''
    Add a human to Wonderland. For the room, enter only the ID or the name, not both.
Return the ID of the added human.
># name string name of the human
># roomID string ID on the BDD or name of the room
># x_pos int Position on X
># y_pos int Position on Y
># z_pos int Position on Z
># gender string Gender of the person
># is_operator boolean Say if it is the operator
#> id int ID on the BDD of the human
<= done data sent correctly
    <= error      error while reading the data
'''
def __init__(self):
super(Wonderland_Add_Human, self).__init__(outcomes=['done', 'error'],
output_keys=['id'],
input_keys=['name', 'roomID', 'x_pos', 'y_pos', 'z_pos', 'gender',
'is_operator'])
# generate post key for authentication
self._header = {'api-key': 'asdf'}
def execute(self, userdata):
# Generate URL to contact
if isinstance(userdata.roomID, (int, long)):
dataPost = {'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,
'gender': userdata.gender, 'operator': userdata.is_operator, 'roomID': userdata.roomID}
else:
dataPost = {'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,
'gender': userdata.gender, 'operator': userdata.is_operator, 'roomName': userdata.roomID}
# try the request
try:
response = requests.post("http://192.168.0.46:8000/api/human/", headers=self._header, data=dataPost)
except requests.exceptions.RequestException as e:
print e
return 'error'
#read response
data_response = json.loads(response.content)
#have a response
if not data_response:
return 'error'
#have an id to read
if 'id' not in data_response:
# continue to Error
return 'error'
#return the ID
userdata.id = data_response['id']
return 'done'
|
|
89028d0d737a32edd165c1226e10ab764f6b0d41
|
generate_data.py
|
generate_data.py
|
# DATA ACQUISITION
import pymongo
from bluesky import RunEngine
from bluesky.plans import scan
from bluesky.preprocessors import SupplementalData
from ophyd.sim import det, motor
class MongoInsertCallback:
"""
    This is a replacement for db.insert.
"""
def __init__(self, uri):
self._uri = uri
self._client = pymongo.MongoClient(uri)
try:
# Called with no args, get_database() returns the database
# specified in the uri --- or raises if there was none. There is no
# public method for checking this in advance, so we just catch the
# error.
db = self._client.get_database()
except pymongo.errors.ConfigurationError as err:
raise ValueError(
"Invalid uri. Did you forget to include a database?") from err
self._run_start_collection = db.get_collection('run_start')
self._run_stop_collection = db.get_collection('run_stop')
self._event_descriptor_collection = db.get_collection('event_descriptor')
self._event_collection = db.get_collection('event')
def __call__(self, name, doc):
getattr(self, name)(doc)
def start(self, doc):
self._run_start_collection.insert_one(doc)
def descriptor(self, doc):
self._event_descriptor_collection.insert_one(doc)
def event(self, doc):
self._event_collection.insert_one(doc)
def stop(self, doc):
self._run_stop_collection.insert_one(doc)
uri = 'mongodb://localhost:27017/test1'
RE = RunEngine({})
sd = SupplementalData(baseline=[motor])
RE.preprocessors.append(sd)
RE.subscribe(MongoInsertCallback(uri))
uid, = RE(scan([det], motor, -1, 1, 20))
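A hypothetical verification step (not in the script): once the scan has run, the inserted start document can be read back through the same pymongo client to confirm the callback wrote it.
client = pymongo.MongoClient(uri)
db = client.get_database()
start_doc = db.get_collection('run_start').find_one({'uid': uid})
print(start_doc['plan_name'])  # 'scan' for the plan above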
|
Make a separate script for data generation.
|
Make a separate script for data generation.
|
Python
|
bsd-3-clause
|
ericdill/databroker,ericdill/databroker
|
Make a separate script for data generation.
|
# DATA ACQUISITION
import pymongo
from bluesky import RunEngine
from bluesky.plans import scan
from bluesky.preprocessors import SupplementalData
from ophyd.sim import det, motor
class MongoInsertCallback:
"""
    This is a replacement for db.insert.
"""
def __init__(self, uri):
self._uri = uri
self._client = pymongo.MongoClient(uri)
try:
# Called with no args, get_database() returns the database
# specified in the uri --- or raises if there was none. There is no
# public method for checking this in advance, so we just catch the
# error.
db = self._client.get_database()
except pymongo.errors.ConfigurationError as err:
raise ValueError(
"Invalid uri. Did you forget to include a database?") from err
self._run_start_collection = db.get_collection('run_start')
self._run_stop_collection = db.get_collection('run_stop')
self._event_descriptor_collection = db.get_collection('event_descriptor')
self._event_collection = db.get_collection('event')
def __call__(self, name, doc):
getattr(self, name)(doc)
def start(self, doc):
self._run_start_collection.insert_one(doc)
def descriptor(self, doc):
self._event_descriptor_collection.insert_one(doc)
def event(self, doc):
self._event_collection.insert_one(doc)
def stop(self, doc):
self._run_stop_collection.insert_one(doc)
uri = 'mongodb://localhost:27017/test1'
RE = RunEngine({})
sd = SupplementalData(baseline=[motor])
RE.preprocessors.append(sd)
RE.subscribe(MongoInsertCallback(uri))
uid, = RE(scan([det], motor, -1, 1, 20))
|
<commit_before><commit_msg>Make a separate script for data generation.<commit_after>
|
# DATA ACQUISITION
import pymongo
from bluesky import RunEngine
from bluesky.plans import scan
from bluesky.preprocessors import SupplementalData
from ophyd.sim import det, motor
class MongoInsertCallback:
"""
    This is a replacement for db.insert.
"""
def __init__(self, uri):
self._uri = uri
self._client = pymongo.MongoClient(uri)
try:
# Called with no args, get_database() returns the database
# specified in the uri --- or raises if there was none. There is no
# public method for checking this in advance, so we just catch the
# error.
db = self._client.get_database()
except pymongo.errors.ConfigurationError as err:
raise ValueError(
"Invalid uri. Did you forget to include a database?") from err
self._run_start_collection = db.get_collection('run_start')
self._run_stop_collection = db.get_collection('run_stop')
self._event_descriptor_collection = db.get_collection('event_descriptor')
self._event_collection = db.get_collection('event')
def __call__(self, name, doc):
getattr(self, name)(doc)
def start(self, doc):
self._run_start_collection.insert_one(doc)
def descriptor(self, doc):
self._event_descriptor_collection.insert_one(doc)
def event(self, doc):
self._event_collection.insert_one(doc)
def stop(self, doc):
self._run_stop_collection.insert_one(doc)
uri = 'mongodb://localhost:27017/test1'
RE = RunEngine({})
sd = SupplementalData(baseline=[motor])
RE.preprocessors.append(sd)
RE.subscribe(MongoInsertCallback(uri))
uid, = RE(scan([det], motor, -1, 1, 20))
|
Make a separate script for data generation.# DATA ACQUISITION
import pymongo
from bluesky import RunEngine
from bluesky.plans import scan
from bluesky.preprocessors import SupplementalData
from ophyd.sim import det, motor
class MongoInsertCallback:
"""
    This is a replacement for db.insert.
"""
def __init__(self, uri):
self._uri = uri
self._client = pymongo.MongoClient(uri)
try:
# Called with no args, get_database() returns the database
# specified in the uri --- or raises if there was none. There is no
# public method for checking this in advance, so we just catch the
# error.
db = self._client.get_database()
except pymongo.errors.ConfigurationError as err:
raise ValueError(
"Invalid uri. Did you forget to include a database?") from err
self._run_start_collection = db.get_collection('run_start')
self._run_stop_collection = db.get_collection('run_stop')
self._event_descriptor_collection = db.get_collection('event_descriptor')
self._event_collection = db.get_collection('event')
def __call__(self, name, doc):
getattr(self, name)(doc)
def start(self, doc):
self._run_start_collection.insert_one(doc)
def descriptor(self, doc):
self._event_descriptor_collection.insert_one(doc)
def event(self, doc):
self._event_collection.insert_one(doc)
def stop(self, doc):
self._run_stop_collection.insert_one(doc)
uri = 'mongodb://localhost:27017/test1'
RE = RunEngine({})
sd = SupplementalData(baseline=[motor])
RE.preprocessors.append(sd)
RE.subscribe(MongoInsertCallback(uri))
uid, = RE(scan([det], motor, -1, 1, 20))
|
<commit_before><commit_msg>Make a separate script for data generation.<commit_after># DATA ACQUISITION
import pymongo
from bluesky import RunEngine
from bluesky.plans import scan
from bluesky.preprocessors import SupplementalData
from ophyd.sim import det, motor
class MongoInsertCallback:
"""
    This is a replacement for db.insert.
"""
def __init__(self, uri):
self._uri = uri
self._client = pymongo.MongoClient(uri)
try:
# Called with no args, get_database() returns the database
# specified in the uri --- or raises if there was none. There is no
# public method for checking this in advance, so we just catch the
# error.
db = self._client.get_database()
except pymongo.errors.ConfigurationError as err:
raise ValueError(
"Invalid uri. Did you forget to include a database?") from err
self._run_start_collection = db.get_collection('run_start')
self._run_stop_collection = db.get_collection('run_stop')
self._event_descriptor_collection = db.get_collection('event_descriptor')
self._event_collection = db.get_collection('event')
def __call__(self, name, doc):
getattr(self, name)(doc)
def start(self, doc):
self._run_start_collection.insert_one(doc)
def descriptor(self, doc):
self._event_descriptor_collection.insert_one(doc)
def event(self, doc):
self._event_collection.insert_one(doc)
def stop(self, doc):
self._run_stop_collection.insert_one(doc)
uri = 'mongodb://localhost:27017/test1'
RE = RunEngine({})
sd = SupplementalData(baseline=[motor])
RE.preprocessors.append(sd)
RE.subscribe(MongoInsertCallback(uri))
uid, = RE(scan([det], motor, -1, 1, 20))
|
|
f421f66cafa8d884af4ab2e4d43cad44743a6ffc
|
fun/necrodancer-shuffle.py
|
fun/necrodancer-shuffle.py
|
#!/usr/bin/env python3
import random
ZONES = 5
FLOORS = 3
BOSSES = [
'Deep Blues',
'King Conga',
'Death Metal',
'Coral Riff',
'Fortissimole',
]
SONGS = {
(1, 1): 'Disco Descent',
(1, 2): 'Crypteque',
(1, 3): 'Mausoleum Mash',
(2, 1): 'Fungal Funk',
(2, 2): 'Grave Throbbing',
(2, 3): 'Portabellohead',
(3, 1): 'Stone Cold / Igneous Rock',
(3, 2): 'Dance of the Decorous / March of the Profane',
(3, 3): 'A Cold Sweat / A Hot Mess',
(4, 1): 'Styx and Stones',
(4, 2): 'Heart of the Crypt',
(4, 3): 'The Wight to Remain',
(5, 1): 'Voltzwaltz',
(5, 2): 'Power Cords',
(5, 3): 'Six Feet Thunder',
}
def get_floor_name(zone, floor):
song = SONGS[(zone, floor)]
return f'{zone}-{floor} {song}'
def get_boss_name(zone, boss):
return f' {boss} {zone}'
def generate_floor_shuffle():
# Generate random bosses
bosses = list(BOSSES)
random.shuffle(bosses)
# Generate shuffled floor levels
floors = []
for _ in range(FLOORS):
zones = list(range(1, ZONES + 1))
random.shuffle(zones)
floors.append(zones)
# Generate level specifications
levels = []
for zone_idx in range(ZONES):
# Get each of the three floors for the zone
for floor_idx in range(FLOORS):
zone = floors[floor_idx][zone_idx]
floor = floor_idx + 1
levels.append(get_floor_name(zone, floor))
# Get the boss for the zone
boss = bosses[zone_idx]
zone = zone_idx + 1
levels.append(get_boss_name(zone, boss))
return levels
if __name__ == '__main__':
levels = generate_floor_shuffle()
for i, level in enumerate(levels):
print(level)
if i % 4 == 3:
print()
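An optional, hypothetical tweak (not in the script): seeding the random module before calling generate_floor_shuffle() makes a shuffle reproducible, which is handy when sharing a generated run. Appended to the script above:
random.seed(42)                              # any fixed seed repeats the same shuffle
for line in generate_floor_shuffle()[:4]:    # first zone: its three floors plus its boss
    print(line)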
|
Add Necrodancer floor shuffle script.
|
Add Necrodancer floor shuffle script.
|
Python
|
mit
|
ammongit/scripts,ammongit/scripts,ammongit/scripts,ammongit/scripts
|
Add Necrodancer floor shuffle script.
|
#!/usr/bin/env python3
import random
ZONES = 5
FLOORS = 3
BOSSES = [
'Deep Blues',
'King Conga',
'Death Metal',
'Coral Riff',
'Fortissimole',
]
SONGS = {
(1, 1): 'Disco Descent',
(1, 2): 'Crypteque',
(1, 3): 'Mausoleum Mash',
(2, 1): 'Fungal Funk',
(2, 2): 'Grave Throbbing',
(2, 3): 'Portabellohead',
(3, 1): 'Stone Cold / Igneous Rock',
(3, 2): 'Dance of the Decorous / March of the Profane',
(3, 3): 'A Cold Sweat / A Hot Mess',
(4, 1): 'Styx and Stones',
(4, 2): 'Heart of the Crypt',
(4, 3): 'The Wight to Remain',
(5, 1): 'Voltzwaltz',
(5, 2): 'Power Cords',
(5, 3): 'Six Feet Thunder',
}
def get_floor_name(zone, floor):
song = SONGS[(zone, floor)]
return f'{zone}-{floor} {song}'
def get_boss_name(zone, boss):
return f' {boss} {zone}'
def generate_floor_shuffle():
# Generate random bosses
bosses = list(BOSSES)
random.shuffle(bosses)
# Generate shuffled floor levels
floors = []
for _ in range(FLOORS):
zones = list(range(1, ZONES + 1))
random.shuffle(zones)
floors.append(zones)
# Generate level specifications
levels = []
for zone_idx in range(ZONES):
# Get each of the three floors for the zone
for floor_idx in range(FLOORS):
zone = floors[floor_idx][zone_idx]
floor = floor_idx + 1
levels.append(get_floor_name(zone, floor))
# Get the boss for the zone
boss = bosses[zone_idx]
zone = zone_idx + 1
levels.append(get_boss_name(zone, boss))
return levels
if __name__ == '__main__':
levels = generate_floor_shuffle()
for i, level in enumerate(levels):
print(level)
if i % 4 == 3:
print()
|
<commit_before><commit_msg>Add Necrodancer floor shuffle script.<commit_after>
|
#!/usr/bin/env python3
import random
ZONES = 5
FLOORS = 3
BOSSES = [
'Deep Blues',
'King Conga',
'Death Metal',
'Coral Riff',
'Fortissimole',
]
SONGS = {
(1, 1): 'Disco Descent',
(1, 2): 'Crypteque',
(1, 3): 'Mausoleum Mash',
(2, 1): 'Fungal Funk',
(2, 2): 'Grave Throbbing',
(2, 3): 'Portabellohead',
(3, 1): 'Stone Cold / Igneous Rock',
(3, 2): 'Dance of the Decorous / March of the Profane',
(3, 3): 'A Cold Sweat / A Hot Mess',
(4, 1): 'Styx and Stones',
(4, 2): 'Heart of the Crypt',
(4, 3): 'The Wight to Remain',
(5, 1): 'Voltzwaltz',
(5, 2): 'Power Cords',
(5, 3): 'Six Feet Thunder',
}
def get_floor_name(zone, floor):
song = SONGS[(zone, floor)]
return f'{zone}-{floor} {song}'
def get_boss_name(zone, boss):
return f' {boss} {zone}'
def generate_floor_shuffle():
# Generate random bosses
bosses = list(BOSSES)
random.shuffle(bosses)
# Generate shuffled floor levels
floors = []
for _ in range(FLOORS):
zones = list(range(1, ZONES + 1))
random.shuffle(zones)
floors.append(zones)
# Generate level specifications
levels = []
for zone_idx in range(ZONES):
# Get each of the three floors for the zone
for floor_idx in range(FLOORS):
zone = floors[floor_idx][zone_idx]
floor = floor_idx + 1
levels.append(get_floor_name(zone, floor))
# Get the boss for the zone
boss = bosses[zone_idx]
zone = zone_idx + 1
levels.append(get_boss_name(zone, boss))
return levels
if __name__ == '__main__':
levels = generate_floor_shuffle()
for i, level in enumerate(levels):
print(level)
if i % 4 == 3:
print()
|
Add Necrodancer floor shuffle script.#!/usr/bin/env python3
import random
ZONES = 5
FLOORS = 3
BOSSES = [
'Deep Blues',
'King Conga',
'Death Metal',
'Coral Riff',
'Fortissimole',
]
SONGS = {
(1, 1): 'Disco Descent',
(1, 2): 'Crypteque',
(1, 3): 'Mausoleum Mash',
(2, 1): 'Fungal Funk',
(2, 2): 'Grave Throbbing',
(2, 3): 'Portabellohead',
(3, 1): 'Stone Cold / Igneous Rock',
(3, 2): 'Dance of the Decorous / March of the Profane',
(3, 3): 'A Cold Sweat / A Hot Mess',
(4, 1): 'Styx and Stones',
(4, 2): 'Heart of the Crypt',
(4, 3): 'The Wight to Remain',
(5, 1): 'Voltzwaltz',
(5, 2): 'Power Cords',
(5, 3): 'Six Feet Thunder',
}
def get_floor_name(zone, floor):
song = SONGS[(zone, floor)]
return f'{zone}-{floor} {song}'
def get_boss_name(zone, boss):
return f' {boss} {zone}'
def generate_floor_shuffle():
# Generate random bosses
bosses = list(BOSSES)
random.shuffle(bosses)
# Generate shuffled floor levels
floors = []
for _ in range(FLOORS):
zones = list(range(1, ZONES + 1))
random.shuffle(zones)
floors.append(zones)
# Generate level specifications
levels = []
for zone_idx in range(ZONES):
# Get each of the three floors for the zone
for floor_idx in range(FLOORS):
zone = floors[floor_idx][zone_idx]
floor = floor_idx + 1
levels.append(get_floor_name(zone, floor))
# Get the boss for the zone
boss = bosses[zone_idx]
zone = zone_idx + 1
levels.append(get_boss_name(zone, boss))
return levels
if __name__ == '__main__':
levels = generate_floor_shuffle()
for i, level in enumerate(levels):
print(level)
if i % 4 == 3:
print()
|
<commit_before><commit_msg>Add Necrodancer floor shuffle script.<commit_after>#!/usr/bin/env python3
import random
ZONES = 5
FLOORS = 3
BOSSES = [
'Deep Blues',
'King Conga',
'Death Metal',
'Coral Riff',
'Fortissimole',
]
SONGS = {
(1, 1): 'Disco Descent',
(1, 2): 'Crypteque',
(1, 3): 'Mausoleum Mash',
(2, 1): 'Fungal Funk',
(2, 2): 'Grave Throbbing',
(2, 3): 'Portabellohead',
(3, 1): 'Stone Cold / Igneous Rock',
(3, 2): 'Dance of the Decorous / March of the Profane',
(3, 3): 'A Cold Sweat / A Hot Mess',
(4, 1): 'Styx and Stones',
(4, 2): 'Heart of the Crypt',
(4, 3): 'The Wight to Remain',
(5, 1): 'Voltzwaltz',
(5, 2): 'Power Cords',
(5, 3): 'Six Feet Thunder',
}
def get_floor_name(zone, floor):
song = SONGS[(zone, floor)]
return f'{zone}-{floor} {song}'
def get_boss_name(zone, boss):
return f' {boss} {zone}'
def generate_floor_shuffle():
# Generate random bosses
bosses = list(BOSSES)
random.shuffle(bosses)
# Generate shuffled floor levels
floors = []
for _ in range(FLOORS):
zones = list(range(1, ZONES + 1))
random.shuffle(zones)
floors.append(zones)
# Generate level specifications
levels = []
for zone_idx in range(ZONES):
# Get each of the three floors for the zone
for floor_idx in range(FLOORS):
zone = floors[floor_idx][zone_idx]
floor = floor_idx + 1
levels.append(get_floor_name(zone, floor))
# Get the boss for the zone
boss = bosses[zone_idx]
zone = zone_idx + 1
levels.append(get_boss_name(zone, boss))
return levels
if __name__ == '__main__':
levels = generate_floor_shuffle()
for i, level in enumerate(levels):
print(level)
if i % 4 == 3:
print()
|
|
e0bbbf6bc7ec1703b104dbb705fad8acdf818871
|
neutron/db/migration/alembic_migrations/versions/3a520dd165d0_cisco_nexus_multi_switch.py
|
neutron/db/migration/alembic_migrations/versions/3a520dd165d0_cisco_nexus_multi_switch.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Cisco Nexus multi-switch
Revision ID: 3a520dd165d0
Revises: 2528ceb28230
Create Date: 2013-09-28 15:23:38.872682
"""
# revision identifiers, used by Alembic.
revision = '3a520dd165d0'
down_revision = '2528ceb28230'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.cisco.network_plugin.PluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.add_column(
'cisco_nexusport_bindings',
sa.Column('instance_id', sa.String(length=255), nullable=False))
op.add_column(
'cisco_nexusport_bindings',
sa.Column('switch_ip', sa.String(length=255), nullable=False))
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_column('cisco_nexusport_bindings', 'switch_ip')
op.drop_column('cisco_nexusport_bindings', 'instance_id')
|
Add missing migration for fields in Cisco Nexus table
|
Add missing migration for fields in Cisco Nexus table
Change-Id: Ib31ec7f92d568c21d607b0bf003494acd1e3a3e8
Closes-bug: #1232360
|
Python
|
apache-2.0
|
gkotton/vmware-nsx,gkotton/vmware-nsx
|
Add missing migration for fields in Cisco Nexus table
Change-Id: Ib31ec7f92d568c21d607b0bf003494acd1e3a3e8
Closes-bug: #1232360
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Cisco Nexus multi-switch
Revision ID: 3a520dd165d0
Revises: 2528ceb28230
Create Date: 2013-09-28 15:23:38.872682
"""
# revision identifiers, used by Alembic.
revision = '3a520dd165d0'
down_revision = '2528ceb28230'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.cisco.network_plugin.PluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.add_column(
'cisco_nexusport_bindings',
sa.Column('instance_id', sa.String(length=255), nullable=False))
op.add_column(
'cisco_nexusport_bindings',
sa.Column('switch_ip', sa.String(length=255), nullable=False))
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_column('cisco_nexusport_bindings', 'switch_ip')
op.drop_column('cisco_nexusport_bindings', 'instance_id')
|
<commit_before><commit_msg>Add missing migration for fields in Cisco Nexus table
Change-Id: Ib31ec7f92d568c21d607b0bf003494acd1e3a3e8
Closes-bug: #1232360<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Cisco Nexus multi-switch
Revision ID: 3a520dd165d0
Revises: 2528ceb28230
Create Date: 2013-09-28 15:23:38.872682
"""
# revision identifiers, used by Alembic.
revision = '3a520dd165d0'
down_revision = '2528ceb28230'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.cisco.network_plugin.PluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.add_column(
'cisco_nexusport_bindings',
sa.Column('instance_id', sa.String(length=255), nullable=False))
op.add_column(
'cisco_nexusport_bindings',
sa.Column('switch_ip', sa.String(length=255), nullable=False))
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_column('cisco_nexusport_bindings', 'switch_ip')
op.drop_column('cisco_nexusport_bindings', 'instance_id')
|
Add missing migration for fields in Cisco Nexus table
Change-Id: Ib31ec7f92d568c21d607b0bf003494acd1e3a3e8
Closes-bug: #1232360# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Cisco Nexus multi-switch
Revision ID: 3a520dd165d0
Revises: 2528ceb28230
Create Date: 2013-09-28 15:23:38.872682
"""
# revision identifiers, used by Alembic.
revision = '3a520dd165d0'
down_revision = '2528ceb28230'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.cisco.network_plugin.PluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.add_column(
'cisco_nexusport_bindings',
sa.Column('instance_id', sa.String(length=255), nullable=False))
op.add_column(
'cisco_nexusport_bindings',
sa.Column('switch_ip', sa.String(length=255), nullable=False))
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_column('cisco_nexusport_bindings', 'switch_ip')
op.drop_column('cisco_nexusport_bindings', 'instance_id')
|
<commit_before><commit_msg>Add missing migration for fields in Cisco Nexus table
Change-Id: Ib31ec7f92d568c21d607b0bf003494acd1e3a3e8
Closes-bug: #1232360<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Cisco Nexus multi-switch
Revision ID: 3a520dd165d0
Revises: 2528ceb28230
Create Date: 2013-09-28 15:23:38.872682
"""
# revision identifiers, used by Alembic.
revision = '3a520dd165d0'
down_revision = '2528ceb28230'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.cisco.network_plugin.PluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.add_column(
'cisco_nexusport_bindings',
sa.Column('instance_id', sa.String(length=255), nullable=False))
op.add_column(
'cisco_nexusport_bindings',
sa.Column('switch_ip', sa.String(length=255), nullable=False))
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_column('cisco_nexusport_bindings', 'switch_ip')
op.drop_column('cisco_nexusport_bindings', 'instance_id')
|
|
dbf438e65a038652adffe04dd43949dbbfb19b5c
|
samples/cdrom.py
|
samples/cdrom.py
|
import ipaddr
from devops.helpers.helpers import SSHClient
def one(manager):
environment = manager.environment_create('cdrom')
internal_pool = manager.create_network_pool(
networks=[ipaddr.IPNetwork('10.108.0.0/16')], prefix=24
)
private_pool = manager.create_network_pool(
networks=[ipaddr.IPNetwork('10.108.0.0/16')], prefix=24
)
external_pool = manager.create_network_pool(
networks=[ipaddr.IPNetwork('172.18.95.0/24')], prefix=27
)
internal = manager.network_create(
environment=environment, name='internal', pool=internal_pool)
external = manager.network_create(
environment=environment, name='external', pool=external_pool,
forward='nat')
private = manager.network_create(
environment=environment, name='private', pool=private_pool)
for i in range(1,2):
node = manager.node_create(name='test_node' + str(i), environment=environment)
manager.interface_create(node=node, network=internal)
manager.interface_create(node=node, network=external)
manager.interface_create(node=node, network=private)
volume = manager.volume_get_predefined(
'/var/lib/libvirt/images/centos63-cobbler-base.qcow2')
v3 = manager.volume_create_child('test_vp895' + str(i), backing_store=volume,
environment=environment)
v4 = manager.volume_create_child('test_vp896'+ str(i), backing_store=volume,
environment=environment)
manager.node_attach_volume(node=node, volume=v3)
manager.node_attach_volume(node, v4)
manager.node_attach_volume(node, manager.volume_get_predefined(
'/var/lib/libvirt/images/fuel-centos-6.3-x86_64.iso'), device='cdrom', bus='sata')
environment.define()
environment.start()
remotes = []
for node in environment.nodes:
node.await('internal')
node.remote('internal', 'root', 'r00tme').check_stderr('ls -la', verbose=True)
remotes.append(node.remote('internal', 'root', 'r00tme'))
SSHClient.execute_together(remotes, 'ls -la')
if __name__ == '__main__':
from devops.manager import Manager
one(Manager())
|
Add example with iso volume
|
Add example with iso volume
|
Python
|
apache-2.0
|
stackforge/fuel-devops,stackforge/fuel-devops
|
Add example with iso volume
|
import ipaddr
from devops.helpers.helpers import SSHClient
def one(manager):
environment = manager.environment_create('cdrom')
internal_pool = manager.create_network_pool(
networks=[ipaddr.IPNetwork('10.108.0.0/16')], prefix=24
)
private_pool = manager.create_network_pool(
networks=[ipaddr.IPNetwork('10.108.0.0/16')], prefix=24
)
external_pool = manager.create_network_pool(
networks=[ipaddr.IPNetwork('172.18.95.0/24')], prefix=27
)
internal = manager.network_create(
environment=environment, name='internal', pool=internal_pool)
external = manager.network_create(
environment=environment, name='external', pool=external_pool,
forward='nat')
private = manager.network_create(
environment=environment, name='private', pool=private_pool)
for i in range(1,2):
node = manager.node_create(name='test_node' + str(i), environment=environment)
manager.interface_create(node=node, network=internal)
manager.interface_create(node=node, network=external)
manager.interface_create(node=node, network=private)
volume = manager.volume_get_predefined(
'/var/lib/libvirt/images/centos63-cobbler-base.qcow2')
v3 = manager.volume_create_child('test_vp895' + str(i), backing_store=volume,
environment=environment)
v4 = manager.volume_create_child('test_vp896'+ str(i), backing_store=volume,
environment=environment)
manager.node_attach_volume(node=node, volume=v3)
manager.node_attach_volume(node, v4)
manager.node_attach_volume(node, manager.volume_get_predefined(
'/var/lib/libvirt/images/fuel-centos-6.3-x86_64.iso'), device='cdrom', bus='sata')
environment.define()
environment.start()
remotes = []
for node in environment.nodes:
node.await('internal')
node.remote('internal', 'root', 'r00tme').check_stderr('ls -la', verbose=True)
remotes.append(node.remote('internal', 'root', 'r00tme'))
SSHClient.execute_together(remotes, 'ls -la')
if __name__ == '__main__':
from devops.manager import Manager
one(Manager())
|
<commit_before><commit_msg>Add example with iso volume<commit_after>
|
import ipaddr
from devops.helpers.helpers import SSHClient
def one(manager):
environment = manager.environment_create('cdrom')
internal_pool = manager.create_network_pool(
networks=[ipaddr.IPNetwork('10.108.0.0/16')], prefix=24
)
private_pool = manager.create_network_pool(
networks=[ipaddr.IPNetwork('10.108.0.0/16')], prefix=24
)
external_pool = manager.create_network_pool(
networks=[ipaddr.IPNetwork('172.18.95.0/24')], prefix=27
)
internal = manager.network_create(
environment=environment, name='internal', pool=internal_pool)
external = manager.network_create(
environment=environment, name='external', pool=external_pool,
forward='nat')
private = manager.network_create(
environment=environment, name='private', pool=private_pool)
for i in range(1,2):
node = manager.node_create(name='test_node' + str(i), environment=environment)
manager.interface_create(node=node, network=internal)
manager.interface_create(node=node, network=external)
manager.interface_create(node=node, network=private)
volume = manager.volume_get_predefined(
'/var/lib/libvirt/images/centos63-cobbler-base.qcow2')
v3 = manager.volume_create_child('test_vp895' + str(i), backing_store=volume,
environment=environment)
v4 = manager.volume_create_child('test_vp896'+ str(i), backing_store=volume,
environment=environment)
manager.node_attach_volume(node=node, volume=v3)
manager.node_attach_volume(node, v4)
manager.node_attach_volume(node, manager.volume_get_predefined(
'/var/lib/libvirt/images/fuel-centos-6.3-x86_64.iso'), device='cdrom', bus='sata')
environment.define()
environment.start()
remotes = []
for node in environment.nodes:
node.await('internal')
node.remote('internal', 'root', 'r00tme').check_stderr('ls -la', verbose=True)
remotes.append(node.remote('internal', 'root', 'r00tme'))
SSHClient.execute_together(remotes, 'ls -la')
if __name__ == '__main__':
from devops.manager import Manager
one(Manager())
|
Add example with iso volumeimport ipaddr
from devops.helpers.helpers import SSHClient
def one(manager):
environment = manager.environment_create('cdrom')
internal_pool = manager.create_network_pool(
networks=[ipaddr.IPNetwork('10.108.0.0/16')], prefix=24
)
private_pool = manager.create_network_pool(
networks=[ipaddr.IPNetwork('10.108.0.0/16')], prefix=24
)
external_pool = manager.create_network_pool(
networks=[ipaddr.IPNetwork('172.18.95.0/24')], prefix=27
)
internal = manager.network_create(
environment=environment, name='internal', pool=internal_pool)
external = manager.network_create(
environment=environment, name='external', pool=external_pool,
forward='nat')
private = manager.network_create(
environment=environment, name='private', pool=private_pool)
for i in range(1,2):
node = manager.node_create(name='test_node' + str(i), environment=environment)
manager.interface_create(node=node, network=internal)
manager.interface_create(node=node, network=external)
manager.interface_create(node=node, network=private)
volume = manager.volume_get_predefined(
'/var/lib/libvirt/images/centos63-cobbler-base.qcow2')
v3 = manager.volume_create_child('test_vp895' + str(i), backing_store=volume,
environment=environment)
v4 = manager.volume_create_child('test_vp896'+ str(i), backing_store=volume,
environment=environment)
manager.node_attach_volume(node=node, volume=v3)
manager.node_attach_volume(node, v4)
manager.node_attach_volume(node, manager.volume_get_predefined(
'/var/lib/libvirt/images/fuel-centos-6.3-x86_64.iso'), device='cdrom', bus='sata')
environment.define()
environment.start()
remotes = []
for node in environment.nodes:
node.await('internal')
node.remote('internal', 'root', 'r00tme').check_stderr('ls -la', verbose=True)
remotes.append(node.remote('internal', 'root', 'r00tme'))
SSHClient.execute_together(remotes, 'ls -la')
if __name__ == '__main__':
from devops.manager import Manager
one(Manager())
|
<commit_before><commit_msg>Add example with iso volume<commit_after>import ipaddr
from devops.helpers.helpers import SSHClient
def one(manager):
environment = manager.environment_create('cdrom')
internal_pool = manager.create_network_pool(
networks=[ipaddr.IPNetwork('10.108.0.0/16')], prefix=24
)
private_pool = manager.create_network_pool(
networks=[ipaddr.IPNetwork('10.108.0.0/16')], prefix=24
)
external_pool = manager.create_network_pool(
networks=[ipaddr.IPNetwork('172.18.95.0/24')], prefix=27
)
internal = manager.network_create(
environment=environment, name='internal', pool=internal_pool)
external = manager.network_create(
environment=environment, name='external', pool=external_pool,
forward='nat')
private = manager.network_create(
environment=environment, name='private', pool=private_pool)
for i in range(1,2):
node = manager.node_create(name='test_node' + str(i), environment=environment)
manager.interface_create(node=node, network=internal)
manager.interface_create(node=node, network=external)
manager.interface_create(node=node, network=private)
volume = manager.volume_get_predefined(
'/var/lib/libvirt/images/centos63-cobbler-base.qcow2')
v3 = manager.volume_create_child('test_vp895' + str(i), backing_store=volume,
environment=environment)
v4 = manager.volume_create_child('test_vp896'+ str(i), backing_store=volume,
environment=environment)
manager.node_attach_volume(node=node, volume=v3)
manager.node_attach_volume(node, v4)
manager.node_attach_volume(node, manager.volume_get_predefined(
'/var/lib/libvirt/images/fuel-centos-6.3-x86_64.iso'), device='cdrom', bus='sata')
environment.define()
environment.start()
remotes = []
for node in environment.nodes:
node.await('internal')
node.remote('internal', 'root', 'r00tme').check_stderr('ls -la', verbose=True)
remotes.append(node.remote('internal', 'root', 'r00tme'))
SSHClient.execute_together(remotes, 'ls -la')
if __name__ == '__main__':
from devops.manager import Manager
one(Manager())
|
|
f3e8ffce89fe13dc46ba5f3823a1ef73d727c6f0
|
scripts/flatten_content.py
|
scripts/flatten_content.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This program flattens the content hierarchy of documents generated using the write_content function.
# It creates markdown versions of files named index.html and writes them to the directory content_new. New files get
# the name of their parent directory.
import os
from html2text import html2text
from logya.core import Logya
from logya.writer import encode_content, write
L = Logya()
L.init_env()
L.build_index()
for url, doc in L.docs.items():
content_file = os.path.join(L.dir_content, url.strip('/'), 'index.html')
if os.path.exists(content_file):
body = html2text(doc['body'])
# Cleanup
del doc['body']
if 'tags_links' in doc:
del doc['tags_links']
content = encode_content(doc, body)
target_file = os.path.dirname(content_file) + '.md'
write(target_file.replace('/content/', '/content_new/'), content)
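A worked illustration of the path mapping (assumed layout, not from the commit): a document at content/about/team/index.html becomes content_new/about/team.md; os.path.dirname() drops the trailing index.html, '.md' is appended to the parent directory path, and the '/content/' to '/content_new/' replacement relocates the result.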
|
Add script that creates markdown versions of content files and flattens the content directory structure.
|
Add script that creates markdown versions of content files and flattens the content directory structure.
|
Python
|
mit
|
yaph/logya,elaOnMars/logya,elaOnMars/logya,elaOnMars/logya,yaph/logya
|
Add script that creates markdown versions of content files and flattens the content directory structure.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This program flattens the content hierarchy of documents generated using the write_content function.
# It creates markdown versions of files named index.html and writes them to the directory content_new. New files get
# the name of their parent directory.
import os
from html2text import html2text
from logya.core import Logya
from logya.writer import encode_content, write
L = Logya()
L.init_env()
L.build_index()
for url, doc in L.docs.items():
content_file = os.path.join(L.dir_content, url.strip('/'), 'index.html')
if os.path.exists(content_file):
body = html2text(doc['body'])
# Cleanup
del doc['body']
if 'tags_links' in doc:
del doc['tags_links']
content = encode_content(doc, body)
target_file = os.path.dirname(content_file) + '.md'
write(target_file.replace('/content/', '/content_new/'), content)
|
<commit_before><commit_msg>Add script that creates markdown versions of content files and flattens the content directory structure.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This program flattens the content hierarchy of documents generated using the write_content function.
# It creates markdown versions of files named index.html and writes them to the directory content_new. New files get
# the name of their parent directory.
import os
from html2text import html2text
from logya.core import Logya
from logya.writer import encode_content, write
L = Logya()
L.init_env()
L.build_index()
for url, doc in L.docs.items():
content_file = os.path.join(L.dir_content, url.strip('/'), 'index.html')
if os.path.exists(content_file):
body = html2text(doc['body'])
# Cleanup
del doc['body']
if 'tags_links' in doc:
del doc['tags_links']
content = encode_content(doc, body)
target_file = os.path.dirname(content_file) + '.md'
write(target_file.replace('/content/', '/content_new/'), content)
|
Add script that creates markdown versions of content files and flattens the content directory structure.#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This program flattens the content hierarchy of documents generated using the write_content function.
# It creates markdown versions of files named index.html and writes them to the directory content_new. New files get
# the name of their parent directory.
import os
from html2text import html2text
from logya.core import Logya
from logya.writer import encode_content, write
L = Logya()
L.init_env()
L.build_index()
for url, doc in L.docs.items():
content_file = os.path.join(L.dir_content, url.strip('/'), 'index.html')
if os.path.exists(content_file):
body = html2text(doc['body'])
# Cleanup
del doc['body']
if 'tags_links' in doc:
del doc['tags_links']
content = encode_content(doc, body)
target_file = os.path.dirname(content_file) + '.md'
write(target_file.replace('/content/', '/content_new/'), content)
|
<commit_before><commit_msg>Add script that creates markdown versions of content files and flattens the content directory structure.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This program flattens the content hierarchy of documents generated using the write_content function.
# It creates markdown versions of files named index.html and writes them to the directory content_new. New files get
# the name of their parent directory.
import os
from html2text import html2text
from logya.core import Logya
from logya.writer import encode_content, write
L = Logya()
L.init_env()
L.build_index()
for url, doc in L.docs.items():
content_file = os.path.join(L.dir_content, url.strip('/'), 'index.html')
if os.path.exists(content_file):
body = html2text(doc['body'])
# Cleanup
del doc['body']
if 'tags_links' in doc:
del doc['tags_links']
content = encode_content(doc, body)
target_file = os.path.dirname(content_file) + '.md'
write(target_file.replace('/content/', '/content_new/'), content)
|
|
cfb6e853b306b9b64293795b7f6f7dfe1ac334b3
|
tests/config_decorator_test.py
|
tests/config_decorator_test.py
|
import unittest
import webracer
class ConfigDecoratorTest(unittest.TestCase):
def test_config_decorator(self):
@webracer.config(host='decoratortesthost', port=5544)
class test_class(webracer.WebTestCase):
def test_noop(self):
pass
instance = test_class('test_noop')
config = instance.config
self.assertEqual('decoratortesthost', config.host)
self.assertEqual(5544, config.port)
def test_multiple_config_decorators(self):
@webracer.config(host='decoratortesthost', port=5541)
@webracer.config(user_agent='decoratortestua')
class test_class(webracer.WebTestCase):
def test_noop(self):
pass
instance = test_class('test_noop')
config = instance.config
self.assertEqual('decoratortesthost', config.host)
self.assertEqual(5541, config.port)
self.assertEqual('decoratortestua', config.user_agent)
if __name__ == '__main__':
import unittest
unittest.main()
|
Test for multiple config decorators
|
Test for multiple config decorators
|
Python
|
bsd-2-clause
|
p/webracer
|
Test for multiple config decorators
|
import unittest
import webracer
class ConfigDecoratorTest(unittest.TestCase):
def test_config_decorator(self):
@webracer.config(host='decoratortesthost', port=5544)
class test_class(webracer.WebTestCase):
def test_noop(self):
pass
instance = test_class('test_noop')
config = instance.config
self.assertEqual('decoratortesthost', config.host)
self.assertEqual(5544, config.port)
def test_multiple_config_decorators(self):
@webracer.config(host='decoratortesthost', port=5541)
@webracer.config(user_agent='decoratortestua')
class test_class(webracer.WebTestCase):
def test_noop(self):
pass
instance = test_class('test_noop')
config = instance.config
self.assertEqual('decoratortesthost', config.host)
self.assertEqual(5541, config.port)
self.assertEqual('decoratortestua', config.user_agent)
if __name__ == '__main__':
import unittest
unittest.main()
|
<commit_before><commit_msg>Test for multiple config decorators<commit_after>
|
import unittest
import webracer
class ConfigDecoratorTest(unittest.TestCase):
def test_config_decorator(self):
@webracer.config(host='decoratortesthost', port=5544)
class test_class(webracer.WebTestCase):
def test_noop(self):
pass
instance = test_class('test_noop')
config = instance.config
self.assertEqual('decoratortesthost', config.host)
self.assertEqual(5544, config.port)
def test_multiple_config_decorators(self):
@webracer.config(host='decoratortesthost', port=5541)
@webracer.config(user_agent='decoratortestua')
class test_class(webracer.WebTestCase):
def test_noop(self):
pass
instance = test_class('test_noop')
config = instance.config
self.assertEqual('decoratortesthost', config.host)
self.assertEqual(5541, config.port)
self.assertEqual('decoratortestua', config.user_agent)
if __name__ == '__main__':
import unittest
unittest.main()
|
Test for multiple config decoratorsimport unittest
import webracer
class ConfigDecoratorTest(unittest.TestCase):
def test_config_decorator(self):
@webracer.config(host='decoratortesthost', port=5544)
class test_class(webracer.WebTestCase):
def test_noop(self):
pass
instance = test_class('test_noop')
config = instance.config
self.assertEqual('decoratortesthost', config.host)
self.assertEqual(5544, config.port)
def test_multiple_config_decorators(self):
@webracer.config(host='decoratortesthost', port=5541)
@webracer.config(user_agent='decoratortestua')
class test_class(webracer.WebTestCase):
def test_noop(self):
pass
instance = test_class('test_noop')
config = instance.config
self.assertEqual('decoratortesthost', config.host)
self.assertEqual(5541, config.port)
self.assertEqual('decoratortestua', config.user_agent)
if __name__ == '__main__':
import unittest
unittest.main()
|
<commit_before><commit_msg>Test for multiple config decorators<commit_after>import unittest
import webracer
class ConfigDecoratorTest(unittest.TestCase):
def test_config_decorator(self):
@webracer.config(host='decoratortesthost', port=5544)
class test_class(webracer.WebTestCase):
def test_noop(self):
pass
instance = test_class('test_noop')
config = instance.config
self.assertEqual('decoratortesthost', config.host)
self.assertEqual(5544, config.port)
def test_multiple_config_decorators(self):
@webracer.config(host='decoratortesthost', port=5541)
@webracer.config(user_agent='decoratortestua')
class test_class(webracer.WebTestCase):
def test_noop(self):
pass
instance = test_class('test_noop')
config = instance.config
self.assertEqual('decoratortesthost', config.host)
self.assertEqual(5541, config.port)
self.assertEqual('decoratortestua', config.user_agent)
if __name__ == '__main__':
import unittest
unittest.main()
|
|
0c68ae4fe747b6eb1ee140e43409c469c2f07de3
|
pidman/pid/migrations/0003_rm_invalidark_target_urlfield.py
|
pidman/pid/migrations/0003_rm_invalidark_target_urlfield.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pid', '0002_pid_sequence_initial_value'),
]
operations = [
migrations.DeleteModel(
name='InvalidArk',
),
migrations.AlterField(
model_name='target',
name='uri',
field=models.URLField(max_length=2048),
),
]
|
Add invalid ark and target uri field migration
|
Add invalid ark and target uri field migration
Not significant changes, but django complains about model changes
not matching otherwise.
|
Python
|
apache-2.0
|
emory-libraries/pidman,emory-libraries/pidman
|
Add invalid ark and target uri field migration
Not significant changes, but django complains about model changes
not matching otherwise.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pid', '0002_pid_sequence_initial_value'),
]
operations = [
migrations.DeleteModel(
name='InvalidArk',
),
migrations.AlterField(
model_name='target',
name='uri',
field=models.URLField(max_length=2048),
),
]
|
<commit_before><commit_msg>Add invalid ark and target uri field migration
Not significant changes, but django complains about model changes
not matching otherwise.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pid', '0002_pid_sequence_initial_value'),
]
operations = [
migrations.DeleteModel(
name='InvalidArk',
),
migrations.AlterField(
model_name='target',
name='uri',
field=models.URLField(max_length=2048),
),
]
|
Add invalid ark and target uri field migration
Not significant changes, but django complains about model changes
not matching otherwise.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pid', '0002_pid_sequence_initial_value'),
]
operations = [
migrations.DeleteModel(
name='InvalidArk',
),
migrations.AlterField(
model_name='target',
name='uri',
field=models.URLField(max_length=2048),
),
]
|
<commit_before><commit_msg>Add invalid ark and target uri field migration
Not significant changes, but django complains about model changes
not matching otherwise.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pid', '0002_pid_sequence_initial_value'),
]
operations = [
migrations.DeleteModel(
name='InvalidArk',
),
migrations.AlterField(
model_name='target',
name='uri',
field=models.URLField(max_length=2048),
),
]
|
|
0a47c0b259dba0634e1287515c339e9a1e0306ae
|
folia2kaf.py
|
folia2kaf.py
|
"""Create KAF file based on FoLiA file
Usage: python folia2kaf.py <file in>
"""
from pynlpl.formats import folia
from lxml import etree
# Load document
doc = folia.Document(file='medea-folia-no_events.xml')
# output document
root = etree.Element('KAF')
kaf_document = etree.ElementTree(root)
text = etree.SubElement(root, 'text')
# words
for paragraph in doc.paragraphs():
for sentence in paragraph.sentences():
for word in sentence.words():
w = etree.SubElement(text, 'wf', wid=word.id, sent=sentence.id,
para=paragraph.id)
w.text = unicode(word)
print etree.tostring(kaf_document, pretty_print=True)
|
Add script to transform folia into kaf
|
Add script to transform folia into kaf
Added the first version of the script to transform folia xml into kaf.
The script is not finished, because it is still unknown what the folia
files will exactly look like.
|
Python
|
apache-2.0
|
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
|
Add script to transform folia into kaf
Added the first version of the script to transform folia xml into kaf.
The script is not finished, because it is still unknown what the folia
files will exactly look like.
|
"""Create KAF file based on FoLiA file
Usage: python kaf2folia.py <file in>
"""
from pynlpl.formats import folia
from lxml import etree
# Load document
doc = folia.Document(file='medea-folia-no_events.xml')
# output document
root = etree.Element('KAF')
kaf_document = etree.ElementTree(root)
text = etree.SubElement(root, 'text')
# words
for paragraph in doc.paragraphs():
for sentence in paragraph.sentences():
for word in sentence.words():
w = etree.SubElement(text, 'wf', wid=word.id, sent=sentence.id,
para=paragraph.id)
w.text = unicode(word)
print etree.tostring(kaf_document, pretty_print=True)
|
<commit_before><commit_msg>Add script to transform folia into kaf
Added the first version of the script to transform folia xml into kaf.
The script is not finished, because it is still unknown what the folia
files will exactly look like.<commit_after>
|
"""Create KAF file based on FoLiA file
Usage: python kaf2folia.py <file in>
"""
from pynlpl.formats import folia
from lxml import etree
# Load document
doc = folia.Document(file='medea-folia-no_events.xml')
# output document
root = etree.Element('KAF')
kaf_document = etree.ElementTree(root)
text = etree.SubElement(root, 'text')
# words
for paragraph in doc.paragraphs():
for sentence in paragraph.sentences():
for word in sentence.words():
w = etree.SubElement(text, 'wf', wid=word.id, sent=sentence.id,
para=paragraph.id)
w.text = unicode(word)
print etree.tostring(kaf_document, pretty_print=True)
|
Add script to transform folia into kaf
Added the first version of the script to transform folia xml into kaf.
The script is not finished, because it is still unknown what the folia
files will exactly look like."""Create KAF file based on FoLiA file
Usage: python kaf2folia.py <file in>
"""
from pynlpl.formats import folia
from lxml import etree
# Load document
doc = folia.Document(file='medea-folia-no_events.xml')
# output document
root = etree.Element('KAF')
kaf_document = etree.ElementTree(root)
text = etree.SubElement(root, 'text')
# words
for paragraph in doc.paragraphs():
for sentence in paragraph.sentences():
for word in sentence.words():
w = etree.SubElement(text, 'wf', wid=word.id, sent=sentence.id,
para=paragraph.id)
w.text = unicode(word)
print etree.tostring(kaf_document, pretty_print=True)
|
<commit_before><commit_msg>Add script to transform folia into kaf
Added the first version of the script to transform folia xml into kaf.
The script is not finished, because it is still unknown what the folia
files will exactly look like.<commit_after>"""Create KAF file based on FoLiA file
Usage: python kaf2folia.py <file in>
"""
from pynlpl.formats import folia
from lxml import etree
# Load document
doc = folia.Document(file='medea-folia-no_events.xml')
# output document
root = etree.Element('KAF')
kaf_document = etree.ElementTree(root)
text = etree.SubElement(root, 'text')
# words
for paragraph in doc.paragraphs():
for sentence in paragraph.sentences():
for word in sentence.words():
w = etree.SubElement(text, 'wf', wid=word.id, sent=sentence.id,
para=paragraph.id)
w.text = unicode(word)
print etree.tostring(kaf_document, pretty_print=True)
|