One commit per row. Field schema (observed value lengths):

| field | type | observed length |
|---|---|---|
| commit | string | 40 |
| old_file | string | 4–118 |
| new_file | string | 4–118 |
| old_contents | string | 0–2.94k |
| new_contents | string | 1–4.43k |
| subject | string | 15–444 |
| message | string | 16–3.45k |
| lang | class (1 value) | — |
| license | class (13 values) | — |
| repos | string | 5–43.2k |
| prompt | string | 17–4.58k |
| response | string | 1–4.43k |
| prompt_tagged | string | 58–4.62k |
| response_tagged | string | 1–4.43k |
| text | string | 132–7.29k |
| text_tagged | string | 173–7.33k |

The prompt, response, prompt_tagged, response_tagged, text, and text_tagged fields are derived: prompt repeats message, response repeats new_contents, and the tagged/text variants concatenate the two (the tagged forms wrapped in <commit_before>/<commit_msg>/<commit_after> markers). The sample rows below therefore list each commit's metadata and new_contents once.
---

commit: a74b8683315c14401ad535d373f0570e3e59510f
file: hummedia/test/test_user.py
lang/license: Python / mpl-2.0
subject/message: Test showing that users cannot be updated.
repos (deduplicated): webnard/byu-hummedia-api, BYU-ODH/byu-hummedia-api, trevren11/byu-hummedia-api, jlmcdonald/byu-hummedia-api, dcravenus/byu-hummedia-api

new_contents:

```python
import json


def test_patch_user(app, ACCOUNTS):
    from mongokit import Document, Connection
    from hummedia import config
    from hummedia.models import User
    from bson.objectid import ObjectId
    connection = Connection(host=config.MONGODB_HOST, port=config.MONGODB_PORT)
    user = connection[User.__database__][User.__collection__]
    pid = str(ObjectId())
    a = {'pid': pid}
    a.update(ACCOUNTS['STUDENT'])
    user.insert(a)
    app.login(ACCOUNTS['SUPERUSER'])
    patch = {"username": a['username'], "superuser": a['superuser'], "firstname": "George", "preferredLanguage": "en", "lastname": "Norris", "userid": "555555560", "role": a['role'], "oauth": {"twitter": {}, "google": {"access_token": [], "id": None, "email": None}, "facebook": {}}, "fullname": "George Norris", "_id": str(pid), "email": "", "isSaving": True}
    print patch
    r = app.patch('/account/' + pid, data=json.dumps(patch), headers={'Content-Type': 'application/json'})
    print r.data
```
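As committed, the test only prints the PATCH response and asserts nothing, so it documents intent more than it verifies it. A minimal sketch of assertions that would check the claim, assuming the API rejects the PATCH with a 4xx status and leaves the stored document untouched (both are assumptions, not confirmed by the commit):

```python
# Hypothetical follow-up assertions; status code and stored fields are assumptions.
assert r.status_code in (403, 405)          # the PATCH is rejected
stored = user.find_one({'pid': pid})
assert stored.get('firstname') != "George"  # the document is unchanged
```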
---

commit: 5a85c3dc02db2aa1461c8cc3f8e5881dd072c1e0
file: test/test_acoustics.py
lang/license: Python / bsd-3-clause
subject/message: Add test case for AcousticMaterial.
repos: emtpb/pyfds

new_contents:

```python
import numpy as np
import pyfds as fds


def test_acoustic_material():
    water = fds.AcousticMaterial(1500, 1000)
    water.bulk_viscosity = 1e-3
    water.shear_viscosity = 1e-3
    assert np.isclose(water.absorption_coef, 7e-3 / 3)
```
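The expected value pins down the relation the test exercises: with both viscosities at 1e-3, the asserted 7e-3 / 3 equals 1e-3 + (4/3) * 1e-3, i.e. bulk_viscosity + (4/3) * shear_viscosity. That reading is inferred from the numbers, not from pyfds documentation:

```python
# Inferred (assumed) relation behind the expected value:
#   absorption_coef = bulk_viscosity + 4/3 * shear_viscosity
bulk, shear = 1e-3, 1e-3
assert abs((bulk + 4.0 / 3.0 * shear) - 7e-3 / 3) < 1e-12
```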
---

commit: 9ab7baadfa9c04d899036c81a41f14fe5fbd3687
file: bokeh/types.py
lang/license: Python / bsd-3-clause
subject/message: Allow to specify function signature
repos (deduplicated): evidation-health/bokeh, bsipocz/bokeh, mindriot101/bokeh, maxalbert/bokeh, aiguofer/bokeh, paultcochrane/bokeh, schoolie/bokeh, ChristosChristofidis/bokeh, saifrahmed/bokeh, aavanian/bokeh, DuCorey/bokeh, xguse/bokeh, timothydmorton/bokeh, philippjfr/bokeh, laurent-george/bokeh, ericdill/bokeh, ptitjano/bokeh, lukebarnard1/bokeh, carlvlewis/bokeh, gpfreitas/bokeh, rothnic/bokeh, eteq/bokeh, birdsarah/bokeh, satishgoda/bokeh, azjps/bokeh, akloster/bokeh, caseyclements/bokeh, stuart-knock/bokeh, josherick/bokeh, Karel-van-de-Plassche/bokeh, ahmadia/bokeh, clairetang6/bokeh, KasperPRasmussen/bokeh, bokeh/bokeh, rs2/bokeh, rhiever/bokeh, htygithub/bokeh, stonebig/bokeh, khkaminska/bokeh, draperjames/bokeh, timsnyder/bokeh, sahat/bokeh, almarklein/bokeh, dennisobrien/bokeh, matbra/bokeh, muku42/bokeh, canavandl/bokeh, CrazyGuo/bokeh, jplourenco/bokeh, ericmjl/bokeh, percyfal/bokeh, deeplook/bokeh, roxyboy/bokeh, phobson/bokeh, abele/bokeh, mutirri/bokeh, alan-unravel/bokeh, msarahan/bokeh, tacaswell/bokeh, jakirkham/bokeh, srinathv/bokeh, awanke/bokeh, justacec/bokeh, quasiben/bokeh, daodaoliang/bokeh, PythonCharmers/bokeh, ChinaQuants/bokeh

new_contents:

```python
from functools import wraps

from six import iteritems

from .properties import Property

_do_type_check = False


def _type_check(func, type_spec):
    pass


def sig(**type_spec):
    for key, val in iteritems(type_spec):
        if isinstance(val, Property):
            continue
        elif issubclass(val, Property):
            type_spec[key] = val()
        else:
            raise ValueError("%s=%r is not a valid type specification" % (key, val))

    def sig_decorator(func):
        if not _do_type_check:
            return func
        else:
            @wraps(func)
            def wrapped_func(*args, **kwargs):
                _type_check(func, type_spec)
                return func(*args, **kwargs)
            return wrapped_func

    return sig_decorator
```
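A usage sketch of the decorator. Int and Float are assumed to be Property subclasses exported by bokeh's properties module (plausible at the time of this commit, but the names are illustrative; only Property itself appears above):

```python
from bokeh.types import sig               # module added by this commit
from bokeh.properties import Int, Float   # assumed names, for illustration only

@sig(x=Int, y=Float)   # bare Property subclasses are instantiated by sig()
def move(x=0, y=0.0):
    return (x, y)

# With _do_type_check left False, sig() validates the spec eagerly but returns
# the function undecorated, so type checking costs nothing at call time.
move(1, 2.0)
```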
---

commit: 7f63cd2bc9a94180da17cd3170540b49d535ec0c
file: csdms/dakota/tests/test_environment_base.py
lang/license: Python / mit
subject/message: Add unit tests for EnvironmentBase class
repos (deduplicated): csdms/dakota

new_contents:

```python
"""Tests for the csdms.dakota.environment.base module."""

from nose.tools import raises, assert_true, assert_equal
from csdms.dakota.environment.base import EnvironmentBase


class Concrete(EnvironmentBase):
    """A subclass of EnvironmentBase used for testing."""

    def __init__(self):
        EnvironmentBase.__init__(self)


def setup_module():
    """Called before any tests are performed."""
    print('\n*** ' + __name__)
    global c
    c = Concrete()


def teardown_module():
    """Called after all tests have completed."""
    pass


@raises(TypeError)
def test_instantiate():
    """Test whether EnvironmentBase fails to instantiate."""
    d = EnvironmentBase()


def test_str_special():
    """Test type of __str__ method results."""
    s = str(c)
    assert_true(type(s) is str)


def test_str_length():
    """Test the default length of __str__."""
    s = str(c)
    n_lines = len(s.splitlines())
    assert_equal(n_lines, 1)
```
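These tests use nose, which is long unmaintained; for reference, the @raises(TypeError) check maps directly onto pytest's context manager (an alternative formulation, not part of the commit):

```python
# pytest equivalent of the @raises(TypeError) test above
import pytest

def test_instantiate():
    """EnvironmentBase is abstract-like and must fail to instantiate."""
    with pytest.raises(TypeError):
        EnvironmentBase()
```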
---

commit: 99bc39c983c9c7a408eaca530db1eadbba9eb4ad
file: corehq/apps/hqadmin/migrations/0018_back_populate_deploy_commit.py
lang/license: Python / bsd-3-clause
subject/message: Add migration that back populates the new commit field in the HqDeploy model
repos (deduplicated): dimagi/commcare-hq

new_contents:

```python
# Generated by Django 2.2.13 on 2020-07-30 18:59
from django.db import migrations

from corehq.util.django_migrations import skip_on_fresh_install


@skip_on_fresh_install
def back_populate(apps, schema_editor):
    HqDeploy = apps.get_model('hqadmin', 'HqDeploy')
    for deploy in HqDeploy.objects.all():
        if not deploy.commit:
            deploy.commit = _get_commit_from_url(deploy.diff_url)


def _get_commit_from_url(diff_url):
    try:
        ref_comparison = diff_url.split('/')[-1]
        last_deploy_sha, current_deploy_sha = ref_comparison.split('...')
        return current_deploy_sha
    except ValueError:
        # not a real diff_url
        return None


class Migration(migrations.Migration):

    dependencies = [
        ('hqadmin', '0017_hqdeploy_commit'),
    ]

    operations = [
        migrations.RunPython(back_populate),
    ]
```
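Note that back_populate assigns deploy.commit but never calls save(), so as written the loop does not persist anything. A sketch of the presumably intended write (the save() call is my assumption about intent, not in the commit):

```python
for deploy in HqDeploy.objects.all():
    if not deploy.commit:
        deploy.commit = _get_commit_from_url(deploy.diff_url)
        deploy.save()  # assumed missing persistence step
```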
---

commit: 925e1cb04363f5bf95cde2abcf4b9be4a243dbd0
file: docs/examples/image_from_mapfile.py
lang/license: Python / mit
subject/message: Create sample image generation script
repos (deduplicated): geographika/mappyfile

new_contents:

```python
from subprocess import Popen, PIPE
import tempfile, logging, os


def create_image_from_map(map_file, dll_location):
    of = tempfile.NamedTemporaryFile(delete=False, suffix=".png", prefix="tmp_")
    of.close()
    logging.debug("Creating file %s", of.name)
    # [SHP2IMG] -m [MAPFILE] -i png -o [RESULT]
    params = ["shp2img", "-m", map_file, "-i", "png", "-o", of.name]
    os.environ['PATH'] = dll_location + ';' + os.environ['PATH']
    os.environ['PROJ_LIB'] = os.path.join(dll_location, "proj\SHARE")
    logging.debug(" ".join(params))
    p = Popen(params, stdout=PIPE, bufsize=1)
    with p.stdout:
        print(map_file)
        for line in iter(p.stdout.readline, b''):
            print(line)
    p.wait()  # wait for the subprocess to exit
    os.startfile(of.name)


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    mf = r"D:\GitHub\mapserver\msautotest\gdal\256_overlay_res.map"
    dll_location = r"C:\MapServer\bin"
    create_image_from_map(mf, dll_location)  # fn for original map
    print("Done!")
```
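One subtlety in the script: "proj\SHARE" happens to work only because \S is not a recognized escape sequence, so CPython keeps it literal. A raw string states the intent unambiguously (a suggested tweak, not part of the commit):

```python
# Unambiguous Windows path: raw string avoids escape-sequence surprises
os.environ['PROJ_LIB'] = os.path.join(dll_location, r"proj\SHARE")
```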
---

commit: b881b75624f636d7b4b3d65232655339e0fb9d0b
file: examples/example_ipython_notebook_config.py
lang/license: Python / apache-2.0
subject: Add an example configuration file.
message: DOC: Add an example configuration file.
repos: quantopian/pgcontents

new_contents:

```python
c = get_config()

# Tell IPython to use PostgresContentsManager.
c.NotebookApp.contents_manager_class = 'pgcontents.pgmanager.PostgresContentsManager'

# Set the url for the database used to store files. See
# http://docs.sqlalchemy.org/en/rel_0_9/core/engines.html#postgresql
# for more info on db url formatting.
# c.PostgresContentsManager.db_url = 'postgresql://ssanderson@/pgcontents'

# Set a user ID. Defaults to the result of getpass.getuser()
# c.PostgresContentsManager.user_id = 'my_awesome_username'

# Set a maximum file size, if desired.
# c.PostgresContentsManager.max_file_size_bytes = 1000000  # 1MB File cap
```
---

commit: 1e47fe1e431c4bfc22bbe4d23e26814c655d65a3
file: energyPATHWAYS/api/change_password.py
lang/license: Python / mit
subject/message: Add script to change password for API user
repos: energyPATHWAYS/energyPATHWAYS

new_contents:

```python
import click
from flask import Flask

import models

# create the application object
app = Flask(__name__)
app.config.from_pyfile('config.py')
models.db.init_app(app)


@click.command()
@click.argument('username')
@click.password_option()
def change_password(username, password):
    with app.app_context():
        user = models.User.query.filter_by(name=username).one()
        user.password = password
        models.db.session.commit()
        click.echo("Password successfully changed for user %s ." % (user.name,))


if __name__ == '__main__':
    change_password()
```
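click's password_option prompts for the password twice with input hidden, so the secret never lands in shell history. A hypothetical smoke test using click's test runner (it would still need a reachable database behind models, so treat it purely as a sketch):

```python
# Sketch only: exercises the prompt flow via click's CliRunner.
from click.testing import CliRunner
from change_password import change_password

runner = CliRunner()
result = runner.invoke(change_password, ['alice'], input='s3cret\ns3cret\n')
print(result.output)  # shows the prompt and confirmation lines
```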
---

commit: 89c9b215589ac14aff2ef408f4391f1b8ace9ea3
file: examples/oscilloscope_plotting_pyqtgraph.py
lang/license: Python / mit
subject: Add an example showing how to plot in pyqtplot for a speed-increase compared to matplotlib
message: PM-372: Add an example showing how to plot in pyqtplot for a speed-increase compared to matplotlib
repos: liquidinstruments/pymoku

new_contents:

```python
#
# pymoku example: Plotting Oscilloscope with pyqtgraph
#
# This example demonstrates how you can configure the Oscilloscope instrument,
# and view triggered time-voltage data frames in real-time.
#
# pyqtgraph is used here as an alternative to matplotlib; it has more severe
# dependencies (i.e. Qt libraries), but is capable of significantly higher
# frame rates.
#
# (c) 2017 Liquid Instruments Pty. Ltd.
#
from pymoku import *
from pymoku.instruments import Oscilloscope
import time

import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore

# Connect to your Moku by its device name
# Alternatively, use Moku.get_by_serial('#####') or Moku('192.168.###.###')
m = Moku.get_by_name('Moku')
i = m.deploy_or_connect(Oscilloscope)

app = QtGui.QApplication([])

try:
    # Trigger on input Channel 1, rising edge, 0V with 0.1V hysteresis
    i.set_trigger('in1', 'rising', 0, hysteresis=True)

    # View +- 1 second, i.e. trigger in the centre
    i.set_timebase(-1, 1)

    # Generate an output sinewave on Channel 2, 500mVpp, 10Hz, 0V offset
    i.gen_sinewave(2, 0.5, 5, 0)

    # Set the data source of Channel 1 to be Input 1
    i.set_source(1, 'in')

    # Set the data source of Channel 2 to the generated output sinewave
    i.set_source(2, 'out')

    # Create one plot with two curves, drawn with two pens (different colours)
    p = pg.plot()
    line1 = p.plot(pen=(1, 2))
    line2 = p.plot(pen=(2, 2))

    # Called on an immediate-expiry timer from the QApplication main loop
    def update():
        global line1, line2
        data = i.get_realtime_data()

        # Update the plot
        line1.setData(data.time, data.ch1)
        line2.setData(data.time, data.ch2)

    timer = QtCore.QTimer()
    timer.timeout.connect(update)
    timer.start(0)

    QtGui.QApplication.instance().exec_()
finally:
    # Close the connection to the Moku device.
    # This ensures network resources are released correctly.
    m.close()
```
---

commit: 6df3c7f4030555f5020ac1f09e30f75252686d0b
file: py/path-sum-iii.py
lang/license: Python / apache-2.0
subject: Add py solution for 437. Path Sum III
message: Add py solution for 437. Path Sum III (https://leetcode.com/problems/path-sum-iii/)
repos (deduplicated): ckclark/leetcode

new_contents:

```python
from collections import Counter


# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

class Solution(object):
    def dfs(self, cur):
        if cur:
            self.subsum += cur.val
            yield self.subsum_counter[self.subsum - self.needsum]
            self.subsum_counter[self.subsum] += 1
            for x in self.dfs(cur.left):
                yield x
            for x in self.dfs(cur.right):
                yield x
            self.subsum_counter[self.subsum] -= 1
            self.subsum -= cur.val

    def pathSum(self, root, needsum):
        """
        :type root: TreeNode
        :type sum: int
        :rtype: int
        """
        self.subsum = 0
        self.needsum = needsum
        self.subsum_counter = Counter()
        self.subsum_counter[0] += 1
        return sum(self.dfs(root))
```
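The solution is the classic prefix-sum trick: dfs keeps a running root-to-node sum plus a Counter of the sums seen on the current path, and each node yields the number of ancestors whose prefix sum is exactly needsum less than the current one, for O(n) total work. A small driver on the LeetCode example tree (the scaffolding below is mine, not part of the commit):

```python
# Hand-built example tree [10,5,-3,3,2,null,11,3,-2,null,1] with target 8.
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

def node(val, left=None, right=None):
    n = TreeNode(val)
    n.left, n.right = left, right
    return n

root = node(10,
            node(5, node(3, node(3), node(-2)), node(2, right=node(1))),
            node(-3, right=node(11)))
print(Solution().pathSum(root, 8))  # prints 3: paths 5->3, 5->2->1, -3->11
```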
---

commit: 50003d11234d73d53b1b3d09c5de963b707bca6b
file: src/nyc_trees/apps/survey/migrations/0022_merge.py
lang/license: Python / agpl-3.0
subject/message: Resolve migration conflict via merge
repos (deduplicated): azavea/nyc-trees, kdeloach/nyc-trees, maurizi/nyc-trees

new_contents:

```python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('survey', '0021_auto_20150506_1922'),
        ('survey', '0021_auto_20150506_1406'),
    ]

    operations = [
    ]
```
---

commit: f363a8e615861cafc9a09b21247ca1b0ddad90a9
file: scripts/igmp_monitor.py
lang/license: Python / bsd-3-clause
subject/message: Add a simple script which just dumps any IGMP traffic which it sees. This exercises the centralized IGMP receive path.
repos (deduplicated): gvnn3/PCS

new_contents:

```python
#!/usr/bin/env python

from pcs.packets.localhost import *
from pcs.packets.ethernet import *
from pcs.packets.ipv4 import *
from pcs.packets.igmp import *
from pcs.packets.igmpv2 import *
from pcs.packets.payload import *
from pcs import *
from time import sleep


def main():
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-I", "--ether_iface",
                      dest="ether_iface", default=None,
                      help="The name of the source interface.")
    #parser.add_option("-c", "--count",
    #                  dest="count", default=None,
    #                  help="Stop after receiving at least count responses.")
    (options, args) = parser.parse_args()

    if options.ether_iface is None:
        print "Non-optional argument missing."
        return

    input = PcapConnector(options.ether_iface)
    input.setfilter("igmp")

    #
    # Wait for up to 'count' responses to the query to arrive and print them.
    #
    quit = False
    while not quit:
        packet = input.readpkt()
        chain = packet.chain()
        print chain
        #if chain.packets[2].type == IGMP_v2_HOST_MEMBERSHIP_REPORT:
        #    #print chain.packets[2].println()
        #    print "%s is in %s" % \
        #        (inet_ntop(AF_INET, struct.pack('!L', chain.packets[1].src)), \
        #         inet_ntop(AF_INET, struct.pack('!L', chain.packets[3].group)))
        #    count -= 1

main()
```
|
Add a simple script which just dumps any IGMP traffic which it sees.
This exercises the centralized IGMP receive path.
|
#!/usr/bin/env python
from pcs.packets.localhost import *
from pcs.packets.ethernet import *
from pcs.packets.ipv4 import *
from pcs.packets.igmp import *
from pcs.packets.igmpv2 import *
from pcs.packets.payload import *
from pcs import *
from time import sleep
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-I", "--ether_iface",
dest="ether_iface", default=None,
help="The name of the source interface.")
#parser.add_option("-c", "--count",
# dest="count", default=None,
# help="Stop after receiving at least count responses.")
(options, args) = parser.parse_args()
if options.ether_iface is None:
print "Non-optional argument missing."
return
input = PcapConnector(options.ether_iface)
input.setfilter("igmp")
#
# Wait for up to 'count' responses to the query to arrive and print them.
#
quit = False
while not quit:
packet = input.readpkt()
chain = packet.chain()
print chain
#if chain.packets[2].type == IGMP_v2_HOST_MEMBERSHIP_REPORT:
# #print chain.packets[2].println()
# print "%s is in %s" % \
# (inet_ntop(AF_INET, struct.pack('!L', chain.packets[1].src)), \
# inet_ntop(AF_INET, struct.pack('!L', chain.packets[3].group)))
# count -= 1
main()
|
<commit_before><commit_msg>Add a simple script which just dumps any IGMP traffic which it sees.
This exercises the centralized IGMP receive path.<commit_after>
|
#!/usr/bin/env python
from pcs.packets.localhost import *
from pcs.packets.ethernet import *
from pcs.packets.ipv4 import *
from pcs.packets.igmp import *
from pcs.packets.igmpv2 import *
from pcs.packets.payload import *
from pcs import *
from time import sleep
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-I", "--ether_iface",
dest="ether_iface", default=None,
help="The name of the source interface.")
#parser.add_option("-c", "--count",
# dest="count", default=None,
# help="Stop after receiving at least count responses.")
(options, args) = parser.parse_args()
if options.ether_iface is None:
print "Non-optional argument missing."
return
input = PcapConnector(options.ether_iface)
input.setfilter("igmp")
#
# Wait for up to 'count' responses to the query to arrive and print them.
#
quit = False
while not quit:
packet = input.readpkt()
chain = packet.chain()
print chain
#if chain.packets[2].type == IGMP_v2_HOST_MEMBERSHIP_REPORT:
# #print chain.packets[2].println()
# print "%s is in %s" % \
# (inet_ntop(AF_INET, struct.pack('!L', chain.packets[1].src)), \
# inet_ntop(AF_INET, struct.pack('!L', chain.packets[3].group)))
# count -= 1
main()
|
Add a simple script which just dumps any IGMP traffic which it sees.
This exercises the centralized IGMP receive path.#!/usr/bin/env python
from pcs.packets.localhost import *
from pcs.packets.ethernet import *
from pcs.packets.ipv4 import *
from pcs.packets.igmp import *
from pcs.packets.igmpv2 import *
from pcs.packets.payload import *
from pcs import *
from time import sleep
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-I", "--ether_iface",
dest="ether_iface", default=None,
help="The name of the source interface.")
#parser.add_option("-c", "--count",
# dest="count", default=None,
# help="Stop after receiving at least count responses.")
(options, args) = parser.parse_args()
if options.ether_iface is None:
print "Non-optional argument missing."
return
input = PcapConnector(options.ether_iface)
input.setfilter("igmp")
#
# Wait for up to 'count' responses to the query to arrive and print them.
#
quit = False
while not quit:
packet = input.readpkt()
chain = packet.chain()
print chain
#if chain.packets[2].type == IGMP_v2_HOST_MEMBERSHIP_REPORT:
# #print chain.packets[2].println()
# print "%s is in %s" % \
# (inet_ntop(AF_INET, struct.pack('!L', chain.packets[1].src)), \
# inet_ntop(AF_INET, struct.pack('!L', chain.packets[3].group)))
# count -= 1
main()
|
<commit_before><commit_msg>Add a simple script which just dumps any IGMP traffic which it sees.
This exercises the centralized IGMP receive path.<commit_after>#!/usr/bin/env python
from pcs.packets.localhost import *
from pcs.packets.ethernet import *
from pcs.packets.ipv4 import *
from pcs.packets.igmp import *
from pcs.packets.igmpv2 import *
from pcs.packets.payload import *
from pcs import *
from time import sleep
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-I", "--ether_iface",
dest="ether_iface", default=None,
help="The name of the source interface.")
#parser.add_option("-c", "--count",
# dest="count", default=None,
# help="Stop after receiving at least count responses.")
(options, args) = parser.parse_args()
if options.ether_iface is None:
print "Non-optional argument missing."
return
input = PcapConnector(options.ether_iface)
input.setfilter("igmp")
#
# Wait for up to 'count' responses to the query to arrive and print them.
#
quit = False
while not quit:
packet = input.readpkt()
chain = packet.chain()
print chain
#if chain.packets[2].type == IGMP_v2_HOST_MEMBERSHIP_REPORT:
# #print chain.packets[2].println()
# print "%s is in %s" % \
# (inet_ntop(AF_INET, struct.pack('!L', chain.packets[1].src)), \
# inet_ntop(AF_INET, struct.pack('!L', chain.packets[3].group)))
# count -= 1
main()
|
|
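A side note on the record above: the commented-out block in the main loop already sketches how to count IGMPv2 membership reports, and the snippet below adapts it into a working helper. This is an illustrative reconstruction, not part of the commit, and it assumes the PCS packet layout implied there (packets[1] = ipv4, packets[2] = igmp, packets[3] = igmpv2).
import struct
from socket import AF_INET, inet_ntop
from pcs.packets.igmp import *   # provides IGMP_v2_HOST_MEMBERSHIP_REPORT
def dump_reports(conn, count):
    """Print 'src is in group' lines until `count` reports are seen."""
    while count > 0:
        chain = conn.readpkt().chain()
        if chain.packets[2].type == IGMP_v2_HOST_MEMBERSHIP_REPORT:
            src = inet_ntop(AF_INET, struct.pack('!L', chain.packets[1].src))
            grp = inet_ntop(AF_INET, struct.pack('!L', chain.packets[3].group))
            print "%s is in %s" % (src, grp)
            count -= 1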
bf31d86f89632526418844849183f8065b83aecf
|
scripts/protobuf_raw.py
|
scripts/protobuf_raw.py
|
#!/usr/bin/env python3
"""Raw decode sequence of messages with protoc.
Pass hex string (including variant length). Multiple messages
are supported.
"""
import os
import sys
import binascii
from pyatv.mrp import variant
if __name__ == '__main__':
buf = binascii.unhexlify(sys.argv[1])
while buf:
length, raw = variant.read_variant(buf)
data = raw[:length]
buf = raw[length:]
hexdata = binascii.hexlify(data).decode('ascii')
print('Raw:', hexdata, '\n')
print('Decoded')
os.system('echo ' + hexdata + ' | xxd -r -p | protoc --decode_raw')
print(40*'-')
|
Add script to raw decode messages
|
Add script to raw decode messages
|
Python
|
mit
|
postlund/pyatv,postlund/pyatv
|
Add script to raw decode messages
|
#!/usr/bin/env python3
"""Raw decode sequence of messages with protoc.
Pass hex string (including variant length). Multiple messages
are supported.
"""
import os
import sys
import binascii
from pyatv.mrp import variant
if __name__ == '__main__':
buf = binascii.unhexlify(sys.argv[1])
while buf:
length, raw = variant.read_variant(buf)
data = raw[:length]
buf = raw[length:]
hexdata = binascii.hexlify(data).decode('ascii')
print('Raw:', hexdata, '\n')
print('Decoded')
os.system('echo ' + hexdata + ' | xxd -r -p | protoc --decode_raw')
print(40*'-')
|
<commit_before><commit_msg>Add script to raw decode messages<commit_after>
|
#!/usr/bin/env python3
"""Raw decode sequence of messages with protoc.
Pass hex string (including variant length). Multiple messages
are supported.
"""
import os
import sys
import binascii
from pyatv.mrp import variant
if __name__ == '__main__':
buf = binascii.unhexlify(sys.argv[1])
while buf:
length, raw = variant.read_variant(buf)
data = raw[:length]
buf = raw[length:]
hexdata = binascii.hexlify(data).decode('ascii')
print('Raw:', hexdata, '\n')
print('Decoded')
os.system('echo ' + hexdata + ' | xxd -r -p | protoc --decode_raw')
print(40*'-')
|
Add script to raw decode messages#!/usr/bin/env python3
"""Raw decode sequence of messages with protoc.
Pass hex string (including variant length). Multiple messages
are supported.
"""
import os
import sys
import binascii
from pyatv.mrp import variant
if __name__ == '__main__':
buf = binascii.unhexlify(sys.argv[1])
while buf:
length, raw = variant.read_variant(buf)
data = raw[:length]
buf = raw[length:]
hexdata = binascii.hexlify(data).decode('ascii')
print('Raw:', hexdata, '\n')
print('Decoded')
os.system('echo ' + hexdata + ' | xxd -r -p | protoc --decode_raw')
print(40*'-')
|
<commit_before><commit_msg>Add script to raw decode messages<commit_after>#!/usr/bin/env python3
"""Raw decode sequence of messages with protoc.
Pass hex string (including variant length). Multiple messages
are supported.
"""
import os
import sys
import binascii
from pyatv.mrp import variant
if __name__ == '__main__':
buf = binascii.unhexlify(sys.argv[1])
while buf:
length, raw = variant.read_variant(buf)
data = raw[:length]
buf = raw[length:]
hexdata = binascii.hexlify(data).decode('ascii')
print('Raw:', hexdata, '\n')
print('Decoded')
os.system('echo ' + hexdata + ' | xxd -r -p | protoc --decode_raw')
print(40*'-')
|
|
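For context on the read_variant call in the record above: protobuf messages on this transport are length-prefixed with a base-128 varint. The helper below is a hypothetical stand-in showing what such a decoder does; it is not pyatv's actual implementation.
def read_varint(buf):
    """Return (value, rest) for a varint-prefixed bytes object."""
    result = 0
    shift = 0
    for i, byte in enumerate(buf):
        result |= (byte & 0x7F) << shift   # low 7 bits carry payload
        if not byte & 0x80:                # high bit clear: last byte
            return result, buf[i + 1:]
        shift += 7
    raise ValueError('truncated varint')
# Example: b'\x96\x01' decodes to 150 with nothing left over.
assert read_varint(b'\x96\x01') == (150, b'')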
9d93af117cd035fb42206b408645e9a744ca2281
|
chainer/testing/distribution_test.py
|
chainer/testing/distribution_test.py
|
import unittest
try:
import pytest # NOQA
_error = None
except ImportError as e:
_error = e
if _error is None:
from chainer.testing._distribution_test import distribution_unittest
else:
class distribution_unittest(unittest.TestCase):
def test_dummy(self):
raise RuntimeError('''\
{} is not available.
Reason: {}: {}'''.format(__name__, type(_error).__name__, _error))
|
Allow `import chainer.testing` without pytest
|
Allow `import chainer.testing` without pytest
|
Python
|
mit
|
niboshi/chainer,wkentaro/chainer,chainer/chainer,hvy/chainer,keisuke-umezawa/chainer,wkentaro/chainer,wkentaro/chainer,chainer/chainer,tkerola/chainer,okuta/chainer,okuta/chainer,chainer/chainer,hvy/chainer,niboshi/chainer,keisuke-umezawa/chainer,keisuke-umezawa/chainer,pfnet/chainer,hvy/chainer,wkentaro/chainer,okuta/chainer,okuta/chainer,hvy/chainer,chainer/chainer,keisuke-umezawa/chainer,niboshi/chainer,niboshi/chainer
|
Allow `import chainer.testing` without pytest
|
import unittest
try:
import pytest # NOQA
_error = None
except ImportError as e:
_error = e
if _error is None:
from chainer.testing._distribution_test import distribution_unittest
else:
class distribution_unittest(unittest.TestCase):
def test_dummy(self):
raise RuntimeError('''\
{} is not available.
Reason: {}: {}'''.format(__name__, type(_error).__name__, _error))
|
<commit_before><commit_msg>Allow `import chainer.testing` without pytest<commit_after>
|
import unittest
try:
import pytest # NOQA
_error = None
except ImportError as e:
_error = e
if _error is None:
from chainer.testing._distribution_test import distribution_unittest
else:
class distribution_unittest(unittest.TestCase):
def test_dummy(self):
raise RuntimeError('''\
{} is not available.
Reason: {}: {}'''.format(__name__, type(_error).__name__, _error))
|
Allow `import chainer.testing` without pytestimport unittest
try:
import pytest # NOQA
_error = None
except ImportError as e:
_error = e
if _error is None:
from chainer.testing._distribution_test import distribution_unittest
else:
class distribution_unittest(unittest.TestCase):
def test_dummy(self):
raise RuntimeError('''\
{} is not available.
Reason: {}: {}'''.format(__name__, type(_error).__name__, _error))
|
<commit_before><commit_msg>Allow `import chainer.testing` without pytest<commit_after>import unittest
try:
import pytest # NOQA
_error = None
except ImportError as e:
_error = e
if _error is None:
from chainer.testing._distribution_test import distribution_unittest
else:
class distribution_unittest(unittest.TestCase):
def test_dummy(self):
raise RuntimeError('''\
{} is not available.
Reason: {}: {}'''.format(__name__, type(_error).__name__, _error))
|
|
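The record above uses a deferred-import guard: the ImportError is caught at import time and only surfaced, with context, when the optional feature is actually exercised. A minimal standalone restatement of the pattern (illustrative, not chainer code):
import unittest
try:
    import pytest  # noqa
    _error = None
except ImportError as e:
    _error = e
if _error is not None:
    class distribution_unittest(unittest.TestCase):
        def test_dummy(self):
            # Importing the module stays cheap; using the feature reports
            # exactly why the optional dependency is missing.
            raise RuntimeError('pytest is not available: %s' % _error)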
461d77e2c4335b4f3cc3c4ff2d2af8c5a9cdf9bf
|
tests/test_future/test_email_generation.py
|
tests/test_future/test_email_generation.py
|
# -*- coding: utf-8 -*-
"""Tests for email generation."""
from __future__ import unicode_literals
from future.backports.email.mime.multipart import MIMEMultipart
from future.backports.email.mime.text import MIMEText
from future.backports.email.utils import formatdate
from future.tests.base import unittest
class EmailGenerationTests(unittest.TestCase):
def test_email_custom_header_can_contain_unicode(self):
msg = MIMEMultipart()
alternative = MIMEMultipart('alternative')
alternative.attach(MIMEText('Plain content with Únicødê', _subtype='plain', _charset='utf-8'))
alternative.attach(MIMEText('HTML content with Únicødê', _subtype='html', _charset='utf-8'))
msg.attach(alternative)
msg['Subject'] = 'Subject with Únicødê'
msg['From'] = 'sender@test.com'
msg['To'] = 'recipient@test.com'
msg['Date'] = formatdate(None, localtime=True)
msg['Message-ID'] = 'anIdWithÚnicødêForThisEmail'
msg_lines = msg.as_string().split('\n')
self.assertEqual(msg_lines[2], 'Subject: =?utf-8?b?U3ViamVjdCB3aXRoIMOabmljw7hkw6o=?=')
self.assertEqual(msg_lines[6], 'Message-ID: =?utf-8?b?YW5JZFdpdGjDmm5pY8O4ZMOqRm9yVGhpc0VtYWls?=')
self.assertEqual(msg_lines[17], 'UGxhaW4gY29udGVudCB3aXRoIMOabmljw7hkw6o=')
self.assertEqual(msg_lines[24], 'SFRNTCBjb250ZW50IHdpdGggw5puaWPDuGTDqg==')
|
Add a test for our fix
|
Add a test for our fix
|
Python
|
mit
|
PythonCharmers/python-future,PythonCharmers/python-future
|
Add a test for our fix
|
# -*- coding: utf-8 -*-
"""Tests for email generation."""
from __future__ import unicode_literals
from future.backports.email.mime.multipart import MIMEMultipart
from future.backports.email.mime.text import MIMEText
from future.backports.email.utils import formatdate
from future.tests.base import unittest
class EmailGenerationTests(unittest.TestCase):
def test_email_custom_header_can_contain_unicode(self):
msg = MIMEMultipart()
alternative = MIMEMultipart('alternative')
alternative.attach(MIMEText('Plain content with Únicødê', _subtype='plain', _charset='utf-8'))
alternative.attach(MIMEText('HTML content with Únicødê', _subtype='html', _charset='utf-8'))
msg.attach(alternative)
msg['Subject'] = 'Subject with Únicødê'
msg['From'] = 'sender@test.com'
msg['To'] = 'recipient@test.com'
msg['Date'] = formatdate(None, localtime=True)
msg['Message-ID'] = 'anIdWithÚnicødêForThisEmail'
msg_lines = msg.as_string().split('\n')
self.assertEqual(msg_lines[2], 'Subject: =?utf-8?b?U3ViamVjdCB3aXRoIMOabmljw7hkw6o=?=')
self.assertEqual(msg_lines[6], 'Message-ID: =?utf-8?b?YW5JZFdpdGjDmm5pY8O4ZMOqRm9yVGhpc0VtYWls?=')
self.assertEqual(msg_lines[17], 'UGxhaW4gY29udGVudCB3aXRoIMOabmljw7hkw6o=')
self.assertEqual(msg_lines[24], 'SFRNTCBjb250ZW50IHdpdGggw5puaWPDuGTDqg==')
|
<commit_before><commit_msg>Add a test for our fix<commit_after>
|
# -*- coding: utf-8 -*-
"""Tests for email generation."""
from __future__ import unicode_literals
from future.backports.email.mime.multipart import MIMEMultipart
from future.backports.email.mime.text import MIMEText
from future.backports.email.utils import formatdate
from future.tests.base import unittest
class EmailGenerationTests(unittest.TestCase):
def test_email_custom_header_can_contain_unicode(self):
msg = MIMEMultipart()
alternative = MIMEMultipart('alternative')
alternative.attach(MIMEText('Plain content with Únicødê', _subtype='plain', _charset='utf-8'))
alternative.attach(MIMEText('HTML content with Únicødê', _subtype='html', _charset='utf-8'))
msg.attach(alternative)
msg['Subject'] = 'Subject with Únicødê'
msg['From'] = 'sender@test.com'
msg['To'] = 'recipient@test.com'
msg['Date'] = formatdate(None, localtime=True)
msg['Message-ID'] = 'anIdWithÚnicødêForThisEmail'
msg_lines = msg.as_string().split('\n')
self.assertEqual(msg_lines[2], 'Subject: =?utf-8?b?U3ViamVjdCB3aXRoIMOabmljw7hkw6o=?=')
self.assertEqual(msg_lines[6], 'Message-ID: =?utf-8?b?YW5JZFdpdGjDmm5pY8O4ZMOqRm9yVGhpc0VtYWls?=')
self.assertEqual(msg_lines[17], 'UGxhaW4gY29udGVudCB3aXRoIMOabmljw7hkw6o=')
self.assertEqual(msg_lines[24], 'SFRNTCBjb250ZW50IHdpdGggw5puaWPDuGTDqg==')
|
Add a test for our fix# -*- coding: utf-8 -*-
"""Tests for email generation."""
from __future__ import unicode_literals
from future.backports.email.mime.multipart import MIMEMultipart
from future.backports.email.mime.text import MIMEText
from future.backports.email.utils import formatdate
from future.tests.base import unittest
class EmailGenerationTests(unittest.TestCase):
def test_email_custom_header_can_contain_unicode(self):
msg = MIMEMultipart()
alternative = MIMEMultipart('alternative')
alternative.attach(MIMEText('Plain content with Únicødê', _subtype='plain', _charset='utf-8'))
alternative.attach(MIMEText('HTML content with Únicødê', _subtype='html', _charset='utf-8'))
msg.attach(alternative)
msg['Subject'] = 'Subject with Únicødê'
msg['From'] = 'sender@test.com'
msg['To'] = 'recipient@test.com'
msg['Date'] = formatdate(None, localtime=True)
msg['Message-ID'] = 'anIdWithÚnicødêForThisEmail'
msg_lines = msg.as_string().split('\n')
self.assertEqual(msg_lines[2], 'Subject: =?utf-8?b?U3ViamVjdCB3aXRoIMOabmljw7hkw6o=?=')
self.assertEqual(msg_lines[6], 'Message-ID: =?utf-8?b?YW5JZFdpdGjDmm5pY8O4ZMOqRm9yVGhpc0VtYWls?=')
self.assertEqual(msg_lines[17], 'UGxhaW4gY29udGVudCB3aXRoIMOabmljw7hkw6o=')
self.assertEqual(msg_lines[24], 'SFRNTCBjb250ZW50IHdpdGggw5puaWPDuGTDqg==')
|
<commit_before><commit_msg>Add a test for our fix<commit_after># -*- coding: utf-8 -*-
"""Tests for email generation."""
from __future__ import unicode_literals
from future.backports.email.mime.multipart import MIMEMultipart
from future.backports.email.mime.text import MIMEText
from future.backports.email.utils import formatdate
from future.tests.base import unittest
class EmailGenerationTests(unittest.TestCase):
def test_email_custom_header_can_contain_unicode(self):
msg = MIMEMultipart()
alternative = MIMEMultipart('alternative')
alternative.attach(MIMEText('Plain content with Únicødê', _subtype='plain', _charset='utf-8'))
alternative.attach(MIMEText('HTML content with Únicødê', _subtype='html', _charset='utf-8'))
msg.attach(alternative)
msg['Subject'] = 'Subject with Únicødê'
msg['From'] = 'sender@test.com'
msg['To'] = 'recipient@test.com'
msg['Date'] = formatdate(None, localtime=True)
msg['Message-ID'] = 'anIdWithÚnicødêForThisEmail'
msg_lines = msg.as_string().split('\n')
self.assertEqual(msg_lines[2], 'Subject: =?utf-8?b?U3ViamVjdCB3aXRoIMOabmljw7hkw6o=?=')
self.assertEqual(msg_lines[6], 'Message-ID: =?utf-8?b?YW5JZFdpdGjDmm5pY8O4ZMOqRm9yVGhpc0VtYWls?=')
self.assertEqual(msg_lines[17], 'UGxhaW4gY29udGVudCB3aXRoIMOabmljw7hkw6o=')
self.assertEqual(msg_lines[24], 'SFRNTCBjb250ZW50IHdpdGggw5puaWPDuGTDqg==')
|
|
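Background for the assertions in the record above: non-ASCII header values are serialized as RFC 2047 encoded words (=?charset?b?base64?=). A stdlib-only illustration, independent of the future.backports modules under test:
# -*- coding: utf-8 -*-
from email.header import Header
encoded = Header(u'Subject with Únicødê', 'utf-8').encode()
# Yields an encoded word of the form =?utf-8?b?...?=, matching the shape
# of the Subject line asserted in the test above.
print(encoded)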
c2cae70605638455aedcb09ec31dc494071f3be3
|
stoneridge_collator.py
|
stoneridge_collator.py
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
# TODO
|
Add (empty, for now) data collator
|
Add (empty, for now) data collator
|
Python
|
mpl-2.0
|
mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge
|
Add (empty, for now) data collator
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
# TODO
|
<commit_before><commit_msg>Add (empty, for now) data collator<commit_after>
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
# TODO
|
Add (empty, for now) data collator#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
# TODO
|
<commit_before><commit_msg>Add (empty, for now) data collator<commit_after>#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
# TODO
|
|
dc59de726719db2c0366a43859e84209d694d249
|
userprofile/migrations/0007_auto_20180323_1747.py
|
userprofile/migrations/0007_auto_20180323_1747.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-03-23 17:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('userprofile', '0006_auto_20180309_2215'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='auto_name',
),
migrations.RemoveField(
model_name='profile',
name='name',
),
migrations.AlterField(
model_name='profile',
name='group',
field=models.ManyToManyField(blank=True, related_name='profile', to='userprofile.Group', verbose_name='Gruppe'),
),
migrations.AlterField(
model_name='profile',
name='image',
field=models.ImageField(default=None, upload_to='profilepictures', verbose_name='Profilbilde'),
),
]
|
Add migration files for new userprofile models
|
Add migration files for new userprofile models
|
Python
|
mit
|
hackerspace-ntnu/website,hackerspace-ntnu/website,hackerspace-ntnu/website
|
Add migration files for new userprofile models
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-03-23 17:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('userprofile', '0006_auto_20180309_2215'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='auto_name',
),
migrations.RemoveField(
model_name='profile',
name='name',
),
migrations.AlterField(
model_name='profile',
name='group',
field=models.ManyToManyField(blank=True, related_name='profile', to='userprofile.Group', verbose_name='Gruppe'),
),
migrations.AlterField(
model_name='profile',
name='image',
field=models.ImageField(default=None, upload_to='profilepictures', verbose_name='Profilbilde'),
),
]
|
<commit_before><commit_msg>Add migration files for new userprofile models<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-03-23 17:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('userprofile', '0006_auto_20180309_2215'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='auto_name',
),
migrations.RemoveField(
model_name='profile',
name='name',
),
migrations.AlterField(
model_name='profile',
name='group',
field=models.ManyToManyField(blank=True, related_name='profile', to='userprofile.Group', verbose_name='Gruppe'),
),
migrations.AlterField(
model_name='profile',
name='image',
field=models.ImageField(default=None, upload_to='profilepictures', verbose_name='Profilbilde'),
),
]
|
Add migration files for new userprofile models# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-03-23 17:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('userprofile', '0006_auto_20180309_2215'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='auto_name',
),
migrations.RemoveField(
model_name='profile',
name='name',
),
migrations.AlterField(
model_name='profile',
name='group',
field=models.ManyToManyField(blank=True, related_name='profile', to='userprofile.Group', verbose_name='Gruppe'),
),
migrations.AlterField(
model_name='profile',
name='image',
field=models.ImageField(default=None, upload_to='profilepictures', verbose_name='Profilbilde'),
),
]
|
<commit_before><commit_msg>Add migration files for new userprofile models<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-03-23 17:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('userprofile', '0006_auto_20180309_2215'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='auto_name',
),
migrations.RemoveField(
model_name='profile',
name='name',
),
migrations.AlterField(
model_name='profile',
name='group',
field=models.ManyToManyField(blank=True, related_name='profile', to='userprofile.Group', verbose_name='Gruppe'),
),
migrations.AlterField(
model_name='profile',
name='image',
field=models.ImageField(default=None, upload_to='profilepictures', verbose_name='Profilbilde'),
),
]
|
|
5147963a188f3c13179600d92640f0a33a78af68
|
algorithms/dynamic-programming/longest-increasing-subsequence/solution.py
|
algorithms/dynamic-programming/longest-increasing-subsequence/solution.py
|
#!/usr/bin/env python
import sys
def read(fn):
return fn(sys.stdin.readline())
def ceil(array, value):
"""
Returns the smallest index i such that array[i] >= value, or len(array) if no such index exists.
"""
l = 0
r = len(array) - 1
i = r + 1
while l <= r:
m = l + int((r - l) / 2)
if array[m] >= value:
# This mid index is a candidate for the index we are searching for
# so save it, and continue searching for a smaller candidate on the
# left side.
i = m
r = m - 1
else:
# This mid index is not a candidate so continue searching the right
# side.
l = m + 1
return i
def lis(array):
"""
Returns the length of the longest increasing sub-sequence in O(NlogN) time.
See http://www.geeksforgeeks.org/longest-monotonically-increasing-subsequence-size-n-log-n/. # nopep8
"""
n = len(array)
if n < 2:
return n
# Stores the last value for each candidate increasing list. We maintain
# that last value in shorter lists is less than the last value in longer
# lists.
tails = [array[0]]
for i in range(1, n):
a = array[i]
if a <= tails[0]:
# Begin the shortest LIS at this new smallest element for the most
# possible future LIS candidates.
tails[0] = a
elif tails[-1] < a:
# Create a new longest LIS ending with this new largest element.
tails.append(a)
else:
# Find a LIS where we can replace the last element with a.
j = ceil(tails, a)
tails[j] = a
return len(tails)
def lis_dp(array):
"""
Returns the length of the longest increasing sub-sequence in O(N^2) time.
This is not fast enough to pass with the HackerRank time constraints.
"""
n = len(array)
if n == 0:
return 0
dp = [1] * n
# Let F(i) be the LIS ending with array[i]. F[i] = max({1 + F[j]}) for all
# j < i and array[j] < array[i]. If no such j exists, then F[i] = 1.
for i in range(1, n):
for j in range(0, i):
if array[j] < array[i]:
dp[i] = max(dp[i], 1 + dp[j])
return max(dp)
def main():
N = read(int)
array = [0] * N
for i in range(0, N):
array[i] = read(int)
print(lis(array))
if __name__ == '__main__':
main()
|
Implement Longest Increasing Subsequence in Python
|
Implement Longest Increasing Subsequence in Python
|
Python
|
mit
|
andreimaximov/algorithms,andreimaximov/algorithms,andreimaximov/algorithms,andreimaximov/algorithms
|
Implement Longest Increasing Subsequence in Python
|
#!/usr/bin/env python
import sys
def read(fn):
return fn(sys.stdin.readline())
def ceil(array, value):
"""
Returns the smallest index i such that array[i] >= value, or len(array) if no such index exists.
"""
l = 0
r = len(array) - 1
i = r + 1
while l <= r:
m = l + int((r - l) / 2)
if array[m] >= value:
# This mid index is a candidate for the index we are searching for
# so save it, and continue searching for a smaller candidate on the
# left side.
i = m
r = m - 1
else:
# This mid index is not a candidate so continue searching the right
# side.
l = m + 1
return i
def lis(array):
"""
Returns the length of the longest increasing sub-sequence in O(NlogN) time.
See http://www.geeksforgeeks.org/longest-monotonically-increasing-subsequence-size-n-log-n/. # nopep8
"""
n = len(array)
if n < 2:
return n
# Stores the last value for each candidate increasing list. We maintain
# that last value in shorter lists is less than the last value in longer
# lists.
tails = [array[0]]
for i in range(1, n):
a = array[i]
if a <= tails[0]:
# Begin the shortest LIS at this new smallest element for the most
# possible future LIS candidates.
tails[0] = a
elif tails[-1] < a:
# Create a new longest LIS ending with this new largest element.
tails.append(a)
else:
# Find a LIS where we can replace the last element with a.
j = ceil(tails, a)
tails[j] = a
return len(tails)
def lis_dp(array):
"""
Returns the length of the longest increasing sub-sequence in O(N^2) time.
This is not fast enough to pass with the HackerRank time constraints.
"""
n = len(array)
if n == 0:
return 0
dp = [1] * n
# Let F(i) be the LIS ending with array[i]. F[i] = max({1 + F[j]}) for all
# j < i and array[j] < array[i]. If no such j exists, then F[i] = 1.
for i in range(1, n):
for j in range(0, i):
if array[j] < array[i]:
dp[i] = max(dp[i], 1 + dp[j])
return max(dp)
def main():
N = read(int)
array = [0] * N
for i in range(0, N):
array[i] = read(int)
print(lis(array))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Implement Longest Increasing Subsequence in Python<commit_after>
|
#!/usr/bin/env python
import sys
def read(fn):
return fn(sys.stdin.readline())
def ceil(array, value):
"""
Returns the smallest index i such that array[i] >= value, or len(array) if no such index exists.
"""
l = 0
r = len(array) - 1
i = r + 1
while l <= r:
m = l + int((r - l) / 2)
if array[m] >= value:
# This mid index is a candidate for the index we are searching for
# so save it, and continue searching for a smaller candidate on the
# left side.
i = m
r = m - 1
else:
# This mid index is not a candidate so continue searching the right
# side.
l = m + 1
return i
def lis(array):
"""
Returns the length of the longest increasing sub-sequence in O(NlogN) time.
See http://www.geeksforgeeks.org/longest-monotonically-increasing-subsequence-size-n-log-n/. # nopep8
"""
n = len(array)
if n < 2:
return n
# Stores the last value for each candidate increasing list. We maintain
# that last value in shorter lists is less than the last value in longer
# lists.
tails = [array[0]]
for i in range(1, n):
a = array[i]
if a <= tails[0]:
# Begin the shortest LIS at this new smallest element for the most
# possible future LIS candidates.
tails[0] = a
elif tails[-1] < a:
# Create a new longest LIS ending with this new largest element.
tails.append(a)
else:
# Find a LIS where we can replace the last element with a.
j = ceil(tails, a)
tails[j] = a
return len(tails)
def lis_dp(array):
"""
Returns the length of the longest increasing sub-sequence in O(N^2) time.
This is not fast enough to pass with the HackerRank time constraints.
"""
n = len(array)
if n == 0:
return 0
dp = [1] * n
# Let F(i) be the LIS ending with array[i]. F[i] = max({1 + F[j]}) for all
# j < i and array[j] < array[i]. If no such j exists, then F[i] = 1.
for i in range(1, n):
for j in range(0, i):
if array[j] < array[i]:
dp[i] = max(dp[i], 1 + dp[j])
return max(dp)
def main():
N = read(int)
array = [0] * N
for i in range(0, N):
array[i] = read(int)
print(lis(array))
if __name__ == '__main__':
main()
|
Implement Longest Increasing Subsequence in Python#!/usr/bin/env python
import sys
def read(fn):
return fn(sys.stdin.readline())
def ceil(array, value):
"""
Returns the smallest index i such that array[i] >= value, or len(array) if no such index exists.
"""
l = 0
r = len(array) - 1
i = r + 1
while l <= r:
m = l + int((r - l) / 2)
if array[m] >= value:
# This mid index is a candidate for the index we are searching for
# so save it, and continue searching for a smaller candidate on the
# left side.
i = m
r = m - 1
else:
# This mid index is not a candidate so continue searching the right
# side.
l = m + 1
return i
def lis(array):
"""
Returns the length of the longest increasing sub-sequence in O(NlogN) time.
See http://www.geeksforgeeks.org/longest-monotonically-increasing-subsequence-size-n-log-n/. # nopep8
"""
n = len(array)
if n < 2:
return n
# Stores the last value for each candidate increasing list. We maintain
# that last value in shorter lists is less than the last value in longer
# lists.
tails = [array[0]]
for i in range(1, n):
a = array[i]
if a <= tails[0]:
# Begin the shortest LIS at this new smallest element for the most
# possible future LIS candidates.
tails[0] = a
elif tails[-1] < a:
# Create a new longest LIS ending with this new largest element.
tails.append(a)
else:
# Find a LIS where we can replace the last element with a.
j = ceil(tails, a)
tails[j] = a
return len(tails)
def lis_dp(array):
"""
Returns the length of the longest increasing sub-sequence in O(N^2) time.
This is not fast enough to pass with the HackerRank time constraints.
"""
n = len(array)
if n == 0:
return 0
dp = [1] * n
# Let F(i) be the LIS ending with array[i]. F[i] = max({1 + F[j]}) for all
# j < i and array[j] < array[i]. If no such j exists, then F[i] = 1.
for i in range(1, n):
for j in range(0, i):
if array[j] < array[i]:
dp[i] = max(dp[i], 1 + dp[j])
return max(dp)
def main():
N = read(int)
array = [0] * N
for i in range(0, N):
array[i] = read(int)
print(lis(array))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Implement Longest Increasing Subsequence in Python<commit_after>#!/usr/bin/env python
import sys
def read(fn):
return fn(sys.stdin.readline())
def ceil(array, value):
"""
Returns the smallest index i such that array[i] >= value, or len(array) if no such index exists.
"""
l = 0
r = len(array) - 1
i = r + 1
while l <= r:
m = l + int((r - l) / 2)
if array[m] >= value:
# This mid index is a candidate for the index we are searching for
# so save it, and continue searching for a smaller candidate on the
# left side.
i = m
r = m - 1
else:
# This mid index is not a candidate so continue searching the right
# side.
l = m + 1
return i
def lis(array):
"""
Returns the length of the longest increasing sub-sequence in O(NlogN) time.
See http://www.geeksforgeeks.org/longest-monotonically-increasing-subsequence-size-n-log-n/. # nopep8
"""
n = len(array)
if n < 2:
return n
# Stores the last value for each candidate increasing list. We maintain
# that last value in shorter lists is less than the last value in longer
# lists.
tails = [array[0]]
for i in range(1, n):
a = array[i]
if a <= tails[0]:
# Begin the shortest LIS at this new smallest element for the most
# possible future LIS candidates.
tails[0] = a
elif tails[-1] < a:
# Create a new longest LIS ending with this new largest element.
tails.append(a)
else:
# Find a LIS where we can replace the last element with a.
j = ceil(tails, a)
tails[j] = a
return len(tails)
def lis_dp(array):
"""
Returns the length of the longest increasing sub-sequence in O(N^2) time.
This is not fast enough to pass with the HackerRank time constraints.
"""
n = len(array)
if n == 0:
return 0
dp = [1] * n
# Let F(i) be the LIS ending with array[i]. F[i] = max({1 + F[j]}) for all
# j < i and array[j] < array[i]. If no such j exists, then F[i] = 1.
for i in range(1, n):
for j in range(0, i):
if array[j] < array[i]:
dp[i] = max(dp[i], 1 + dp[j])
return max(dp)
def main():
N = read(int)
array = [0] * N
for i in range(0, N):
array[i] = read(int)
print(lis(array))
if __name__ == '__main__':
main()
|
|
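A worked trace for lis() in the record above, showing how the `tails` array evolves on a small input (values chosen for clarity). The asserts assume the functions above are importable alongside this snippet.
# array = [10, 9, 2, 5, 3, 7, 101, 18]
# 10  -> tails [10]
# 9   -> replaces 10        -> [9]
# 2   -> replaces 9         -> [2]
# 5   -> extends            -> [2, 5]
# 3   -> ceil lands on 5    -> [2, 3]
# 7   -> extends            -> [2, 3, 7]
# 101 -> extends            -> [2, 3, 7, 101]
# 18  -> ceil lands on 101  -> [2, 3, 7, 18]
# LIS length = len(tails) = 4  (one witness: 2, 3, 7, 18)
assert lis([10, 9, 2, 5, 3, 7, 101, 18]) == 4
assert lis([]) == 0 and lis([5]) == 1
assert lis([3, 2, 1]) == 1              # strictly decreasing input
assert lis_dp([10, 9, 2, 5, 3, 7, 101, 18]) == 4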
ffbedc8ed975b3eba3c8478f12a334bb94ceb6bd
|
openfisca_france/tests/test_enumerations.py
|
openfisca_france/tests/test_enumerations.py
|
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openfisca_core import columns
import openfisca_france
TaxBenefitSystem = openfisca_france.init_country()
tax_benefit_system = TaxBenefitSystem()
def check(enum_column):
assert enum_column.enum, u'Column {} has no enum'.format(enum_column.name).encode('utf-8')
def test():
for column in tax_benefit_system.column_by_name.itervalues():
if isinstance(column, columns.EnumCol):
yield check, column
if __name__ == '__main__':
test()
|
Add test to detect Enums without enumeration.
|
Add test to detect Enums without enumeration.
|
Python
|
agpl-3.0
|
adrienpacifico/openfisca-france,adrienpacifico/openfisca-france,sgmap/openfisca-france,SophieIPP/openfisca-france,antoinearnoud/openfisca-france,benjello/openfisca-france,SophieIPP/openfisca-france,antoinearnoud/openfisca-france,benjello/openfisca-france,sgmap/openfisca-france
|
Add test to detect Enums without enumeration.
|
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openfisca_core import columns
import openfisca_france
TaxBenefitSystem = openfisca_france.init_country()
tax_benefit_system = TaxBenefitSystem()
def check(enum_column):
assert enum_column.enum, u'Column {} has no enum'.format(enum_column.name).encode('utf-8')
def test():
for column in tax_benefit_system.column_by_name.itervalues():
if isinstance(column, columns.EnumCol):
yield check, column
if __name__ == '__main__':
test()
|
<commit_before><commit_msg>Add test to detect Enums without enumeration.<commit_after>
|
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openfisca_core import columns
import openfisca_france
TaxBenefitSystem = openfisca_france.init_country()
tax_benefit_system = TaxBenefitSystem()
def check(enum_column):
assert enum_column.enum, u'Column {} has no enum'.format(enum_column.name).encode('utf-8')
def test():
for column in tax_benefit_system.column_by_name.itervalues():
if isinstance(column, columns.EnumCol):
yield check, column
if __name__ == '__main__':
test()
|
Add test to detect Enums without enumeration.# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openfisca_core import columns
import openfisca_france
TaxBenefitSystem = openfisca_france.init_country()
tax_benefit_system = TaxBenefitSystem()
def check(enum_column):
assert enum_column.enum, u'Column {} has no enum'.format(enum_column.name).encode('utf-8')
def test():
for column in tax_benefit_system.column_by_name.itervalues():
if isinstance(column, columns.EnumCol):
yield check, column
if __name__ == '__main__':
test()
|
<commit_before><commit_msg>Add test to detect Enums without enumeration.<commit_after># -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openfisca_core import columns
import openfisca_france
TaxBenefitSystem = openfisca_france.init_country()
tax_benefit_system = TaxBenefitSystem()
def check(enum_column):
assert enum_column.enum, u'Column {} has no enum'.format(enum_column.name).encode('utf-8')
def test():
for column in tax_benefit_system.column_by_name.itervalues():
if isinstance(column, columns.EnumCol):
yield check, column
if __name__ == '__main__':
test()
|
|
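The test() function in the record above relies on nose-style generator tests: each `yield check, column` becomes one test case. A dependency-free sketch of the same pattern:
def check_positive(n):
    assert n > 0, '%d is not positive' % n
def test_generator():
    for n in (1, 2, 3):
        yield check_positive, n   # nose calls check_positive(n) per item
if __name__ == '__main__':
    # Without nose, drive the generator by hand:
    for func, arg in test_generator():
        func(arg)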
39d1e2e23acf28a79720aefca098e9af942ea44b
|
scripts/runmask-util.py
|
scripts/runmask-util.py
|
#!/usr/bin/env python
# Tobey Carman
# Dec 2016
import netCDF4 as nc
import argparse
import textwrap
def show_mask(file, note):
with nc.Dataset(file, 'r') as mask:
print "========== %s ==================================" % (note)
print "'%s'" % (file)
print mask.variables['run']
print mask.variables['run'][:]
print ""
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''
Helper script for modifying a dvm-dos-tem runmask netcdf file.
''')
)
parser.add_argument('file', metavar=('file'),
help=textwrap.dedent('''The runmask.nc file to operate on.'''))
parser.add_argument('--reset', action='store_true',
help=textwrap.dedent('''Set all pixels to zero (don't run).'''))
parser.add_argument("--xy", nargs=2,
help=textwrap.dedent('''The x, y position of the pixel to turn on.'''))
parser.add_argument("--show", action='store_true',
help=textwrap.dedent('''Print the mask after modification.'''))
args = parser.parse_args()
if args.show:
show_mask(args.file, "BEFORE")
with nc.Dataset(args.file, 'a') as mask:
if args.reset:
print "Setting all pixels in runmask to '0' (OFF)."
mask.variables['run'][:] = 0
if args.xy:
X, Y = [int(c) for c in args.xy]  # argparse yields strings; netCDF4 needs ints
print "Turning pixel(x,y) to '1', (ON)."
mask.variables['run'][X,Y] = 1
# Show the after state
if args.show:
show_mask(args.file, "AFTER")
|
Add utility script for modifying run mask.
|
Add utility script for modifying run mask.
|
Python
|
mit
|
tobeycarman/dvm-dos-tem,tobeycarman/dvm-dos-tem,tobeycarman/dvm-dos-tem,tobeycarman/dvm-dos-tem,tobeycarman/dvm-dos-tem,tobeycarman/dvm-dos-tem
|
Add utility script for modifying run mask.
|
#!/usr/bin/env python
# Tobey Carman
# Dec 2016
import netCDF4 as nc
import argparse
import textwrap
def show_mask(file, note):
with nc.Dataset(file, 'r') as mask:
print "========== %s ==================================" % (note)
print "'%s'" % (file)
print mask.variables['run']
print mask.variables['run'][:]
print ""
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''
Helper script for modifying a dvm-dos-tem runmask netcdf file.
''')
)
parser.add_argument('file', metavar=('file'),
help=textwrap.dedent('''The runmask.nc file to operate on.'''))
parser.add_argument('--reset', action='store_true',
help=textwrap.dedent('''Set all pixels to zero (don't run).'''))
parser.add_argument("--xy", nargs=2,
help=textwrap.dedent('''The x, y position of the pixel to turn on.'''))
parser.add_argument("--show", action='store_true',
help=textwrap.dedent('''Print the mask after modification.'''))
args = parser.parse_args()
if args.show:
show_mask(args.file, "BEFORE")
with nc.Dataset(args.file, 'a') as mask:
if args.reset:
print "Setting all pixels in runmask to '0' (OFF)."
mask.variables['run'][:] = 0
if args.xy:
X, Y = [int(c) for c in args.xy]  # argparse yields strings; netCDF4 needs ints
print "Turning pixel(x,y) to '1', (ON)."
mask.variables['run'][X,Y] = 1
# Show the after state
if args.show:
show_mask(args.file, "AFTER")
|
<commit_before><commit_msg>Add utility script for modifying run mask.<commit_after>
|
#!/usr/bin/env python
# Tobey Carman
# Dec 2016
import netCDF4 as nc
import argparse
import textwrap
def show_mask(file, note):
with nc.Dataset(file, 'r') as mask:
print "========== %s ==================================" % (note)
print "'%s'" % (file)
print mask.variables['run']
print mask.variables['run'][:]
print ""
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''
Helper script for modifying a dvm-dos-tem runmask netcdf file.
''')
)
parser.add_argument('file', metavar=('file'),
help=textwrap.dedent('''The runmask.nc file to operate on.'''))
parser.add_argument('--reset', action='store_true',
help=textwrap.dedent('''Set all pixels to zero (don't run).'''))
parser.add_argument("--xy", nargs=2,
help=textwrap.dedent('''The x, y position of the pixel to turn on.'''))
parser.add_argument("--show", action='store_true',
help=textwrap.dedent('''Print the mask after modification.'''))
args = parser.parse_args()
if args.show:
show_mask(args.file, "BEFORE")
with nc.Dataset(args.file, 'a') as mask:
if args.reset:
print "Setting all pixels in runmask to '0' (OFF)."
mask.variables['run'][:] = 0
if args.xy:
X, Y = [int(c) for c in args.xy]  # argparse yields strings; netCDF4 needs ints
print "Turning pixel(x,y) to '1', (ON)."
mask.variables['run'][X,Y] = 1
# Show the after state
if args.show:
show_mask(args.file, "AFTER")
|
Add utility script for modifying run mask.#!/usr/bin/env python
# Tobey Carman
# Dec 2016
import netCDF4 as nc
import argparse
import textwrap
def show_mask(file, note):
with nc.Dataset(file, 'r') as mask:
print "========== %s ==================================" % (note)
print "'%s'" % (file)
print mask.variables['run']
print mask.variables['run'][:]
print ""
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''
Helper script for modifying a dvm-dos-tem runmask netcdf file.
''')
)
parser.add_argument('file', metavar=('file'),
help=textwrap.dedent('''The runmask.nc file to operate on.'''))
parser.add_argument('--reset', action='store_true',
help=textwrap.dedent('''Set all pixels to zero (don't run).'''))
parser.add_argument("--xy", nargs=2,
help=textwrap.dedent('''The x, y position of the pixel to turn on.'''))
parser.add_argument("--show", action='store_true',
help=textwrap.dedent('''Print the mask after modification.'''))
args = parser.parse_args()
if args.show:
show_mask(args.file, "BEFORE")
with nc.Dataset(args.file, 'a') as mask:
if args.reset:
print "Setting all pixels in runmask to '0' (OFF)."
mask.variables['run'][:] = 0
if args.xy:
X, Y = [int(c) for c in args.xy]  # argparse yields strings; netCDF4 needs ints
print "Turning pixel(x,y) to '1', (ON)."
mask.variables['run'][X,Y] = 1
# Show the after state
if args.show:
show_mask(args.file, "AFTER")
|
<commit_before><commit_msg>Add utility script for modifying run mask.<commit_after>#!/usr/bin/env python
# Tobey Carman
# Dec 2016
import netCDF4 as nc
import argparse
import textwrap
def show_mask(file, note):
with nc.Dataset(file, 'r') as mask:
print "========== %s ==================================" % (note)
print "'%s'" % (file)
print mask.variables['run']
print mask.variables['run'][:]
print ""
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''
Helper script for modifying a dvm-dos-tem runmask netcdf file.
''')
)
parser.add_argument('file', metavar=('file'),
help=textwrap.dedent('''The runmask.nc file to operate on.'''))
parser.add_argument('--reset', action='store_true',
help=textwrap.dedent('''Set all pixels to zero (don't run).'''))
parser.add_argument("--xy", nargs=2,
help=textwrap.dedent('''The x, y position of the pixel to turn on.'''))
parser.add_argument("--show", action='store_true',
help=textwrap.dedent('''Print the mask after modification.'''))
args = parser.parse_args()
if args.show:
show_mask(args.file, "BEFORE")
with nc.Dataset(args.file, 'a') as mask:
if args.reset:
print "Setting all pixels in runmask to '0' (OFF)."
mask.variables['run'][:] = 0
if args.xy:
X, Y = [int(c) for c in args.xy]  # argparse yields strings; netCDF4 needs ints
print "Turning pixel(x,y) to '1', (ON)."
mask.variables['run'][X,Y] = 1
# Show the after state
if args.show:
show_mask(args.file, "AFTER")
|
|
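A hypothetical end-to-end demo of the helper in the record above: build a small runmask.nc, then flip one pixel on. The dimension and variable names ('Y', 'X', 'run') are assumptions inferred from the script's usage, not confirmed by the commit.
import netCDF4 as nc
with nc.Dataset('runmask.nc', 'w') as ds:
    ds.createDimension('Y', 10)
    ds.createDimension('X', 10)
    run = ds.createVariable('run', 'i4', ('Y', 'X'))
    run[:] = 0                        # start with every pixel off
with nc.Dataset('runmask.nc', 'a') as ds:
    ds.variables['run'][0, 0] = 1     # equivalent of: --xy 0 0
# Inspect the result with the script above:
#   python runmask-util.py --show runmask.nc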
27dcea8076d02be193fddfdd13306f9f2d337689
|
recipes/metric_distance_board_pin_values.py
|
recipes/metric_distance_board_pin_values.py
|
import RPi.GPIO as GPIO
from hcsr04sensor import sensor
# Created by Al Audet
# MIT License
def main():
'''Calculate the distance of an object in centimeters using a HCSR04 sensor
and a Raspberry Pi'''
# Use GPIO.BOARD values instead of BCM
trig_pin = 11
echo_pin = 13
# Default values
# unit = 'metric'
# temperature = 20
# round_to = 1
# Create a distance reading with the hcsr04 sensor module
# using GPIO.BOARD pin values.
value = sensor.Measurement(trig_pin, echo_pin, gpio_mode=GPIO.BOARD)
raw_measurement = value.raw_distance()
# Calculate the distance in centimeters
metric_distance = value.distance_metric(raw_measurement)
print("The Distance = {} centimeters".format(metric_distance))
if __name__ == "__main__":
main()
|
Add recipe for board pin values
|
Add recipe for board pin values
|
Python
|
mit
|
alaudet/hcsr04sensor
|
Add recipe for board pin values
|
import RPi.GPIO as GPIO
from hcsr04sensor import sensor
# Created by Al Audet
# MIT License
def main():
'''Calculate the distance of an object in centimeters using a HCSR04 sensor
and a Raspberry Pi'''
# Use GPIO.BOARD values instead of BCM
trig_pin = 11
echo_pin = 13
# Default values
# unit = 'metric'
# temperature = 20
# round_to = 1
# Create a distance reading with the hcsr04 sensor module
# using GPIO.BOARD pin values.
value = sensor.Measurement(trig_pin, echo_pin, gpio_mode=GPIO.BOARD)
raw_measurement = value.raw_distance()
# Calculate the distance in centimeters
metric_distance = value.distance_metric(raw_measurement)
print("The Distance = {} centimeters".format(metric_distance))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add recipe for board pin values<commit_after>
|
import RPi.GPIO as GPIO
from hcsr04sensor import sensor
# Created by Al Audet
# MIT License
def main():
'''Calculate the distance of an object in centimeters using a HCSR04 sensor
and a Raspberry Pi'''
# Use GPIO.BOARD values instead of BCM
trig_pin = 11
echo_pin = 13
# Default values
# unit = 'metric'
# temperature = 20
# round_to = 1
# Create a distance reading with the hcsr04 sensor module
# using GPIO.BOARD pin values.
value = sensor.Measurement(trig_pin, echo_pin, gpio_mode=GPIO.BOARD)
raw_measurement = value.raw_distance()
# Calculate the distance in centimeters
metric_distance = value.distance_metric(raw_measurement)
print("The Distance = {} centimeters".format(metric_distance))
if __name__ == "__main__":
main()
|
Add recipe for board pin valuesimport RPi.GPIO as GPIO
from hcsr04sensor import sensor
# Created by Al Audet
# MIT License
def main():
'''Calculate the distance of an object in centimeters using a HCSR04 sensor
and a Raspberry Pi'''
# Use GPIO.BOARD values instead of BCM
trig_pin = 11
echo_pin = 13
# Default values
# unit = 'metric'
# temperature = 20
# round_to = 1
# Create a distance reading with the hcsr04 sensor module
# using GPIO.BOARD pin values.
value = sensor.Measurement(trig_pin, echo_pin, gpio_mode=GPIO.BOARD)
raw_measurement = value.raw_distance()
# Calculate the distance in centimeters
metric_distance = value.distance_metric(raw_measurement)
print("The Distance = {} centimeters".format(metric_distance))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add recipe for board pin values<commit_after>import RPi.GPIO as GPIO
from hcsr04sensor import sensor
# Created by Al Audet
# MIT License
def main():
'''Calculate the distance of an object in centimeters using a HCSR04 sensor
and a Raspberry Pi'''
# Use GPIO.BOARD values instead of BCM
trig_pin = 11
echo_pin = 13
# Default values
# unit = 'metric'
# temperature = 20
# round_to = 1
# Create a distance reading with the hcsr04 sensor module
# using GPIO.BOARD pin values.
value = sensor.Measurement(trig_pin, echo_pin, gpio_mode=GPIO.BOARD)
raw_measurement = value.raw_distance()
# Calculate the distance in centimeters
metric_distance = value.distance_metric(raw_measurement)
print("The Distance = {} centimeters".format(metric_distance))
if __name__ == "__main__":
main()
|
|
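For comparison with the BOARD-numbered recipe above: on rev-2 and later Raspberry Pi headers, physical pin 11 is GPIO17 and pin 13 is GPIO27, so the same wiring in BCM mode looks like the sketch below. It assumes BCM is the library's default gpio_mode, as the recipe's comment implies.
from hcsr04sensor import sensor
value = sensor.Measurement(17, 27)    # BCM numbers for header pins 11, 13
raw_measurement = value.raw_distance()
print("The Distance = {} centimeters".format(
    value.distance_metric(raw_measurement)))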
d0494a9475437e70f5f03576d9b8888aaadac458
|
migrations/versions/1815829d365_.py
|
migrations/versions/1815829d365_.py
|
"""empty message
Revision ID: 1815829d365
Revises: 3fcddd64a72
Create Date: 2016-02-09 17:58:47.362133
"""
# revision identifiers, used by Alembic.
revision = '1815829d365'
down_revision = '3fcddd64a72'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
# create new unique index to include geo app ref
op.execute("DROP INDEX title_abr_idx")
op.execute("CREATE UNIQUE INDEX title_abr_geo_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'), (record->'data'->>'geometry_application_reference'))")
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.execute("DROP INDEX title_abr_geo_idx")
op.execute("CREATE UNIQUE INDEX title_abr_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'))")
### end Alembic commands ###
|
Replace title_abr_idx with new unique index that includes geometry_application_reference
|
Replace title_abr_idx with new unique index that includes geometry_application_reference
|
Python
|
mit
|
LandRegistry/system-of-record,LandRegistry/system-of-record
|
Replace title_abr_idx with new unique index that includes geometry_application_reference
|
"""empty message
Revision ID: 1815829d365
Revises: 3fcddd64a72
Create Date: 2016-02-09 17:58:47.362133
"""
# revision identifiers, used by Alembic.
revision = '1815829d365'
down_revision = '3fcddd64a72'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
# create new unique index to include geo app ref
op.execute("DROP INDEX title_abr_idx")
op.execute("CREATE UNIQUE INDEX title_abr_geo_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'), (record->'data'->>'geometry_application_reference'))")
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.execute("DROP INDEX title_abr_geo_idx")
op.execute("CREATE UNIQUE INDEX title_abr_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'))")
### end Alembic commands ###
|
<commit_before><commit_msg>Replace title_abr_idx with new unique index that includes geometry_application_reference<commit_after>
|
"""empty message
Revision ID: 1815829d365
Revises: 3fcddd64a72
Create Date: 2016-02-09 17:58:47.362133
"""
# revision identifiers, used by Alembic.
revision = '1815829d365'
down_revision = '3fcddd64a72'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
# create new unique index to include geo app ref
op.execute("DROP INDEX title_abr_idx")
op.execute("CREATE UNIQUE INDEX title_abr_geo_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'), (record->'data'->>'geometry_application_reference'))")
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.execute("DROP INDEX title_abr_geo_idx")
op.execute("CREATE UNIQUE INDEX title_abr_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'))")
### end Alembic commands ###
|
Replace title_abr_idx with new unique index that includes geometry_application_reference"""empty message
Revision ID: 1815829d365
Revises: 3fcddd64a72
Create Date: 2016-02-09 17:58:47.362133
"""
# revision identifiers, used by Alembic.
revision = '1815829d365'
down_revision = '3fcddd64a72'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
# create new unique index to include geo app ref
op.execute("DROP INDEX title_abr_idx")
op.execute("CREATE UNIQUE INDEX title_abr_geo_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'), (record->'data'->>'geometry_application_reference'))")
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.execute("DROP INDEX title_abr_geo_idx")
op.execute("CREATE UNIQUE INDEX title_abr_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'))")
### end Alembic commands ###
|
<commit_before><commit_msg>Replace title_abr_idx with new unique index that includes geometry_application_reference<commit_after>"""empty message
Revision ID: 1815829d365
Revises: 3fcddd64a72
Create Date: 2016-02-09 17:58:47.362133
"""
# revision identifiers, used by Alembic.
revision = '1815829d365'
down_revision = '3fcddd64a72'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
# create new unique index to include geo app ref
op.execute("DROP INDEX title_abr_idx")
op.execute("CREATE UNIQUE INDEX title_abr_geo_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'), (record->'data'->>'geometry_application_reference'))")
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.execute("DROP INDEX title_abr_geo_idx")
op.execute("CREATE UNIQUE INDEX title_abr_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'))")
### end Alembic commands ###
|
|
dccde3aa82fa7b01cec4c8d2a0035cac7414c37a
|
sensor_interface/scripts/hc-05_interface.py
|
sensor_interface/scripts/hc-05_interface.py
|
import rospy
import serial
import Adafruit_BBIO.UART as UART
from std_msgs.msg import String
import binascii
class Hc05InterfaceNode(object):
def __init__(self):
rospy.init_node('bluetooth_node')
        self.port = "/dev/ttyO4"
self.init_serial()
self.init_publisher()
self.read_bluetooth_data()
def init_serial(self):
UART.setup("UART4")
self.ser = serial.Serial(port=self.port,
baudrate=9600,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=1)
        if not self.ser.is_open:
rospy.logwarn("Unable to open %s", self.ser)
def init_publisher(self):
self.pub_bluetooth = rospy.Publisher(
'sensors/bluetooth/data',
String,
queue_size=10)
def read_bluetooth_data(self):
while not rospy.is_shutdown():
            BT_msg = self._readline()
if len(BT_msg) > 0:
BT_msg = binascii.b2a_uu(BT_msg)
self.pub_bluetooth.publish(BT_msg)
rospy.Rate(10).sleep()
def _readline(self):
eol = b'\r'
leneol = len(eol)
line = bytearray()
while True:
c = self.ser.read(1)
if c:
line += c
if line[-leneol:] == eol:
break
else:
break
return bytes(line)
if __name__ == '__main__':
try:
bluetooth_node = Hc05InterfaceNode()
rospy.spin()
except rospy.ROSInterruptException:
pass
|
Add interface script for bluetooth module
|
Add interface script for bluetooth module
|
Python
|
mit
|
vortexntnu/rov-control,vortexntnu/rov-control,vortexntnu/rov-control
|
Add interface script for bluetooth module
|
import rospy
import serial
import Adafruit_BBIO.UART as UART
from std_msgs.msg import String
import binascii
class Hc05InterfaceNode(object):
def __init__(self):
rospy.init_node('bluetooth_node')
        self.port = "/dev/ttyO4"
self.init_serial()
self.init_publisher()
self.read_bluetooth_data()
def init_serial(self):
UART.setup("UART4")
self.ser = serial.Serial(port=self.port,
baudrate=9600,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=1)
        if not self.ser.is_open:
rospy.logwarn("Unable to open %s", self.ser)
def init_publisher(self):
self.pub_bluetooth = rospy.Publisher(
'sensors/bluetooth/data',
String,
queue_size=10)
def read_bluetooth_data(self):
while not rospy.is_shutdown():
            BT_msg = self._readline()
if len(BT_msg) > 0:
BT_msg = binascii.b2a_uu(BT_msg)
self.pub_bluetooth.publish(BT_msg)
rospy.Rate(10).sleep()
def _readline(self):
eol = b'\r'
leneol = len(eol)
line = bytearray()
while True:
c = self.ser.read(1)
if c:
line += c
if line[-leneol:] == eol:
break
else:
break
return bytes(line)
if __name__ == '__main__':
try:
bluetooth_node = Hc05InterfaceNode()
rospy.spin()
except rospy.ROSInterruptException:
pass
|
<commit_before><commit_msg>Add interface script for bluetooth module<commit_after>
|
import rospy
import serial
import Adafruit_BBIO.UART as UART
from std_msgs.msg import String
import binascii
class Hc05InterfaceNode(object):
def __init__(self):
rospy.init_node('bluetooth_node')
        self.port = "/dev/ttyO4"
self.init_serial()
self.init_publisher()
self.read_bluetooth_data()
def init_serial(self):
UART.setup("UART4")
self.ser = serial.Serial(port=self.port,
baudrate=9600,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=1)
        if not self.ser.is_open:
rospy.logwarn("Unable to open %s", self.ser)
def init_publisher(self):
self.pub_bluetooth = rospy.Publisher(
'sensors/bluetooth/data',
String,
queue_size=10)
def read_bluetooth_data(self):
while not rospy.is_shutdown():
            BT_msg = self._readline()
if len(BT_msg) > 0:
BT_msg = binascii.b2a_uu(BT_msg)
self.pub_bluetooth.publish(BT_msg)
rospy.Rate(10).sleep()
def _readline(self):
eol = b'\r'
leneol = len(eol)
line = bytearray()
while True:
c = self.ser.read(1)
if c:
line += c
if line[-leneol:] == eol:
break
else:
break
return bytes(line)
if __name__ == '__main__':
try:
bluetooth_node = Hc05InterfaceNode()
rospy.spin()
except rospy.ROSInterruptException:
pass
|
Add interface script for bluetooth moduleimport rospy
import serial
import Adafruit_BBIO.UART as UART
from std_msgs.msg import String
import binascii
class Hc05InterfaceNode(object):
def __init__(self):
rospy.init_node('bluetooth_node')
        self.port = "/dev/ttyO4"
self.init_serial()
self.init_publisher()
self.read_bluetooth_data()
def init_serial(self):
UART.setup("UART4")
self.ser = serial.Serial(port=self.port,
baudrate=9600,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=1)
        if not self.ser.is_open:
rospy.logwarn("Unable to open %s", self.ser)
def init_publisher(self):
self.pub_bluetooth = rospy.Publisher(
'sensors/bluetooth/data',
String,
queue_size=10)
def read_bluetooth_data(self):
while not rospy.is_shutdown():
            BT_msg = self._readline()
if len(BT_msg) > 0:
BT_msg = binascii.b2a_uu(BT_msg)
self.pub_bluetooth.publish(BT_msg)
rospy.Rate(10).sleep()
def _readline(self):
eol = b'\r'
leneol = len(eol)
line = bytearray()
while True:
c = self.ser.read(1)
if c:
line += c
if line[-leneol:] == eol:
break
else:
break
return bytes(line)
if __name__ == '__main__':
try:
bluetooth_node = Hc05InterfaceNode()
rospy.spin()
except rospy.ROSInterruptException:
pass
|
<commit_before><commit_msg>Add interface script for bluetooth module<commit_after>import rospy
import serial
import Adafruit_BBIO.UART as UART
from std_msgs.msg import String
import binascii
class Hc05InterfaceNode(object):
def __init__(self):
rospy.init_node('bluetooth_node')
        self.port = "/dev/ttyO4"
self.init_serial()
self.init_publisher()
self.read_bluetooth_data()
def init_serial(self):
UART.setup("UART4")
self.ser = serial.Serial(port=self.port,
baudrate=9600,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=1)
        if not self.ser.is_open:
rospy.logwarn("Unable to open %s", self.ser)
def init_publisher(self):
self.pub_bluetooth = rospy.Publisher(
'sensors/bluetooth/data',
String,
queue_size=10)
def read_bluetooth_data(self):
while not rospy.is_shutdown():
            BT_msg = self._readline()
if len(BT_msg) > 0:
BT_msg = binascii.b2a_uu(BT_msg)
self.pub_bluetooth.publish(BT_msg)
rospy.Rate(10).sleep()
def _readline(self):
eol = b'\r'
leneol = len(eol)
line = bytearray()
while True:
c = self.ser.read(1)
if c:
line += c
if line[-leneol:] == eol:
break
else:
break
return bytes(line)
if __name__ == '__main__':
try:
bluetooth_node = Hc05InterfaceNode()
rospy.spin()
except rospy.ROSInterruptException:
pass
|
|
a792cd4c1525cf42efa91ae5fcc308795d6fcc5d
|
openslides/utils/translation_ext.py
|
openslides/utils/translation_ext.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
openslides.utils.translation_ext
~~~~~~~~~~~~~~~~~~~~~~~~
Localizable descriptions for django permissions.
:copyright: 2011 by the OpenSlides team, see AUTHORS.
:license: GNU GPL, see LICENSE for more details.
"""
from django.utils.translation import ugettext
from django.forms import ChoiceField, ModelChoiceField, ModelMultipleChoiceField
class LocalizedModelChoiceField(ModelChoiceField):
def __init__(self, *args, **kwargs):
super(LocalizedModelChoiceField, self).__init__(*args, **kwargs)
def _localized_get_choices(self):
if hasattr(self, '_choices'):
return self._choices
c = []
        for (id, text) in super(LocalizedModelChoiceField, self)._get_choices():
text = text.split(' | ')[-1]
c.append((id, ugettext(text)))
return c
choices = property(_localized_get_choices, ChoiceField._set_choices)
class LocalizedModelMultipleChoiceField(ModelMultipleChoiceField):
def __init__(self, *args, **kwargs):
self.to_field_name = kwargs.get('to_field_name', None)
super(LocalizedModelMultipleChoiceField, self).__init__(*args, **kwargs)
def _localized_get_choices(self):
if hasattr(self, '_choices'):
return self._choices
c = []
for (id, text) in super(LocalizedModelMultipleChoiceField, self)._get_choices():
text = text.split(' | ')[-1]
c.append((id, ugettext(text)))
return c
choices = property(_localized_get_choices, ChoiceField._set_choices)
def xugettext(msg, fixstr=False):
if fixstr:
return msg
else:
return ugettext(msg)
|
Support for localized permissions on top of the static django model.
|
Support for localized permissions on top of the static django model.
|
Python
|
mit
|
FinnStutzenstein/OpenSlides,CatoTH/OpenSlides,normanjaeckel/OpenSlides,FinnStutzenstein/OpenSlides,tsiegleauq/OpenSlides,tsiegleauq/OpenSlides,CatoTH/OpenSlides,ostcar/OpenSlides,normanjaeckel/OpenSlides,FinnStutzenstein/OpenSlides,OpenSlides/OpenSlides,emanuelschuetze/OpenSlides,jwinzer/OpenSlides,CatoTH/OpenSlides,emanuelschuetze/OpenSlides,ostcar/OpenSlides,emanuelschuetze/OpenSlides,jwinzer/OpenSlides,ostcar/OpenSlides,emanuelschuetze/OpenSlides,boehlke/OpenSlides,boehlke/OpenSlides,boehlke/OpenSlides,rolandgeider/OpenSlides,FinnStutzenstein/OpenSlides,tsiegleauq/OpenSlides,CatoTH/OpenSlides,normanjaeckel/OpenSlides,boehlke/OpenSlides,OpenSlides/OpenSlides,jwinzer/OpenSlides,normanjaeckel/OpenSlides,jwinzer/OpenSlides,rolandgeider/OpenSlides,jwinzer/OpenSlides,rolandgeider/OpenSlides
|
Support for localized permissions on top of the static django model.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
openslides.utils.translation_ext
~~~~~~~~~~~~~~~~~~~~~~~~
Localizable descriptions for django permissions.
:copyright: 2011 by the OpenSlides team, see AUTHORS.
:license: GNU GPL, see LICENSE for more details.
"""
from django.utils.translation import ugettext
from django.forms import ChoiceField, ModelChoiceField, ModelMultipleChoiceField
class LocalizedModelChoiceField(ModelChoiceField):
def __init__(self, *args, **kwargs):
super(LocalizedModelChoiceField, self).__init__(*args, **kwargs)
def _localized_get_choices(self):
if hasattr(self, '_choices'):
return self._choices
c = []
        for (id, text) in super(LocalizedModelChoiceField, self)._get_choices():
text = text.split(' | ')[-1]
c.append((id, ugettext(text)))
return c
choices = property(_localized_get_choices, ChoiceField._set_choices)
class LocalizedModelMultipleChoiceField(ModelMultipleChoiceField):
def __init__(self, *args, **kwargs):
self.to_field_name = kwargs.get('to_field_name', None)
super(LocalizedModelMultipleChoiceField, self).__init__(*args, **kwargs)
def _localized_get_choices(self):
if hasattr(self, '_choices'):
return self._choices
c = []
for (id, text) in super(LocalizedModelMultipleChoiceField, self)._get_choices():
text = text.split(' | ')[-1]
c.append((id, ugettext(text)))
return c
choices = property(_localized_get_choices, ChoiceField._set_choices)
def xugettext(msg, fixstr=False):
if fixstr:
return msg
else:
return ugettext(msg)
|
<commit_before><commit_msg>Support for localized permissions on top of the static django model.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
openslides.utils.translation_ext
~~~~~~~~~~~~~~~~~~~~~~~~
Localizable descriptions for django permissions.
:copyright: 2011 by the OpenSlides team, see AUTHORS.
:license: GNU GPL, see LICENSE for more details.
"""
from django.utils.translation import ugettext
from django.forms import ChoiceField, ModelChoiceField, ModelMultipleChoiceField
class LocalizedModelChoiceField(ModelChoiceField):
def __init__(self, *args, **kwargs):
super(LocalizedModelChoiceField, self).__init__(*args, **kwargs)
def _localized_get_choices(self):
if hasattr(self, '_choices'):
return self._choices
c = []
        for (id, text) in super(LocalizedModelChoiceField, self)._get_choices():
text = text.split(' | ')[-1]
c.append((id, ugettext(text)))
return c
choices = property(_localized_get_choices, ChoiceField._set_choices)
class LocalizedModelMultipleChoiceField(ModelMultipleChoiceField):
def __init__(self, *args, **kwargs):
self.to_field_name = kwargs.get('to_field_name', None)
super(LocalizedModelMultipleChoiceField, self).__init__(*args, **kwargs)
def _localized_get_choices(self):
if hasattr(self, '_choices'):
return self._choices
c = []
for (id, text) in super(LocalizedModelMultipleChoiceField, self)._get_choices():
text = text.split(' | ')[-1]
c.append((id, ugettext(text)))
return c
choices = property(_localized_get_choices, ChoiceField._set_choices)
def xugettext(msg, fixstr=False):
if fixstr:
return msg
else:
return ugettext(msg)
|
Support for localized permissions on top of the static django model.#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
openslides.utils.translation_ext
~~~~~~~~~~~~~~~~~~~~~~~~
Localizable descriptions for django permissions.
:copyright: 2011 by the OpenSlides team, see AUTHORS.
:license: GNU GPL, see LICENSE for more details.
"""
from django.utils.translation import ugettext
from django.forms import ChoiceField, ModelChoiceField, ModelMultipleChoiceField
class LocalizedModelChoiceField(ModelChoiceField):
def __init__(self, *args, **kwargs):
super(LocalizedModelChoiceField, self).__init__(*args, **kwargs)
def _localized_get_choices(self):
if hasattr(self, '_choices'):
return self._choices
c = []
        for (id, text) in super(LocalizedModelChoiceField, self)._get_choices():
text = text.split(' | ')[-1]
c.append((id, ugettext(text)))
return c
choices = property(_localized_get_choices, ChoiceField._set_choices)
class LocalizedModelMultipleChoiceField(ModelMultipleChoiceField):
def __init__(self, *args, **kwargs):
self.to_field_name = kwargs.get('to_field_name', None)
super(LocalizedModelMultipleChoiceField, self).__init__(*args, **kwargs)
def _localized_get_choices(self):
if hasattr(self, '_choices'):
return self._choices
c = []
for (id, text) in super(LocalizedModelMultipleChoiceField, self)._get_choices():
text = text.split(' | ')[-1]
c.append((id, ugettext(text)))
return c
choices = property(_localized_get_choices, ChoiceField._set_choices)
def xugettext(msg, fixstr=False):
if fixstr:
return msg
else:
return ugettext(msg)
|
<commit_before><commit_msg>Support for localized permissions on top of the static django model.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
openslides.utils.translation_ext
~~~~~~~~~~~~~~~~~~~~~~~~
Localizable descriptions for django permissions.
:copyright: 2011 by the OpenSlides team, see AUTHORS.
:license: GNU GPL, see LICENSE for more details.
"""
from django.utils.translation import ugettext
from django.forms import ChoiceField, ModelChoiceField, ModelMultipleChoiceField
class LocalizedModelChoiceField(ModelChoiceField):
def __init__(self, *args, **kwargs):
super(LocalizedModelChoiceField, self).__init__(*args, **kwargs)
def _localized_get_choices(self):
if hasattr(self, '_choices'):
return self._choices
c = []
        for (id, text) in super(LocalizedModelChoiceField, self)._get_choices():
text = text.split(' | ')[-1]
c.append((id, ugettext(text)))
return c
choices = property(_localized_get_choices, ChoiceField._set_choices)
class LocalizedModelMultipleChoiceField(ModelMultipleChoiceField):
def __init__(self, *args, **kwargs):
self.to_field_name = kwargs.get('to_field_name', None)
super(LocalizedModelMultipleChoiceField, self).__init__(*args, **kwargs)
def _localized_get_choices(self):
if hasattr(self, '_choices'):
return self._choices
c = []
for (id, text) in super(LocalizedModelMultipleChoiceField, self)._get_choices():
text = text.split(' | ')[-1]
c.append((id, ugettext(text)))
return c
choices = property(_localized_get_choices, ChoiceField._set_choices)
def xugettext(msg, fixstr=False):
if fixstr:
return msg
else:
return ugettext(msg)
|
|
518b5e8a634c77762916acd778b6c355f7c0a549
|
tests/utils/get_message_unit_tests.py
|
tests/utils/get_message_unit_tests.py
|
import unittest
from utils import get_message
class GetMessageUnitTests(unittest.TestCase):
def test_get_message_is_callable(self):
self.assertTrue(callable(get_message))
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(GetMessageUnitTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
Add get message unit tests
|
Add get message unit tests
|
Python
|
mit
|
jdgillespie91/trackerSpend,jdgillespie91/trackerSpend
|
Add get message unit tests
|
import unittest
from utils import get_message
class GetMessageUnitTests(unittest.TestCase):
def test_get_message_is_callable(self):
self.assertTrue(callable(get_message))
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(GetMessageUnitTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
<commit_before><commit_msg>Add get message unit tests<commit_after>
|
import unittest
from utils import get_message
class GetMessageUnitTests(unittest.TestCase):
def test_get_message_is_callable(self):
self.assertTrue(callable(get_message))
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(GetMessageUnitTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
Add get message unit testsimport unittest
from utils import get_message
class GetMessageUnitTests(unittest.TestCase):
def test_get_message_is_callable(self):
self.assertTrue(callable(get_message))
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(GetMessageUnitTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
<commit_before><commit_msg>Add get message unit tests<commit_after>import unittest
from utils import get_message
class GetMessageUnitTests(unittest.TestCase):
def test_get_message_is_callable(self):
self.assertTrue(callable(get_message))
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(GetMessageUnitTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
c8d4e4d38d7836ab34b8bab251d7b56c07b88c30
|
src/test/basic/basic.py
|
src/test/basic/basic.py
|
import unittest
import astar
class BasicTests(unittest.TestCase):
def test_bestpath(self):
"""ensure that we take the shortest path, and not the path with less elements.
the path with less elements is A -> B with a distance of 100
the shortest path is A -> C -> D -> B with a distance of 60
"""
nodes = {'A':[('B',100),('C',20)], 'C':[('D',20)], 'D':[('B',20)]}
def neighbors(n):
for n1, d in nodes[n]:
yield n1
def distance(n1, n2):
for n, d in nodes[n1]:
if n==n2:
return d
def cost(n,goal):
return 1
path = list(astar.find_path('A', 'B', neighbors_fnct=neighbors, heuristic_cost_estimate_fnct=cost, distance_between_fnct=distance))
self.assertEqual(4, len(path))
for i, n in enumerate('ACDB'):
self.assertEqual(n, path[i])
if __name__ == '__main__':
unittest.main()
|
Test in order to ensure that the algorithm returns the path that is optimal and not the first path it detects

Test in order to ensure that the algorithm returns the path that is optimal and not the first path it detects
|
Python
|
bsd-3-clause
|
jrialland/python-astar,jrialland/python-astar
|
Test in order to ensure that the algorithm returns the path that is optimal and not the first path it detects
|
import unittest
import astar
class BasicTests(unittest.TestCase):
def test_bestpath(self):
"""ensure that we take the shortest path, and not the path with less elements.
the path with less elements is A -> B with a distance of 100
the shortest path is A -> C -> D -> B with a distance of 60
"""
nodes = {'A':[('B',100),('C',20)], 'C':[('D',20)], 'D':[('B',20)]}
def neighbors(n):
for n1, d in nodes[n]:
yield n1
def distance(n1, n2):
for n, d in nodes[n1]:
if n==n2:
return d
def cost(n,goal):
return 1
path = list(astar.find_path('A', 'B', neighbors_fnct=neighbors, heuristic_cost_estimate_fnct=cost, distance_between_fnct=distance))
self.assertEqual(4, len(path))
for i, n in enumerate('ACDB'):
self.assertEqual(n, path[i])
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test in order to ensure that the algorithm returns the path that is optimal and not the first path it detects<commit_after>
|
import unittest
import astar
class BasicTests(unittest.TestCase):
def test_bestpath(self):
"""ensure that we take the shortest path, and not the path with less elements.
the path with less elements is A -> B with a distance of 100
the shortest path is A -> C -> D -> B with a distance of 60
"""
nodes = {'A':[('B',100),('C',20)], 'C':[('D',20)], 'D':[('B',20)]}
def neighbors(n):
for n1, d in nodes[n]:
yield n1
def distance(n1, n2):
for n, d in nodes[n1]:
if n==n2:
return d
def cost(n,goal):
return 1
path = list(astar.find_path('A', 'B', neighbors_fnct=neighbors, heuristic_cost_estimate_fnct=cost, distance_between_fnct=distance))
self.assertEqual(4, len(path))
for i, n in enumerate('ACDB'):
self.assertEqual(n, path[i])
if __name__ == '__main__':
unittest.main()
|
Test in order to ensure that the algorithm returns the path that is optimal and not the first path it detectsimport unittest
import astar
class BasicTests(unittest.TestCase):
def test_bestpath(self):
"""ensure that we take the shortest path, and not the path with less elements.
the path with less elements is A -> B with a distance of 100
the shortest path is A -> C -> D -> B with a distance of 60
"""
nodes = {'A':[('B',100),('C',20)], 'C':[('D',20)], 'D':[('B',20)]}
def neighbors(n):
for n1, d in nodes[n]:
yield n1
def distance(n1, n2):
for n, d in nodes[n1]:
if n==n2:
return d
def cost(n,goal):
return 1
path = list(astar.find_path('A', 'B', neighbors_fnct=neighbors, heuristic_cost_estimate_fnct=cost, distance_between_fnct=distance))
self.assertEqual(4, len(path))
for i, n in enumerate('ACDB'):
self.assertEqual(n, path[i])
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test in order to ensure that the algorithm returns the path that is optimal and not the first path it detects<commit_after>import unittest
import astar
class BasicTests(unittest.TestCase):
def test_bestpath(self):
"""ensure that we take the shortest path, and not the path with less elements.
the path with less elements is A -> B with a distance of 100
the shortest path is A -> C -> D -> B with a distance of 60
"""
nodes = {'A':[('B',100),('C',20)], 'C':[('D',20)], 'D':[('B',20)]}
def neighbors(n):
for n1, d in nodes[n]:
yield n1
def distance(n1, n2):
for n, d in nodes[n1]:
if n==n2:
return d
def cost(n,goal):
return 1
path = list(astar.find_path('A', 'B', neighbors_fnct=neighbors, heuristic_cost_estimate_fnct=cost, distance_between_fnct=distance))
self.assertEqual(4, len(path))
for i, n in enumerate('ACDB'):
self.assertEqual(n, path[i])
if __name__ == '__main__':
unittest.main()
|
|
688f9eca5b73e4c0ee4c1cfac62fe668a40d9e22
|
test/unit/ggrc/models/test_table_args.py
|
test/unit/ggrc/models/test_table_args.py
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: andraz@reciprocitylabs.com
# Maintained By: andraz@reciprocitylabs.com
"""Tests for overriden __table_args__."""
import unittest
from ggrc.models.all_models import all_models
from ggrc.models.mixins import Identifiable
class TestTableArgs(unittest.TestCase):
def test_extra_args_included(self):
"""Table args for all models should contain extra args.
This can be violated if you inherit Identifiable but then also define
constraints via __table_args__ instead of _extra_table_args.
"""
for model in all_models:
self.assertTrue(issubclass(model, Identifiable))
extras = getattr(model, "_extra_table_args", None)
if not extras:
continue
if callable(extras):
extras = extras(model)
      # Doing only constraint name checking because equality doesn't work here
extra_names = {e.name for e in extras}
args_names = {a.name for a in model.__table_args__}
self.assertTrue(
extra_names.issubset(args_names),
"_extra_table_args for {} are not present in __table_args__"
.format(model.__name__)
)
|
Add a test for overridden __table_args__

Add a test for overridden __table_args__
|
Python
|
apache-2.0
|
andrei-karalionak/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,NejcZupec/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,kr41/ggrc-core,edofic/ggrc-core
|
Add a test for overridden __table_args__
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: andraz@reciprocitylabs.com
# Maintained By: andraz@reciprocitylabs.com
"""Tests for overriden __table_args__."""
import unittest
from ggrc.models.all_models import all_models
from ggrc.models.mixins import Identifiable
class TestTableArgs(unittest.TestCase):
def test_extra_args_included(self):
"""Table args for all models should contain extra args.
This can be violated if you inherit Identifiable but then also define
constraints via __table_args__ instead of _extra_table_args.
"""
for model in all_models:
self.assertTrue(issubclass(model, Identifiable))
extras = getattr(model, "_extra_table_args", None)
if not extras:
continue
if callable(extras):
extras = extras(model)
      # Doing only constraint name checking because equality doesn't work here
extra_names = {e.name for e in extras}
args_names = {a.name for a in model.__table_args__}
self.assertTrue(
extra_names.issubset(args_names),
"_extra_table_args for {} are not present in __table_args__"
.format(model.__name__)
)
|
<commit_before><commit_msg>Add a test for overridden __table_args__<commit_after>
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: andraz@reciprocitylabs.com
# Maintained By: andraz@reciprocitylabs.com
"""Tests for overriden __table_args__."""
import unittest
from ggrc.models.all_models import all_models
from ggrc.models.mixins import Identifiable
class TestTableArgs(unittest.TestCase):
def test_extra_args_included(self):
"""Table args for all models should contain extra args.
This can be violated if you inherit Identifiable but then also define
constraints via __table_args__ instead of _extra_table_args.
"""
for model in all_models:
self.assertTrue(issubclass(model, Identifiable))
extras = getattr(model, "_extra_table_args", None)
if not extras:
continue
if callable(extras):
extras = extras(model)
      # Doing only constraint name checking because equality doesn't work here
extra_names = {e.name for e in extras}
args_names = {a.name for a in model.__table_args__}
self.assertTrue(
extra_names.issubset(args_names),
"_extra_table_args for {} are not present in __table_args__"
.format(model.__name__)
)
|
Add a test for overridden __table_args__# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: andraz@reciprocitylabs.com
# Maintained By: andraz@reciprocitylabs.com
"""Tests for overriden __table_args__."""
import unittest
from ggrc.models.all_models import all_models
from ggrc.models.mixins import Identifiable
class TestTableArgs(unittest.TestCase):
def test_extra_args_included(self):
"""Table args for all models should contain extra args.
This can be violated if you inherit Identifiable but then also define
constraints via __table_args__ instead of _extra_table_args.
"""
for model in all_models:
self.assertTrue(issubclass(model, Identifiable))
extras = getattr(model, "_extra_table_args", None)
if not extras:
continue
if callable(extras):
extras = extras(model)
      # Doing only constraint name checking because equality doesn't work here
extra_names = {e.name for e in extras}
args_names = {a.name for a in model.__table_args__}
self.assertTrue(
extra_names.issubset(args_names),
"_extra_table_args for {} are not present in __table_args__"
.format(model.__name__)
)
|
<commit_before><commit_msg>Add a test for overridden __table_args__<commit_after># Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: andraz@reciprocitylabs.com
# Maintained By: andraz@reciprocitylabs.com
"""Tests for overriden __table_args__."""
import unittest
from ggrc.models.all_models import all_models
from ggrc.models.mixins import Identifiable
class TestTableArgs(unittest.TestCase):
def test_extra_args_included(self):
"""Table args for all models should contain extra args.
This can be violated if you inherit Identifiable but then also define
constraints via __table_args__ instead of _extra_table_args.
"""
for model in all_models:
self.assertTrue(issubclass(model, Identifiable))
extras = getattr(model, "_extra_table_args", None)
if not extras:
continue
if callable(extras):
extras = extras(model)
      # Doing only constraint name checking because equality doesn't work here
extra_names = {e.name for e in extras}
args_names = {a.name for a in model.__table_args__}
self.assertTrue(
extra_names.issubset(args_names),
"_extra_table_args for {} are not present in __table_args__"
.format(model.__name__)
)
|
|
4395a165c421821717c6b8f30479e53fe56ab34c
|
examples/gwfilter_search.py
|
examples/gwfilter_search.py
|
"""Module giving some examples how to use PyDOV to query boreholes."""
def get_description():
"""The description gives information about the Boring type."""
from pydov.search.grondwaterfilter import GrondwaterFilterSearch
gwfilter = GrondwaterFilterSearch()
print(gwfilter.get_description())
def get_groundwaterfilters_in_hamme():
"""Get all details of the boreholes where 'gemeente' is 'Herstappe'."""
from pydov.search.grondwaterfilter import GrondwaterFilterSearch
from owslib.fes import PropertyIsEqualTo
gwfilter = GrondwaterFilterSearch()
query = PropertyIsEqualTo(propertyname='gemeente',
literal='Hamme')
df = gwfilter.search(query=query)
print(df)
if __name__ == '__main__':
# Comment out to skip these examples:
get_description()
# get_fields()
# Uncomment one of these to see the output:
#
get_groundwaterfilters_in_hamme()
# get_borehole_depth_in_gent()
# get_deep_boreholes()
# get_groundwater_related_boreholes_in_antwerp()
# get_boreholes_in_bounding_box()
# get_deep_boreholes_in_bounding_box()
# get_borehole_purpose_in_blankenberge()
|
Add example for filter search
|
Add example for filter search
|
Python
|
mit
|
DOV-Vlaanderen/pydov
|
Add example for filter search
|
"""Module giving some examples how to use PyDOV to query boreholes."""
def get_description():
"""The description gives information about the Boring type."""
from pydov.search.grondwaterfilter import GrondwaterFilterSearch
gwfilter = GrondwaterFilterSearch()
print(gwfilter.get_description())
def get_groundwaterfilters_in_hamme():
"""Get all details of the boreholes where 'gemeente' is 'Herstappe'."""
from pydov.search.grondwaterfilter import GrondwaterFilterSearch
from owslib.fes import PropertyIsEqualTo
gwfilter = GrondwaterFilterSearch()
query = PropertyIsEqualTo(propertyname='gemeente',
literal='Hamme')
df = gwfilter.search(query=query)
print(df)
if __name__ == '__main__':
# Comment out to skip these examples:
get_description()
# get_fields()
# Uncomment one of these to see the output:
#
get_groundwaterfilters_in_hamme()
# get_borehole_depth_in_gent()
# get_deep_boreholes()
# get_groundwater_related_boreholes_in_antwerp()
# get_boreholes_in_bounding_box()
# get_deep_boreholes_in_bounding_box()
# get_borehole_purpose_in_blankenberge()
|
<commit_before><commit_msg>Add example for filter search<commit_after>
|
"""Module giving some examples how to use PyDOV to query boreholes."""
def get_description():
"""The description gives information about the Boring type."""
from pydov.search.grondwaterfilter import GrondwaterFilterSearch
gwfilter = GrondwaterFilterSearch()
print(gwfilter.get_description())
def get_groundwaterfilters_in_hamme():
"""Get all details of the boreholes where 'gemeente' is 'Herstappe'."""
from pydov.search.grondwaterfilter import GrondwaterFilterSearch
from owslib.fes import PropertyIsEqualTo
gwfilter = GrondwaterFilterSearch()
query = PropertyIsEqualTo(propertyname='gemeente',
literal='Hamme')
df = gwfilter.search(query=query)
print(df)
if __name__ == '__main__':
# Comment out to skip these examples:
get_description()
# get_fields()
# Uncomment one of these to see the output:
#
get_groundwaterfilters_in_hamme()
# get_borehole_depth_in_gent()
# get_deep_boreholes()
# get_groundwater_related_boreholes_in_antwerp()
# get_boreholes_in_bounding_box()
# get_deep_boreholes_in_bounding_box()
# get_borehole_purpose_in_blankenberge()
|
Add example for filter search"""Module giving some examples how to use PyDOV to query boreholes."""
def get_description():
"""The description gives information about the Boring type."""
from pydov.search.grondwaterfilter import GrondwaterFilterSearch
gwfilter = GrondwaterFilterSearch()
print(gwfilter.get_description())
def get_groundwaterfilters_in_hamme():
"""Get all details of the boreholes where 'gemeente' is 'Herstappe'."""
from pydov.search.grondwaterfilter import GrondwaterFilterSearch
from owslib.fes import PropertyIsEqualTo
gwfilter = GrondwaterFilterSearch()
query = PropertyIsEqualTo(propertyname='gemeente',
literal='Hamme')
df = gwfilter.search(query=query)
print(df)
if __name__ == '__main__':
# Comment out to skip these examples:
get_description()
# get_fields()
# Uncomment one of these to see the output:
#
get_groundwaterfilters_in_hamme()
# get_borehole_depth_in_gent()
# get_deep_boreholes()
# get_groundwater_related_boreholes_in_antwerp()
# get_boreholes_in_bounding_box()
# get_deep_boreholes_in_bounding_box()
# get_borehole_purpose_in_blankenberge()
|
<commit_before><commit_msg>Add example for filter search<commit_after>"""Module giving some examples of how to use PyDOV to query groundwater filters."""
def get_description():
"""The description gives information about the Boring type."""
from pydov.search.grondwaterfilter import GrondwaterFilterSearch
gwfilter = GrondwaterFilterSearch()
print(gwfilter.get_description())
def get_groundwaterfilters_in_hamme():
"""Get all details of the boreholes where 'gemeente' is 'Herstappe'."""
from pydov.search.grondwaterfilter import GrondwaterFilterSearch
from owslib.fes import PropertyIsEqualTo
gwfilter = GrondwaterFilterSearch()
query = PropertyIsEqualTo(propertyname='gemeente',
literal='Hamme')
df = gwfilter.search(query=query)
print(df)
if __name__ == '__main__':
# Comment out to skip these examples:
get_description()
# get_fields()
# Uncomment one of these to see the output:
#
get_groundwaterfilters_in_hamme()
# get_borehole_depth_in_gent()
# get_deep_boreholes()
# get_groundwater_related_boreholes_in_antwerp()
# get_boreholes_in_bounding_box()
# get_deep_boreholes_in_bounding_box()
# get_borehole_purpose_in_blankenberge()
|
|
6998cbfd57366b8bcc598b1b89bb102b65e198c0
|
bin/prxtransfer-dns-station-migration.py
|
bin/prxtransfer-dns-station-migration.py
|
import boto3
client = boto3.client("route53")
# List the FTP subdomains that should have explicit DNS records added
subdomains = [
"wxyz",
]
changes = []
for subdomain in subdomains:
changes.append(
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": f"{subdomain}.prxtransfer.org",
"Type": "A",
"AliasTarget": {
"HostedZoneId": "Z26RNL4JYFTOTI",
"DNSName": "infra-FtpSe-1W1OF5U4X8M3Z-284373e0ff42a3aa.elb.us-east-1.amazonaws.com",
"EvaluateTargetHealth": False,
},
},
}
)
client.change_resource_record_sets(
HostedZoneId="Z2DOBCW7CSO5EP",
ChangeBatch={"Changes": changes},
)
|
Add migration script for prxtransfer.org DNS records
|
Add migration script for prxtransfer.org DNS records
|
Python
|
mit
|
PRX/Infrastructure,PRX/Infrastructure,PRX/Infrastructure,PRX/Infrastructure,PRX/Infrastructure
|
Add migration script for prxtransfer.org DNS records
|
import boto3
client = boto3.client("route53")
# List the FTP subdomains that should have explicit DNS records added
subdomains = [
"wxyz",
]
changes = []
for subdomain in subdomains:
changes.append(
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": f"{subdomain}.prxtransfer.org",
"Type": "A",
"AliasTarget": {
"HostedZoneId": "Z26RNL4JYFTOTI",
"DNSName": "infra-FtpSe-1W1OF5U4X8M3Z-284373e0ff42a3aa.elb.us-east-1.amazonaws.com",
"EvaluateTargetHealth": False,
},
},
}
)
client.change_resource_record_sets(
HostedZoneId="Z2DOBCW7CSO5EP",
ChangeBatch={"Changes": changes},
)
|
<commit_before><commit_msg>Add migration script for prxtransfer.org DNS records<commit_after>
|
import boto3
client = boto3.client("route53")
# List the FTP subdomains that should have explicit DNS records added
subdomains = [
"wxyz",
]
changes = []
for subdomain in subdomains:
changes.append(
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": f"{subdomain}.prxtransfer.org",
"Type": "A",
"AliasTarget": {
"HostedZoneId": "Z26RNL4JYFTOTI",
"DNSName": "infra-FtpSe-1W1OF5U4X8M3Z-284373e0ff42a3aa.elb.us-east-1.amazonaws.com",
"EvaluateTargetHealth": False,
},
},
}
)
client.change_resource_record_sets(
HostedZoneId="Z2DOBCW7CSO5EP",
ChangeBatch={"Changes": changes},
)
|
Add migration script for prxtransfer.org DNS recordsimport boto3
client = boto3.client("route53")
# List the FTP subdomains that should have explicit DNS records added
subdomains = [
"wxyz",
]
changes = []
for subdomain in subdomains:
changes.append(
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": f"{subdomain}.prxtransfer.org",
"Type": "A",
"AliasTarget": {
"HostedZoneId": "Z26RNL4JYFTOTI",
"DNSName": "infra-FtpSe-1W1OF5U4X8M3Z-284373e0ff42a3aa.elb.us-east-1.amazonaws.com",
"EvaluateTargetHealth": False,
},
},
}
)
client.change_resource_record_sets(
HostedZoneId="Z2DOBCW7CSO5EP",
ChangeBatch={"Changes": changes},
)
|
<commit_before><commit_msg>Add migration script for prxtransfer.org DNS records<commit_after>import boto3
client = boto3.client("route53")
# List the FTP subdomains that should have explicit DNS records added
subdomains = [
"wxyz",
]
changes = []
for subdomain in subdomains:
changes.append(
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": f"{subdomain}.prxtransfer.org",
"Type": "A",
"AliasTarget": {
"HostedZoneId": "Z26RNL4JYFTOTI",
"DNSName": "infra-FtpSe-1W1OF5U4X8M3Z-284373e0ff42a3aa.elb.us-east-1.amazonaws.com",
"EvaluateTargetHealth": False,
},
},
}
)
client.change_resource_record_sets(
HostedZoneId="Z2DOBCW7CSO5EP",
ChangeBatch={"Changes": changes},
)
|
|
c2120e991d5556196af1960207ee6a2cb3a9ba30
|
raspicam/main.py
|
raspicam/main.py
|
import logging
from config_resolver import Config
import cv2
from camera import USBCam
from processing import detect
from raspicam.webui import make_app
class Application:
def __init__(self, config):
self.config = config
self.frame_generator = USBCam().frame_generator()
def run_gui(self):
for frame in detect(self.frame_generator):
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
# cap.release()
cv2.destroyAllWindows()
def run_webui(self):
app = make_app(self.frame_generator, self.config)
app.run(host='0.0.0.0', debug=True, threaded=True)
def run_cli(self):
for frame in detect(self.frame_generator):
pass
logging.basicConfig(level=0)
config = Config('exhuma', 'raspicam', require_load=True)
# print(config.get('video', 'format', default='h264'))
app = Application(config)
app.run_cli()
|
import logging
from config_resolver import Config
import cv2
from camera import USBCam
from processing import detect
from raspicam.webui import make_app
LOG = logging.getLogger(__name__)
class Application:
def __init__(self, config):
self.config = config
self.initialised = False
self.frames = []
def init(self):
if not self.initialised:
self.frames = USBCam().frame_generator()
self.initialised = True
LOG.info('Application successfully initialised.')
def run_gui(self):
self.init()
for frame in detect(self.frames):
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
# cap.release()
cv2.destroyAllWindows()
def run_webui(self):
app = make_app(self.frames, self.config)
app.run(host='0.0.0.0', debug=True, threaded=True)
def run_cli(self):
for frame in detect(self.frames):
pass
logging.basicConfig(level=0)
config = Config('exhuma', 'raspicam', require_load=True)
# print(config.get('video', 'format', default='h264'))
app = Application(config)
app.run_cli()
|
Remove "active" statements from constructor
|
Remove "active" statements from constructor
|
Python
|
mit
|
exhuma/raspicam,exhuma/raspicam,exhuma/raspicam
|
import logging
from config_resolver import Config
import cv2
from camera import USBCam
from processing import detect
from raspicam.webui import make_app
class Application:
def __init__(self, config):
self.config = config
self.frame_generator = USBCam().frame_generator()
def run_gui(self):
for frame in detect(self.frame_generator):
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
# cap.release()
cv2.destroyAllWindows()
def run_webui(self):
app = make_app(self.frame_generator, self.config)
app.run(host='0.0.0.0', debug=True, threaded=True)
def run_cli(self):
for frame in detect(self.frame_generator):
pass
logging.basicConfig(level=0)
config = Config('exhuma', 'raspicam', require_load=True)
# print(config.get('video', 'format', default='h264'))
app = Application(config)
app.run_cli()
Remove "active" statements from constructor
|
import logging
from config_resolver import Config
import cv2
from camera import USBCam
from processing import detect
from raspicam.webui import make_app
LOG = logging.getLogger(__name__)
class Application:
def __init__(self, config):
self.config = config
self.initialised = False
self.frames = []
def init(self):
if not self.initialised:
self.frames = USBCam().frame_generator()
self.initialised = True
LOG.info('Application successfully initialised.')
def run_gui(self):
self.init()
for frame in detect(self.frames):
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
# cap.release()
cv2.destroyAllWindows()
def run_webui(self):
app = make_app(self.frames, self.config)
app.run(host='0.0.0.0', debug=True, threaded=True)
def run_cli(self):
for frame in detect(self.frames):
pass
logging.basicConfig(level=0)
config = Config('exhuma', 'raspicam', require_load=True)
# print(config.get('video', 'format', default='h264'))
app = Application(config)
app.run_cli()
|
<commit_before>import logging
from config_resolver import Config
import cv2
from camera import USBCam
from processing import detect
from raspicam.webui import make_app
class Application:
def __init__(self, config):
self.config = config
self.frame_generator = USBCam().frame_generator()
def run_gui(self):
for frame in detect(self.frame_generator):
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
# cap.release()
cv2.destroyAllWindows()
def run_webui(self):
app = make_app(self.frame_generator, self.config)
app.run(host='0.0.0.0', debug=True, threaded=True)
def run_cli(self):
for frame in detect(self.frame_generator):
pass
logging.basicConfig(level=0)
config = Config('exhuma', 'raspicam', require_load=True)
# print(config.get('video', 'format', default='h264'))
app = Application(config)
app.run_cli()
<commit_msg>Remove "active" statements from constructor<commit_after>
|
import logging
from config_resolver import Config
import cv2
from camera import USBCam
from processing import detect
from raspicam.webui import make_app
LOG = logging.getLogger(__name__)
class Application:
def __init__(self, config):
self.config = config
self.initialised = False
self.frames = []
def init(self):
if not self.initialised:
self.frames = USBCam().frame_generator()
self.initialised = True
LOG.info('Application successfully initialised.')
def run_gui(self):
self.init()
for frame in detect(self.frames):
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
# cap.release()
cv2.destroyAllWindows()
def run_webui(self):
app = make_app(self.frames, self.config)
app.run(host='0.0.0.0', debug=True, threaded=True)
def run_cli(self):
for frame in detect(self.frames):
pass
logging.basicConfig(level=0)
config = Config('exhuma', 'raspicam', require_load=True)
# print(config.get('video', 'format', default='h264'))
app = Application(config)
app.run_cli()
|
import logging
from config_resolver import Config
import cv2
from camera import USBCam
from processing import detect
from raspicam.webui import make_app
class Application:
def __init__(self, config):
self.config = config
self.frame_generator = USBCam().frame_generator()
def run_gui(self):
for frame in detect(self.frame_generator):
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
# cap.release()
cv2.destroyAllWindows()
def run_webui(self):
app = make_app(self.frame_generator, self.config)
app.run(host='0.0.0.0', debug=True, threaded=True)
def run_cli(self):
for frame in detect(self.frame_generator):
pass
logging.basicConfig(level=0)
config = Config('exhuma', 'raspicam', require_load=True)
# print(config.get('video', 'format', default='h264'))
app = Application(config)
app.run_cli()
Remove "active" statements from constructorimport logging
from config_resolver import Config
import cv2
from camera import USBCam
from processing import detect
from raspicam.webui import make_app
LOG = logging.getLogger(__name__)
class Application:
def __init__(self, config):
self.config = config
self.initialised = False
self.frames = []
def init(self):
if not self.initialised:
self.frames = USBCam().frame_generator()
self.initialised = True
LOG.info('Application successfully initialised.')
def run_gui(self):
self.init()
for frame in detect(self.frames):
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
# cap.release()
cv2.destroyAllWindows()
def run_webui(self):
app = make_app(self.frames, self.config)
app.run(host='0.0.0.0', debug=True, threaded=True)
def run_cli(self):
for frame in detect(self.frames):
pass
logging.basicConfig(level=0)
config = Config('exhuma', 'raspicam', require_load=True)
# print(config.get('video', 'format', default='h264'))
app = Application(config)
app.run_cli()
|
<commit_before>import logging
from config_resolver import Config
import cv2
from camera import USBCam
from processing import detect
from raspicam.webui import make_app
class Application:
def __init__(self, config):
self.config = config
self.frame_generator = USBCam().frame_generator()
def run_gui(self):
for frame in detect(self.frame_generator):
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
# cap.release()
cv2.destroyAllWindows()
def run_webui(self):
app = make_app(self.frame_generator, self.config)
app.run(host='0.0.0.0', debug=True, threaded=True)
def run_cli(self):
for frame in detect(self.frame_generator):
pass
logging.basicConfig(level=0)
config = Config('exhuma', 'raspicam', require_load=True)
# print(config.get('video', 'format', default='h264'))
app = Application(config)
app.run_cli()
<commit_msg>Remove "active" statements from constructor<commit_after>import logging
from config_resolver import Config
import cv2
from camera import USBCam
from processing import detect
from raspicam.webui import make_app
LOG = logging.getLogger(__name__)
class Application:
def __init__(self, config):
self.config = config
self.initialised = False
self.frames = []
def init(self):
if not self.initialised:
self.frames = USBCam().frame_generator()
self.initialised = True
LOG.info('Application successfully initialised.')
def run_gui(self):
self.init()
for frame in detect(self.frames):
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
# cap.release()
cv2.destroyAllWindows()
def run_webui(self):
app = make_app(self.frames, self.config)
app.run(host='0.0.0.0', debug=True, threaded=True)
def run_cli(self):
for frame in detect(self.frames):
pass
logging.basicConfig(level=0)
config = Config('exhuma', 'raspicam', require_load=True)
# print(config.get('video', 'format', default='h264'))
app = Application(config)
app.run_cli()
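A standalone sketch of the idiom this change adopts: keep __init__ side-effect free and create the expensive resource on first use. Here iter(range(3)) stands in for the camera's frame generator; everything else is hypothetical.

# Hedged sketch: deferred initialisation so construction is cheap and testable.
class LazyCamera:
    def __init__(self):
        self._frames = None
    @property
    def frames(self):
        if self._frames is None:
            self._frames = iter(range(3))  # stands in for USBCam().frame_generator()
        return self._frames

cam = LazyCamera()       # no hardware touched yet
print(list(cam.frames))  # generator created on first use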
|
7a5f75c36ee70382763fc3c8f9d7441390a30e37
|
plot_generator.py
|
plot_generator.py
|
__author__ = 'Anti'
import scipy.signal
import numpy as np
import matplotlib.pyplot as plt
def getWave(freq, time, duty=0.5):
np_square_wave_period = 2 * np.pi
return [i if i != -1 else 0 for i in scipy.signal.square(time * np_square_wave_period * freq, duty=duty)]
def newSignalPlot(signal, plot_nr, freq):
ax = plt.subplot(3, 1, plot_nr)
plt.tight_layout()
plt.title(str(freq)+" Hz")
plt.ylim(-0.5, 1.5)
ax.set_yticks(ticks=[0, 1])
ax.set_xticks(ticks=[float(i)/60 for i in range(60)], minor=True)
ax.set_xlabel("Time (s)")
ax.set_ylabel("State")
ax.grid(which='minor', alpha=0.7)
ax.grid(which='major', alpha=0.7)
#plt.grid(which="both")
plt.plot(time, signal)
def squareWaves():
newSignalPlot(wave10, 1, 10)
newSignalPlot(wave11a, 2, 11)
newSignalPlot(wave12a, 3, 12)
def rectWaves():
newSignalPlot(wave10, 1, 10)
newSignalPlot(wave11b, 2, 11)
newSignalPlot(wave12b, 3, 12)
time = np.linspace(0, 1, 6000)
wave10 = getWave(10, time)
wave11b = [1 if i < 3 or i > 6 and i < 9 or i > 11 and i < 14 or i > 17 and i < 20 or i > 22 and i < 25 or i > 28 and i < 30 or \
i > 33 and i < 36 or i > 39 and i < 41 or i > 44 and i < 47 or i > 50 and i < 52 or i > 55 and i < 58 else 0 for i in time*60]
wave11a = getWave(11, time)
wave12a = getWave(12, time)
wave12b = getWave(12, time, duty=0.6)
rectWaves()
#squareWaves()
# fft11 = np.abs(np.fft.rfft(wave10)**2)
# plt.subplot(212)
# plt.xlim(0, 100)
# plt.grid(True)
# plt.plot(fft11)
plt.show()
|
Add python plot generator (pyplot).
|
Add python plot generator (pyplot).
|
Python
|
mit
|
kahvel/VEP-BCI
|
Add python plot generator (pyplot).
|
__author__ = 'Anti'
import scipy.signal
import numpy as np
import matplotlib.pyplot as plt
def getWave(freq, time, duty=0.5):
np_square_wave_period = 2 * np.pi
return [i if i != -1 else 0 for i in scipy.signal.square(time * np_square_wave_period * freq, duty=duty)]
def newSignalPlot(signal, plot_nr, freq):
ax = plt.subplot(3, 1, plot_nr)
plt.tight_layout()
plt.title(str(freq)+" Hz")
plt.ylim(-0.5, 1.5)
ax.set_yticks(ticks=[0, 1])
ax.set_xticks(ticks=[float(i)/60 for i in range(60)], minor=True)
ax.set_xlabel("Time (s)")
ax.set_ylabel("State")
ax.grid(which='minor', alpha=0.7)
ax.grid(which='major', alpha=0.7)
#plt.grid(which="both")
plt.plot(time, signal)
def squareWaves():
newSignalPlot(wave10, 1, 10)
newSignalPlot(wave11a, 2, 11)
newSignalPlot(wave12a, 3, 12)
def rectWaves():
newSignalPlot(wave10, 1, 10)
newSignalPlot(wave11b, 2, 11)
newSignalPlot(wave12b, 3, 12)
time = np.linspace(0, 1, 6000)
wave10 = getWave(10, time)
wave11b = [1 if i < 3 or i > 6 and i < 9 or i > 11 and i < 14 or i > 17 and i < 20 or i > 22 and i < 25 or i > 28 and i < 30 or \
i > 33 and i < 36 or i > 39 and i < 41 or i > 44 and i < 47 or i > 50 and i < 52 or i > 55 and i < 58 else 0 for i in time*60]
wave11a = getWave(11, time)
wave12a = getWave(12, time)
wave12b = getWave(12, time, duty=0.6)
rectWaves()
#squareWaves()
# fft11 = np.abs(np.fft.rfft(wave10)**2)
# plt.subplot(212)
# plt.xlim(0, 100)
# plt.grid(True)
# plt.plot(fft11)
plt.show()
|
<commit_before><commit_msg>Add python plot generator (pyplot).<commit_after>
|
__author__ = 'Anti'
import scipy.signal
import numpy as np
import matplotlib.pyplot as plt
def getWave(freq, time, duty=0.5):
np_square_wave_period = 2 * np.pi
return [i if i != -1 else 0 for i in scipy.signal.square(time * np_square_wave_period * freq, duty=duty)]
def newSignalPlot(signal, plot_nr, freq):
ax = plt.subplot(3, 1, plot_nr)
plt.tight_layout()
plt.title(str(freq)+" Hz")
plt.ylim(-0.5, 1.5)
ax.set_yticks(ticks=[0, 1])
ax.set_xticks(ticks=[float(i)/60 for i in range(60)], minor=True)
ax.set_xlabel("Time (s)")
ax.set_ylabel("State")
ax.grid(which='minor', alpha=0.7)
ax.grid(which='major', alpha=0.7)
#plt.grid(which="both")
plt.plot(time, signal)
def squareWaves():
newSignalPlot(wave10, 1, 10)
newSignalPlot(wave11a, 2, 11)
newSignalPlot(wave12a, 3, 12)
def rectWaves():
newSignalPlot(wave10, 1, 10)
newSignalPlot(wave11b, 2, 11)
newSignalPlot(wave12b, 3, 12)
time = np.linspace(0, 1, 6000)
wave10 = getWave(10, time)
wave11b = [1 if i < 3 or i > 6 and i < 9 or i > 11 and i < 14 or i > 17 and i < 20 or i > 22 and i < 25 or i > 28 and i < 30 or \
i > 33 and i < 36 or i > 39 and i < 41 or i > 44 and i < 47 or i > 50 and i < 52 or i > 55 and i < 58 else 0 for i in time*60]
wave11a = getWave(11, time)
wave12a = getWave(12, time)
wave12b = getWave(12, time, duty=0.6)
rectWaves()
#squareWaves()
# fft11 = np.abs(np.fft.rfft(wave10)**2)
# plt.subplot(212)
# plt.xlim(0, 100)
# plt.grid(True)
# plt.plot(fft11)
plt.show()
|
Add python plot generator (pyplot).__author__ = 'Anti'
import scipy.signal
import numpy as np
import matplotlib.pyplot as plt
def getWave(freq, time, duty=0.5):
np_square_wave_period = 2 * np.pi
return [i if i != -1 else 0 for i in scipy.signal.square(time * np_square_wave_period * freq, duty=duty)]
def newSignalPlot(signal, plot_nr, freq):
ax = plt.subplot(3, 1, plot_nr)
plt.tight_layout()
plt.title(str(freq)+" Hz")
plt.ylim(-0.5, 1.5)
ax.set_yticks(ticks=[0, 1])
ax.set_xticks(ticks=[float(i)/60 for i in range(60)], minor=True)
ax.set_xlabel("Time (s)")
ax.set_ylabel("State")
ax.grid(which='minor', alpha=0.7)
ax.grid(which='major', alpha=0.7)
#plt.grid(which="both")
plt.plot(time, signal)
def squareWaves():
newSignalPlot(wave10, 1, 10)
newSignalPlot(wave11a, 2, 11)
newSignalPlot(wave12a, 3, 12)
def rectWaves():
newSignalPlot(wave10, 1, 10)
newSignalPlot(wave11b, 2, 11)
newSignalPlot(wave12b, 3, 12)
time = np.linspace(0, 1, 6000)
wave10 = getWave(10, time)
wave11b = [1 if i < 3 or i > 6 and i < 9 or i > 11 and i < 14 or i > 17 and i < 20 or i > 22 and i < 25 or i > 28 and i < 30 or \
i > 33 and i < 36 or i > 39 and i < 41 or i > 44 and i < 47 or i > 50 and i < 52 or i > 55 and i < 58 else 0 for i in time*60]
wave11a = getWave(11, time)
wave12a = getWave(12, time)
wave12b = getWave(12, time, duty=0.6)
rectWaves()
#squareWaves()
# fft11 = np.abs(np.fft.rfft(wave10)**2)
# plt.subplot(212)
# plt.xlim(0, 100)
# plt.grid(True)
# plt.plot(fft11)
plt.show()
|
<commit_before><commit_msg>Add python plot generator (pyplot).<commit_after>__author__ = 'Anti'
import scipy.signal
import numpy as np
import matplotlib.pyplot as plt
def getWave(freq, time, duty=0.5):
np_square_wave_period = 2 * np.pi
return [i if i != -1 else 0 for i in scipy.signal.square(time * np_square_wave_period * freq, duty=duty)]
def newSignalPlot(signal, plot_nr, freq):
ax = plt.subplot(3, 1, plot_nr)
plt.tight_layout()
plt.title(str(freq)+" Hz")
plt.ylim(-0.5, 1.5)
ax.set_yticks(ticks=[0, 1])
ax.set_xticks(ticks=[float(i)/60 for i in range(60)], minor=True)
ax.set_xlabel("Time (s)")
ax.set_ylabel("State")
ax.grid(which='minor', alpha=0.7)
ax.grid(which='major', alpha=0.7)
#plt.grid(which="both")
plt.plot(time, signal)
def squareWaves():
newSignalPlot(wave10, 1, 10)
newSignalPlot(wave11a, 2, 11)
newSignalPlot(wave12a, 3, 12)
def rectWaves():
newSignalPlot(wave10, 1, 10)
newSignalPlot(wave11b, 2, 11)
newSignalPlot(wave12b, 3, 12)
time = np.linspace(0, 1, 6000)
wave10 = getWave(10, time)
wave11b = [1 if i < 3 or i > 6 and i < 9 or i > 11 and i < 14 or i > 17 and i < 20 or i > 22 and i < 25 or i > 28 and i < 30 or \
i > 33 and i < 36 or i > 39 and i < 41 or i > 44 and i < 47 or i > 50 and i < 52 or i > 55 and i < 58 else 0 for i in time*60]
wave11a = getWave(11, time)
wave12a = getWave(12, time)
wave12b = getWave(12, time, duty=0.6)
rectWaves()
#squareWaves()
# fft11 = np.abs(np.fft.rfft(wave10)**2)
# plt.subplot(212)
# plt.xlim(0, 100)
# plt.grid(True)
# plt.plot(fft11)
plt.show()
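A small sketch, separate from the script above, checking that scipy.signal.square's duty parameter sets the high fraction of each period, so the mean of the remapped 0/1 wave approximates it.

# Hedged sketch: the mean of the 0/1 wave should be close to the duty cycle.
import numpy as np
import scipy.signal

t = np.linspace(0, 1, 6000)
wave = [i if i != -1 else 0 for i in scipy.signal.square(t * 2 * np.pi * 12, duty=0.6)]
print(np.mean(wave))  # roughly 0.6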
|
|
ce3d667bb44c29b0d163ec2e1c8aa87d3cfe718c
|
horsephrase/_guess_guess.py
|
horsephrase/_guess_guess.py
|
# Guess how many guesses it will take to guess a password.
from __future__ import unicode_literals
from ._implementation import words
def how_long(length=4, choices=len(words), speed=1000 * 1000 * 1000,
optimism=2):
"""
How long might it take to guess a password?
@param length: the number of words that we're going to choose.
@type length: L{int}
    @param choices: the number of words we might choose between.
    @type choices: L{int}
@param speed: the speed of our hypothetical password guesser, in guesses
per second.
@type speed: L{int}
@param optimism: When we start guessing all the options, we probably won't
have to guess I{all} of them to get a hit. This assumes that the
guesser will have to guess only C{1/optimism} of the total number of
possible options before it finds a hit.
"""
return ((choices ** length) / (speed * optimism))
def redivmod(initial_value, factors):
"""
Chop up C{initial_value} according to the list of C{factors} and return a
formatted string.
"""
result = []
value = initial_value
for divisor, label in factors:
if not divisor:
remainder = value
if not remainder:
break
else:
value, remainder = divmod(value, divisor)
if not value and not remainder:
break
if remainder == 1:
# depluralize
label = label[:-1]
addition = unicode(remainder) + ' ' + unicode(label)
if not result:
addition = 'and ' + addition
result.insert(0, addition)
return ', '.join(result)
def humantime(seconds):
"""
A human-readable interpretation of a time interval.
@param seconds: A number of seconds.
@type seconds: The type of seconds.
@return: A string describing the time interval.
@rtype: L{unicode}
"""
return redivmod(seconds, [(60, "seconds"),
(60, "minutes"),
(24, "hours"),
(7, "days"),
(52, "weeks"),
(0, "years")])
if __name__ == "__main__":
import sys
print(humantime(how_long(*map(int, sys.argv[1:]))))
|
Put my back-of-the-envelope math here to check against.
|
Put my back-of-the-envelope math here to check against.
|
Python
|
mit
|
glyph/horsephrase
|
Put my back-of-the-envelope math here to check against.
|
# Guess how many guesses it will take to guess a password.
from __future__ import unicode_literals
from ._implementation import words
def how_long(length=4, choices=len(words), speed=1000 * 1000 * 1000,
optimism=2):
"""
How long might it take to guess a password?
@param length: the number of words that we're going to choose.
@type length: L{int}
    @param choices: the number of words we might choose between.
    @type choices: L{int}
@param speed: the speed of our hypothetical password guesser, in guesses
per second.
@type speed: L{int}
@param optimism: When we start guessing all the options, we probably won't
have to guess I{all} of them to get a hit. This assumes that the
guesser will have to guess only C{1/optimism} of the total number of
possible options before it finds a hit.
"""
return ((choices ** length) / (speed * optimism))
def redivmod(initial_value, factors):
"""
Chop up C{initial_value} according to the list of C{factors} and return a
formatted string.
"""
result = []
value = initial_value
for divisor, label in factors:
if not divisor:
remainder = value
if not remainder:
break
else:
value, remainder = divmod(value, divisor)
if not value and not remainder:
break
if remainder == 1:
# depluralize
label = label[:-1]
addition = unicode(remainder) + ' ' + unicode(label)
if not result:
addition = 'and ' + addition
result.insert(0, addition)
return ', '.join(result)
def humantime(seconds):
"""
A human-readable interpretation of a time interval.
@param seconds: A number of seconds.
@type seconds: The type of seconds.
@return: A string describing the time interval.
@rtype: L{unicode}
"""
return redivmod(seconds, [(60, "seconds"),
(60, "minutes"),
(24, "hours"),
(7, "days"),
(52, "weeks"),
(0, "years")])
if __name__ == "__main__":
import sys
print(humantime(how_long(*map(int, sys.argv[1:]))))
|
<commit_before><commit_msg>Put my back-of-the-envelope math here to check against.<commit_after>
|
# Guess how many guesses it will take to guess a password.
from __future__ import unicode_literals
from ._implementation import words
def how_long(length=4, choices=len(words), speed=1000 * 1000 * 1000,
optimism=2):
"""
How long might it take to guess a password?
@param length: the number of words that we're going to choose.
@type length: L{int}
    @param choices: the number of words we might choose between.
    @type choices: L{int}
@param speed: the speed of our hypothetical password guesser, in guesses
per second.
@type speed: L{int}
@param optimism: When we start guessing all the options, we probably won't
have to guess I{all} of them to get a hit. This assumes that the
guesser will have to guess only C{1/optimism} of the total number of
possible options before it finds a hit.
"""
return ((choices ** length) / (speed * optimism))
def redivmod(initial_value, factors):
"""
Chop up C{initial_value} according to the list of C{factors} and return a
formatted string.
"""
result = []
value = initial_value
for divisor, label in factors:
if not divisor:
remainder = value
if not remainder:
break
else:
value, remainder = divmod(value, divisor)
if not value and not remainder:
break
if remainder == 1:
# depluralize
label = label[:-1]
addition = unicode(remainder) + ' ' + unicode(label)
if not result:
addition = 'and ' + addition
result.insert(0, addition)
return ', '.join(result)
def humantime(seconds):
"""
A human-readable interpretation of a time interval.
@param seconds: A number of seconds.
@type seconds: The type of seconds.
@return: A string describing the time interval.
@rtype: L{unicode}
"""
return redivmod(seconds, [(60, "seconds"),
(60, "minutes"),
(24, "hours"),
(7, "days"),
(52, "weeks"),
(0, "years")])
if __name__ == "__main__":
import sys
print(humantime(how_long(*map(int, sys.argv[1:]))))
|
Put my back-of-the-envelope math here to check against.# Guess how many guesses it will take to guess a password.
from __future__ import unicode_literals
from ._implementation import words
def how_long(length=4, choices=len(words), speed=1000 * 1000 * 1000,
optimism=2):
"""
How long might it take to guess a password?
@param length: the number of words that we're going to choose.
@type length: L{int}
    @param choices: the number of words we might choose between.
    @type choices: L{int}
@param speed: the speed of our hypothetical password guesser, in guesses
per second.
@type speed: L{int}
@param optimism: When we start guessing all the options, we probably won't
have to guess I{all} of them to get a hit. This assumes that the
guesser will have to guess only C{1/optimism} of the total number of
possible options before it finds a hit.
"""
return ((choices ** length) / (speed * optimism))
def redivmod(initial_value, factors):
"""
Chop up C{initial_value} according to the list of C{factors} and return a
formatted string.
"""
result = []
value = initial_value
for divisor, label in factors:
if not divisor:
remainder = value
if not remainder:
break
else:
value, remainder = divmod(value, divisor)
if not value and not remainder:
break
if remainder == 1:
# depluralize
label = label[:-1]
addition = unicode(remainder) + ' ' + unicode(label)
if not result:
addition = 'and ' + addition
result.insert(0, addition)
return ', '.join(result)
def humantime(seconds):
"""
A human-readable interpretation of a time interval.
@param seconds: A number of seconds.
@type seconds: The type of seconds.
@return: A string describing the time interval.
@rtype: L{unicode}
"""
return redivmod(seconds, [(60, "seconds"),
(60, "minutes"),
(24, "hours"),
(7, "days"),
(52, "weeks"),
(0, "years")])
if __name__ == "__main__":
import sys
print(humantime(how_long(*map(int, sys.argv[1:]))))
|
<commit_before><commit_msg>Put my back-of-the-envelope math here to check against.<commit_after># Guess how many guesses it will take to guess a password.
from __future__ import unicode_literals
from ._implementation import words
def how_long(length=4, choices=len(words), speed=1000 * 1000 * 1000,
optimism=2):
"""
How long might it take to guess a password?
@param length: the number of words that we're going to choose.
@type length: L{int}
    @param choices: the number of words we might choose between.
    @type choices: L{int}
@param speed: the speed of our hypothetical password guesser, in guesses
per second.
@type speed: L{int}
@param optimism: When we start guessing all the options, we probably won't
have to guess I{all} of them to get a hit. This assumes that the
guesser will have to guess only C{1/optimism} of the total number of
possible options before it finds a hit.
"""
return ((choices ** length) / (speed * optimism))
def redivmod(initial_value, factors):
"""
Chop up C{initial_value} according to the list of C{factors} and return a
formatted string.
"""
result = []
value = initial_value
for divisor, label in factors:
if not divisor:
remainder = value
if not remainder:
break
else:
value, remainder = divmod(value, divisor)
if not value and not remainder:
break
if remainder == 1:
# depluralize
label = label[:-1]
addition = unicode(remainder) + ' ' + unicode(label)
if not result:
addition = 'and ' + addition
result.insert(0, addition)
return ', '.join(result)
def humantime(seconds):
"""
A human-readable interpretation of a time interval.
@param seconds: A number of seconds.
@type seconds: The type of seconds.
@return: A string describing the time interval.
@rtype: L{unicode}
"""
return redivmod(seconds, [(60, "seconds"),
(60, "minutes"),
(24, "hours"),
(7, "days"),
(52, "weeks"),
(0, "years")])
if __name__ == "__main__":
import sys
print(humantime(how_long(*map(int, sys.argv[1:]))))
|
|
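Working the how_long formula by hand for one configuration (the 2048-word list size below is an illustrative assumption, not horsephrase's actual list length):

choices, length = 2048, 4       # assumed values, not read from the real word list
speed, optimism = 10**9, 2      # 1e9 guesses/sec; expect a hit halfway through
seconds = choices ** length // (speed * optimism)
print(seconds, "seconds, roughly", seconds // 3600, "hours")  # 8796 s, about 2 hours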
51479ca43a03329ec563a6218b44fc062712be4d
|
indra/tests/test_mgi_client.py
|
indra/tests/test_mgi_client.py
|
from indra.databases import mgi_client
def test_lookups():
assert mgi_client.get_id_from_name('Braf') == '88190'
assert mgi_client.get_name_from_id('1926283') == 'Pgap6'
assert mgi_client.get_id_from_name_synonym('Pgap6') == '1926283'
assert mgi_client.get_id_from_name_synonym('Tmem8') == '1926283'
assert isinstance(mgi_client.get_id_from_name_synonym('EGF-TM7'), list)
|
Add some tests for MGI client
|
Add some tests for MGI client
|
Python
|
bsd-2-clause
|
johnbachman/indra,johnbachman/indra,sorgerlab/indra,bgyori/indra,bgyori/indra,bgyori/indra,johnbachman/indra,sorgerlab/indra,sorgerlab/indra
|
Add some tests for MGI client
|
from indra.databases import mgi_client
def test_lookups():
assert mgi_client.get_id_from_name('Braf') == '88190'
assert mgi_client.get_name_from_id('1926283') == 'Pgap6'
assert mgi_client.get_id_from_name_synonym('Pgap6') == '1926283'
assert mgi_client.get_id_from_name_synonym('Tmem8') == '1926283'
assert isinstance(mgi_client.get_id_from_name_synonym('EGF-TM7'), list)
|
<commit_before><commit_msg>Add some tests for MGI client<commit_after>
|
from indra.databases import mgi_client
def test_lookups():
assert mgi_client.get_id_from_name('Braf') == '88190'
assert mgi_client.get_name_from_id('1926283') == 'Pgap6'
assert mgi_client.get_id_from_name_synonym('Pgap6') == '1926283'
assert mgi_client.get_id_from_name_synonym('Tmem8') == '1926283'
assert isinstance(mgi_client.get_id_from_name_synonym('EGF-TM7'), list)
|
Add some tests for MGI clientfrom indra.databases import mgi_client
def test_lookups():
assert mgi_client.get_id_from_name('Braf') == '88190'
assert mgi_client.get_name_from_id('1926283') == 'Pgap6'
assert mgi_client.get_id_from_name_synonym('Pgap6') == '1926283'
assert mgi_client.get_id_from_name_synonym('Tmem8') == '1926283'
assert isinstance(mgi_client.get_id_from_name_synonym('EGF-TM7'), list)
|
<commit_before><commit_msg>Add some tests for MGI client<commit_after>from indra.databases import mgi_client
def test_lookups():
assert mgi_client.get_id_from_name('Braf') == '88190'
assert mgi_client.get_name_from_id('1926283') == 'Pgap6'
assert mgi_client.get_id_from_name_synonym('Pgap6') == '1926283'
assert mgi_client.get_id_from_name_synonym('Tmem8') == '1926283'
assert isinstance(mgi_client.get_id_from_name_synonym('EGF-TM7'), list)
|
|
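The API shape these assertions pin down can be mirrored with a toy bidirectional map (illustrative only; indra's real client is backed by MGI resource files, not an inline dict):

_name_to_id = {"Braf": "88190", "Pgap6": "1926283"}
_id_to_name = {mgi_id: name for name, mgi_id in _name_to_id.items()}

def get_id_from_name(name):
    return _name_to_id.get(name)

def get_name_from_id(mgi_id):
    return _id_to_name.get(mgi_id)

assert get_id_from_name("Braf") == "88190"
assert get_name_from_id("1926283") == "Pgap6"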
159962e6d1c8f1a855c356f458aeb7079cb6b07f
|
report_qweb_element_page_visibility/__openerp__.py
|
report_qweb_element_page_visibility/__openerp__.py
|
# -*- coding: utf-8 -*-
#########################################################################
# #
# Copyright (C) 2015 Agile Business Group #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#  GNU Affero General Public License for more details.                 #
# #
# You should have received a copy of the #
# GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#########################################################################
{
'name': 'Report Qweb Element Page Visibility',
'version': '9.0.1.0.0',
'author': 'Agile Business Group, Odoo Community Association (OCA)',
'category': 'Tools',
"website": "https://odoo-community.org/",
"license": "AGPL-3",
"application": False,
"installable": True,
'data': [
'views/layouts.xml',
],
'depends': [
'report',
],
}
|
Migrate report_qweb_element_page_visibility module to 9.0
|
[IMP] Migrate report_qweb_element_page_visibility module to 9.0
|
Python
|
agpl-3.0
|
OCA/reporting-engine,OCA/reporting-engine,OCA/reporting-engine,OCA/reporting-engine
|
[IMP] Migrate report_qweb_element_page_visibility module to 9.0
|
# -*- coding: utf-8 -*-
#########################################################################
# #
# Copyright (C) 2015 Agile Business Group #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#  GNU Affero General Public License for more details.                 #
# #
# You should have received a copy of the #
# GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#########################################################################
{
'name': 'Report Qweb Element Page Visibility',
'version': '9.0.1.0.0',
'author': 'Agile Business Group, Odoo Community Association (OCA)',
'category': 'Tools',
"website": "https://odoo-community.org/",
"license": "AGPL-3",
"application": False,
"installable": True,
'data': [
'views/layouts.xml',
],
'depends': [
'report',
],
}
|
<commit_before><commit_msg>[IMP] Migrate report_qweb_element_page_visibility module to 9.0<commit_after>
|
# -*- coding: utf-8 -*-
#########################################################################
# #
# Copyright (C) 2015 Agile Business Group #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#  GNU Affero General Public License for more details.                 #
# #
# You should have received a copy of the #
# GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#########################################################################
{
'name': 'Report Qweb Element Page Visibility',
'version': '9.0.1.0.0',
'author': 'Agile Business Group, Odoo Community Association (OCA)',
'category': 'Tools',
"website": "https://odoo-community.org/",
"license": "AGPL-3",
"application": False,
"installable": True,
'data': [
'views/layouts.xml',
],
'depends': [
'report',
],
}
|
[IMP] Migrate report_qweb_element_page_visibility module to 9.0# -*- coding: utf-8 -*-
#########################################################################
# #
# Copyright (C) 2015 Agile Business Group #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#  GNU Affero General Public License for more details.                 #
# #
# You should have received a copy of the #
# GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#########################################################################
{
'name': 'Report Qweb Element Page Visibility',
'version': '9.0.1.0.0',
'author': 'Agile Business Group, Odoo Community Association (OCA)',
'category': 'Tools',
"website": "https://odoo-community.org/",
"license": "AGPL-3",
"application": False,
"installable": True,
'data': [
'views/layouts.xml',
],
'depends': [
'report',
],
}
|
<commit_before><commit_msg>[IMP] Migrate report_qweb_element_page_visibility module to 9.0<commit_after># -*- coding: utf-8 -*-
#########################################################################
# #
# Copyright (C) 2015 Agile Business Group #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#  GNU Affero General Public License for more details.                 #
# #
# You should have received a copy of the #
# GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#########################################################################
{
'name': 'Report Qweb Element Page Visibility',
'version': '9.0.1.0.0',
'author': 'Agile Business Group, Odoo Community Association (OCA)',
'category': 'Tools',
"website": "https://odoo-community.org/",
"license": "AGPL-3",
"application": False,
"installable": True,
'data': [
'views/layouts.xml',
],
'depends': [
'report',
],
}
|
|
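Because the manifest is a pure Python literal, it can be sanity-checked without an Odoo install (a sketch; the file path is assumed):

import ast

src = open("report_qweb_element_page_visibility/__openerp__.py").read()
manifest = ast.literal_eval(src[src.index("{"):])  # skip the comment header
assert manifest["version"].startswith("9.0")
assert "report" in manifest["depends"]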
3c78b03aa3d0455290562ef31b84d7c8f5a91c9e
|
tests/test_ext_tasks.py
|
tests/test_ext_tasks.py
|
# -*- coding: utf-8 -*-
"""
Tests for discord.ext.tasks
"""
import asyncio
import datetime
import pytest
from discord import utils
from discord.ext import tasks
@pytest.mark.asyncio
async def test_explicit_initial_runs_tomorrow_single():
now = utils.utcnow()
if not ((0, 4) < (now.hour, now.minute) < (23, 59)):
await asyncio.sleep(5 * 60) # sleep for 5 minutes
now = utils.utcnow()
has_run = False
async def inner():
nonlocal has_run
has_run = True
time = utils.utcnow() - datetime.timedelta(minutes=1)
# a loop that should have an initial run tomorrow
loop = tasks.loop(time=datetime.time(hour=time.hour, minute=time.minute))(inner)
loop.start()
await asyncio.sleep(1)
try:
assert not has_run
finally:
loop.cancel()
@pytest.mark.asyncio
async def test_explicit_initial_runs_tomorrow_multi():
now = utils.utcnow()
if not ((0, 4) < (now.hour, now.minute) < (23, 59)):
await asyncio.sleep(5 * 60) # sleep for 5 minutes
now = utils.utcnow()
# multiple times that are in the past for today
times = []
for _ in range(3):
now -= datetime.timedelta(minutes=1)
times.append(datetime.time(hour=now.hour, minute=now.minute))
has_run = False
async def inner():
nonlocal has_run
has_run = True
# a loop that should have an initial run tomorrow
loop = tasks.loop(time=times)(inner)
loop.start()
await asyncio.sleep(1)
try:
assert not has_run
finally:
loop.cancel()
|
Test regressions for the task extension
|
Test regressions for the task extension
* regression tests for 19ad64a
* fix edge case for test running within 3 minutes of the start of the hour
|
Python
|
mit
|
Rapptz/discord.py,Harmon758/discord.py,Harmon758/discord.py,rapptz/discord.py
|
Test regressions for the task extension
* regression tests for 19ad64a
* fix edge case for test running within 3 minutes of the start of the hour
|
# -*- coding: utf-8 -*-
"""
Tests for discord.ext.tasks
"""
import asyncio
import datetime
import pytest
from discord import utils
from discord.ext import tasks
@pytest.mark.asyncio
async def test_explicit_initial_runs_tomorrow_single():
now = utils.utcnow()
if not ((0, 4) < (now.hour, now.minute) < (23, 59)):
await asyncio.sleep(5 * 60) # sleep for 5 minutes
now = utils.utcnow()
has_run = False
async def inner():
nonlocal has_run
has_run = True
time = utils.utcnow() - datetime.timedelta(minutes=1)
# a loop that should have an initial run tomorrow
loop = tasks.loop(time=datetime.time(hour=time.hour, minute=time.minute))(inner)
loop.start()
await asyncio.sleep(1)
try:
assert not has_run
finally:
loop.cancel()
@pytest.mark.asyncio
async def test_explicit_initial_runs_tomorrow_multi():
now = utils.utcnow()
if not ((0, 4) < (now.hour, now.minute) < (23, 59)):
await asyncio.sleep(5 * 60) # sleep for 5 minutes
now = utils.utcnow()
# multiple times that are in the past for today
times = []
for _ in range(3):
now -= datetime.timedelta(minutes=1)
times.append(datetime.time(hour=now.hour, minute=now.minute))
has_run = False
async def inner():
nonlocal has_run
has_run = True
# a loop that should have an initial run tomorrow
loop = tasks.loop(time=times)(inner)
loop.start()
await asyncio.sleep(1)
try:
assert not has_run
finally:
loop.cancel()
|
<commit_before><commit_msg>Test regressions for the task extension
* regression tests for 19ad64a
* fix edge case for test running within 3 minutes of the start of the hour<commit_after>
|
# -*- coding: utf-8 -*-
"""
Tests for discord.ext.tasks
"""
import asyncio
import datetime
import pytest
from discord import utils
from discord.ext import tasks
@pytest.mark.asyncio
async def test_explicit_initial_runs_tomorrow_single():
now = utils.utcnow()
if not ((0, 4) < (now.hour, now.minute) < (23, 59)):
await asyncio.sleep(5 * 60) # sleep for 5 minutes
now = utils.utcnow()
has_run = False
async def inner():
nonlocal has_run
has_run = True
time = utils.utcnow() - datetime.timedelta(minutes=1)
# a loop that should have an initial run tomorrow
loop = tasks.loop(time=datetime.time(hour=time.hour, minute=time.minute))(inner)
loop.start()
await asyncio.sleep(1)
try:
assert not has_run
finally:
loop.cancel()
@pytest.mark.asyncio
async def test_explicit_initial_runs_tomorrow_multi():
now = utils.utcnow()
if not ((0, 4) < (now.hour, now.minute) < (23, 59)):
await asyncio.sleep(5 * 60) # sleep for 5 minutes
now = utils.utcnow()
# multiple times that are in the past for today
times = []
for _ in range(3):
now -= datetime.timedelta(minutes=1)
times.append(datetime.time(hour=now.hour, minute=now.minute))
has_run = False
async def inner():
nonlocal has_run
has_run = True
# a loop that should have an initial run tomorrow
loop = tasks.loop(time=times)(inner)
loop.start()
await asyncio.sleep(1)
try:
assert not has_run
finally:
loop.cancel()
|
Test regressions for the task extension
* regression tests for 19ad64a
* fix edge case for test running within 3 minutes of the start of the hour# -*- coding: utf-8 -*-
"""
Tests for discord.ext.tasks
"""
import asyncio
import datetime
import pytest
from discord import utils
from discord.ext import tasks
@pytest.mark.asyncio
async def test_explicit_initial_runs_tomorrow_single():
now = utils.utcnow()
if not ((0, 4) < (now.hour, now.minute) < (23, 59)):
await asyncio.sleep(5 * 60) # sleep for 5 minutes
now = utils.utcnow()
has_run = False
async def inner():
nonlocal has_run
has_run = True
time = utils.utcnow() - datetime.timedelta(minutes=1)
# a loop that should have an initial run tomorrow
loop = tasks.loop(time=datetime.time(hour=time.hour, minute=time.minute))(inner)
loop.start()
await asyncio.sleep(1)
try:
assert not has_run
finally:
loop.cancel()
@pytest.mark.asyncio
async def test_explicit_initial_runs_tomorrow_multi():
now = utils.utcnow()
if not ((0, 4) < (now.hour, now.minute) < (23, 59)):
await asyncio.sleep(5 * 60) # sleep for 5 minutes
now = utils.utcnow()
# multiple times that are in the past for today
times = []
for _ in range(3):
now -= datetime.timedelta(minutes=1)
times.append(datetime.time(hour=now.hour, minute=now.minute))
has_run = False
async def inner():
nonlocal has_run
has_run = True
# a loop that should have an initial run tomorrow
loop = tasks.loop(time=times)(inner)
loop.start()
await asyncio.sleep(1)
try:
assert not has_run
finally:
loop.cancel()
|
<commit_before><commit_msg>Test regressions for the task extension
* regression tests for 19ad64a
* fix edge case for test running within 3 minutes of the start of the hour<commit_after># -*- coding: utf-8 -*-
"""
Tests for discord.ext.tasks
"""
import asyncio
import datetime
import pytest
from discord import utils
from discord.ext import tasks
@pytest.mark.asyncio
async def test_explicit_initial_runs_tomorrow_single():
now = utils.utcnow()
if not ((0, 4) < (now.hour, now.minute) < (23, 59)):
await asyncio.sleep(5 * 60) # sleep for 5 minutes
now = utils.utcnow()
has_run = False
async def inner():
nonlocal has_run
has_run = True
time = utils.utcnow() - datetime.timedelta(minutes=1)
# a loop that should have an initial run tomorrow
loop = tasks.loop(time=datetime.time(hour=time.hour, minute=time.minute))(inner)
loop.start()
await asyncio.sleep(1)
try:
assert not has_run
finally:
loop.cancel()
@pytest.mark.asyncio
async def test_explicit_initial_runs_tomorrow_multi():
now = utils.utcnow()
if not ((0, 4) < (now.hour, now.minute) < (23, 59)):
await asyncio.sleep(5 * 60) # sleep for 5 minutes
now = utils.utcnow()
# multiple times that are in the past for today
times = []
for _ in range(3):
now -= datetime.timedelta(minutes=1)
times.append(datetime.time(hour=now.hour, minute=now.minute))
has_run = False
async def inner():
nonlocal has_run
has_run = True
# a loop that should have an initial run tomorrow
loop = tasks.loop(time=times)(inner)
loop.start()
await asyncio.sleep(1)
try:
assert not has_run
finally:
loop.cancel()
|
|
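The scheduling rule these tests exercise reduces to a few lines (my simplification using naive datetimes; the real tasks.loop handles timezones and lists of times):

import datetime

def next_run(now, target):
    candidate = datetime.datetime.combine(now.date(), target)
    if candidate <= now:
        candidate += datetime.timedelta(days=1)  # time already passed -> tomorrow
    return candidate

now = datetime.datetime(2024, 1, 1, 12, 0)
assert next_run(now, datetime.time(11, 59)).day == 2  # past time runs tomorrow
assert next_run(now, datetime.time(12, 30)).day == 1  # future time runs today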
7700aad28e7bd0053e3e8dba823b446822e2be73
|
conftest.py
|
conftest.py
|
from django.conf import settings
import base64
import os
import os.path
def pytest_configure(config):
if not settings.configured:
os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry.conf.server'
test_db = os.environ.get('DB', 'sqlite')
if test_db == 'mysql':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sentry',
'USER': 'root',
})
elif test_db == 'postgres':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'postgres',
'NAME': 'sentry',
'OPTIONS': {
'autocommit': True,
}
})
elif test_db == 'sqlite':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
})
# override a few things with our test specifics
settings.INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + (
'tests',
)
settings.SENTRY_KEY = base64.b64encode(os.urandom(40))
settings.SENTRY_PUBLIC = False
|
from django.conf import settings
import base64
import os
import os.path
def pytest_configure(config):
if not settings.configured:
os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry.conf.server'
test_db = os.environ.get('DB', 'sqlite')
if test_db == 'mysql':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sentry',
'USER': 'root',
})
elif test_db == 'postgres':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'postgres',
'NAME': 'sentry',
'OPTIONS': {
'autocommit': True,
}
})
elif test_db == 'sqlite':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
})
        # The compressor is not fast, so disable it in tests.
settings.COMPRESS_ENABLED = False
# override a few things with our test specifics
settings.INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + (
'tests',
)
settings.SENTRY_KEY = base64.b64encode(os.urandom(40))
settings.SENTRY_PUBLIC = False
|
Disable compressor in test suite (compressor makes the test suite run 50% slower)
|
Disable compressor in test suite (compressor makes the test suite run 50% slower)
|
Python
|
bsd-3-clause
|
mitsuhiko/sentry,JamesMura/sentry,JTCunning/sentry,imankulov/sentry,gencer/sentry,fotinakis/sentry,ewdurbin/sentry,JamesMura/sentry,Natim/sentry,beni55/sentry,jean/sentry,hongliang5623/sentry,kevinlondon/sentry,nicholasserra/sentry,zenefits/sentry,BuildingLink/sentry,argonemyth/sentry,drcapulet/sentry,boneyao/sentry,SilentCircle/sentry,ewdurbin/sentry,gg7/sentry,Kryz/sentry,daevaorn/sentry,looker/sentry,gencer/sentry,llonchj/sentry,camilonova/sentry,jean/sentry,ifduyue/sentry,1tush/sentry,vperron/sentry,zenefits/sentry,vperron/sentry,JamesMura/sentry,rdio/sentry,wujuguang/sentry,NickPresta/sentry,gg7/sentry,Natim/sentry,gencer/sentry,pauloschilling/sentry,llonchj/sentry,BuildingLink/sentry,ngonzalvez/sentry,JTCunning/sentry,argonemyth/sentry,SilentCircle/sentry,SilentCircle/sentry,BuildingLink/sentry,rdio/sentry,hongliang5623/sentry,daevaorn/sentry,NickPresta/sentry,JackDanger/sentry,gencer/sentry,drcapulet/sentry,jean/sentry,korealerts1/sentry,mvaled/sentry,1tush/sentry,wong2/sentry,jean/sentry,zenefits/sentry,ifduyue/sentry,looker/sentry,kevinastone/sentry,fotinakis/sentry,fuziontech/sentry,wujuguang/sentry,mitsuhiko/sentry,korealerts1/sentry,BuildingLink/sentry,songyi199111/sentry,JamesMura/sentry,NickPresta/sentry,JTCunning/sentry,TedaLIEz/sentry,JackDanger/sentry,beni55/sentry,daevaorn/sentry,looker/sentry,felixbuenemann/sentry,ifduyue/sentry,zenefits/sentry,songyi199111/sentry,kevinlondon/sentry,alexm92/sentry,jokey2k/sentry,pauloschilling/sentry,ngonzalvez/sentry,kevinlondon/sentry,beeftornado/sentry,fotinakis/sentry,mvaled/sentry,imankulov/sentry,mvaled/sentry,mvaled/sentry,felixbuenemann/sentry,rdio/sentry,wong2/sentry,boneyao/sentry,ewdurbin/sentry,BayanGroup/sentry,nicholasserra/sentry,looker/sentry,wong2/sentry,beni55/sentry,camilonova/sentry,Kryz/sentry,mvaled/sentry,boneyao/sentry,imankulov/sentry,gg7/sentry,fuziontech/sentry,BuildingLink/sentry,argonemyth/sentry,kevinastone/sentry,BayanGroup/sentry,daevaorn/sentry,alexm92/sentry,nicholasserra/sentry,felixbuenemann/sentry,JamesMura/sentry,beeftornado/sentry,wujuguang/sentry,JackDanger/sentry,songyi199111/sentry,camilonova/sentry,zenefits/sentry,ngonzalvez/sentry,hongliang5623/sentry,jokey2k/sentry,ifduyue/sentry,llonchj/sentry,jean/sentry,gencer/sentry,rdio/sentry,drcapulet/sentry,1tush/sentry,NickPresta/sentry,TedaLIEz/sentry,ifduyue/sentry,kevinastone/sentry,korealerts1/sentry,mvaled/sentry,jokey2k/sentry,beeftornado/sentry,pauloschilling/sentry,BayanGroup/sentry,alexm92/sentry,vperron/sentry,Kryz/sentry,fotinakis/sentry,fuziontech/sentry,SilentCircle/sentry,looker/sentry,TedaLIEz/sentry,Natim/sentry
|
from django.conf import settings
import base64
import os
import os.path
def pytest_configure(config):
if not settings.configured:
os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry.conf.server'
test_db = os.environ.get('DB', 'sqlite')
if test_db == 'mysql':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sentry',
'USER': 'root',
})
elif test_db == 'postgres':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'postgres',
'NAME': 'sentry',
'OPTIONS': {
'autocommit': True,
}
})
elif test_db == 'sqlite':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
})
# override a few things with our test specifics
settings.INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + (
'tests',
)
settings.SENTRY_KEY = base64.b64encode(os.urandom(40))
settings.SENTRY_PUBLIC = False
Disable compressor in test suite (compressor makes the test suite run 50% slower)
|
from django.conf import settings
import base64
import os
import os.path
def pytest_configure(config):
if not settings.configured:
os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry.conf.server'
test_db = os.environ.get('DB', 'sqlite')
if test_db == 'mysql':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sentry',
'USER': 'root',
})
elif test_db == 'postgres':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'postgres',
'NAME': 'sentry',
'OPTIONS': {
'autocommit': True,
}
})
elif test_db == 'sqlite':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
})
        # The compressor is not fast, so disable it in tests.
settings.COMPRESS_ENABLED = False
# override a few things with our test specifics
settings.INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + (
'tests',
)
settings.SENTRY_KEY = base64.b64encode(os.urandom(40))
settings.SENTRY_PUBLIC = False
|
<commit_before>from django.conf import settings
import base64
import os
import os.path
def pytest_configure(config):
if not settings.configured:
os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry.conf.server'
test_db = os.environ.get('DB', 'sqlite')
if test_db == 'mysql':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sentry',
'USER': 'root',
})
elif test_db == 'postgres':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'postgres',
'NAME': 'sentry',
'OPTIONS': {
'autocommit': True,
}
})
elif test_db == 'sqlite':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
})
# override a few things with our test specifics
settings.INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + (
'tests',
)
settings.SENTRY_KEY = base64.b64encode(os.urandom(40))
settings.SENTRY_PUBLIC = False
<commit_msg>Disable compressor in test suite (compressor makes the test suite run 50% slower)<commit_after>
|
from django.conf import settings
import base64
import os
import os.path
def pytest_configure(config):
if not settings.configured:
os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry.conf.server'
test_db = os.environ.get('DB', 'sqlite')
if test_db == 'mysql':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sentry',
'USER': 'root',
})
elif test_db == 'postgres':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'postgres',
'NAME': 'sentry',
'OPTIONS': {
'autocommit': True,
}
})
elif test_db == 'sqlite':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
})
        # The compressor is not fast, so disable it in tests.
settings.COMPRESS_ENABLED = False
# override a few things with our test specifics
settings.INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + (
'tests',
)
settings.SENTRY_KEY = base64.b64encode(os.urandom(40))
settings.SENTRY_PUBLIC = False
|
from django.conf import settings
import base64
import os
import os.path
def pytest_configure(config):
if not settings.configured:
os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry.conf.server'
test_db = os.environ.get('DB', 'sqlite')
if test_db == 'mysql':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sentry',
'USER': 'root',
})
elif test_db == 'postgres':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'postgres',
'NAME': 'sentry',
'OPTIONS': {
'autocommit': True,
}
})
elif test_db == 'sqlite':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
})
# override a few things with our test specifics
settings.INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + (
'tests',
)
settings.SENTRY_KEY = base64.b64encode(os.urandom(40))
settings.SENTRY_PUBLIC = False
Disable compressor in test suite (compressor makes the test suite run 50% slower)from django.conf import settings
import base64
import os
import os.path
def pytest_configure(config):
if not settings.configured:
os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry.conf.server'
test_db = os.environ.get('DB', 'sqlite')
if test_db == 'mysql':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sentry',
'USER': 'root',
})
elif test_db == 'postgres':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'postgres',
'NAME': 'sentry',
'OPTIONS': {
'autocommit': True,
}
})
elif test_db == 'sqlite':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
})
        # The compressor is not fast, so disable it in tests.
settings.COMPRESS_ENABLED = False
# override a few things with our test specifics
settings.INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + (
'tests',
)
settings.SENTRY_KEY = base64.b64encode(os.urandom(40))
settings.SENTRY_PUBLIC = False
|
<commit_before>from django.conf import settings
import base64
import os
import os.path
def pytest_configure(config):
if not settings.configured:
os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry.conf.server'
test_db = os.environ.get('DB', 'sqlite')
if test_db == 'mysql':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sentry',
'USER': 'root',
})
elif test_db == 'postgres':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'postgres',
'NAME': 'sentry',
'OPTIONS': {
'autocommit': True,
}
})
elif test_db == 'sqlite':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
})
# override a few things with our test specifics
settings.INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + (
'tests',
)
settings.SENTRY_KEY = base64.b64encode(os.urandom(40))
settings.SENTRY_PUBLIC = False
<commit_msg>Disable compressor in test suite (compressor makes the test suite run 50% slower)<commit_after>from django.conf import settings
import base64
import os
import os.path
def pytest_configure(config):
if not settings.configured:
os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry.conf.server'
test_db = os.environ.get('DB', 'sqlite')
if test_db == 'mysql':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sentry',
'USER': 'root',
})
elif test_db == 'postgres':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'postgres',
'NAME': 'sentry',
'OPTIONS': {
'autocommit': True,
}
})
elif test_db == 'sqlite':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
})
        # The compressor is not fast, so disable it in tests.
settings.COMPRESS_ENABLED = False
# override a few things with our test specifics
settings.INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + (
'tests',
)
settings.SENTRY_KEY = base64.b64encode(os.urandom(40))
settings.SENTRY_PUBLIC = False
|
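The DB dispatch in that conftest is easy to exercise standalone (the dict below is a stand-in, not Django's real settings object):

import os

default = {"ENGINE": "django.db.backends.sqlite3"}
overrides = {
    "mysql": {"ENGINE": "django.db.backends.mysql", "NAME": "sentry", "USER": "root"},
    "postgres": {"ENGINE": "django.db.backends.postgresql_psycopg2", "NAME": "sentry"},
    "sqlite": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"},
}
default.update(overrides[os.environ.get("DB", "sqlite")])
print(default["ENGINE"])  # run with DB=postgres / DB=mysql to switch backends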
80d726c48142bc83d7154e9554c1fd8053b81883
|
tests/test_bmipytest.py
|
tests/test_bmipytest.py
|
from bmi_tester.bmipytest import load_component
entry_point = 'pymt_hydrotrend.bmi:Hydrotrend'
module_name, cls_name = entry_point.split(":")
def test_component_is_string():
component = load_component(entry_point)
assert isinstance(component, str)
def test_component_is_classname():
component = load_component(entry_point)
assert component == cls_name
|
Add tests for load_component function
|
Add tests for load_component function
It uses the hydrotrend plugin, which is a default installed with pymt.
This might make the tests fragile.
|
Python
|
mit
|
csdms/bmi-tester
|
Add tests for load_component function
It uses the hydrotrend plugin, which is installed by default with pymt.
This might make the tests fragile.
|
from bmi_tester.bmipytest import load_component
entry_point = 'pymt_hydrotrend.bmi:Hydrotrend'
module_name, cls_name = entry_point.split(":")
def test_component_is_string():
component = load_component(entry_point)
assert isinstance(component, str)
def test_component_is_classname():
component = load_component(entry_point)
assert component == cls_name
|
<commit_before><commit_msg>Add tests for load_component function
It uses the hydrotrend plugin, which is installed by default with pymt.
This might make the tests fragile.<commit_after>
|
from bmi_tester.bmipytest import load_component
entry_point = 'pymt_hydrotrend.bmi:Hydrotrend'
module_name, cls_name = entry_point.split(":")
def test_component_is_string():
component = load_component(entry_point)
assert isinstance(component, str)
def test_component_is_classname():
component = load_component(entry_point)
assert component == cls_name
|
Add tests for load_component function
It uses the hydrotrend plugin, which is installed by default with pymt.
This might make the tests fragile.from bmi_tester.bmipytest import load_component
entry_point = 'pymt_hydrotrend.bmi:Hydrotrend'
module_name, cls_name = entry_point.split(":")
def test_component_is_string():
component = load_component(entry_point)
assert isinstance(component, str)
def test_component_is_classname():
component = load_component(entry_point)
assert component == cls_name
|
<commit_before><commit_msg>Add tests for load_component function
It uses the hydrotrend plugin, which is installed by default with pymt.
This might make the tests fragile.<commit_after>from bmi_tester.bmipytest import load_component
entry_point = 'pymt_hydrotrend.bmi:Hydrotrend'
module_name, cls_name = entry_point.split(":")
def test_component_is_string():
component = load_component(entry_point)
assert isinstance(component, str)
def test_component_is_classname():
component = load_component(entry_point)
assert component == cls_name
|
|
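Inferred from these assertions alone (not read from bmi-tester's source), load_component evidently returns the class-name half of a "module:Class" entry point; a minimal stand-in that satisfies both tests:

def load_component(entry_point):
    _module_name, cls_name = entry_point.split(":")
    return cls_name

assert load_component("pymt_hydrotrend.bmi:Hydrotrend") == "Hydrotrend"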
76aef076be0db297b8e4979d55acb28c798ade4a
|
test/src/unittest/extractor/test_musicextractor.py
|
test/src/unittest/extractor/test_musicextractor.py
|
#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestMusicExtractor(TestCase):
def testEmpty(self):
inputFilename = join(testdata.audio_dir, 'generated', 'empty', 'empty.aiff')
        # NOTE: AudioLoader will throw an exception on "empty.wav", complaining
        # that it cannot read stream info, so we use "empty.aiff" instead...
self.assertRaises(RuntimeError, lambda: MusicExtractor()(inputFilename))
def testSilence(self):
inputFilename = join(testdata.audio_dir, 'generated', 'silence', 'silence.flac')
self.assertRaises(RuntimeError, lambda: MusicExtractor()(inputFilename))
return
def testCorruptFile(self):
inputFilename = join(testdata.audio_dir, 'generated', 'unsupported.au')
self.assertRaises(RuntimeError, lambda: MusicExtractor()(inputFilename))
def testComputeValid(self):
# Simply checks if computation succeeded. Ideally, we would need
# a regression test for each descriptor in the pool.
inputFilename = join(testdata.audio_dir, 'recorded', 'dubstep.wav')
pool, poolFrames = MusicExtractor()(inputFilename)
self.assertValidPool(pool)
self.assertValidPool(poolFrames)
def testRobustness(self):
# TODO test that computed descriptors are similar across formats
return
suite = allTests(TestMusicExtractor)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
Add unit test for MusicExtractor
|
Add unit test for MusicExtractor
Similar to the test for FreesoundExtractor
|
Python
|
agpl-3.0
|
carthach/essentia,carthach/essentia,MTG/essentia,MTG/essentia,carthach/essentia,MTG/essentia,MTG/essentia,MTG/essentia,carthach/essentia,carthach/essentia
|
Add unit test for MusicExtractor
Similar to the test for FreesoundExtractor
|
#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestMusicExtractor(TestCase):
def testEmpty(self):
inputFilename = join(testdata.audio_dir, 'generated', 'empty', 'empty.aiff')
        # NOTE: AudioLoader will throw an exception on "empty.wav", complaining
        # that it cannot read stream info, so we use "empty.aiff" instead...
self.assertRaises(RuntimeError, lambda: MusicExtractor()(inputFilename))
def testSilence(self):
inputFilename = join(testdata.audio_dir, 'generated', 'silence', 'silence.flac')
self.assertRaises(RuntimeError, lambda: MusicExtractor()(inputFilename))
return
def testCorruptFile(self):
inputFilename = join(testdata.audio_dir, 'generated', 'unsupported.au')
self.assertRaises(RuntimeError, lambda: MusicExtractor()(inputFilename))
def testComputeValid(self):
# Simply checks if computation succeeded. Ideally, we would need
# a regression test for each descriptor in the pool.
inputFilename = join(testdata.audio_dir, 'recorded', 'dubstep.wav')
pool, poolFrames = MusicExtractor()(inputFilename)
self.assertValidPool(pool)
self.assertValidPool(poolFrames)
def testRobustness(self):
# TODO test that computed descriptors are similar across formats
return
suite = allTests(TestMusicExtractor)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
<commit_before><commit_msg>Add unit test for MusicExtractor
Similar to the test for FreesoundExtractor<commit_after>
|
#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestMusicExtractor(TestCase):
def testEmpty(self):
inputFilename = join(testdata.audio_dir, 'generated', 'empty', 'empty.aiff')
        # NOTE: AudioLoader will throw an exception on "empty.wav", complaining
        # that it cannot read stream info, so we use "empty.aiff" instead...
self.assertRaises(RuntimeError, lambda: MusicExtractor()(inputFilename))
def testSilence(self):
inputFilename = join(testdata.audio_dir, 'generated', 'silence', 'silence.flac')
self.assertRaises(RuntimeError, lambda: MusicExtractor()(inputFilename))
return
def testCorruptFile(self):
inputFilename = join(testdata.audio_dir, 'generated', 'unsupported.au')
self.assertRaises(RuntimeError, lambda: MusicExtractor()(inputFilename))
def testComputeValid(self):
# Simply checks if computation succeeded. Ideally, we would need
# a regression test for each descriptor in the pool.
inputFilename = join(testdata.audio_dir, 'recorded', 'dubstep.wav')
pool, poolFrames = MusicExtractor()(inputFilename)
self.assertValidPool(pool)
self.assertValidPool(poolFrames)
def testRobustness(self):
# TODO test that computed descriptors are similar across formats
return
suite = allTests(TestMusicExtractor)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
Add unit test for MusicExtractor
Similar to the test for FreesoundExtractor#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestMusicExtractor(TestCase):
def testEmpty(self):
inputFilename = join(testdata.audio_dir, 'generated', 'empty', 'empty.aiff')
        # NOTE: AudioLoader will throw an exception on "empty.wav", complaining
        # that it cannot read stream info, so we use "empty.aiff" instead...
self.assertRaises(RuntimeError, lambda: MusicExtractor()(inputFilename))
def testSilence(self):
inputFilename = join(testdata.audio_dir, 'generated', 'silence', 'silence.flac')
self.assertRaises(RuntimeError, lambda: MusicExtractor()(inputFilename))
return
def testCorruptFile(self):
inputFilename = join(testdata.audio_dir, 'generated', 'unsupported.au')
self.assertRaises(RuntimeError, lambda: MusicExtractor()(inputFilename))
def testComputeValid(self):
# Simply checks if computation succeeded. Ideally, we would need
# a regression test for each descriptor in the pool.
inputFilename = join(testdata.audio_dir, 'recorded', 'dubstep.wav')
pool, poolFrames = MusicExtractor()(inputFilename)
self.assertValidPool(pool)
self.assertValidPool(poolFrames)
def testRobustness(self):
# TODO test that computed descriptors are similar across formats
return
suite = allTests(TestMusicExtractor)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
<commit_before><commit_msg>Add unit test for MusicExtractor
Similar to the test for FreesoundExtractor<commit_after>#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestMusicExtractor(TestCase):
def testEmpty(self):
inputFilename = join(testdata.audio_dir, 'generated', 'empty', 'empty.aiff')
        # NOTE: AudioLoader will throw an exception on "empty.wav", complaining
        # that it cannot read stream info, so we use "empty.aiff" instead...
self.assertRaises(RuntimeError, lambda: MusicExtractor()(inputFilename))
def testSilence(self):
inputFilename = join(testdata.audio_dir, 'generated', 'silence', 'silence.flac')
self.assertRaises(RuntimeError, lambda: MusicExtractor()(inputFilename))
return
def testCorruptFile(self):
inputFilename = join(testdata.audio_dir, 'generated', 'unsupported.au')
self.assertRaises(RuntimeError, lambda: MusicExtractor()(inputFilename))
def testComputeValid(self):
# Simply checks if computation succeeded. Ideally, we would need
# a regression test for each descriptor in the pool.
inputFilename = join(testdata.audio_dir, 'recorded', 'dubstep.wav')
pool, poolFrames = MusicExtractor()(inputFilename)
self.assertValidPool(pool)
self.assertValidPool(poolFrames)
def testRobustness(self):
# TODO test that computed descriptors are similar across formats
return
suite = allTests(TestMusicExtractor)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
|
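Typical use outside the harness mirrors the testComputeValid body (a sketch; assumes essentia is installed and the audio path exists):

import essentia.standard as es

pool, pool_frames = es.MusicExtractor()("dubstep.wav")  # placeholder path
print(pool.descriptorNames()[:5])  # first few computed descriptor names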
339a72d10d707d5981515fd9c8ad85f85ea59aa0
|
portal/migrations/versions/0fa4a0bd6595_.py
|
portal/migrations/versions/0fa4a0bd6595_.py
|
"""empty message
Revision ID: 0fa4a0bd6595
Revises: b68a125b8470
Create Date: 2017-03-07 19:12:13.758000
"""
# revision identifiers, used by Alembic.
revision = '0fa4a0bd6595'
down_revision = 'b68a125b8470'
from alembic import op
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("commit") # get around transaction warnings by faking a commit
op.execute(
"ALTER TYPE auth_methods ADD VALUE IF NOT EXISTS 'service_token_authenticated'");
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
Add migration to pick up missing enumeration value for encounter.auth_method
|
Add migration to pick up missing enumeration value for encounter.auth_method
|
Python
|
bsd-3-clause
|
uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal
|
Add migration to pick up missing enumeration value for encounter.auth_method
|
"""empty message
Revision ID: 0fa4a0bd6595
Revises: b68a125b8470
Create Date: 2017-03-07 19:12:13.758000
"""
# revision identifiers, used by Alembic.
revision = '0fa4a0bd6595'
down_revision = 'b68a125b8470'
from alembic import op
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("commit") # get around transaction warnings by faking a commit
op.execute(
"ALTER TYPE auth_methods ADD VALUE IF NOT EXISTS 'service_token_authenticated'");
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add migration to pick up missing enumeration value for encounter.auth_method<commit_after>
|
"""empty message
Revision ID: 0fa4a0bd6595
Revises: b68a125b8470
Create Date: 2017-03-07 19:12:13.758000
"""
# revision identifiers, used by Alembic.
revision = '0fa4a0bd6595'
down_revision = 'b68a125b8470'
from alembic import op
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("commit") # get around transaction warnings by faking a commit
op.execute(
"ALTER TYPE auth_methods ADD VALUE IF NOT EXISTS 'service_token_authenticated'");
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
Add migration to pick up missing enumeration value for encounter.auth_method"""empty message
Revision ID: 0fa4a0bd6595
Revises: b68a125b8470
Create Date: 2017-03-07 19:12:13.758000
"""
# revision identifiers, used by Alembic.
revision = '0fa4a0bd6595'
down_revision = 'b68a125b8470'
from alembic import op
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("commit") # get around transaction warnings by faking a commit
op.execute(
"ALTER TYPE auth_methods ADD VALUE IF NOT EXISTS 'service_token_authenticated'");
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add migration to pick up missing enumeration value for encounter.auth_method<commit_after>"""empty message
Revision ID: 0fa4a0bd6595
Revises: b68a125b8470
Create Date: 2017-03-07 19:12:13.758000
"""
# revision identifiers, used by Alembic.
revision = '0fa4a0bd6595'
down_revision = 'b68a125b8470'
from alembic import op
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("commit") # get around transaction warnings by faking a commit
op.execute(
"ALTER TYPE auth_methods ADD VALUE IF NOT EXISTS 'service_token_authenticated'");
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
|
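The op.execute("commit") trick exists because PostgreSQL (before version 12) refuses ALTER TYPE ... ADD VALUE inside a transaction block; the same pattern applied to another value (the value name below is hypothetical):

from alembic import op

def upgrade():
    op.execute("commit")  # step out of Alembic's open transaction first
    op.execute(
        "ALTER TYPE auth_methods ADD VALUE IF NOT EXISTS 'some_new_method'")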
0ecb75cfd685cb8c2bd20b8704b8b6efe62244c5
|
traits/util/toposort.py
|
traits/util/toposort.py
|
#-----------------------------------------------------------------------------
#
# Copyright (c) 2011, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought util package component>
#
#-----------------------------------------------------------------------------
""" A simple topological sort on a dictionary graph.
"""
class CyclicGraph(Exception):
"""
Exception for cyclic graphs.
"""
def __init__(self):
Exception.__init__(self, "Graph is cyclic")
def topological_sort(graph):
"""
Returns the nodes in the graph in topological order.
"""
discovered = {}
explored = {}
order = []
def explore(node):
children = graph.get(node, [])
for child in children:
if child in explored:
pass
elif child in discovered:
raise CyclicGraph()
else:
discovered[child] = 1
explore(child)
explored[node] = 1
order.append(node)
for node in graph.keys():
if node not in explored:
explore(node)
order.reverse()
return order
|
Add the topological_sort function back to traits.util.
|
BUG: Add the topological_sort function back to traits.util.
|
Python
|
bsd-3-clause
|
burnpanck/traits,burnpanck/traits
|
BUG: Add the topological_sort function back to traits.util.
|
#-----------------------------------------------------------------------------
#
# Copyright (c) 2011, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought util package component>
#
#-----------------------------------------------------------------------------
""" A simple topological sort on a dictionary graph.
"""
class CyclicGraph(Exception):
"""
Exception for cyclic graphs.
"""
def __init__(self):
Exception.__init__(self, "Graph is cyclic")
def topological_sort(graph):
"""
Returns the nodes in the graph in topological order.
"""
discovered = {}
explored = {}
order = []
def explore(node):
children = graph.get(node, [])
for child in children:
if child in explored:
pass
elif child in discovered:
raise CyclicGraph()
else:
discovered[child] = 1
explore(child)
explored[node] = 1
order.append(node)
for node in graph.keys():
if node not in explored:
explore(node)
order.reverse()
return order
|
<commit_before><commit_msg>BUG: Add the topological_sort function back to traits.util.<commit_after>
|
#-----------------------------------------------------------------------------
#
# Copyright (c) 2011, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought util package component>
#
#-----------------------------------------------------------------------------
""" A simple topological sort on a dictionary graph.
"""
class CyclicGraph(Exception):
"""
Exception for cyclic graphs.
"""
def __init__(self):
Exception.__init__(self, "Graph is cyclic")
def topological_sort(graph):
"""
Returns the nodes in the graph in topological order.
"""
discovered = {}
explored = {}
order = []
def explore(node):
children = graph.get(node, [])
for child in children:
if child in explored:
pass
elif child in discovered:
raise CyclicGraph()
else:
discovered[child] = 1
explore(child)
explored[node] = 1
order.append(node)
for node in graph.keys():
if node not in explored:
explore(node)
order.reverse()
return order
|
BUG: Add the topological_sort function back to traits.util.#-----------------------------------------------------------------------------
#
# Copyright (c) 2011, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought util package component>
#
#-----------------------------------------------------------------------------
""" A simple topological sort on a dictionary graph.
"""
class CyclicGraph(Exception):
"""
Exception for cyclic graphs.
"""
def __init__(self):
Exception.__init__(self, "Graph is cyclic")
def topological_sort(graph):
"""
Returns the nodes in the graph in topological order.
"""
discovered = {}
explored = {}
order = []
def explore(node):
children = graph.get(node, [])
for child in children:
if child in explored:
pass
elif child in discovered:
raise CyclicGraph()
else:
discovered[child] = 1
explore(child)
explored[node] = 1
order.append(node)
for node in graph.keys():
if node not in explored:
explore(node)
order.reverse()
return order
|
<commit_before><commit_msg>BUG: Add the topological_sort function back to traits.util.<commit_after>#-----------------------------------------------------------------------------
#
# Copyright (c) 2011, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought util package component>
#
#-----------------------------------------------------------------------------
""" A simple topological sort on a dictionary graph.
"""
class CyclicGraph(Exception):
"""
Exception for cyclic graphs.
"""
def __init__(self):
Exception.__init__(self, "Graph is cyclic")
def topological_sort(graph):
"""
Returns the nodes in the graph in topological order.
"""
discovered = {}
explored = {}
order = []
def explore(node):
children = graph.get(node, [])
for child in children:
if child in explored:
pass
elif child in discovered:
raise CyclicGraph()
else:
discovered[child] = 1
explore(child)
explored[node] = 1
order.append(node)
for node in graph.keys():
if node not in explored:
explore(node)
order.reverse()
return order
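A quick usage sketch for topological_sort (the graph is made up for illustration): each key precedes the nodes it maps to, and a cycle raises CyclicGraph.

# 'a' must come before 'b' and 'c'; 'b' before 'c'
print(topological_sort({'a': ['b', 'c'], 'b': ['c'], 'c': []}))
# -> ['a', 'b', 'c'] (one valid ordering)

try:
    topological_sort({'x': ['y'], 'y': ['x']})  # cyclic
except CyclicGraph as exc:
    print(exc)  # Graph is cyclic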
|
|
f5143f2782d2c09fe482b77fa77e9740e5c92710
|
cfp/migrations/0048_auto_20150412_0740.py
|
cfp/migrations/0048_auto_20150412_0740.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cfp', '0047_auto_20150412_0647'),
]
operations = [
migrations.RemoveField(
model_name='conference',
name='topics',
),
migrations.RemoveField(
model_name='savedsearch',
name='topic',
),
]
|
Remove topics from models migration
|
Remove topics from models migration
|
Python
|
mit
|
kyleconroy/speakers,kyleconroy/speakers,kyleconroy/speakers
|
Remove topics from models migration
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cfp', '0047_auto_20150412_0647'),
]
operations = [
migrations.RemoveField(
model_name='conference',
name='topics',
),
migrations.RemoveField(
model_name='savedsearch',
name='topic',
),
]
|
<commit_before><commit_msg>Remove topics from models migration<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cfp', '0047_auto_20150412_0647'),
]
operations = [
migrations.RemoveField(
model_name='conference',
name='topics',
),
migrations.RemoveField(
model_name='savedsearch',
name='topic',
),
]
|
Remove topics from models migration# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cfp', '0047_auto_20150412_0647'),
]
operations = [
migrations.RemoveField(
model_name='conference',
name='topics',
),
migrations.RemoveField(
model_name='savedsearch',
name='topic',
),
]
|
<commit_before><commit_msg>Remove topics from models migration<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cfp', '0047_auto_20150412_0647'),
]
operations = [
migrations.RemoveField(
model_name='conference',
name='topics',
),
migrations.RemoveField(
model_name='savedsearch',
name='topic',
),
]
|
|
70227e79e1cd6bac97e35a1d701996e401fea7f2
|
diaspy/conversations.py
|
diaspy/conversations.py
|
import requests
class Conversation:
"""This class represents a conversation.
.. note::
Remember that you need to have access to the conversation.
"""
def __init__(self, conv_id, client):
"""
:param conversation_id: id of the post and not the guid!
:type conversation_id: str
:param client: client object used to authenticate
:type client: client.Client
.. note::
The login function of the client should be called,
before calling any of the post functions.
"""
self._client = client
self.conv_id = conv_id
def get_data(self):
""" returns the plain json data
"""
r = self._client.session.get(self._client.pod +
'/conversations/' +
self.conv_id +
'.json')
if r.status_code == 200:
return r.json()['conversation']
else:
raise Exception('wrong status code: ' + str(r.status_code))
def answer(self, text):
""" answer that conversation
:param text: text to answer.
:type text: str
"""
data = {'message[text]': text,
'utf8': '✓',
'authenticity_token': self._client.get_token()}
r = self._client.session.post(self._client.pod +
'/conversations/' +
self.conv_id +
'/messages',
data=data,
headers={'accept': 'application/json'})
if r.status_code != 200:
raise Exception(str(r.status_code) +
': Answer could not be posted.')
return r.json()
def delete(self):
""" delete this conversation
"""
data = {'authenticity_token': self._client.get_token()}
r = self._client.session.delete(self._client.pod + '/conversations/' +
self.conv_id +
'/visibility/',
data=data,
headers={'accept': 'application/json'})
if r.status_code != 404:
raise Exception(str(r.status_code) +
': Conversation could not be deleted.')
def get_subject(self):
""" return the subject of this conversation
"""
return self.get_data()['subject']
|
Create new Conversations, answer to old ones or delete them
|
Create new Conversations, answer to old ones or delete them
|
Python
|
mit
|
marekjm/diaspy
|
Create new Conversations, answer to old ones or delete them
|
import requests
class Conversation:
"""This class represents a conversation.
.. note::
Remember that you need to have access to the conversation.
"""
def __init__(self, conv_id, client):
"""
:param conversation_id: id of the post and not the guid!
:type conversation_id: str
:param client: client object used to authenticate
:type client: client.Client
.. note::
The login function of the client should be called,
before calling any of the post functions.
"""
self._client = client
self.conv_id = conv_id
def get_data(self):
""" returns the plain json data
"""
r = self._client.session.get(self._client.pod +
'/conversations/' +
self.conv_id +
'.json')
if r.status_code == 200:
return r.json()['conversation']
else:
raise Exception('wrong status code: ' + str(r.status_code))
def answer(self, text):
""" answer that conversation
:param text: text to answer.
:type text: str
"""
data = {'message[text]': text,
'utf8': '✓',
'authenticity_token': self._client.get_token()}
r = self._client.session.post(self._client.pod +
'/conversations/' +
self.conv_id +
'/messages',
data=data,
headers={'accept': 'application/json'})
if r.status_code != 200:
raise Exception(str(r.status_code) +
': Answer could not be posted.')
return r.json()
def delete(self):
""" delete this conversation
"""
data = {'authenticity_token': self._client.get_token()}
r = self._client.session.delete(self._client.pod + '/conversations/' +
self.conv_id +
'/visibility/',
data=data,
headers={'accept': 'application/json'})
if r.status_code != 404:
raise Exception(str(r.status_code) +
': Conversation could not be deleted.')
def get_subject(self):
""" return the subject of this conversation
"""
return self.get_data()['subject']
|
<commit_before><commit_msg>Create new Conversations, answer to old ones or delete them<commit_after>
|
import requests
class Conversation:
"""This class represents a conversation.
.. note::
Remember that you need to have access to the conversation.
"""
def __init__(self, conv_id, client):
"""
:param conversation_id: id of the post and not the guid!
:type conversation_id: str
:param client: client object used to authenticate
:type client: client.Client
.. note::
The login function of the client should be called,
before calling any of the post functions.
"""
self._client = client
self.conv_id = conv_id
def get_data(self):
""" returns the plain json data
"""
r = self._client.session.get(self._client.pod +
'/conversations/' +
self.conv_id +
'.json')
if r.status_code == 200:
return r.json()['conversation']
else:
raise Exception('wrong status code: ' + str(r.status_code))
def answer(self, text):
""" answer that conversation
:param text: text to answer.
:type text: str
"""
data = {'message[text]': text,
'utf8': '✓',
'authenticity_token': self._client.get_token()}
r = self._client.session.post(self._client.pod +
'/conversations/' +
self.conv_id +
'/messages',
data=data,
headers={'accept': 'application/json'})
if r.status_code != 200:
raise Exception(str(r.status_code) +
': Answer could not be posted.')
return r.json()
def delete(self):
""" delete this conversation
"""
data = {'authenticity_token': self._client.get_token()}
r = self._client.session.delete(self._client.pod + '/conversations/' +
self.conv_id +
'/visibility/',
data=data,
headers={'accept': 'application/json'})
if r.status_code != 404:
raise Exception(str(r.status_code) +
': Conversation could not be deleted.')
def get_subject(self):
""" return the subject of this conversation
"""
return self.get_data()['subject']
|
Create new Conversations, answer to old ones or delete themimport requests
class Conversation:
"""This class represents a conversation.
.. note::
Remember that you need to have access to the conversation.
"""
def __init__(self, conv_id, client):
"""
:param conversation_id: id of the post and not the guid!
:type conversation_id: str
:param client: client object used to authenticate
:type client: client.Client
.. note::
The login function of the client should be called,
before calling any of the post functions.
"""
self._client = client
self.conv_id = conv_id
def get_data(self):
""" returns the plain json data
"""
r = self._client.session.get(self._client.pod +
'/conversations/' +
self.conv_id +
'.json')
if r.status_code == 200:
return r.json()['conversation']
else:
raise Exception('wrong status code: ' + str(r.status_code))
def answer(self, text):
""" answer that conversation
:param text: text to answer.
:type text: str
"""
data = {'message[text]': text,
'utf8': '✓',
'authenticity_token': self._client.get_token()}
r = self._client.session.post(self._client.pod +
'/conversations/' +
self.conv_id +
'/messages',
data=data,
headers={'accept': 'application/json'})
if r.status_code != 200:
raise Exception(str(r.status_code) +
': Answer could not be posted.')
return r.json()
def delete(self):
""" delete this conversation
"""
data = {'authenticity_token': self._client.get_token()}
r = self._client.session.delete(self._client.pod + '/conversations/' +
self.conv_id +
'/visibility/',
data=data,
headers={'accept': 'application/json'})
if r.status_code != 404:
raise Exception(str(r.status_code) +
': Conversation could not be deleted.')
def get_subject(self):
""" return the subject of this conversation
"""
return self.get_data()['subject']
|
<commit_before><commit_msg>Create new Conversations, answer to old ones or delete them<commit_after>import requests
class Conversation:
"""This class represents a conversation.
.. note::
Remember that you need to have access to the conversation.
"""
def __init__(self, conv_id, client):
"""
:param conversation_id: id of the post and not the guid!
:type conversation_id: str
:param client: client object used to authenticate
:type client: client.Client
.. note::
The login function of the client should be called,
before calling any of the post functions.
"""
self._client = client
self.conv_id = conv_id
def get_data(self):
""" returns the plain json data
"""
r = self._client.session.get(self._client.pod +
'/conversations/' +
self.conv_id +
'.json')
if r.status_code == 200:
return r.json()['conversation']
else:
raise Exception('wrong status code: ' + str(r.status_code))
def answer(self, text):
""" answer that conversation
:param text: text to answer.
:type text: str
"""
data = {'message[text]': text,
'utf8': '✓',
'authenticity_token': self._client.get_token()}
r = self._client.session.post(self._client.pod +
'/conversations/' +
self.conv_id +
'/messages',
data=data,
headers={'accept': 'application/json'})
if r.status_code != 200:
raise Exception(str(r.status_code) +
': Answer could not be posted.')
return r.json()
def delete(self):
""" delete this conversation
"""
data = {'authenticity_token': self._client.get_token()}
r = self._client.session.delete(self._client.pod + '/conversations/' +
self.conv_id +
'/visibility/',
data=data,
headers={'accept': 'application/json'})
if r.status_code != 404:
raise Exception(str(r.status_code) +
': Conversation could not be deleted.')
def get_subject(self):
""" return the subject of this conversation
"""
return self.get_data()['subject']
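A hypothetical usage sketch for the class above; the pod URL, credentials and conversation id are invented, and the Client constructor signature follows diaspy's README of this era, so treat it as illustrative only:

from diaspy.client import Client

client = Client(pod='https://pod.example.com', username='alice', password='...')
conv = Conversation('1234', client)   # '1234' is an example conversation id
print(conv.get_subject())             # fetched via get_data()
conv.answer('Sounds good, see you then!')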
|
|
85246afcca06fe60567392216ffc6805521594d3
|
billjobs/tests/tests_export_account_email.py
|
billjobs/tests/tests_export_account_email.py
|
from django.test import TestCase
from billjobs.admin import UserAdmin
class EmailExportTestCase(TestCase):
""" Tests for email account export """
def test_action_is_avaible(self):
""" Test admin can select the action in dropdown list """
self.assertTrue(hasattr(UserAdmin, 'export_email'))
|
Add test for email account export
|
Add test for email account export
|
Python
|
mit
|
ioO/billjobs
|
Add test for email account export
|
from django.test import TestCase
from billjobs.admin import UserAdmin
class EmailExportTestCase(TestCase):
""" Tests for email account export """
def test_action_is_avaible(self):
""" Test admin can select the action in dropdown list """
self.assertTrue(hasattr(UserAdmin, 'export_email'))
|
<commit_before><commit_msg>Add test for email account export<commit_after>
|
from django.test import TestCase
from billjobs.admin import UserAdmin
class EmailExportTestCase(TestCase):
""" Tests for email account export """
def test_action_is_avaible(self):
""" Test admin can select the action in dropdown list """
self.assertTrue(hasattr(UserAdmin, 'export_email'))
|
Add test for email account exportfrom django.test import TestCase
from billjobs.admin import UserAdmin
class EmailExportTestCase(TestCase):
""" Tests for email account export """
def test_action_is_avaible(self):
""" Test admin can select the action in dropdown list """
self.assertTrue(hasattr(UserAdmin, 'export_email'))
|
<commit_before><commit_msg>Add test for email account export<commit_after>from django.test import TestCase
from billjobs.admin import UserAdmin
class EmailExportTestCase(TestCase):
""" Tests for email account export """
def test_action_is_avaible(self):
""" Test admin can select the action in dropdown list """
self.assertTrue(hasattr(UserAdmin, 'export_email'))
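For context, a minimal sketch of what an export_email admin action could look like — a generic Django admin-action pattern, not billjobs' actual implementation:

import csv
from django.contrib import admin
from django.http import HttpResponse

class UserAdmin(admin.ModelAdmin):
    actions = ['export_email']

    def export_email(self, request, queryset):
        """Admin action: dump the selected users' email addresses as CSV."""
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="emails.csv"'
        writer = csv.writer(response)
        for user in queryset:
            writer.writerow([user.email])
        return response
    export_email.short_description = 'Export email of selected users'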
|
|
dd9b0833fa55fc2e4bbc9e4453c303c9cd15caed
|
day-03-2.py
|
day-03-2.py
|
with open('day-03-input.txt', 'r') as f:
puzzle_input = f.read().replace('\n', '')
# Coordinates: (x, y)
class Santa:
def __init__(self):
self.x = 0
self.y = 0
def move(self, direction):
if direction == '^':
self.y += 1
elif direction == '>':
self.x += 1
elif direction == '<':
self.x += -1
elif direction == 'v':
self.y += -1
def get_pos(self):
return (self.x, self.y)
# House = {(x, y): presents}
santa = Santa()
robosanta = Santa()
houses = {(0, 0): 2}
turn = 0
for direction in puzzle_input:
if turn == 0:
santa.move(direction)
position = santa.get_pos()
turn = 1
elif turn == 1:
robosanta.move(direction)
position = robosanta.get_pos()
turn = 0
if position in houses:
houses[position] += 1
else:
houses[position] = 1
def total_with_presents():
total = 0
for num in houses.values():
if num > 0:
total += 1
return total
print(total_with_presents())
# My answer: 2360
|
Complete day 3 part 2
|
Complete day 3 part 2
|
Python
|
mit
|
foxscotch/advent-of-code,foxscotch/advent-of-code
|
Complete day 3 part 2
|
with open('day-03-input.txt', 'r') as f:
puzzle_input = f.read().replace('\n', '')
# Coordinates: (x, y)
class Santa:
def __init__(self):
self.x = 0
self.y = 0
def move(self, direction):
if direction == '^':
self.y += 1
elif direction == '>':
self.x += 1
elif direction == '<':
self.x += -1
elif direction == 'v':
self.y += -1
def get_pos(self):
return (self.x, self.y)
# House = {(x, y): presents}
santa = Santa()
robosanta = Santa()
houses = {(0, 0): 2}
turn = 0
for direction in puzzle_input:
if turn == 0:
santa.move(direction)
position = santa.get_pos()
turn = 1
elif turn == 1:
robosanta.move(direction)
position = robosanta.get_pos()
turn = 0
if position in houses:
houses[position] += 1
else:
houses[position] = 1
def total_with_presents():
total = 0
for num in houses.values():
if num > 0:
total += 1
return total
print(total_with_presents())
# My answer: 2360
|
<commit_before><commit_msg>Complete day 3 part 2<commit_after>
|
with open('day-03-input.txt', 'r') as f:
puzzle_input = f.read().replace('\n', '')
# Coordinates: (x, y)
class Santa:
def __init__(self):
self.x = 0
self.y = 0
def move(self, direction):
if direction == '^':
self.y += 1
elif direction == '>':
self.x += 1
elif direction == '<':
self.x += -1
elif direction == 'v':
self.y += -1
def get_pos(self):
return (self.x, self.y)
# House = {(x, y): presents}
santa = Santa()
robosanta = Santa()
houses = {(0, 0): 2}
turn = 0
for direction in puzzle_input:
if turn == 0:
santa.move(direction)
position = santa.get_pos()
turn = 1
elif turn == 1:
robosanta.move(direction)
position = robosanta.get_pos()
turn = 0
if position in houses:
houses[position] += 1
else:
houses[position] = 1
def total_with_presents():
total = 0
for num in houses.values():
if num > 0:
total += 1
return total
print(total_with_presents())
# My answer: 2360
|
Complete day 3 part 2with open('day-03-input.txt', 'r') as f:
puzzle_input = f.read().replace('\n', '')
# Coordinates: (x, y)
class Santa:
def __init__(self):
self.x = 0
self.y = 0
def move(self, direction):
if direction == '^':
self.y += 1
elif direction == '>':
self.x += 1
elif direction == '<':
self.x += -1
elif direction == 'v':
self.y += -1
def get_pos(self):
return (self.x, self.y)
# House = {(x, y): presents}
santa = Santa()
robosanta = Santa()
houses = {(0, 0): 2}
turn = 0
for direction in puzzle_input:
if turn == 0:
santa.move(direction)
position = santa.get_pos()
turn = 1
elif turn == 1:
robosanta.move(direction)
position = robosanta.get_pos()
turn = 0
if position in houses:
houses[position] += 1
else:
houses[position] = 1
def total_with_presents():
total = 0
for num in houses.values():
if num > 0:
total += 1
return total
print(total_with_presents())
# My answer: 2360
|
<commit_before><commit_msg>Complete day 3 part 2<commit_after>with open('day-03-input.txt', 'r') as f:
puzzle_input = f.read().replace('\n', '')
# Coordinates: (x, y)
class Santa:
def __init__(self):
self.x = 0
self.y = 0
def move(self, direction):
if direction == '^':
self.y += 1
elif direction == '>':
self.x += 1
elif direction == '<':
self.x += -1
elif direction == 'v':
self.y += -1
def get_pos(self):
return (self.x, self.y)
# House = {(x, y): presents}
santa = Santa()
robosanta = Santa()
houses = {(0, 0): 2}
turn = 0
for direction in puzzle_input:
if turn == 0:
santa.move(direction)
position = santa.get_pos()
turn = 1
elif turn == 1:
robosanta.move(direction)
position = robosanta.get_pos()
turn = 0
if position in houses:
houses[position] += 1
else:
houses[position] = 1
def total_with_presents():
total = 0
for num in houses.values():
if num > 0:
total += 1
return total
print(total_with_presents())
# My answer: 2360
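The same logic, repackaged as a function and checked against the puzzle's published part-two examples (the function name and structure are mine, not the script's):

def count_houses(directions):
    """Count houses visited when Santa and Robo-Santa alternate moves."""
    moves = {'^': (0, 1), 'v': (0, -1), '>': (1, 0), '<': (-1, 0)}
    positions = [(0, 0), (0, 0)]  # [santa, robo-santa]
    houses = {(0, 0)}
    for turn, d in enumerate(directions):
        x, y = positions[turn % 2]
        dx, dy = moves[d]
        positions[turn % 2] = (x + dx, y + dy)
        houses.add(positions[turn % 2])
    return len(houses)

assert count_houses('^v') == 3
assert count_houses('^>v<') == 3
assert count_houses('^v^v^v^v^v') == 11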
|
|
b8dff8fb466c3627af75b06af5d46eebf0e3c12c
|
game_of_thrones.py
|
game_of_thrones.py
|
'''
Dothraki are planning an attack to usurp King Robert's throne. King Robert
learns of this conspiracy from Raven and plans to lock the single door
through which the enemy can enter his kingdom.
But, to lock the door he needs a key that is an anagram of a certain
palindrome string.The king has a string composed of lowercase English
letters. Help him figure out whether any anagram of the string can be
a palindrome or not.
Input Format:
A single line which contains the input string.
Constraints :
1 ≤ length of string ≤ 10^5
Each character of the string is a lowercase English letter.
Output Format :
A single line which contains YES or NO in uppercase.
Sample Input : 01
aaabbbb
Sample Output : 01
YES
Explanation
A palindrome permutation of the given string is bbaaabb.
Sample Input : 02
cdefghmnopqrstuvw
Sample Output : 02
NO
Explanation
You can verify that the given string has no palindrome permutation.
Sample Input : 03
cdcdcdcdeeeef
Sample Output : 03
YES
Explanation
A palindrome permutation of the given string is ddcceefeeccdd
'''
string = list(raw_input())
found = True
# Write the code to find the required palindrome and then assign the variable 'found' a value of True or False
middle_occured = False
while(len(string)>1):
char = string.pop(0)
val_exist = False
try:
string.index(char)
val_exist = True
except:
val_exist = False
if( not val_exist and middle_occured):
found = False
break
elif(not val_exist and not middle_occured):
middle_occured = True
else:
string.pop(string.index(char))
if len(string) == 1 and middle_occured:
    # a second odd-count character remains, so no anagram is a palindrome
    found = False
if not found:
print("NO")
else:
print("YES")
|
Add Game of Thrones 1
|
Add Game of Thrones 1
|
Python
|
mit
|
GingerNinja23/programming-questions
|
Add Game of Thrones 1
|
'''
Dothraki are planning an attack to usurp King Robert's throne. King Robert
learns of this conspiracy from Raven and plans to lock the single door
through which the enemy can enter his kingdom.
But, to lock the door he needs a key that is an anagram of a certain
palindrome string.The king has a string composed of lowercase English
letters. Help him figure out whether any anagram of the string can be
a palindrome or not.
Input Format:
A single line which contains the input string.
Constraints :
1 ≤ length of string ≤ 10^5
Each character of the string is a lowercase English letter.
Output Format :
A single line which contains YES or NO in uppercase.
Sample Input : 01
aaabbbb
Sample Output : 01
YES
Explanation
A palindrome permutation of the given string is bbaaabb.
Sample Input : 02
cdefghmnopqrstuvw
Sample Output : 02
NO
Explanation
You can verify that the given string has no palindrome permutation.
Sample Input : 03
cdcdcdcdeeeef
Sample Output : 03
YES
Explanation
A palindrome permutation of the given string is ddcceefeeccdd
'''
string = list(raw_input())
found = True
# Write the code to find the required palindrome and then assign the variable 'found' a value of True or False
middle_occured = False
while(len(string)>1):
char = string.pop(0)
val_exist = False
try:
string.index(char)
val_exist = True
except:
val_exist = False
if( not val_exist and middle_occured):
found = False
break
elif(not val_exist and not middle_occured):
middle_occured = True
else:
string.pop(string.index(char))
if len(string) == 1 and middle_occured:
    # a second odd-count character remains, so no anagram is a palindrome
    found = False
if not found:
print("NO")
else:
print("YES")
|
<commit_before><commit_msg>Add Game of Thrones 1<commit_after>
|
'''
Dothraki are planning an attack to usurp King Robert's throne. King Robert
learns of this conspiracy from Raven and plans to lock the single door
through which the enemy can enter his kingdom.
But, to lock the door he needs a key that is an anagram of a certain
palindrome string.The king has a string composed of lowercase English
letters. Help him figure out whether any anagram of the string can be
a palindrome or not.
Input Format:
A single line which contains the input string.
Constraints :
1 ≤ length of string ≤ 10^5
Each character of the string is a lowercase English letter.
Output Format :
A single line which contains YES or NO in uppercase.
Sample Input : 01
aaabbbb
Sample Output : 01
YES
Explanation
A palindrome permutation of the given string is bbaaabb.
Sample Input : 02
cdefghmnopqrstuvw
Sample Output : 02
NO
Explanation
You can verify that the given string has no palindrome permutation.
Sample Input : 03
cdcdcdcdeeeef
Sample Output : 03
YES
Explanation
A palindrome permutation of the given string is ddcceefeeccdd
'''
string = list(raw_input())
found = True
# Write the code to find the required palindrome and then assign the variable 'found' a value of True or False
middle_occured = False
while(len(string)>1):
char = string.pop(0)
val_exist = False
try:
string.index(char)
val_exist = True
except:
val_exist = False
if( not val_exist and middle_occured):
found = False
break
elif(not val_exist and not middle_occured):
middle_occured = True
else:
string.pop(string.index(char))
if len(string) == 1 and middle_occured:
    # a second odd-count character remains, so no anagram is a palindrome
    found = False
if not found:
print("NO")
else:
print("YES")
|
Add Game of Thrones 1'''
Dothraki are planning an attack to usurp King Robert's throne. King Robert
learns of this conspiracy from Raven and plans to lock the single door
through which the enemy can enter his kingdom.
But, to lock the door he needs a key that is an anagram of a certain
palindrome string.The king has a string composed of lowercase English
letters. Help him figure out whether any anagram of the string can be
a palindrome or not.
Input Format:
A single line which contains the input string.
Constraints :
1 ≤ length of string ≤ 10^5
Each character of the string is a lowercase English letter.
Output Format :
A single line which contains YES or NO in uppercase.
Sample Input : 01
aaabbbb
Sample Output : 01
YES
Explanation
A palindrome permutation of the given string is bbaaabb.
Sample Input : 02
cdefghmnopqrstuvw
Sample Output : 02
NO
Explanation
You can verify that the given string has no palindrome permutation.
Sample Input : 03
cdcdcdcdeeeef
Sample Output : 03
YES
Explanation
A palindrome permutation of the given string is ddcceefeeccdd
'''
string = list(raw_input())
found = True
# Write the code to find the required palindrome and then assign the variable 'found' a value of True or False
middle_occured = False
while(len(string)>1):
char = string.pop(0)
val_exist = False
try:
string.index(char)
val_exist = True
except:
val_exist = False
if( not val_exist and middle_occured):
found = False
break
elif(not val_exist and not middle_occured):
middle_occured = True
else:
string.pop(string.index(char))
if len(string) == 1 and middle_occured:
    # a second odd-count character remains, so no anagram is a palindrome
    found = False
if not found:
print("NO")
else:
print("YES")
|
<commit_before><commit_msg>Add Game of Thrones 1<commit_after>'''
Dothraki are planning an attack to usurp King Robert's throne. King Robert
learns of this conspiracy from Raven and plans to lock the single door
through which the enemy can enter his kingdom.
But, to lock the door he needs a key that is an anagram of a certain
palindrome string.The king has a string composed of lowercase English
letters. Help him figure out whether any anagram of the string can be
a palindrome or not.
Input Format:
A single line which contains the input string.
Constraints :
1 ≤ length of string ≤ 10^5
Each character of the string is a lowercase English letter.
Output Format :
A single line which contains YES or NO in uppercase.
Sample Input : 01
aaabbbb
Sample Output : 01
YES
Explanation
A palindrome permutation of the given string is bbaaabb.
Sample Input : 02
cdefghmnopqrstuvw
Sample Output : 02
NO
Explanation
You can verify that the given string has no palindrome permutation.
Sample Input : 03
cdcdcdcdeeeef
Sample Output : 03
YES
Explanation
A palindrome permutation of the given string is ddcceefeeccdd
'''
string = list(raw_input())
found = True
# Write the code to find the required palindrome and then assign the variable 'found' a value of True or False
middle_occured = False
while(len(string)>1):
char = string.pop(0)
val_exist = False
try:
string.index(char)
val_exist = True
except:
val_exist = False
if( not val_exist and middle_occured):
found = False
break
elif(not val_exist and not middle_occured):
middle_occured = True
else:
string.pop(string.index(char))
if len(string) == 1 and middle_occured:
    # a second odd-count character remains, so no anagram is a palindrome
    found = False
if not found:
print("NO")
else:
print("YES")
|
|
a12be94ad54b826fb664b953dbeaabdbe5451f6c
|
games/migrations/0022_installer_reason.py
|
games/migrations/0022_installer_reason.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-11-04 21:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('games', '0021_auto_20170824_0548'),
]
operations = [
migrations.AddField(
model_name='installer',
name='reason',
field=models.CharField(blank=True, max_length=512, null=True),
),
]
|
Add migrations for installer reason
|
Add migrations for installer reason
|
Python
|
agpl-3.0
|
lutris/website,lutris/website,lutris/website,lutris/website
|
Add migrations for installer reason
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-11-04 21:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('games', '0021_auto_20170824_0548'),
]
operations = [
migrations.AddField(
model_name='installer',
name='reason',
field=models.CharField(blank=True, max_length=512, null=True),
),
]
|
<commit_before><commit_msg>Add migrations for installer reason<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-11-04 21:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('games', '0021_auto_20170824_0548'),
]
operations = [
migrations.AddField(
model_name='installer',
name='reason',
field=models.CharField(blank=True, max_length=512, null=True),
),
]
|
Add migrations for installer reason# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-11-04 21:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('games', '0021_auto_20170824_0548'),
]
operations = [
migrations.AddField(
model_name='installer',
name='reason',
field=models.CharField(blank=True, max_length=512, null=True),
),
]
|
<commit_before><commit_msg>Add migrations for installer reason<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-11-04 21:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('games', '0021_auto_20170824_0548'),
]
operations = [
migrations.AddField(
model_name='installer',
name='reason',
field=models.CharField(blank=True, max_length=512, null=True),
),
]
|
|
9e941899aab38888a6e34bbe268eb7de7f8e27ee
|
zou/migrations/versions/443d1e78a932_.py
|
zou/migrations/versions/443d1e78a932_.py
|
"""empty message
Revision ID: 443d1e78a932
Revises: 59e149a772cf
Create Date: 2018-08-07 21:36:03.384838
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '443d1e78a932'
down_revision = 'c726b98be194'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('task_type', sa.Column('allow_timelog', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('task_type', 'allow_timelog')
# ### end Alembic commands ###
|
Add migration file for allow timelog flag
|
Add migration file for allow timelog flag
|
Python
|
agpl-3.0
|
cgwire/zou
|
Add migration file for allow timelog flag
|
"""empty message
Revision ID: 443d1e78a932
Revises: 59e149a772cf
Create Date: 2018-08-07 21:36:03.384838
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '443d1e78a932'
down_revision = 'c726b98be194'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('task_type', sa.Column('allow_timelog', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('task_type', 'allow_timelog')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add migration file for allow timelog flag<commit_after>
|
"""empty message
Revision ID: 443d1e78a932
Revises: 59e149a772cf
Create Date: 2018-08-07 21:36:03.384838
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '443d1e78a932'
down_revision = 'c726b98be194'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('task_type', sa.Column('allow_timelog', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('task_type', 'allow_timelog')
# ### end Alembic commands ###
|
Add migration file for allow timelog flag"""empty message
Revision ID: 443d1e78a932
Revises: 59e149a772cf
Create Date: 2018-08-07 21:36:03.384838
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '443d1e78a932'
down_revision = 'c726b98be194'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('task_type', sa.Column('allow_timelog', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('task_type', 'allow_timelog')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add migration file for allow timelog flag<commit_after>"""empty message
Revision ID: 443d1e78a932
Revises: 59e149a772cf
Create Date: 2018-08-07 21:36:03.384838
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '443d1e78a932'
down_revision = 'c726b98be194'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('task_type', sa.Column('allow_timelog', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('task_type', 'allow_timelog')
# ### end Alembic commands ###
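One thing this migration leaves open: existing task_type rows end up with NULL in the new column. A hedged sketch of a backfill-then-tighten variant — the choice of TRUE as the default is my assumption, not part of the commit:

def upgrade():
    op.add_column('task_type',
                  sa.Column('allow_timelog', sa.Boolean(), nullable=True))
    # backfill existing rows before making the column NOT NULL
    op.execute("UPDATE task_type SET allow_timelog = TRUE")
    op.alter_column('task_type', 'allow_timelog', nullable=False)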
|
|
eb3dd84713aff7220ec71fa3c4196aa5c3853e70
|
tests/request_methods/test_subscriptions.py
|
tests/request_methods/test_subscriptions.py
|
"""
Tests for the Subscriptions API class.
"""
import unittest
import mws
from .utils import CommonRequestTestTools
class SubscriptionsTestCase(unittest.TestCase, CommonRequestTestTools):
"""
Test cases for Subscriptions.
"""
# TODO: Add remaining methods for Subscriptions
def setUp(self):
self.api = mws.Subscriptions(
self.CREDENTIAL_ACCESS,
self.CREDENTIAL_SECRET,
self.CREDENTIAL_ACCOUNT,
auth_token=self.CREDENTIAL_TOKEN
)
self.api._test_request_params = True
|
Add stub for Subscriptions API testing
|
Add stub for Subscriptions API testing
|
Python
|
unlicense
|
GriceTurrble/python-amazon-mws,Bobspadger/python-amazon-mws
|
Add stub for Subscriptions API testing
|
"""
Tests for the Subscriptions API class.
"""
import unittest
import mws
from .utils import CommonRequestTestTools
class SubscriptionsTestCase(unittest.TestCase, CommonRequestTestTools):
"""
Test cases for Subscriptions.
"""
# TODO: Add remaining methods for Subscriptions
def setUp(self):
self.api = mws.Subscriptions(
self.CREDENTIAL_ACCESS,
self.CREDENTIAL_SECRET,
self.CREDENTIAL_ACCOUNT,
auth_token=self.CREDENTIAL_TOKEN
)
self.api._test_request_params = True
|
<commit_before><commit_msg>Add stub for Subscriptions API testing<commit_after>
|
"""
Tests for the Subscriptions API class.
"""
import unittest
import mws
from .utils import CommonRequestTestTools
class SubscriptionsTestCase(unittest.TestCase, CommonRequestTestTools):
"""
Test cases for Subscriptions.
"""
# TODO: Add remaining methods for Subscriptions
def setUp(self):
self.api = mws.Subscriptions(
self.CREDENTIAL_ACCESS,
self.CREDENTIAL_SECRET,
self.CREDENTIAL_ACCOUNT,
auth_token=self.CREDENTIAL_TOKEN
)
self.api._test_request_params = True
|
Add stub for Subscriptions API testing"""
Tests for the Subscriptions API class.
"""
import unittest
import mws
from .utils import CommonRequestTestTools
class SubscriptionsTestCase(unittest.TestCase, CommonRequestTestTools):
"""
Test cases for Subscriptions.
"""
# TODO: Add remaining methods for Subscriptions
def setUp(self):
self.api = mws.Subscriptions(
self.CREDENTIAL_ACCESS,
self.CREDENTIAL_SECRET,
self.CREDENTIAL_ACCOUNT,
auth_token=self.CREDENTIAL_TOKEN
)
self.api._test_request_params = True
|
<commit_before><commit_msg>Add stub for Subscriptions API testing<commit_after>"""
Tests for the Subscriptions API class.
"""
import unittest
import mws
from .utils import CommonRequestTestTools
class SubscriptionsTestCase(unittest.TestCase, CommonRequestTestTools):
"""
Test cases for Subscriptions.
"""
# TODO: Add remaining methods for Subscriptions
def setUp(self):
self.api = mws.Subscriptions(
self.CREDENTIAL_ACCESS,
self.CREDENTIAL_SECRET,
self.CREDENTIAL_ACCOUNT,
auth_token=self.CREDENTIAL_TOKEN
)
self.api._test_request_params = True
|
|
e67e32605552ebfd07e7ade5360b560c4010bd31
|
fluent_blogs/migrations/0003_author_on_delete_set_null.py
|
fluent_blogs/migrations/0003_author_on_delete_set_null.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-30 19:25
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('fluent_blogs', '0002_intro_allow_null'),
]
operations = [
migrations.AlterField(
model_name='entry',
name='author',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='author'),
),
]
|
Add missing migration for on_delete=SET_NULL change
|
Add missing migration for on_delete=SET_NULL change
Added in Django 2.0 preparation (f9762719be92d0f33ead67ffa942a030ff514761)
|
Python
|
apache-2.0
|
edoburu/django-fluent-blogs,edoburu/django-fluent-blogs
|
Add missing migration for on_delete=SET_NULL change
Added in Django 2.0 preparation (f9762719be92d0f33ead67ffa942a030ff514761)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-30 19:25
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('fluent_blogs', '0002_intro_allow_null'),
]
operations = [
migrations.AlterField(
model_name='entry',
name='author',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='author'),
),
]
|
<commit_before><commit_msg>Add missing migration for on_delete=SET_NULL change
Added in Django 2.0 preparation (f9762719be92d0f33ead67ffa942a030ff514761)<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-30 19:25
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('fluent_blogs', '0002_intro_allow_null'),
]
operations = [
migrations.AlterField(
model_name='entry',
name='author',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='author'),
),
]
|
Add missing migration for on_delete=SET_NULL change
Added in Django 2.0 preparation (f9762719be92d0f33ead67ffa942a030ff514761)# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-30 19:25
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('fluent_blogs', '0002_intro_allow_null'),
]
operations = [
migrations.AlterField(
model_name='entry',
name='author',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='author'),
),
]
|
<commit_before><commit_msg>Add missing migration for on_delete=SET_NULL change
Added in Django 2.0 preparation (f9762719be92d0f33ead67ffa942a030ff514761)<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-30 19:25
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('fluent_blogs', '0002_intro_allow_null'),
]
operations = [
migrations.AlterField(
model_name='entry',
name='author',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='author'),
),
]
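For reference, the model-side declaration this migration mirrors — a generic Django sketch rather than fluent-blogs' exact source; SET_NULL requires null=True on the field, which is why the two appear together:

from django.conf import settings
from django.db import models

class Entry(models.Model):
    author = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        null=True,                    # required for on_delete=SET_NULL
        on_delete=models.SET_NULL,
        verbose_name='author',
    )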
|
|
2c0903218ec0c4ec710f12660c83a23964f177ba
|
atlassian_jwt_auth/tests/test_verifier.py
|
atlassian_jwt_auth/tests/test_verifier.py
|
import unittest
import mock
from ..signer import JWTAuthSigner
from ..verifier import JWTAuthVerifier
from .utils import (
get_new_rsa_private_key_in_pem_format,
get_public_key_pem_for_private_key_pem,
)
class TestJWTAuthVerifier(unittest.TestCase):
""" tests for the JWTAuthVerifier class. """
def setUp(self):
self._private_key_pem = get_new_rsa_private_key_in_pem_format()
self._public_key_pem = get_public_key_pem_for_private_key_pem(
self._private_key_pem)
def _setup_mock_public_key_retriever(self, pub_key_pem):
m_public_key_ret = mock.Mock()
m_public_key_ret.retrieve.return_value = pub_key_pem.decode()
return m_public_key_ret
def test_verify_claims_with_valid_jwt(self):
""" test that verify_claims verifies a valid jwt. """
expected_audience = 'aud_x'
expected_issuer = 'issuer'
expected_key_id = '%s/a' % expected_issuer
m_public_key_ret = self._setup_mock_public_key_retriever(
self._public_key_pem)
verifier = JWTAuthVerifier(m_public_key_ret)
signer = JWTAuthSigner(
expected_issuer,
expected_key_id,
self._private_key_pem.decode())
signed_claims = signer.get_signed_claims(expected_audience)
v_claims = verifier.verify_claims(signed_claims, expected_audience)
self.assertIsNotNone(v_claims)
self.assertEqual(v_claims['aud'], expected_audience)
self.assertEqual(v_claims['iss'], expected_issuer)
|
Add a test for the JWTAuthVerifier.
|
Add a test for the JWTAuthVerifier.
Signed-off-by: David Black <c4b737561a711e07c31fd1e1811f33a5d770e31c@atlassian.com>
|
Python
|
mit
|
atlassian/asap-authentication-python
|
Add a test for the JWTAuthVerifier.
Signed-off-by: David Black <c4b737561a711e07c31fd1e1811f33a5d770e31c@atlassian.com>
|
import unittest
import mock
from ..signer import JWTAuthSigner
from ..verifier import JWTAuthVerifier
from .utils import (
get_new_rsa_private_key_in_pem_format,
get_public_key_pem_for_private_key_pem,
)
class TestJWTAuthVerifier(unittest.TestCase):
""" tests for the JWTAuthVerifier class. """
def setUp(self):
self._private_key_pem = get_new_rsa_private_key_in_pem_format()
self._public_key_pem = get_public_key_pem_for_private_key_pem(
self._private_key_pem)
def _setup_mock_public_key_retriever(self, pub_key_pem):
m_public_key_ret = mock.Mock()
m_public_key_ret.retrieve.return_value = pub_key_pem.decode()
return m_public_key_ret
def test_verify_claims_with_valid_jwt(self):
""" test that verify_claims verifies a valid jwt. """
expected_audience = 'aud_x'
expected_issuer = 'issuer'
expected_key_id = '%s/a' % expected_issuer
m_public_key_ret = self._setup_mock_public_key_retriever(
self._public_key_pem)
verifier = JWTAuthVerifier(m_public_key_ret)
signer = JWTAuthSigner(
expected_issuer,
expected_key_id,
self._private_key_pem.decode())
signed_claims = signer.get_signed_claims(expected_audience)
v_claims = verifier.verify_claims(signed_claims, expected_audience)
self.assertIsNotNone(v_claims)
self.assertEqual(v_claims['aud'], expected_audience)
self.assertEqual(v_claims['iss'], expected_issuer)
|
<commit_before><commit_msg>Add a test for the JWTAuthVerifier.
Signed-off-by: David Black <c4b737561a711e07c31fd1e1811f33a5d770e31c@atlassian.com><commit_after>
|
import unittest
import mock
from ..signer import JWTAuthSigner
from ..verifier import JWTAuthVerifier
from .utils import (
get_new_rsa_private_key_in_pem_format,
get_public_key_pem_for_private_key_pem,
)
class TestJWTAuthVerifier(unittest.TestCase):
""" tests for the JWTAuthVerifier class. """
def setUp(self):
self._private_key_pem = get_new_rsa_private_key_in_pem_format()
self._public_key_pem = get_public_key_pem_for_private_key_pem(
self._private_key_pem)
def _setup_mock_public_key_retriever(self, pub_key_pem):
m_public_key_ret = mock.Mock()
m_public_key_ret.retrieve.return_value = pub_key_pem.decode()
return m_public_key_ret
def test_verify_claims_with_valid_jwt(self):
""" test that verify_claims verifies a valid jwt. """
expected_audience = 'aud_x'
expected_issuer = 'issuer'
expected_key_id = '%s/a' % expected_issuer
m_public_key_ret = self._setup_mock_public_key_retriever(
self._public_key_pem)
verifier = JWTAuthVerifier(m_public_key_ret)
signer = JWTAuthSigner(
expected_issuer,
expected_key_id,
self._private_key_pem.decode())
signed_claims = signer.get_signed_claims(expected_audience)
v_claims = verifier.verify_claims(signed_claims, expected_audience)
self.assertIsNotNone(v_claims)
self.assertEqual(v_claims['aud'], expected_audience)
self.assertEqual(v_claims['iss'], expected_issuer)
|
Add a test for the JWTAuthVerifier.
Signed-off-by: David Black <c4b737561a711e07c31fd1e1811f33a5d770e31c@atlassian.com>import unittest
import mock
from ..signer import JWTAuthSigner
from ..verifier import JWTAuthVerifier
from .utils import (
get_new_rsa_private_key_in_pem_format,
get_public_key_pem_for_private_key_pem,
)
class TestJWTAuthVerifier(unittest.TestCase):
""" tests for the JWTAuthVerifier class. """
def setUp(self):
self._private_key_pem = get_new_rsa_private_key_in_pem_format()
self._public_key_pem = get_public_key_pem_for_private_key_pem(
self._private_key_pem)
def _setup_mock_public_key_retriever(self, pub_key_pem):
m_public_key_ret = mock.Mock()
m_public_key_ret.retrieve.return_value = pub_key_pem.decode()
return m_public_key_ret
def test_verify_claims_with_valid_jwt(self):
""" test that verify_claims verifies a valid jwt. """
expected_audience = 'aud_x'
expected_issuer = 'issuer'
expected_key_id = '%s/a' % expected_issuer
m_public_key_ret = self._setup_mock_public_key_retriever(
self._public_key_pem)
verifier = JWTAuthVerifier(m_public_key_ret)
signer = JWTAuthSigner(
expected_issuer,
expected_key_id,
self._private_key_pem.decode())
signed_claims = signer.get_signed_claims(expected_audience)
v_claims = verifier.verify_claims(signed_claims, expected_audience)
self.assertIsNotNone(v_claims)
self.assertEqual(v_claims['aud'], expected_audience)
self.assertEqual(v_claims['iss'], expected_issuer)
|
<commit_before><commit_msg>Add a test for the JWTAuthVerifier.
Signed-off-by: David Black <c4b737561a711e07c31fd1e1811f33a5d770e31c@atlassian.com><commit_after>import unittest
import mock
from ..signer import JWTAuthSigner
from ..verifier import JWTAuthVerifier
from .utils import (
get_new_rsa_private_key_in_pem_format,
get_public_key_pem_for_private_key_pem,
)
class TestJWTAuthVerifier(unittest.TestCase):
""" tests for the JWTAuthVerifier class. """
def setUp(self):
self._private_key_pem = get_new_rsa_private_key_in_pem_format()
self._public_key_pem = get_public_key_pem_for_private_key_pem(
self._private_key_pem)
def _setup_mock_public_key_retriever(self, pub_key_pem):
m_public_key_ret = mock.Mock()
m_public_key_ret.retrieve.return_value = pub_key_pem.decode()
return m_public_key_ret
def test_verify_claims_with_valid_jwt(self):
""" test that verify_claims verifies a valid jwt. """
expected_audience = 'aud_x'
expected_issuer = 'issuer'
expected_key_id = '%s/a' % expected_issuer
m_public_key_ret = self._setup_mock_public_key_retriever(
self._public_key_pem)
verifier = JWTAuthVerifier(m_public_key_ret)
signer = JWTAuthSigner(
expected_issuer,
expected_key_id,
self._private_key_pem.decode())
signed_claims = signer.get_signed_claims(expected_audience)
v_claims = verifier.verify_claims(signed_claims, expected_audience)
self.assertIsNotNone(v_claims)
self.assertEqual(v_claims['aud'], expected_audience)
self.assertEqual(v_claims['iss'], expected_issuer)
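A natural companion is the negative path. A sketch for the same TestCase — with the caveat that the concrete exception type depends on the underlying JWT library, so this only asserts that some exception surfaces:

    def test_verify_claims_with_wrong_audience(self):
        """ test that verify_claims rejects a jwt meant for another audience. """
        m_public_key_ret = self._setup_mock_public_key_retriever(
            self._public_key_pem)
        verifier = JWTAuthVerifier(m_public_key_ret)
        signer = JWTAuthSigner(
            'issuer', 'issuer/a', self._private_key_pem.decode())
        signed_claims = signer.get_signed_claims('aud_x')
        with self.assertRaises(Exception):
            verifier.verify_claims(signed_claims, 'different_audience')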
|
|
e2eda2123f24f65f3c17626b95abca28ed951479
|
gen_sine_table.py
|
gen_sine_table.py
|
import math
with open("sine_table.h", "w") as f:
sines = []
for i in range(-1, 362):
sines.append(str("%.15f" % math.sin(math.radians(i))))
f.write("""#ifndef SINE_TABLE_H
#define SINE_TABLE_H
const double sine_table[] = { %s };
#endif /* SINE_TABLE_H */""" % (", ".join(sines), ))
|
Use script to generate sine table for interpolation
|
Use script to generate sine table for interpolation
|
Python
|
mit
|
ranisalt/epos-pcm-to-rgb,ranisalt/epos-pcm-to-rgb
|
Use script to generate sine table for interpolation
|
import math
with open("sine_table.h", "w") as f:
sines = []
for i in range(-1, 362):
sines.append(str("%.15f" % math.sin(math.radians(i))))
f.write("""#ifndef SINE_TABLE_H
#define SINE_TABLE_H
const double sine_table[] = { %s };
#endif /* SINE_TABLE_H */""" % (", ".join(sines), ))
|
<commit_before><commit_msg>Use script to generate sine table for interpolation<commit_after>
|
import math
with open("sine_table.h", "w") as f:
sines = []
for i in range(-1, 362):
sines.append(str("%.15f" % math.sin(math.radians(i))))
f.write("""#ifndef SINE_TABLE_H
#define SINE_TABLE_H
const double sine_table[] = { %s };
#endif /* SINE_TABLE_H */""" % (", ".join(sines), ))
|
Use script to generate sine table for interpolationimport math
with open("sine_table.h", "w") as f:
sines = []
for i in range(-1, 362):
sines.append(str("%.15f" % math.sin(math.radians(i))))
f.write("""#ifndef SINE_TABLE_H
#define SINE_TABLE_H
const double sine_table[] = { %s };
#endif /* SINE_TABLE_H */""" % (", ".join(sines), ))
|
<commit_before><commit_msg>Use script to generate sine table for interpolation<commit_after>import math
with open("sine_table.h", "w") as f:
sines = []
for i in range(-1, 362):
sines.append(str("%.15f" % math.sin(math.radians(i))))
f.write("""#ifndef SINE_TABLE_H
#define SINE_TABLE_H
const double sine_table[] = { %s };
#endif /* SINE_TABLE_H */""" % (", ".join(sines), ))
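The -1..361 degree range presumably exists so a consumer can interpolate linearly at 0 and 360 without bounds checks. A Python sketch of that lookup under my reading of the layout (index 0 holds sin(-1 deg)):

import math

sine_table = [math.sin(math.radians(i)) for i in range(-1, 362)]

def sin_deg(angle):
    """Linearly interpolate sin(angle) from the table; angle in [0, 360)."""
    i = int(angle)                # whole degrees
    frac = angle - i
    lo = sine_table[i + 1]        # +1 offset: the table starts at -1 degree
    hi = sine_table[i + 2]
    return lo + frac * (hi - lo)

assert abs(sin_deg(30.5) - math.sin(math.radians(30.5))) < 1e-4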
|
|
25958e50f39a747a871961b5d97f027a925b7928
|
code/data_setup/parse_alov_bb.py
|
code/data_setup/parse_alov_bb.py
|
"""A script for parsing the Alov bounding box `.ann` files."""
import sys
import os
import itertools
import pandas as pd
import xml.etree.ElementTree as ET
if __name__ == '__main__':
bbox_dir = sys.argv[1]
output_filepath = sys.argv[2]
ann_files_by_dir = (i[2] for i in os.walk(bbox_dir))
bbox_ann_filenames = itertools.chain(*ann_files_by_dir)
end_lst = []
    for filename in bbox_ann_filenames:
end_lst.extend(parse_file(bbox_dir, filename))
|
Initialize alov parsing code with main block
|
Initialize alov parsing code with main block
|
Python
|
mit
|
dansbecker/motion-tracking
|
Initialize alov parsing code with main block
|
"""A script for parsing the Alov bounding box `.ann` files."""
import sys
import os
import itertools
import pandas as pd
import xml.etree.ElementTree as ET
if __name__ == '__main__':
bbox_dir = sys.argv[1]
output_filepath = sys.argv[2]
ann_files_by_dir = (i[2] for i in os.walk(bbox_dir))
bbox_ann_filenames = itertools.chain(*ann_files_by_dir)
end_lst = []
    for filename in bbox_ann_filenames:
end_lst.extend(parse_file(bbox_dir, filename))
|
<commit_before><commit_msg>Initialize alov parsing code with main block<commit_after>
|
"""A script for parsing the Alov bounding box `.ann` files."""
import sys
import os
import itertools
import pandas as pd
import xml.etree.ElementTree as ET
if __name__ == '__main__':
bbox_dir = sys.argv[1]
output_filepath = sys.argv[2]
ann_files_by_dir = (i[2] for i in os.walk(bbox_dir))
bbox_ann_filenames = itertools.chain(*ann_files_by_dir)
end_lst = []
    for filename in bbox_ann_filenames:
end_lst.extend(parse_file(bbox_dir, filename))
|
Initialize alov parsing code with main block"""A script for parsing the Alov bounding box `.ann` files."""
import sys
import os
import itertools
import pandas as pd
import xml.etree.ElementTree as ET
if __name__ == '__main__':
bbox_dir = sys.argv[1]
output_filepath = sys.argv[2]
ann_files_by_dir = (i[2] for i in os.walk(bbox_dir))
bbox_ann_filenames = itertools.chain(*ann_files_by_dir)
end_lst = []
    for filename in bbox_ann_filenames:
end_lst.extend(parse_file(bbox_dir, filename))
|
<commit_before><commit_msg>Initialize alov parsing code with main block<commit_after>"""A script for parsing the Alov bounding box `.ann` files."""
import sys
import os
import itertools
import pandas as pd
import xml.etree.ElementTree as ET
if __name__ == '__main__':
bbox_dir = sys.argv[1]
output_filepath = sys.argv[2]
ann_files_by_dir = (i[2] for i in os.walk(bbox_dir))
bbox_ann_filenames = itertools.chain(*ann_files_by_dir)
end_lst = []
for filename in bbox_ann_filenames:
end_lst.extend(parse_file(bbox_dir, filename))
|
|
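The main block above calls a parse_file helper that this initial commit does not define yet. A hypothetical sketch of what it could return, assuming the common ALOV .ann layout of a frame number followed by the x/y coordinates of four box corners per line; the field names and the layout itself are illustrative assumptions, not the repository's implementation:

import os

def parse_file(bbox_dir, filename):
    """Hypothetical .ann parser returning one dict per annotated frame.

    Assumes each line holds a frame number plus x/y pairs for four
    bounding-box corners (nine whitespace-separated numbers); verify
    against the real ALOV annotations before relying on this.
    """
    rows = []
    with open(os.path.join(bbox_dir, filename)) as f:
        for line in f:
            fields = [float(v) for v in line.split()]
            frame, coords = int(fields[0]), fields[1:]
            xs, ys = coords[0::2], coords[1::2]
            rows.append({'filename': filename, 'frame': frame,
                         'x0': min(xs), 'y0': min(ys),
                         'x1': max(xs), 'y1': max(ys)})
    return rows
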
375e97c18daa7f12896dd3f6b9dccd179f22aa03
|
gym_frozen_lake.py
|
gym_frozen_lake.py
|
# -*- coding: utf-8 -*-
import gym
from gym import wrappers
import numpy as np
import agent
# helper function and dictionaries
def running_average(a, size=100):
"""calculates the running average over array a"""
ra = []
ra.append(np.sum(a[:size]))
for i in range(size, len(a)):
ra.append(ra[-1] + a[i] - a[i - size])
return 1. / size * np.array(ra)
translate_move_2d_to_1d = {
tuple([-1., 0.]): 0,
tuple([0., 1.]): 1,
tuple([1., 0.]): 2,
tuple([0., -1.]): 3,
}
translate_pos_1d_to_2d = {
}
i = 0
for y in np.arange(0., 4., 1.):
for x in np.arange(0., 4., 1.):
translate_pos_1d_to_2d[i] = np.array([x, y])
i += 1
# define parameters
env_name = 'FrozenLake-v0'
initial_pos = [0., 0.]
env_shape = (4, 4)
possible_moves = [np.array(move) for move in translate_move_2d_to_1d.keys()]
n_episodes = 500
# set up environment and recording
env = gym.make(env_name)
# env = wrappers.Monitor(env, '/tmp/frozen-lake-experiment-2')
agent = agent.Agent(initial_pos, env_shape, possible_moves)
# run episodes
episode = 0
episode_reward = []
for episode in range(n_episodes):
print('episode start.')
env.reset()
done = False
reward = 0.
agent.reset_position()
agent.reset_reward()
obs = 0
while not done:
env.render()
action = agent.step(reward)
obs, reward, done, _ = env.step(translate_move_2d_to_1d[tuple(action)])
# since the ice is slippery, we need to correct current
# position and last move from observation of environment
agent._pos = translate_pos_1d_to_2d[obs]
agent.step(reward - 0.1)
episode += 1
episode_reward.append(reward)
print('episode end. episode {episode}, reward {reward}'.format(episode=episode, reward=np.sum(episode_reward)))
ra = running_average(episode_reward)
print('solved after {n} episodes. maximal reward over 100 episodes: {max_reward}'.format(n=np.where(ra >= 0.78)[0][0], max_reward=np.max(ra)))
|
Add solution for OpenAI gym frozen lake environment
|
Add solution for OpenAI gym frozen lake environment
|
Python
|
mit
|
jakobj/python-tdl
|
Add solution for OpenAI gym frozen lake environment
|
# -*- coding: utf-8 -*-
import gym
from gym import wrappers
import numpy as np
import agent
# helper function and dictionaries
def running_average(a, size=100):
"""calculates the running average over array a"""
ra = []
ra.append(np.sum(a[:size]))
for i in range(size, len(a)):
ra.append(ra[-1] + a[i] - a[i - size])
return 1. / size * np.array(ra)
translate_move_2d_to_1d = {
tuple([-1., 0.]): 0,
tuple([0., 1.]): 1,
tuple([1., 0.]): 2,
tuple([0., -1.]): 3,
}
translate_pos_1d_to_2d = {
}
i = 0
for y in np.arange(0., 4., 1.):
for x in np.arange(0., 4., 1.):
translate_pos_1d_to_2d[i] = np.array([x, y])
i += 1
# define parameters
env_name = 'FrozenLake-v0'
initial_pos = [0., 0.]
env_shape = (4, 4)
possible_moves = [np.array(move) for move in translate_move_2d_to_1d.keys()]
n_episodes = 500
# set up environment and recording
env = gym.make(env_name)
# env = wrappers.Monitor(env, '/tmp/frozen-lake-experiment-2')
agent = agent.Agent(initial_pos, env_shape, possible_moves)
# run episodes
episode = 0
episode_reward = []
for episode in range(n_episodes):
print('episode start.')
env.reset()
done = False
reward = 0.
agent.reset_position()
agent.reset_reward()
obs = 0
while not done:
env.render()
action = agent.step(reward)
obs, reward, done, _ = env.step(translate_move_2d_to_1d[tuple(action)])
# since the ice is slippery, we need to correct current
# position and last move from observation of environment
agent._pos = translate_pos_1d_to_2d[obs]
agent.step(reward - 0.1)
episode += 1
episode_reward.append(reward)
print('episode end. episode {episode}, reward {reward}'.format(episode=episode, reward=np.sum(episode_reward)))
ra = running_average(episode_reward)
print('solved after {n} episodes. maximal reward over 100 episodes: {max_reward}'.format(n=np.where(ra >= 0.78)[0][0], max_reward=np.max(ra)))
|
<commit_before><commit_msg>Add solution for OpenAI gym frozen lake environment<commit_after>
|
# -*- coding: utf-8 -*-
import gym
from gym import wrappers
import numpy as np
import agent
# helper function and dictionaries
def running_average(a, size=100):
"""calculates the running average over array a"""
ra = []
ra.append(np.sum(a[:size]))
for i in range(size, len(a)):
ra.append(ra[-1] + a[i] - a[i - size])
return 1. / size * np.array(ra)
translate_move_2d_to_1d = {
tuple([-1., 0.]): 0,
tuple([0., 1.]): 1,
tuple([1., 0.]): 2,
tuple([0., -1.]): 3,
}
translate_pos_1d_to_2d = {
}
i = 0
for y in np.arange(0., 4., 1.):
for x in np.arange(0., 4., 1.):
translate_pos_1d_to_2d[i] = np.array([x, y])
i += 1
# define parameters
env_name = 'FrozenLake-v0'
initial_pos = [0., 0.]
env_shape = (4, 4)
possible_moves = [np.array(move) for move in translate_move_2d_to_1d.keys()]
n_episodes = 500
# set up environment and recording
env = gym.make(env_name)
# env = wrappers.Monitor(env, '/tmp/frozen-lake-experiment-2')
agent = agent.Agent(initial_pos, env_shape, possible_moves)
# run episodes
episode = 0
episode_reward = []
for episode in range(n_episodes):
print('episode start.')
env.reset()
done = False
reward = 0.
agent.reset_position()
agent.reset_reward()
obs = 0
while not done:
env.render()
action = agent.step(reward)
obs, reward, done, _ = env.step(translate_move_2d_to_1d[tuple(action)])
# since the ice is slippery, we need to correct current
# position and last move from observation of environment
agent._pos = translate_pos_1d_to_2d[obs]
agent.step(reward - 0.1)
episode += 1
episode_reward.append(reward)
print('episode end. episode {episode}, reward {reward}'.format(episode=episode, reward=np.sum(episode_reward)))
ra = running_average(episode_reward)
print('solved after {n} episodes. maximal reward over 100 episodes: {max_reward}'.format(n=np.where(ra >= 0.78)[0][0], max_reward=np.max(ra)))
|
Add solution for OpenAI gym frozen lake environment# -*- coding: utf-8 -*-
import gym
from gym import wrappers
import numpy as np
import agent
# helper function and dictionaries
def running_average(a, size=100):
"""calculates the running average over array a"""
ra = []
ra.append(np.sum(a[:size]))
for i in range(size, len(a)):
ra.append(ra[-1] + a[i] - a[i - size])
return 1. / size * np.array(ra)
translate_move_2d_to_1d = {
tuple([-1., 0.]): 0,
tuple([0., 1.]): 1,
tuple([1., 0.]): 2,
tuple([0., -1.]): 3,
}
translate_pos_1d_to_2d = {
}
i = 0
for y in np.arange(0., 4., 1.):
for x in np.arange(0., 4., 1.):
translate_pos_1d_to_2d[i] = np.array([x, y])
i += 1
# define parameters
env_name = 'FrozenLake-v0'
initial_pos = [0., 0.]
env_shape = (4, 4)
possible_moves = [np.array(move) for move in translate_move_2d_to_1d.keys()]
n_episodes = 500
# set up environment and recording
env = gym.make(env_name)
# env = wrappers.Monitor(env, '/tmp/frozen-lake-experiment-2')
agent = agent.Agent(initial_pos, env_shape, possible_moves)
# run episodes
episode = 0
episode_reward = []
for episode in range(n_episodes):
print('episode start.')
env.reset()
done = False
reward = 0.
agent.reset_position()
agent.reset_reward()
obs = 0
while not done:
env.render()
action = agent.step(reward)
obs, reward, done, _ = env.step(translate_move_2d_to_1d[tuple(action)])
# since the ice is slippery, we need to correct current
# position and last move from observation of environment
agent._pos = translate_pos_1d_to_2d[obs]
agent.step(reward - 0.1)
episode += 1
episode_reward.append(reward)
print('episode end. episode {episode}, reward {reward}'.format(episode=episode, reward=np.sum(episode_reward)))
ra = running_average(episode_reward)
print('solved after {n} episodes. maximal reward over 100 episodes: {max_reward}'.format(n=np.where(ra >= 0.78)[0][0], max_reward=np.max(ra)))
|
<commit_before><commit_msg>Add solution for OpenAI gym frozen lake environment<commit_after># -*- coding: utf-8 -*-
import gym
from gym import wrappers
import numpy as np
import agent
# helper function and dictionaries
def running_average(a, size=100):
"""calculates the running average over array a"""
ra = []
ra.append(np.sum(a[:size]))
for i in range(size, len(a)):
ra.append(ra[-1] + a[i] - a[i - size])
return 1. / size * np.array(ra)
translate_move_2d_to_1d = {
tuple([-1., 0.]): 0,
tuple([0., 1.]): 1,
tuple([1., 0.]): 2,
tuple([0., -1.]): 3,
}
translate_pos_1d_to_2d = {
}
i = 0
for y in np.arange(0., 4., 1.):
for x in np.arange(0., 4., 1.):
translate_pos_1d_to_2d[i] = np.array([x, y])
i += 1
# define parameters
env_name = 'FrozenLake-v0'
initial_pos = [0., 0.]
env_shape = (4, 4)
possible_moves = [np.array(move) for move in translate_move_2d_to_1d.keys()]
n_episodes = 500
# set up environment and recording
env = gym.make(env_name)
# env = wrappers.Monitor(env, '/tmp/frozen-lake-experiment-2')
agent = agent.Agent(initial_pos, env_shape, possible_moves)
# run episodes
episode = 0
episode_reward = []
for episode in range(n_episodes):
print('episode start.')
env.reset()
done = False
reward = 0.
agent.reset_position()
agent.reset_reward()
obs = 0
while not done:
env.render()
action = agent.step(reward)
obs, reward, done, _ = env.step(translate_move_2d_to_1d[tuple(action)])
# since the ice is slippery, we need to correct current
# position and last move from observation of environment
agent._pos = translate_pos_1d_to_2d[obs]
agent.step(reward - 0.1)
episode += 1
episode_reward.append(reward)
print('episode end. episode {episode}, reward {reward}'.format(episode=episode, reward=np.sum(episode_reward)))
ra = running_average(episode_reward)
print('solved after {n} episodes. maximal reward over 100 episodes: {max_reward}'.format(n=np.where(ra >= 0.78)[0][0], max_reward=np.max(ra)))
|
|
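The running_average helper above replaces a fresh per-window sum with an incremental update, turning an O(n * size) computation into O(n). A quick standalone check against the naive version, with the window shrunk to 3 for readability:

import numpy as np

def running_average(a, size=100):
    ra = [np.sum(a[:size])]
    for i in range(size, len(a)):
        ra.append(ra[-1] + a[i] - a[i - size])   # slide the window: add newest, drop oldest
    return 1. / size * np.array(ra)

rewards = np.array([0., 1., 1., 0., 1., 1., 1.])
fast = running_average(rewards, size=3)
naive = np.array([rewards[i - 2:i + 1].mean() for i in range(2, len(rewards))])
assert np.allclose(fast, naive)
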
00ae10769d95445b80be0e8d129fbc76b63aca5a
|
flexget/utils/soup.py
|
flexget/utils/soup.py
|
import html5lib
from html5lib import treebuilders
from cStringIO import StringIO
def get_soup(obj):
if isinstance(obj, basestring):
obj = StringIO(obj)
parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder('beautifulsoup'))
return parser.parse(obj)
|
import html5lib
from html5lib import treebuilders
from cStringIO import StringIO
# Hack, hide DataLossWarnings
# Based on html5lib code namespaceHTMLElements=False should do it, but nope ...
import warnings
from html5lib.constants import DataLossWarning
warnings.simplefilter('ignore', DataLossWarning)
def get_soup(obj):
if isinstance(obj, basestring):
obj = StringIO(obj)
parser = html5lib.HTMLParser(namespaceHTMLElements=False, tree=treebuilders.getTreeBuilder('beautifulsoup'))
return parser.parse(obj)
|
Hide DataLossWarnings that appeared with html5lib 0.90 or so.
|
Hide DataLossWarnings that appeared with html5lib 0.90 or so.
git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@1124 3942dd89-8c5d-46d7-aeed-044bccf3e60c
|
Python
|
mit
|
qk4l/Flexget,sean797/Flexget,ZefQ/Flexget,qk4l/Flexget,tobinjt/Flexget,grrr2/Flexget,jacobmetrick/Flexget,oxc/Flexget,camon/Flexget,crawln45/Flexget,xfouloux/Flexget,thalamus/Flexget,JorisDeRieck/Flexget,vfrc2/Flexget,patsissons/Flexget,cvium/Flexget,ianstalk/Flexget,tvcsantos/Flexget,jawilson/Flexget,OmgOhnoes/Flexget,v17al/Flexget,tvcsantos/Flexget,thalamus/Flexget,xfouloux/Flexget,cvium/Flexget,crawln45/Flexget,tobinjt/Flexget,vfrc2/Flexget,gazpachoking/Flexget,Pretagonist/Flexget,ibrahimkarahan/Flexget,Pretagonist/Flexget,dsemi/Flexget,Danfocus/Flexget,tsnoam/Flexget,lildadou/Flexget,Flexget/Flexget,Danfocus/Flexget,tobinjt/Flexget,ratoaq2/Flexget,camon/Flexget,tobinjt/Flexget,Danfocus/Flexget,OmgOhnoes/Flexget,poulpito/Flexget,v17al/Flexget,lildadou/Flexget,tarzasai/Flexget,offbyone/Flexget,jawilson/Flexget,Pretagonist/Flexget,ianstalk/Flexget,cvium/Flexget,v17al/Flexget,qvazzler/Flexget,X-dark/Flexget,ianstalk/Flexget,vfrc2/Flexget,malkavi/Flexget,sean797/Flexget,spencerjanssen/Flexget,tsnoam/Flexget,JorisDeRieck/Flexget,qvazzler/Flexget,LynxyssCZ/Flexget,antivirtel/Flexget,voriux/Flexget,ratoaq2/Flexget,lildadou/Flexget,drwyrm/Flexget,ZefQ/Flexget,ibrahimkarahan/Flexget,OmgOhnoes/Flexget,spencerjanssen/Flexget,JorisDeRieck/Flexget,antivirtel/Flexget,jacobmetrick/Flexget,drwyrm/Flexget,asm0dey/Flexget,malkavi/Flexget,offbyone/Flexget,offbyone/Flexget,poulpito/Flexget,qk4l/Flexget,asm0dey/Flexget,Flexget/Flexget,X-dark/Flexget,xfouloux/Flexget,voriux/Flexget,tarzasai/Flexget,crawln45/Flexget,ZefQ/Flexget,Flexget/Flexget,Danfocus/Flexget,thalamus/Flexget,drwyrm/Flexget,qvazzler/Flexget,crawln45/Flexget,X-dark/Flexget,tsnoam/Flexget,sean797/Flexget,LynxyssCZ/Flexget,JorisDeRieck/Flexget,dsemi/Flexget,dsemi/Flexget,grrr2/Flexget,patsissons/Flexget,spencerjanssen/Flexget,jacobmetrick/Flexget,malkavi/Flexget,LynxyssCZ/Flexget,tarzasai/Flexget,ibrahimkarahan/Flexget,oxc/Flexget,ratoaq2/Flexget,jawilson/Flexget,poulpito/Flexget,gazpachoking/Flexget,malkavi/Flexget,antivirtel/Flexget,asm0dey/Flexget,Flexget/Flexget,oxc/Flexget,grrr2/Flexget,patsissons/Flexget,LynxyssCZ/Flexget,jawilson/Flexget
|
import html5lib
from html5lib import treebuilders
from cStringIO import StringIO
def get_soup(obj):
if isinstance(obj, basestring):
obj = StringIO(obj)
parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder('beautifulsoup'))
return parser.parse(obj)
Hide DataLossWarnings that appeared with html5lib 0.90 or so.
git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@1124 3942dd89-8c5d-46d7-aeed-044bccf3e60c
|
import html5lib
from html5lib import treebuilders
from cStringIO import StringIO
# Hack, hide DataLossWarnings
# Based on html5lib code namespaceHTMLElements=False should do it, but nope ...
import warnings
from html5lib.constants import DataLossWarning
warnings.simplefilter('ignore', DataLossWarning)
def get_soup(obj):
if isinstance(obj, basestring):
obj = StringIO(obj)
parser = html5lib.HTMLParser(namespaceHTMLElements=False, tree=treebuilders.getTreeBuilder('beautifulsoup'))
return parser.parse(obj)
|
<commit_before>import html5lib
from html5lib import treebuilders
from cStringIO import StringIO
def get_soup(obj):
if isinstance(obj, basestring):
obj = StringIO(obj)
parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder('beautifulsoup'))
return parser.parse(obj)
<commit_msg>Hide DataLossWarnings that appeared with html5lib 0.90 or so.
git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@1124 3942dd89-8c5d-46d7-aeed-044bccf3e60c<commit_after>
|
import html5lib
from html5lib import treebuilders
from cStringIO import StringIO
# Hack, hide DataLossWarnings
# Based on html5lib code namespaceHTMLElements=False should do it, but nope ...
import warnings
from html5lib.constants import DataLossWarning
warnings.simplefilter('ignore', DataLossWarning)
def get_soup(obj):
if isinstance(obj, basestring):
obj = StringIO(obj)
parser = html5lib.HTMLParser(namespaceHTMLElements=False, tree=treebuilders.getTreeBuilder('beautifulsoup'))
return parser.parse(obj)
|
import html5lib
from html5lib import treebuilders
from cStringIO import StringIO
def get_soup(obj):
if isinstance(obj, basestring):
obj = StringIO(obj)
parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder('beautifulsoup'))
return parser.parse(obj)
Hide DataLossWarnings that appeared with html5lib 0.90 or so.
git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@1124 3942dd89-8c5d-46d7-aeed-044bccf3e60cimport html5lib
from html5lib import treebuilders
from cStringIO import StringIO
# Hack, hide DataLossWarnings
# Based on html5lib code namespaceHTMLElements=False should do it, but nope ...
import warnings
from html5lib.constants import DataLossWarning
warnings.simplefilter('ignore', DataLossWarning)
def get_soup(obj):
if isinstance(obj, basestring):
obj = StringIO(obj)
parser = html5lib.HTMLParser(namespaceHTMLElements=False, tree=treebuilders.getTreeBuilder('beautifulsoup'))
return parser.parse(obj)
|
<commit_before>import html5lib
from html5lib import treebuilders
from cStringIO import StringIO
def get_soup(obj):
if isinstance(obj, basestring):
obj = StringIO(obj)
parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder('beautifulsoup'))
return parser.parse(obj)
<commit_msg>Hide DataLossWarnings that appeared with html5lib 0.90 or so.
git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@1124 3942dd89-8c5d-46d7-aeed-044bccf3e60c<commit_after>import html5lib
from html5lib import treebuilders
from cStringIO import StringIO
# Hack, hide DataLossWarnings
# Based on html5lib code namespaceHTMLElements=False should do it, but nope ...
import warnings
from html5lib.constants import DataLossWarning
warnings.simplefilter('ignore', DataLossWarning)
def get_soup(obj):
if isinstance(obj, basestring):
obj = StringIO(obj)
parser = html5lib.HTMLParser(namespaceHTMLElements=False, tree=treebuilders.getTreeBuilder('beautifulsoup'))
return parser.parse(obj)
|
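The module-level simplefilter call above silences DataLossWarning for the whole process, which is the simplest fix but also the broadest. If a narrower scope were ever preferred, the standard catch_warnings context manager can confine the suppression to a single parse; a sketch of that alternative, not what the commit does:

import warnings
from html5lib.constants import DataLossWarning

def parse_quietly(parser, markup):
    # Ignore DataLossWarning only while this parse runs; the previous
    # warning filter state is restored when the with-block exits.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', DataLossWarning)
        return parser.parse(markup)
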
0d94db97e795c572c723390ad2e4f978bb9d6296
|
nova/tests/virt_unittest.py
|
nova/tests/virt_unittest.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
|
Add new libvirt_type option "uml" for user-mode-linux.. This switches the libvirt URI to uml:///system and uses a different template for the libvirt xml.
|
Add new libvirt_type option "uml" for user-mode-linux.. This switches the libvirt URI to uml:///system and uses a different template for the libvirt xml.
|
Python
|
apache-2.0
|
n0ano/gantt,n0ano/gantt
|
Add new libvirt_type option "uml" for user-mode-linux.. This switches the libvirt URI to uml:///system and uses a different template for the libvirt xml.
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
|
<commit_before><commit_msg>Add new libvirt_type option "uml" for user-mode-linux.. This switches the libvirt URI to uml:///system and uses a different template for the libvirt xml.<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
|
Add new libvirt_type option "uml" for user-mode-linux.. This switches the libvirt URI to uml:///system and uses a different template for the libvirt xml.# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
|
<commit_before><commit_msg>Add new libvirt_type option "uml" for user-mode-linux.. This switches the libvirt URI to uml:///system and uses a different template for the libvirt xml.<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
|
|
cf9ce9293666201a4fabb7439a440c538ba77b47
|
oauth2/test/__init__.py
|
oauth2/test/__init__.py
|
import sys
# Enables unit tests to work under Python 2.6
# Code copied from https://github.com/facebook/tornado/blob/master/tornado/test/util.py
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
|
Handle import of unittest2 in python 2.6
|
Handle import of unittest2 in python 2.6
|
Python
|
mit
|
mygola/python-oauth2,wndhydrnt/python-oauth2,mygola/python-oauth2,mygola/python-oauth2,wndhydrnt/python-oauth2,gcd0318/python-oauth2,gcd0318/python-oauth2,CoolCloud/python-oauth2,candango/python-oauth2,candango/python-oauth2,CoolCloud/python-oauth2,candango/python-oauth2,CoolCloud/python-oauth2,wndhydrnt/python-oauth2,gcd0318/python-oauth2
|
Handle import of unittest2 in python 2.6
|
import sys
# Enables unit tests to work under Python 2.6
# Code copied from https://github.com/facebook/tornado/blob/master/tornado/test/util.py
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
|
<commit_before><commit_msg>Handle import of unittest2 in python 2.6<commit_after>
|
import sys
# Enables unit tests to work under Python 2.6
# Code copied from https://github.com/facebook/tornado/blob/master/tornado/test/util.py
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
|
Handle import of unittest2 in python 2.6import sys
# Enables unit tests to work under Python 2.6
# Code copied from https://github.com/facebook/tornado/blob/master/tornado/test/util.py
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
|
<commit_before><commit_msg>Handle import of unittest2 in python 2.6<commit_after>import sys
# Enables unit tests to work under Python 2.6
# Code copied from https://github.com/facebook/tornado/blob/master/tornado/test/util.py
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
|
|
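Test modules can then stay version-agnostic by importing the re-exported name instead of the stdlib module directly; a usage sketch assuming this package layout:

from oauth2.test import unittest   # resolves to unittest2 on Python 2.6

class FallbackDemo(unittest.TestCase):
    def test_skip(self):
        # skipTest exists in unittest2, so this also works on 2.6
        self.skipTest('demonstrating the version-agnostic import')
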
3badc580de36ff0285f065e6924887cab4e3a48b
|
DataTag/management/commands/addtags.py
|
DataTag/management/commands/addtags.py
|
# -*- coding: utf-8 -*-
# vim: set ts=
from __future__ import unicode_literals
from django.conf import settings
from django.core.management.base import BaseCommand
from optparse import make_option
import yaml
class Command(BaseCommand):
args = None
help = 'Collect the tags from the sub-directories'
option_list = BaseCommand.option_list + (
make_option('-p', '--pattern',
dest='pattern',
help='Pattern when matching the files',
default='*'),
)
def handle(self, *args, **kwargs):
pattern = kwargs['pattern']
new_tags = set(args)
# Load the tags from the configuration
local_tags = set()
pattern_in_local = False
try:
with open('.DataTag.yaml', 'r') as local_conf:
local_conf = yaml.load(local_conf)
if pattern in local_conf:
pattern_in_local = True
for tag in local_conf[pattern]['tags']:
local_tags.add(tag)
except IOError:
print 'no such file or directory'
pass
# Add the tags that are missing in the configuration
missing_tags = new_tags - local_tags
if missing_tags:
print("Adding missing tags")
print("===================")
if pattern_in_local:
print "TODO"
else:
with open('.DataTag.yaml', 'a+') as local_conf:
if '*' in pattern or ':' in pattern:
local_conf.write("'%s':\n tags:\n" % (pattern))
else:
local_conf.write("%s:\n tags:\n" % (pattern))
for tag in missing_tags:
print(" - %s" % (tag))
local_conf.write(" - %s\n" % (tag))
else:
print("No missing tags")
|
Add an helper to create the metadata for a dir
|
Add an helper to create the metadata for a dir
|
Python
|
agpl-3.0
|
ivoire/DataTag,ivoire/DataTag,ivoire/DataTag
|
Add an helper to create the metadata for a dir
|
# -*- coding: utf-8 -*-
# vim: set ts=
from __future__ import unicode_literals
from django.conf import settings
from django.core.management.base import BaseCommand
from optparse import make_option
import yaml
class Command(BaseCommand):
args = None
help = 'Collect the tags from the sub-directories'
option_list = BaseCommand.option_list + (
make_option('-p', '--pattern',
dest='pattern',
help='Pattern when matching the files',
default='*'),
)
def handle(self, *args, **kwargs):
pattern = kwargs['pattern']
new_tags = set(args)
# Load the tags from the configuration
local_tags = set()
pattern_in_local = False
try:
with open('.DataTag.yaml', 'r') as local_conf:
local_conf = yaml.load(local_conf)
if pattern in local_conf:
pattern_in_local = True
for tag in local_conf[pattern]['tags']:
local_tags.add(tag)
except IOError:
print 'no such file or directory'
pass
# Add the tags that are missing in the configuration
missing_tags = new_tags - local_tags
if missing_tags:
print("Adding missing tags")
print("===================")
if pattern_in_local:
print "TODO"
else:
with open('.DataTag.yaml', 'a+') as local_conf:
if '*' in pattern or ':' in pattern:
local_conf.write("'%s':\n tags:\n" % (pattern))
else:
local_conf.write("%s:\n tags:\n" % (pattern))
for tag in missing_tags:
print(" - %s" % (tag))
local_conf.write(" - %s\n" % (tag))
else:
print("No missing tags")
|
<commit_before><commit_msg>Add an helper to create the metadata for a dir<commit_after>
|
# -*- coding: utf-8 -*-
# vim: set ts=
from __future__ import unicode_literals
from django.conf import settings
from django.core.management.base import BaseCommand
from optparse import make_option
import yaml
class Command(BaseCommand):
args = None
help = 'Collect the tags from the sub-directories'
option_list = BaseCommand.option_list + (
make_option('-p', '--pattern',
dest='pattern',
help='Pattern when matching the files',
default='*'),
)
def handle(self, *args, **kwargs):
pattern = kwargs['pattern']
new_tags = set(args)
# Load the tags from the configuration
local_tags = set()
pattern_in_local = False
try:
with open('.DataTag.yaml', 'r') as local_conf:
local_conf = yaml.load(local_conf)
if pattern in local_conf:
pattern_in_local = True
for tag in local_conf[pattern]['tags']:
local_tags.add(tag)
except IOError:
print 'no such file or directory'
pass
# Add the tags that are missing in the configuration
missing_tags = new_tags - local_tags
if missing_tags:
print("Adding missing tags")
print("===================")
if pattern_in_local:
print "TODO"
else:
with open('.DataTag.yaml', 'a+') as local_conf:
if '*' in pattern or ':' in pattern:
local_conf.write("'%s':\n tags:\n" % (pattern))
else:
local_conf.write("%s:\n tags:\n" % (pattern))
for tag in missing_tags:
print(" - %s" % (tag))
local_conf.write(" - %s\n" % (tag))
else:
print("No missing tags")
|
Add an helper to create the metadata for a dir# -*- coding: utf-8 -*-
# vim: set ts=
from __future__ import unicode_literals
from django.conf import settings
from django.core.management.base import BaseCommand
from optparse import make_option
import yaml
class Command(BaseCommand):
args = None
help = 'Collect the tags from the sub-directories'
option_list = BaseCommand.option_list + (
make_option('-p', '--pattern',
dest='pattern',
help='Pattern when matching the files',
default='*'),
)
def handle(self, *args, **kwargs):
pattern = kwargs['pattern']
new_tags = set(args)
# Load the tags from the configuration
local_tags = set()
pattern_in_local = False
try:
with open('.DataTag.yaml', 'r') as local_conf:
local_conf = yaml.load(local_conf)
if pattern in local_conf:
pattern_in_local = True
for tag in local_conf[pattern]['tags']:
local_tags.add(tag)
except IOError:
print 'no such file or directory'
pass
# Add the tags that are missing in the configuration
missing_tags = new_tags - local_tags
if missing_tags:
print("Adding missing tags")
print("===================")
if pattern_in_local:
print "TODO"
else:
with open('.DataTag.yaml', 'a+') as local_conf:
if '*' in pattern or ':' in pattern:
local_conf.write("'%s':\n tags:\n" % (pattern))
else:
local_conf.write("%s:\n tags:\n" % (pattern))
for tag in missing_tags:
print(" - %s" % (tag))
local_conf.write(" - %s\n" % (tag))
else:
print("No missing tags")
|
<commit_before><commit_msg>Add an helper to create the metadata for a dir<commit_after># -*- coding: utf-8 -*-
# vim: set ts=
from __future__ import unicode_literals
from django.conf import settings
from django.core.management.base import BaseCommand
from optparse import make_option
import yaml
class Command(BaseCommand):
args = None
help = 'Collect the tags from the sub-directories'
option_list = BaseCommand.option_list + (
make_option('-p', '--pattern',
dest='pattern',
help='Pattern when matching the files',
default='*'),
)
def handle(self, *args, **kwargs):
pattern = kwargs['pattern']
new_tags = set(args)
# Load the tags from the configuration
local_tags = set()
pattern_in_local = False
try:
with open('.DataTag.yaml', 'r') as local_conf:
local_conf = yaml.load(local_conf)
if pattern in local_conf:
pattern_in_local = True
for tag in local_conf[pattern]['tags']:
local_tags.add(tag)
except IOError:
print 'no such file or directory'
pass
# Add the tags that are missing in the configuration
missing_tags = new_tags - local_tags
if missing_tags:
print("Adding missing tags")
print("===================")
if pattern_in_local:
print "TODO"
else:
with open('.DataTag.yaml', 'a+') as local_conf:
if '*' in pattern or ':' in pattern:
local_conf.write("'%s':\n tags:\n" % (pattern))
else:
local_conf.write("%s:\n tags:\n" % (pattern))
for tag in missing_tags:
print(" - %s" % (tag))
local_conf.write(" - %s\n" % (tag))
else:
print("No missing tags")
|
|
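A quick way to sanity-check the file the command above appends to is to parse it back with PyYAML; a sketch that uses safe_load, which suffices here because the file holds plain mappings and lists:

import yaml

with open('.DataTag.yaml') as conf:
    data = yaml.safe_load(conf)   # plain data only, no arbitrary object construction
for pattern in sorted(data):
    tags = data[pattern].get('tags', [])
    print('%s -> %s' % (pattern, ', '.join(tags)))
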
be8b2f0d82131f04bd4c3e4d8d4a017734e4efa6
|
python/use_decorator.py
|
python/use_decorator.py
|
#class to print out the x, y coordinate
class Coordinate(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "Coord: " + str(self.__dict__)
#function to add
def add(a, b):
return Coordinate(a.x + b.x, a.y + b.y)
#function to subtract
def sub(a, b):
return Coordinate(a.x - b.x, a.y - b.y)
one = Coordinate(100, 200)
two = Coordinate(300, 200)
def wrapper(func):
def checker(a, b): # 1
if a.x < 0 or a.y < 0:
a = Coordinate(a.x if a.x > 0 else 0, a.y if a.y > 0 else 0)
if b.x < 0 or b.y < 0:
b = Coordinate(b.x if b.x > 0 else 0, b.y if b.y > 0 else 0)
ret = func(a, b)
if ret.x < 0 or ret.y < 0:
ret = Coordinate(ret.x if ret.x > 0 else 0, ret.y if ret.y > 0 else 0)
return ret
return checker
#Here we got negative value from the sub function
print sub(one, two)
#But we need to check: if the result is negative, we want it to be 0 (non-negative)
#We add a wrapper on it, this is actually a decoration on the original function
add = wrapper(add)
sub = wrapper(sub)
print sub(one, two)
#now use the decorator operator
@wrapper
def sub1(a, b):
return Coordinate(a.x - b.x, a.y - b.y)
#we will get the same result as above
print sub1(one, two)
|
Add a script to show how to use decorator
|
Add a script to show how to use decorator
|
Python
|
bsd-3-clause
|
qingkaikong/useful_script,qingkaikong/useful_script,qingkaikong/useful_script,qingkaikong/useful_script,qingkaikong/useful_script
|
Add a script to show how to use decorator
|
#class to print out the x, y coordinate
class Coordinate(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "Coord: " + str(self.__dict__)
#function to add
def add(a, b):
return Coordinate(a.x + b.x, a.y + b.y)
#function to subtract
def sub(a, b):
return Coordinate(a.x - b.x, a.y - b.y)
one = Coordinate(100, 200)
two = Coordinate(300, 200)
def wrapper(func):
def checker(a, b): # 1
if a.x < 0 or a.y < 0:
a = Coordinate(a.x if a.x > 0 else 0, a.y if a.y > 0 else 0)
if b.x < 0 or b.y < 0:
b = Coordinate(b.x if b.x > 0 else 0, b.y if b.y > 0 else 0)
ret = func(a, b)
if ret.x < 0 or ret.y < 0:
ret = Coordinate(ret.x if ret.x > 0 else 0, ret.y if ret.y > 0 else 0)
return ret
return checker
#Here we got negative value from the sub function
print sub(one, two)
#But we need to check: if the result is negative, we want it to be 0 (non-negative)
#We add a wrapper on it, this is actually a decoration on the original function
add = wrapper(add)
sub = wrapper(sub)
print sub(one, two)
#now use the decorator operator
@wrapper
def sub1(a, b):
return Coordinate(a.x - b.x, a.y - b.y)
#we will get the same result as above
print sub1(one, two)
|
<commit_before><commit_msg>Add a script to show how to use decorator<commit_after>
|
#class to print out the x, y coordinate
class Coordinate(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "Coord: " + str(self.__dict__)
#function to add
def add(a, b):
return Coordinate(a.x + b.x, a.y + b.y)
#function to subtract
def sub(a, b):
return Coordinate(a.x - b.x, a.y - b.y)
one = Coordinate(100, 200)
two = Coordinate(300, 200)
def wrapper(func):
def checker(a, b): # 1
if a.x < 0 or a.y < 0:
a = Coordinate(a.x if a.x > 0 else 0, a.y if a.y > 0 else 0)
if b.x < 0 or b.y < 0:
b = Coordinate(b.x if b.x > 0 else 0, b.y if b.y > 0 else 0)
ret = func(a, b)
if ret.x < 0 or ret.y < 0:
ret = Coordinate(ret.x if ret.x > 0 else 0, ret.y if ret.y > 0 else 0)
return ret
return checker
#Here we got negative value from the sub function
print sub(one, two)
#But we need to check: if the result is negative, we want it to be 0 (non-negative)
#We add a wrapper on it, this is actually a decoration on the original function
add = wrapper(add)
sub = wrapper(sub)
print sub(one, two)
#now use the decorator operator
@wrapper
def sub1(a, b):
return Coordinate(a.x - b.x, a.y - b.y)
#we will get the same result as above
print sub1(one, two)
|
Add a script to show how to use decorator#class to print out the x, y coordinate
class Coordinate(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "Coord: " + str(self.__dict__)
#function to add
def add(a, b):
return Coordinate(a.x + b.x, a.y + b.y)
#function to subtract
def sub(a, b):
return Coordinate(a.x - b.x, a.y - b.y)
one = Coordinate(100, 200)
two = Coordinate(300, 200)
def wrapper(func):
def checker(a, b): # 1
if a.x < 0 or a.y < 0:
a = Coordinate(a.x if a.x > 0 else 0, a.y if a.y > 0 else 0)
if b.x < 0 or b.y < 0:
b = Coordinate(b.x if b.x > 0 else 0, b.y if b.y > 0 else 0)
ret = func(a, b)
if ret.x < 0 or ret.y < 0:
ret = Coordinate(ret.x if ret.x > 0 else 0, ret.y if ret.y > 0 else 0)
return ret
return checker
#Here we got negative value from the sub function
print sub(one, two)
#But we need to check: if the result is negative, we want it to be 0 (non-negative)
#We add a wrapper on it, this is actually a decoration on the original function
add = wrapper(add)
sub = wrapper(sub)
print sub(one, two)
#now use the decorator operator
@wrapper
def sub1(a, b):
return Coordinate(a.x - b.x, a.y - b.y)
#we will get the same result as above
print sub1(one, two)
|
<commit_before><commit_msg>Add a script to show how to use decorator<commit_after>#class to print out the x, y coordinate
class Coordinate(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "Coord: " + str(self.__dict__)
#function to add
def add(a, b):
return Coordinate(a.x + b.x, a.y + b.y)
#function to subtract
def sub(a, b):
return Coordinate(a.x - b.x, a.y - b.y)
one = Coordinate(100, 200)
two = Coordinate(300, 200)
def wrapper(func):
def checker(a, b): # 1
if a.x < 0 or a.y < 0:
a = Coordinate(a.x if a.x > 0 else 0, a.y if a.y > 0 else 0)
if b.x < 0 or b.y < 0:
b = Coordinate(b.x if b.x > 0 else 0, b.y if b.y > 0 else 0)
ret = func(a, b)
if ret.x < 0 or ret.y < 0:
ret = Coordinate(ret.x if ret.x > 0 else 0, ret.y if ret.y > 0 else 0)
return ret
return checker
#Here we got negative value from the sub function
print sub(one, two)
#But we need to check: if the result is negative, we want it to be 0 (non-negative)
#We add a wrapper on it, this is actually a decoration on the original function
add = wrapper(add)
sub = wrapper(sub)
print sub(one, two)
#now use the decorator operator
@wrapper
def sub1(a, b):
return Coordinate(a.x - b.x, a.y - b.y)
#we will get the same result as above
print sub1(one, two)
|
|
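The wrapper above is fixed to two-argument functions and replaces the wrapped function's name with 'checker'. A more general variant (an illustrative sketch, not part of the commit) clamps any number of Coordinate arguments and keeps the metadata via functools.wraps:

import functools

class Coordinate(object):   # same shape as the class in the record above
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def __repr__(self):
        return "Coord: " + str(self.__dict__)

def clamped(func):
    @functools.wraps(func)   # preserve __name__/__doc__ of the wrapped function
    def checker(*coords):
        coords = [Coordinate(max(c.x, 0), max(c.y, 0)) for c in coords]
        ret = func(*coords)
        return Coordinate(max(ret.x, 0), max(ret.y, 0))
    return checker

@clamped
def sub2(a, b):
    return Coordinate(a.x - b.x, a.y - b.y)

print(sub2(Coordinate(100, 200), Coordinate(300, 200)))   # both fields clamp to 0
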
127a9b4a71863844af4e8c573a962802d10a6403
|
tests/test_output.py
|
tests/test_output.py
|
#!/usr/bin/env python
"""
Unit tests for the output of see for various types of object.
"""
import itertools
try:
import unittest2 as unittest
except ImportError:
import unittest
import see
def union(*sets):
return set(itertools.chain(*sets))
SIGN_OPS = set(['+obj', '-obj'])
NUMBER_OPS = set('+ - * / // % **'.split())
NUMBER_ASSIGN_OPS = set()
BITWISE_OPS = set('<< >> & ^ | ~'.split())
BITWISE_ASSIGN_OPS = set(op + '=' for op in BITWISE_OPS)
COMPARE_OPS = set('< <= == != > >='.split())
MATRIX_OPS = set(['@'])
MATRIX_ASSIGN_OPS = set(['@='])
ALL_OPS = union(
SIGN_OPS,
NUMBER_OPS,
NUMBER_ASSIGN_OPS,
BITWISE_OPS,
BITWISE_ASSIGN_OPS,
COMPARE_OPS,
MATRIX_OPS,
MATRIX_ASSIGN_OPS,
)
class TestSeeOutput(unittest.TestCase):
def check_ops(self, obj_type, expected_ops, see_output):
for op in ALL_OPS:
if op in expected_ops:
self.assertTrue(
op in see_output,
'expected %s to support %s' % (obj_type, op))
else:
self.assertFalse(
op in see_output,
'expected %s not to support %s' % (obj_type, op))
def test_int(self):
obj = 1
lit_ops = union(
SIGN_OPS,
NUMBER_OPS,
BITWISE_OPS,
COMPARE_OPS,
)
obj_ops = union(
lit_ops,
)
lit_see = see.see(1)
obj_see = see.see(obj)
self.check_ops('int literal', lit_ops, lit_see)
self.check_ops('int object', obj_ops, obj_see)
def test_float(self):
obj = 1.0
lit_ops = union(
SIGN_OPS,
NUMBER_OPS,
COMPARE_OPS,
)
obj_ops = union(
lit_ops,
)
lit_see = see.see(1.0)
obj_see = see.see(obj)
self.check_ops('float literal', lit_ops, lit_see)
self.check_ops('float object', obj_ops, obj_see)
if __name__ == '__main__':
unittest.main()
|
Test some expected output of see() for int & float
|
Test some expected output of see() for int & float
|
Python
|
bsd-3-clause
|
araile/see
|
Test some expected output of see() for int & float
|
#!/usr/bin/env python
"""
Unit tests for the output of see for various types of object.
"""
import itertools
try:
import unittest2 as unittest
except ImportError:
import unittest
import see
def union(*sets):
return set(itertools.chain(*sets))
SIGN_OPS = set(['+obj', '-obj'])
NUMBER_OPS = set('+ - * / // % **'.split())
NUMBER_ASSIGN_OPS = set()
BITWISE_OPS = set('<< >> & ^ | ~'.split())
BITWISE_ASSIGN_OPS = set(op + '=' for op in BITWISE_OPS)
COMPARE_OPS = set('< <= == != > >='.split())
MATRIX_OPS = set(['@'])
MATRIX_ASSIGN_OPS = set(['@='])
ALL_OPS = union(
SIGN_OPS,
NUMBER_OPS,
NUMBER_ASSIGN_OPS,
BITWISE_OPS,
BITWISE_ASSIGN_OPS,
COMPARE_OPS,
MATRIX_OPS,
MATRIX_ASSIGN_OPS,
)
class TestSeeOutput(unittest.TestCase):
def check_ops(self, obj_type, expected_ops, see_output):
for op in ALL_OPS:
if op in expected_ops:
self.assertTrue(
op in see_output,
'expected %s to support %s' % (obj_type, op))
else:
self.assertFalse(
op in see_output,
'expected %s not to support %s' % (obj_type, op))
def test_int(self):
obj = 1
lit_ops = union(
SIGN_OPS,
NUMBER_OPS,
BITWISE_OPS,
COMPARE_OPS,
)
obj_ops = union(
lit_ops,
)
lit_see = see.see(1)
obj_see = see.see(obj)
self.check_ops('int literal', lit_ops, lit_see)
self.check_ops('int object', obj_ops, obj_see)
def test_float(self):
obj = 1.0
lit_ops = union(
SIGN_OPS,
NUMBER_OPS,
COMPARE_OPS,
)
obj_ops = union(
lit_ops,
)
lit_see = see.see(1.0)
obj_see = see.see(obj)
self.check_ops('float literal', lit_ops, lit_see)
self.check_ops('float object', obj_ops, obj_see)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test some expected output of see() for int & float<commit_after>
|
#!/usr/bin/env python
"""
Unit tests for the output of see for various types of object.
"""
import itertools
try:
import unittest2 as unittest
except ImportError:
import unittest
import see
def union(*sets):
return set(itertools.chain(*sets))
SIGN_OPS = set(['+obj', '-obj'])
NUMBER_OPS = set('+ - * / // % **'.split())
NUMBER_ASSIGN_OPS = set()
BITWISE_OPS = set('<< >> & ^ | ~'.split())
BITWISE_ASSIGN_OPS = set(op + '=' for op in BITWISE_OPS)
COMPARE_OPS = set('< <= == != > >='.split())
MATRIX_OPS = set(['@'])
MATRIX_ASSIGN_OPS = set(['@='])
ALL_OPS = union(
SIGN_OPS,
NUMBER_OPS,
NUMBER_ASSIGN_OPS,
BITWISE_OPS,
BITWISE_ASSIGN_OPS,
COMPARE_OPS,
MATRIX_OPS,
MATRIX_ASSIGN_OPS,
)
class TestSeeOutput(unittest.TestCase):
def check_ops(self, obj_type, expected_ops, see_output):
for op in ALL_OPS:
if op in expected_ops:
self.assertTrue(
op in see_output,
'expected %s to support %s' % (obj_type, op))
else:
self.assertFalse(
op in see_output,
'expected %s not to support %s' % (obj_type, op))
def test_int(self):
obj = 1
lit_ops = union(
SIGN_OPS,
NUMBER_OPS,
BITWISE_OPS,
COMPARE_OPS,
)
obj_ops = union(
lit_ops,
)
lit_see = see.see(1)
obj_see = see.see(obj)
self.check_ops('int literal', lit_ops, lit_see)
self.check_ops('int object', obj_ops, obj_see)
def test_float(self):
obj = 1.0
lit_ops = union(
SIGN_OPS,
NUMBER_OPS,
COMPARE_OPS,
)
obj_ops = union(
lit_ops,
)
lit_see = see.see(1.0)
obj_see = see.see(obj)
self.check_ops('float literal', lit_ops, lit_see)
self.check_ops('float object', obj_ops, obj_see)
if __name__ == '__main__':
unittest.main()
|
Test some expected output of see() for int & float#!/usr/bin/env python
"""
Unit tests for the output of see for various types of object.
"""
import itertools
try:
import unittest2 as unittest
except ImportError:
import unittest
import see
def union(*sets):
return set(itertools.chain(*sets))
SIGN_OPS = set(['+obj', '-obj'])
NUMBER_OPS = set('+ - * / // % **'.split())
NUMBER_ASSIGN_OPS = set()
BITWISE_OPS = set('<< >> & ^ | ~'.split())
BITWISE_ASSIGN_OPS = set(op + '=' for op in BITWISE_OPS)
COMPARE_OPS = set('< <= == != > >='.split())
MATRIX_OPS = set(['@'])
MATRIX_ASSIGN_OPS = set(['@='])
ALL_OPS = union(
SIGN_OPS,
NUMBER_OPS,
NUMBER_ASSIGN_OPS,
BITWISE_OPS,
BITWISE_ASSIGN_OPS,
COMPARE_OPS,
MATRIX_OPS,
MATRIX_ASSIGN_OPS,
)
class TestSeeOutput(unittest.TestCase):
def check_ops(self, obj_type, expected_ops, see_output):
for op in ALL_OPS:
if op in expected_ops:
self.assertTrue(
op in see_output,
'expected %s to support %s' % (obj_type, op))
else:
self.assertFalse(
op in see_output,
'expected %s not to support %s' % (obj_type, op))
def test_int(self):
obj = 1
lit_ops = union(
SIGN_OPS,
NUMBER_OPS,
BITWISE_OPS,
COMPARE_OPS,
)
obj_ops = union(
lit_ops,
)
lit_see = see.see(1)
obj_see = see.see(obj)
self.check_ops('int literal', lit_ops, lit_see)
self.check_ops('int object', obj_ops, obj_see)
def test_float(self):
obj = 1.0
lit_ops = union(
SIGN_OPS,
NUMBER_OPS,
COMPARE_OPS,
)
obj_ops = union(
lit_ops,
)
lit_see = see.see(1.0)
obj_see = see.see(obj)
self.check_ops('float literal', lit_ops, lit_see)
self.check_ops('float object', obj_ops, obj_see)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test some expected output of see() for int & float<commit_after>#!/usr/bin/env python
"""
Unit tests for the output of see for various types of object.
"""
import itertools
try:
import unittest2 as unittest
except ImportError:
import unittest
import see
def union(*sets):
return set(itertools.chain(*sets))
SIGN_OPS = set(['+obj', '-obj'])
NUMBER_OPS = set('+ - * / // % **'.split())
NUMBER_ASSIGN_OPS = set()
BITWISE_OPS = set('<< >> & ^ | ~'.split())
BITWISE_ASSIGN_OPS = set(op + '=' for op in BITWISE_OPS)
COMPARE_OPS = set('< <= == != > >='.split())
MATRIX_OPS = set(['@'])
MATRIX_ASSIGN_OPS = set(['@='])
ALL_OPS = union(
SIGN_OPS,
NUMBER_OPS,
NUMBER_ASSIGN_OPS,
BITWISE_OPS,
BITWISE_ASSIGN_OPS,
COMPARE_OPS,
MATRIX_OPS,
MATRIX_ASSIGN_OPS,
)
class TestSeeOutput(unittest.TestCase):
def check_ops(self, obj_type, expected_ops, see_output):
for op in ALL_OPS:
if op in expected_ops:
self.assertTrue(
op in see_output,
'expected %s to support %s' % (obj_type, op))
else:
self.assertFalse(
op in see_output,
'expected %s not to support %s' % (obj_type, op))
def test_int(self):
obj = 1
lit_ops = union(
SIGN_OPS,
NUMBER_OPS,
BITWISE_OPS,
COMPARE_OPS,
)
obj_ops = union(
lit_ops,
)
lit_see = see.see(1)
obj_see = see.see(obj)
self.check_ops('int literal', lit_ops, lit_see)
self.check_ops('int object', obj_ops, obj_see)
def test_float(self):
obj = 1.0
lit_ops = union(
SIGN_OPS,
NUMBER_OPS,
COMPARE_OPS,
)
obj_ops = union(
lit_ops,
)
lit_see = see.see(1.0)
obj_see = see.see(obj)
self.check_ops('float literal', lit_ops, lit_see)
self.check_ops('float object', obj_ops, obj_see)
if __name__ == '__main__':
unittest.main()
|
|
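The matrix operators in ALL_OPS only exist from Python 3.5 onward (PEP 465), so on older interpreters see() can never report them. A version guard the expectations could grow if that ever matters; an illustration, not code from the test file:

import sys

MATRIX_OPS = set(['@'])
MATRIX_ASSIGN_OPS = set(['@='])

def expected_matrix_ops():
    # int and float never support @, but the operator itself only
    # parses on interpreters that implement PEP 465 (3.5+).
    if sys.version_info >= (3, 5):
        return MATRIX_OPS | MATRIX_ASSIGN_OPS
    return set()
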
20d3f464a252df8226d58c45a86cc1a959814e59
|
django_mysql/utils.py
|
django_mysql/utils.py
|
# -*- coding:utf-8 -*-
from __future__ import division
class WeightedAverageRate(object):
# Sub: new
#
# Required Arguments:
# target_t - Target time for t in <update()>
#
# Optional Arguments:
# weight - Weight of previous n/t values (default 0.75).
#
# Returns:
# WeightedAvgRate
def __init__(self, target_t, weight=0.75):
self.target_t = target_t
self.avg_n = 0.0
self.avg_t = 0.0
self.weight = weight
def update(self, n, t):
# Update weighted average rate. Param n is generic; it's how many of
# whatever the caller is doing (rows, checksums, etc.). Param t is how
# long this n took, in seconds (hi-res or not).
#
# Parameters:
# n - Number of operations (rows, etc.)
# t - Amount of time in seconds that n took
#
# Returns:
# n adjusted to meet target_t based on weighted decaying avg rate
if self.avg_n and self.avg_t:
self.avg_n = (self.avg_n * self.weight) + n
self.avg_t = (self.avg_t * self.weight) + t
else:
self.avg_n = n
self.avg_t = t
avg_rate = self.avg_n / self.avg_t
new_n = int(avg_rate * self.target_t)
return new_n
# if ( $self->{avg_n} && $self->{avg_t} ) {
# $self->{avg_n} = ($self->{avg_n} * $self->{weight}) + $n;
# $self->{avg_t} = ($self->{avg_t} * $self->{weight}) + $t;
# $self->{avg_rate} = $self->{avg_n} / $self->{avg_t};
# PTDEBUG && _d('Weighted avg rate:', $self->{avg_rate}, 'n/s');
# }
# else {
# $self->{avg_n} = $n;
# $self->{avg_t} = $t;
# $self->{avg_rate} = $self->{avg_n} / $self->{avg_t};
# PTDEBUG && _d('Initial avg rate:', $self->{avg_rate}, 'n/s');
# }
# my $new_n = int($self->{avg_rate} * $self->{target_t});
# PTDEBUG && _d('Adjust n to', $new_n);
# return $new_n;
|
Move towards using the weighted average strategy of pt-online-schema-change
|
Move towards using the weighted average strategy of pt-online-schema-change
|
Python
|
mit
|
nickmeharry/django-mysql,graingert/django-mysql,arnau126/django-mysql,adamchainz/django-mysql,arnau126/django-mysql,graingert/django-mysql,nickmeharry/django-mysql
|
Move towards using the weighted average strategy of pt-online-schema-change
|
# -*- coding:utf-8 -*-
from __future__ import division
class WeightedAverageRate(object):
# Sub: new
#
# Required Arguments:
# target_t - Target time for t in <update()>
#
# Optional Arguments:
# weight - Weight of previous n/t values (default 0.75).
#
# Returns:
# WeightedAvgRate
def __init__(self, target_t, weight=0.75):
self.target_t = target_t
self.avg_n = 0.0
self.avg_t = 0.0
self.weight = weight
def update(self, n, t):
# Update weighted average rate. Param n is generic; it's how many of
# whatever the caller is doing (rows, checksums, etc.). Param t is how
# long this n took, in seconds (hi-res or not).
#
# Parameters:
# n - Number of operations (rows, etc.)
# t - Amount of time in seconds that n took
#
# Returns:
# n adjusted to meet target_t based on weighted decaying avg rate
if self.avg_n and self.avg_t:
self.avg_n = (self.avg_n * self.weight) + n
self.avg_t = (self.avg_t * self.weight) + t
else:
self.avg_n = n
self.avg_t = t
avg_rate = self.avg_n / self.avg_t
new_n = int(avg_rate * self.target_t)
return new_n
# if ( $self->{avg_n} && $self->{avg_t} ) {
# $self->{avg_n} = ($self->{avg_n} * $self->{weight}) + $n;
# $self->{avg_t} = ($self->{avg_t} * $self->{weight}) + $t;
# $self->{avg_rate} = $self->{avg_n} / $self->{avg_t};
# PTDEBUG && _d('Weighted avg rate:', $self->{avg_rate}, 'n/s');
# }
# else {
# $self->{avg_n} = $n;
# $self->{avg_t} = $t;
# $self->{avg_rate} = $self->{avg_n} / $self->{avg_t};
# PTDEBUG && _d('Initial avg rate:', $self->{avg_rate}, 'n/s');
# }
# my $new_n = int($self->{avg_rate} * $self->{target_t});
# PTDEBUG && _d('Adjust n to', $new_n);
# return $new_n;
|
<commit_before><commit_msg>Move towards using the weighted average strategy of pt-online-schema-change<commit_after>
|
# -*- coding:utf-8 -*-
from __future__ import division
class WeightedAverageRate(object):
# Sub: new
#
# Required Arguments:
# target_t - Target time for t in <update()>
#
# Optional Arguments:
# weight - Weight of previous n/t values (default 0.75).
#
# Returns:
    #   WeightedAverageRate
def __init__(self, target_t, weight=0.75):
self.target_t = target_t
self.avg_n = 0.0
self.avg_t = 0.0
self.weight = weight
def update(self, n, t):
# Update weighted average rate. Param n is generic; it's how many of
        # whatever the caller is doing (rows, checksums, etc.). Param t is how
# long this n took, in seconds (hi-res or not).
#
# Parameters:
# n - Number of operations (rows, etc.)
# t - Amount of time in seconds that n took
#
# Returns:
        #   n adjusted to meet target_t based on weighted decaying avg rate
if self.avg_n and self.avg_t:
self.avg_n = (self.avg_n * self.weight) + n
self.avg_t = (self.avg_t * self.weight) + t
else:
self.avg_n = n
self.avg_t = t
avg_rate = self.avg_n / self.avg_t
new_n = int(avg_rate * self.target_t)
return new_n
# if ( $self->{avg_n} && $self->{avg_t} ) {
# $self->{avg_n} = ($self->{avg_n} * $self->{weight}) + $n;
# $self->{avg_t} = ($self->{avg_t} * $self->{weight}) + $t;
# $self->{avg_rate} = $self->{avg_n} / $self->{avg_t};
# PTDEBUG && _d('Weighted avg rate:', $self->{avg_rate}, 'n/s');
# }
# else {
# $self->{avg_n} = $n;
# $self->{avg_t} = $t;
# $self->{avg_rate} = $self->{avg_n} / $self->{avg_t};
# PTDEBUG && _d('Initial avg rate:', $self->{avg_rate}, 'n/s');
# }
# my $new_n = int($self->{avg_rate} * $self->{target_t});
# PTDEBUG && _d('Adjust n to', $new_n);
# return $new_n;
|
Move towards using the weighted average strategy of pt-online-schema-change# -*- coding:utf-8 -*-
from __future__ import division
class WeightedAverageRate(object):
# Sub: new
#
# Required Arguments:
# target_t - Target time for t in <update()>
#
# Optional Arguments:
# weight - Weight of previous n/t values (default 0.75).
#
# Returns:
    #   WeightedAverageRate
def __init__(self, target_t, weight=0.75):
self.target_t = target_t
self.avg_n = 0.0
self.avg_t = 0.0
self.weight = weight
def update(self, n, t):
# Update weighted average rate. Param n is generic; it's how many of
        # whatever the caller is doing (rows, checksums, etc.). Param t is how
# long this n took, in seconds (hi-res or not).
#
# Parameters:
# n - Number of operations (rows, etc.)
# t - Amount of time in seconds that n took
#
# Returns:
        #   n adjusted to meet target_t based on weighted decaying avg rate
if self.avg_n and self.avg_t:
self.avg_n = (self.avg_n * self.weight) + n
self.avg_t = (self.avg_t * self.weight) + t
else:
self.avg_n = n
self.avg_t = t
avg_rate = self.avg_n / self.avg_t
new_n = int(avg_rate * self.target_t)
return new_n
# if ( $self->{avg_n} && $self->{avg_t} ) {
# $self->{avg_n} = ($self->{avg_n} * $self->{weight}) + $n;
# $self->{avg_t} = ($self->{avg_t} * $self->{weight}) + $t;
# $self->{avg_rate} = $self->{avg_n} / $self->{avg_t};
# PTDEBUG && _d('Weighted avg rate:', $self->{avg_rate}, 'n/s');
# }
# else {
# $self->{avg_n} = $n;
# $self->{avg_t} = $t;
# $self->{avg_rate} = $self->{avg_n} / $self->{avg_t};
# PTDEBUG && _d('Initial avg rate:', $self->{avg_rate}, 'n/s');
# }
# my $new_n = int($self->{avg_rate} * $self->{target_t});
# PTDEBUG && _d('Adjust n to', $new_n);
# return $new_n;
|
<commit_before><commit_msg>Move towards using the weighted average strategy of pt-online-schema-change<commit_after># -*- coding:utf-8 -*-
from __future__ import division
class WeightedAverageRate(object):
# Sub: new
#
# Required Arguments:
# target_t - Target time for t in <update()>
#
# Optional Arguments:
# weight - Weight of previous n/t values (default 0.75).
#
# Returns:
    #   WeightedAverageRate
def __init__(self, target_t, weight=0.75):
self.target_t = target_t
self.avg_n = 0.0
self.avg_t = 0.0
self.weight = weight
def update(self, n, t):
# Update weighted average rate. Param n is generic; it's how many of
        # whatever the caller is doing (rows, checksums, etc.). Param t is how
# long this n took, in seconds (hi-res or not).
#
# Parameters:
# n - Number of operations (rows, etc.)
# t - Amount of time in seconds that n took
#
# Returns:
        #   n adjusted to meet target_t based on weighted decaying avg rate
if self.avg_n and self.avg_t:
self.avg_n = (self.avg_n * self.weight) + n
self.avg_t = (self.avg_t * self.weight) + t
else:
self.avg_n = n
self.avg_t = t
avg_rate = self.avg_n / self.avg_t
new_n = int(avg_rate * self.target_t)
return new_n
# if ( $self->{avg_n} && $self->{avg_t} ) {
# $self->{avg_n} = ($self->{avg_n} * $self->{weight}) + $n;
# $self->{avg_t} = ($self->{avg_t} * $self->{weight}) + $t;
# $self->{avg_rate} = $self->{avg_n} / $self->{avg_t};
# PTDEBUG && _d('Weighted avg rate:', $self->{avg_rate}, 'n/s');
# }
# else {
# $self->{avg_n} = $n;
# $self->{avg_t} = $t;
# $self->{avg_rate} = $self->{avg_n} / $self->{avg_t};
# PTDEBUG && _d('Initial avg rate:', $self->{avg_rate}, 'n/s');
# }
# my $new_n = int($self->{avg_rate} * $self->{target_t});
# PTDEBUG && _d('Adjust n to', $new_n);
# return $new_n;
|
|
ef5d6cd3989de2f97ba20799ce5f34b87b320128
|
paper_to_git/commands/add_command.py
|
paper_to_git/commands/add_command.py
|
"""
Add a new sync object to sync between a git repo and Dropbox Paper.
"""
import os
from paper_to_git.commands.base import BaseCommand
from paper_to_git.models import Sync, PaperFolder
__all__ = [
'AddCommand',
]
class AddCommand(BaseCommand):
"""Add a new sync object between paper and git.
"""
name = 'add'
def add(self, parser, command_parser):
self.parser = parser
command_parser.add_argument('--repo',
help='The path to the git repo.')
command_parser.add_argument('--path',
help='The path inside the repo')
command_parser.add_argument('--folder',
help='The folder name in the Paper')
    def process(self, args):
        repo = os.path.abspath(args.repo)
        path = args.path
        # Find the Paper folder by name; fail clearly if there is none,
        # instead of hitting an unbound variable below.
        paper_folder = None
        for folder in PaperFolder.select():
            if folder.name.lower() == args.folder.lower():
                paper_folder = folder
                break
        if paper_folder is None:
            self.parser.error('No Paper folder named {!r} was found'.format(
                args.folder))
        Sync.create(repo=repo, path_in_repo=path, folder=paper_folder)
|
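A hedged sketch of how the command might be registered and invoked; the
`paper-to-git` program name and the subparser wiring are assumptions inferred
from the `add(parser, command_parser)` signature, not confirmed by the commit.

import argparse

from paper_to_git.commands.add_command import AddCommand

parser = argparse.ArgumentParser(prog='paper-to-git')
subparsers = parser.add_subparsers(dest='command')
cmd = AddCommand()
cmd.add(parser, subparsers.add_parser(cmd.name))

# e.g.: paper-to-git add --repo ~/notes --path docs --folder "Team Notes"
args = parser.parse_args(
    ['add', '--repo', '.', '--path', 'docs', '--folder', 'Team Notes'])
cmd.process(args)  # creates the Sync row (requires the models' database)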
Create add command to add new sync.
|
Create add command to add new sync.
|
Python
|
apache-2.0
|
maxking/paper-to-git,maxking/paper-to-git
|
Create add command to add new sync.
|
"""
Add a new sync object to sync between a git repo and Dropbox Paper.
"""
import os
from paper_to_git.commands.base import BaseCommand
from paper_to_git.models import Sync, PaperFolder
__all__ = [
'AddCommand',
]
class AddCommand(BaseCommand):
"""Add a new sync object between paper and git.
"""
name = 'add'
def add(self, parser, command_parser):
self.parser = parser
command_parser.add_argument('--repo',
help='The path to the git repo.')
command_parser.add_argument('--path',
help='The path inside the repo')
command_parser.add_argument('--folder',
help='The folder name in the Paper')
    def process(self, args):
        repo = os.path.abspath(args.repo)
        path = args.path
        # Find the Paper folder by name; fail clearly if there is none,
        # instead of hitting an unbound variable below.
        paper_folder = None
        for folder in PaperFolder.select():
            if folder.name.lower() == args.folder.lower():
                paper_folder = folder
                break
        if paper_folder is None:
            self.parser.error('No Paper folder named {!r} was found'.format(
                args.folder))
        Sync.create(repo=repo, path_in_repo=path, folder=paper_folder)
|
<commit_before><commit_msg>Create add command to add new sync.<commit_after>
|
"""
Add a new sync object to sync between a git repo and Dropbox Paper.
"""
import os
from paper_to_git.commands.base import BaseCommand
from paper_to_git.models import Sync, PaperFolder
__all__ = [
'AddCommand',
]
class AddCommand(BaseCommand):
"""Add a new sync object between paper and git.
"""
name = 'add'
def add(self, parser, command_parser):
self.parser = parser
command_parser.add_argument('--repo',
help='The path to the git repo.')
command_parser.add_argument('--path',
help='The path inside the repo')
command_parser.add_argument('--folder',
help='The folder name in the Paper')
    def process(self, args):
        repo = os.path.abspath(args.repo)
        path = args.path
        # Find the Paper folder by name; fail clearly if there is none,
        # instead of hitting an unbound variable below.
        paper_folder = None
        for folder in PaperFolder.select():
            if folder.name.lower() == args.folder.lower():
                paper_folder = folder
                break
        if paper_folder is None:
            self.parser.error('No Paper folder named {!r} was found'.format(
                args.folder))
        Sync.create(repo=repo, path_in_repo=path, folder=paper_folder)
|
Create add command to add new sync."""
Add a new sync object to sync between a git repo and Dropbox Paper.
"""
import os
from paper_to_git.commands.base import BaseCommand
from paper_to_git.models import Sync, PaperFolder
__all__ = [
'AddCommand',
]
class AddCommand(BaseCommand):
"""Add a new sync object between paper and git.
"""
name = 'add'
def add(self, parser, command_parser):
self.parser = parser
command_parser.add_argument('--repo',
help='The path to the git repo.')
command_parser.add_argument('--path',
help='The path inside the repo')
command_parser.add_argument('--folder',
help='The folder name in the Paper')
    def process(self, args):
        repo = os.path.abspath(args.repo)
        path = args.path
        # Find the Paper folder by name; fail clearly if there is none,
        # instead of hitting an unbound variable below.
        paper_folder = None
        for folder in PaperFolder.select():
            if folder.name.lower() == args.folder.lower():
                paper_folder = folder
                break
        if paper_folder is None:
            self.parser.error('No Paper folder named {!r} was found'.format(
                args.folder))
        Sync.create(repo=repo, path_in_repo=path, folder=paper_folder)
|
<commit_before><commit_msg>Create add command to add new sync.<commit_after>"""
Add a new sync object to sync between a git repo and Dropbox Paper.
"""
import os
from paper_to_git.commands.base import BaseCommand
from paper_to_git.models import Sync, PaperFolder
__all__ = [
'AddCommand',
]
class AddCommand(BaseCommand):
"""Add a new sync object between paper and git.
"""
name = 'add'
def add(self, parser, command_parser):
self.parser = parser
command_parser.add_argument('--repo',
help='The path to the git repo.')
command_parser.add_argument('--path',
help='The path inside the repo')
command_parser.add_argument('--folder',
help='The folder name in the Paper')
    def process(self, args):
        repo = os.path.abspath(args.repo)
        path = args.path
        # Find the Paper folder by name; fail clearly if there is none,
        # instead of hitting an unbound variable below.
        paper_folder = None
        for folder in PaperFolder.select():
            if folder.name.lower() == args.folder.lower():
                paper_folder = folder
                break
        if paper_folder is None:
            self.parser.error('No Paper folder named {!r} was found'.format(
                args.folder))
        Sync.create(repo=repo, path_in_repo=path, folder=paper_folder)
|
|
baaa94fcbb34ba8b1fe47d95537820e7ddfb0d7e
|
scikits/learn/externals/setup.py
|
scikits/learn/externals/setup.py
|
# -*- coding: utf-8 -*-
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('externals',parent_package,top_path)
config.add_subpackage('joblib')
config.add_subpackage('joblib/test')
return config
|
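For orientation: in the numpy.distutils scheme, add_subpackage() recurses into
each subpackage's own setup.py, so the file above only takes effect once a
parent configuration names it. A minimal sketch of that parent hook follows;
the 'learn' package name is assumed from the scikits.learn layout of the era.

# Hypothetical scikits/learn/setup.py fragment; 'externals' here resolves to
# the configuration() defined in the file above, which in turn adds joblib.
def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('learn', parent_package, top_path)
    config.add_subpackage('externals')
    return config

if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())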
Make sure that joblib does get installed.
|
BUG: Make sure that joblib does get installed.
|
Python
|
bsd-3-clause
|
carrillo/scikit-learn,jorik041/scikit-learn,vibhorag/scikit-learn,nrhine1/scikit-learn,jorge2703/scikit-learn,rvraghav93/scikit-learn,ZENGXH/scikit-learn,stylianos-kampakis/scikit-learn,chrsrds/scikit-learn,tomlof/scikit-learn,kmike/scikit-learn,lin-credible/scikit-learn,cybernet14/scikit-learn,trankmichael/scikit-learn,joernhees/scikit-learn,thilbern/scikit-learn,OshynSong/scikit-learn,samuel1208/scikit-learn,chrisburr/scikit-learn,AIML/scikit-learn,kevin-intel/scikit-learn,mjgrav2001/scikit-learn,lesteve/scikit-learn,manashmndl/scikit-learn,jlegendary/scikit-learn,UNR-AERIAL/scikit-learn,loli/semisupervisedforests,huobaowangxi/scikit-learn,ephes/scikit-learn,xzh86/scikit-learn,poryfly/scikit-learn,Obus/scikit-learn,jseabold/scikit-learn,spallavolu/scikit-learn,Srisai85/scikit-learn,pv/scikit-learn,bhargav/scikit-learn,MatthieuBizien/scikit-learn,hsuantien/scikit-learn,equialgo/scikit-learn,clemkoa/scikit-learn,ashhher3/scikit-learn,florian-f/sklearn,joernhees/scikit-learn,Aasmi/scikit-learn,amueller/scikit-learn,macks22/scikit-learn,cl4rke/scikit-learn,andrewnc/scikit-learn,loli/sklearn-ensembletrees,shikhardb/scikit-learn,thilbern/scikit-learn,RPGOne/scikit-learn,Clyde-fare/scikit-learn,ky822/scikit-learn,rahuldhote/scikit-learn,kmike/scikit-learn,vinayak-mehta/scikit-learn,anurag313/scikit-learn,jpautom/scikit-learn,glemaitre/scikit-learn,eg-zhang/scikit-learn,mfjb/scikit-learn,gotomypc/scikit-learn,vortex-ape/scikit-learn,mwv/scikit-learn,rrohan/scikit-learn,clemkoa/scikit-learn,cwu2011/scikit-learn,pompiduskus/scikit-learn,deepesch/scikit-learn,mrshu/scikit-learn,Djabbz/scikit-learn,fabioticconi/scikit-learn,untom/scikit-learn,herilalaina/scikit-learn,treycausey/scikit-learn,MartinSavc/scikit-learn,NelisVerhoef/scikit-learn,abimannans/scikit-learn,mxjl620/scikit-learn,BiaDarkia/scikit-learn,xuewei4d/scikit-learn,phdowling/scikit-learn,ssaeger/scikit-learn,nrhine1/scikit-learn,lucidfrontier45/scikit-learn,ClimbsRocks/scikit-learn,depet/scikit-learn,tmhm/scikit-learn,thilbern/scikit-learn,etkirsch/scikit-learn,saiwing-yeung/scikit-learn,rahuldhote/scikit-learn,nmayorov/scikit-learn,vinayak-mehta/scikit-learn,ilyes14/scikit-learn,B3AU/waveTree,scikit-learn/scikit-learn,sarahgrogan/scikit-learn,cl4rke/scikit-learn,victorbergelin/scikit-learn,kashif/scikit-learn,jkarnows/scikit-learn,espg/scikit-learn,ominux/scikit-learn,liangz0707/scikit-learn,carrillo/scikit-learn,xyguo/scikit-learn,huzq/scikit-learn,michigraber/scikit-learn,hsiaoyi0504/scikit-learn,cauchycui/scikit-learn,vivekmishra1991/scikit-learn,amueller/scikit-learn,dsullivan7/scikit-learn,NelisVerhoef/scikit-learn,OshynSong/scikit-learn,cwu2011/scikit-learn,pompiduskus/scikit-learn,shusenl/scikit-learn,lbishal/scikit-learn,q1ang/scikit-learn,jkarnows/scikit-learn,ishanic/scikit-learn,alexeyum/scikit-learn,amueller/scikit-learn,0x0all/scikit-learn,dsquareindia/scikit-learn,vigilv/scikit-learn,sarahgrogan/scikit-learn,cybernet14/scikit-learn,btabibian/scikit-learn,robin-lai/scikit-learn,0asa/scikit-learn,ltiao/scikit-learn,russel1237/scikit-learn,cainiaocome/scikit-learn,michigraber/scikit-learn,mattilyra/scikit-learn,bthirion/scikit-learn,Akshay0724/scikit-learn,rishikksh20/scikit-learn,andrewnc/scikit-learn,samzhang111/scikit-learn,ominux/scikit-learn,jereze/scikit-learn,krez13/scikit-learn,Obus/scikit-learn,tawsifkhan/scikit-learn,khkaminska/scikit-learn,joernhees/scikit-learn,wlamond/scikit-learn,zhenv5/scikit-learn,cainiaocome/scikit-learn,ChanderG/scikit-learn,cainiaocome/scikit-learn,huzq/scikit-learn,fredhusser/scikit-learn,
jayflo/scikit-learn,RayMick/scikit-learn,xavierwu/scikit-learn,ashhher3/scikit-learn,schets/scikit-learn,Aasmi/scikit-learn,vibhorag/scikit-learn,fzalkow/scikit-learn,yask123/scikit-learn,hugobowne/scikit-learn,equialgo/scikit-learn,shikhardb/scikit-learn,herilalaina/scikit-learn,3manuek/scikit-learn,sonnyhu/scikit-learn,loli/semisupervisedforests,depet/scikit-learn,djgagne/scikit-learn,Adai0808/scikit-learn,trungnt13/scikit-learn,gotomypc/scikit-learn,tdhopper/scikit-learn,xyguo/scikit-learn,AnasGhrab/scikit-learn,sumspr/scikit-learn,espg/scikit-learn,bigdataelephants/scikit-learn,Aasmi/scikit-learn,herilalaina/scikit-learn,wzbozon/scikit-learn,hitszxp/scikit-learn,hainm/scikit-learn,roxyboy/scikit-learn,manhhomienbienthuy/scikit-learn,NunoEdgarGub1/scikit-learn,hugobowne/scikit-learn,kylerbrown/scikit-learn,DSLituiev/scikit-learn,Achuth17/scikit-learn,abhishekkrthakur/scikit-learn,mhdella/scikit-learn,AlexRobson/scikit-learn,sanketloke/scikit-learn,mjgrav2001/scikit-learn,Srisai85/scikit-learn,rohanp/scikit-learn,vortex-ape/scikit-learn,khkaminska/scikit-learn,jseabold/scikit-learn,wlamond/scikit-learn,shenzebang/scikit-learn,3manuek/scikit-learn,cdegroc/scikit-learn,olologin/scikit-learn,q1ang/scikit-learn,UNR-AERIAL/scikit-learn,cwu2011/scikit-learn,RachitKansal/scikit-learn,mblondel/scikit-learn,ClimbsRocks/scikit-learn,wazeerzulfikar/scikit-learn,plissonf/scikit-learn,sarahgrogan/scikit-learn,mrshu/scikit-learn,plissonf/scikit-learn,ycaihua/scikit-learn,ahoyosid/scikit-learn,Djabbz/scikit-learn,loli/sklearn-ensembletrees,hrjn/scikit-learn,yask123/scikit-learn,mojoboss/scikit-learn,vybstat/scikit-learn,davidgbe/scikit-learn,betatim/scikit-learn,raghavrv/scikit-learn,mlyundin/scikit-learn,jkarnows/scikit-learn,vibhorag/scikit-learn,hsuantien/scikit-learn,sergeyf/scikit-learn,pypot/scikit-learn,LiaoPan/scikit-learn,ankurankan/scikit-learn,shenzebang/scikit-learn,devanshdalal/scikit-learn,nmayorov/scikit-learn,saiwing-yeung/scikit-learn,florian-f/sklearn,jblackburne/scikit-learn,beepee14/scikit-learn,ltiao/scikit-learn,ivannz/scikit-learn,Achuth17/scikit-learn,kjung/scikit-learn,victorbergelin/scikit-learn,florian-f/sklearn,stylianos-kampakis/scikit-learn,Adai0808/scikit-learn,pnedunuri/scikit-learn,liyu1990/sklearn,IssamLaradji/scikit-learn,B3AU/waveTree,ChanChiChoi/scikit-learn,terkkila/scikit-learn,petosegan/scikit-learn,khkaminska/scikit-learn,beepee14/scikit-learn,LiaoPan/scikit-learn,madjelan/scikit-learn,CforED/Machine-Learning,ominux/scikit-learn,xwolf12/scikit-learn,Akshay0724/scikit-learn,MartinSavc/scikit-learn,vortex-ape/scikit-learn,sonnyhu/scikit-learn,devanshdalal/scikit-learn,liberatorqjw/scikit-learn,ningchi/scikit-learn,joshloyal/scikit-learn,DonBeo/scikit-learn,toastedcornflakes/scikit-learn,eickenberg/scikit-learn,MohammedWasim/scikit-learn,fabioticconi/scikit-learn,rahuldhote/scikit-learn,Lawrence-Liu/scikit-learn,JsNoNo/scikit-learn,dhruv13J/scikit-learn,siutanwong/scikit-learn,abhishekgahlot/scikit-learn,aetilley/scikit-learn,kaichogami/scikit-learn,pythonvietnam/scikit-learn,themrmax/scikit-learn,joshloyal/scikit-learn,devanshdalal/scikit-learn,ningchi/scikit-learn,billy-inn/scikit-learn,JosmanPS/scikit-learn,anirudhjayaraman/scikit-learn,jm-begon/scikit-learn,poryfly/scikit-learn,bikong2/scikit-learn,alexsavio/scikit-learn,vermouthmjl/scikit-learn,shyamalschandra/scikit-learn,lucidfrontier45/scikit-learn,maheshakya/scikit-learn,rexshihaoren/scikit-learn,voxlol/scikit-learn,aabadie/scikit-learn,jblackburne/scikit-learn,LohithBlaze/scikit-learn,jlegendary/scikit-lea
rn,Srisai85/scikit-learn,trungnt13/scikit-learn,procoder317/scikit-learn,untom/scikit-learn,xavierwu/scikit-learn,Titan-C/scikit-learn,elkingtonmcb/scikit-learn,Sentient07/scikit-learn,arabenjamin/scikit-learn,r-mart/scikit-learn,saiwing-yeung/scikit-learn,fengzhyuan/scikit-learn,russel1237/scikit-learn,henrykironde/scikit-learn,sarahgrogan/scikit-learn,rsivapr/scikit-learn,massmutual/scikit-learn,AlexandreAbraham/scikit-learn,PrashntS/scikit-learn,Jimmy-Morzaria/scikit-learn,Sentient07/scikit-learn,sgenoud/scikit-learn,jereze/scikit-learn,kylerbrown/scikit-learn,xubenben/scikit-learn,icdishb/scikit-learn,etkirsch/scikit-learn,cauchycui/scikit-learn,maheshakya/scikit-learn,cdegroc/scikit-learn,hugobowne/scikit-learn,ivannz/scikit-learn,akionakamura/scikit-learn,wazeerzulfikar/scikit-learn,lenovor/scikit-learn,arabenjamin/scikit-learn,nvoron23/scikit-learn,gclenaghan/scikit-learn,arjoly/scikit-learn,herilalaina/scikit-learn,terkkila/scikit-learn,ominux/scikit-learn,thientu/scikit-learn,beepee14/scikit-learn,DonBeo/scikit-learn,anntzer/scikit-learn,billy-inn/scikit-learn,CVML/scikit-learn,aminert/scikit-learn,Jimmy-Morzaria/scikit-learn,jorik041/scikit-learn,ky822/scikit-learn,hitszxp/scikit-learn,xuewei4d/scikit-learn,anntzer/scikit-learn,icdishb/scikit-learn,arjoly/scikit-learn,TomDLT/scikit-learn,rvraghav93/scikit-learn,UNR-AERIAL/scikit-learn,waterponey/scikit-learn,OshynSong/scikit-learn,RachitKansal/scikit-learn,kashif/scikit-learn,lbishal/scikit-learn,anirudhjayaraman/scikit-learn,aflaxman/scikit-learn,fbagirov/scikit-learn,andaag/scikit-learn,costypetrisor/scikit-learn,hrjn/scikit-learn,rahul-c1/scikit-learn,ChanderG/scikit-learn,potash/scikit-learn,mehdidc/scikit-learn,Obus/scikit-learn,zuku1985/scikit-learn,samzhang111/scikit-learn,pnedunuri/scikit-learn,dhruv13J/scikit-learn,Lawrence-Liu/scikit-learn,sgenoud/scikit-learn,lin-credible/scikit-learn,mlyundin/scikit-learn,vigilv/scikit-learn,xubenben/scikit-learn,mhue/scikit-learn,kagayakidan/scikit-learn,jayflo/scikit-learn,xyguo/scikit-learn,MartinDelzant/scikit-learn,Jimmy-Morzaria/scikit-learn,wazeerzulfikar/scikit-learn,phdowling/scikit-learn,frank-tancf/scikit-learn,kashif/scikit-learn,ZenDevelopmentSystems/scikit-learn,vybstat/scikit-learn,procoder317/scikit-learn,Garrett-R/scikit-learn,rishikksh20/scikit-learn,wanggang3333/scikit-learn,rohanp/scikit-learn,jakirkham/scikit-learn,madjelan/scikit-learn,AIML/scikit-learn,Adai0808/scikit-learn,huzq/scikit-learn,jlegendary/scikit-learn,tosolveit/scikit-learn,victorbergelin/scikit-learn,clemkoa/scikit-learn,ngoix/OCRF,aminert/scikit-learn,BiaDarkia/scikit-learn,yunfeilu/scikit-learn,chrsrds/scikit-learn,voxlol/scikit-learn,mayblue9/scikit-learn,CVML/scikit-learn,jorge2703/scikit-learn,qifeigit/scikit-learn,walterreade/scikit-learn,murali-munna/scikit-learn,andaag/scikit-learn,siutanwong/scikit-learn,arahuja/scikit-learn,sumspr/scikit-learn,eickenberg/scikit-learn,justincassidy/scikit-learn,anurag313/scikit-learn,0asa/scikit-learn,glemaitre/scikit-learn,depet/scikit-learn,TomDLT/scikit-learn,iismd17/scikit-learn,Lawrence-Liu/scikit-learn,B3AU/waveTree,wanggang3333/scikit-learn,shyamalschandra/scikit-learn,mlyundin/scikit-learn,zhenv5/scikit-learn,spallavolu/scikit-learn,nhejazi/scikit-learn,MechCoder/scikit-learn,chrisburr/scikit-learn,pratapvardhan/scikit-learn,dhruv13J/scikit-learn,khkaminska/scikit-learn,PrashntS/scikit-learn,robbymeals/scikit-learn,Djabbz/scikit-learn,petosegan/scikit-learn,AlexRobson/scikit-learn,samuel1208/scikit-learn,luo66/scikit-learn,mfjb/scikit-learn,vybstat
/scikit-learn,ngoix/OCRF,cauchycui/scikit-learn,rsivapr/scikit-learn,jmetzen/scikit-learn,mblondel/scikit-learn,luo66/scikit-learn,liberatorqjw/scikit-learn,pkruskal/scikit-learn,nesterione/scikit-learn,hainm/scikit-learn,etkirsch/scikit-learn,Nyker510/scikit-learn,sanketloke/scikit-learn,harshaneelhg/scikit-learn,robbymeals/scikit-learn,shahankhatch/scikit-learn,schets/scikit-learn,AlexanderFabisch/scikit-learn,shusenl/scikit-learn,vibhorag/scikit-learn,JsNoNo/scikit-learn,maheshakya/scikit-learn,madjelan/scikit-learn,massmutual/scikit-learn,vermouthmjl/scikit-learn,fabioticconi/scikit-learn,xiaoxiamii/scikit-learn,theoryno3/scikit-learn,pratapvardhan/scikit-learn,ephes/scikit-learn,simon-pepin/scikit-learn,massmutual/scikit-learn,krez13/scikit-learn,justincassidy/scikit-learn,samuel1208/scikit-learn,ldirer/scikit-learn,vinayak-mehta/scikit-learn,nvoron23/scikit-learn,idlead/scikit-learn,RayMick/scikit-learn,ycaihua/scikit-learn,themrmax/scikit-learn,jjx02230808/project0223,nrhine1/scikit-learn,mehdidc/scikit-learn,alexsavio/scikit-learn,IssamLaradji/scikit-learn,maheshakya/scikit-learn,mojoboss/scikit-learn,shikhardb/scikit-learn,eickenberg/scikit-learn,pnedunuri/scikit-learn,quheng/scikit-learn,hsiaoyi0504/scikit-learn,pompiduskus/scikit-learn,jzt5132/scikit-learn,pompiduskus/scikit-learn,schets/scikit-learn,marcocaccin/scikit-learn,theoryno3/scikit-learn,zorroblue/scikit-learn,michigraber/scikit-learn,mehdidc/scikit-learn,samzhang111/scikit-learn,lazywei/scikit-learn,mhue/scikit-learn,huzq/scikit-learn,idlead/scikit-learn,alexsavio/scikit-learn,bnaul/scikit-learn,IssamLaradji/scikit-learn,vybstat/scikit-learn,larsmans/scikit-learn,vshtanko/scikit-learn,mblondel/scikit-learn,belltailjp/scikit-learn,jaidevd/scikit-learn,fbagirov/scikit-learn,kashif/scikit-learn,heli522/scikit-learn,akionakamura/scikit-learn,ningchi/scikit-learn,sinhrks/scikit-learn,cdegroc/scikit-learn,fzalkow/scikit-learn,bigdataelephants/scikit-learn,nomadcube/scikit-learn,MatthieuBizien/scikit-learn,kylerbrown/scikit-learn,liangz0707/scikit-learn,dsquareindia/scikit-learn,alvarofierroclavero/scikit-learn,yonglehou/scikit-learn,huobaowangxi/scikit-learn,jakobworldpeace/scikit-learn,deepesch/scikit-learn,murali-munna/scikit-learn,JPFrancoia/scikit-learn,ZENGXH/scikit-learn,gclenaghan/scikit-learn,djgagne/scikit-learn,zihua/scikit-learn,ephes/scikit-learn,ssaeger/scikit-learn,fyffyt/scikit-learn,JeanKossaifi/scikit-learn,icdishb/scikit-learn,AnasGhrab/scikit-learn,shusenl/scikit-learn,mjudsp/Tsallis,frank-tancf/scikit-learn,henrykironde/scikit-learn,liberatorqjw/scikit-learn,samzhang111/scikit-learn,rvraghav93/scikit-learn,imaculate/scikit-learn,olologin/scikit-learn,abimannans/scikit-learn,mattilyra/scikit-learn,anntzer/scikit-learn,schets/scikit-learn,appapantula/scikit-learn,yyjiang/scikit-learn,deepesch/scikit-learn,466152112/scikit-learn,hitszxp/scikit-learn,tdhopper/scikit-learn,jorik041/scikit-learn,PatrickOReilly/scikit-learn,aewhatley/scikit-learn,mwv/scikit-learn,evgchz/scikit-learn,jblackburne/scikit-learn,pianomania/scikit-learn,abhishekgahlot/scikit-learn,henridwyer/scikit-learn,aewhatley/scikit-learn,YinongLong/scikit-learn,tomlof/scikit-learn,moutai/scikit-learn,glennq/scikit-learn,stylianos-kampakis/scikit-learn,AnasGhrab/scikit-learn,rahul-c1/scikit-learn,HolgerPeters/scikit-learn,zhenv5/scikit-learn,wzbozon/scikit-learn,arabenjamin/scikit-learn,JPFrancoia/scikit-learn,simon-pepin/scikit-learn,nelson-liu/scikit-learn,rexshihaoren/scikit-learn,betatim/scikit-learn,Nyker510/scikit-learn,yask123/scikit-learn
,0x0all/scikit-learn,vigilv/scikit-learn,mayblue9/scikit-learn,simon-pepin/scikit-learn,tawsifkhan/scikit-learn,plissonf/scikit-learn,hlin117/scikit-learn,fbagirov/scikit-learn,Barmaley-exe/scikit-learn,nelson-liu/scikit-learn,abhishekgahlot/scikit-learn,wlamond/scikit-learn,nhejazi/scikit-learn,zorojean/scikit-learn,mikebenfield/scikit-learn,gclenaghan/scikit-learn,gclenaghan/scikit-learn,frank-tancf/scikit-learn,robin-lai/scikit-learn,Fireblend/scikit-learn,hdmetor/scikit-learn,mhue/scikit-learn,jmschrei/scikit-learn,bhargav/scikit-learn,ndingwall/scikit-learn,carrillo/scikit-learn,macks22/scikit-learn,hsiaoyi0504/scikit-learn,mayblue9/scikit-learn,466152112/scikit-learn,rohanp/scikit-learn,maheshakya/scikit-learn,krez13/scikit-learn,murali-munna/scikit-learn,iismd17/scikit-learn,Barmaley-exe/scikit-learn,CVML/scikit-learn,scikit-learn/scikit-learn,mugizico/scikit-learn,HolgerPeters/scikit-learn,jmschrei/scikit-learn,lenovor/scikit-learn,ndingwall/scikit-learn,altairpearl/scikit-learn,sumspr/scikit-learn,rrohan/scikit-learn,aetilley/scikit-learn,AlexandreAbraham/scikit-learn,dingocuster/scikit-learn,pratapvardhan/scikit-learn,joshloyal/scikit-learn,kjung/scikit-learn,mrshu/scikit-learn,bnaul/scikit-learn,yyjiang/scikit-learn,iismd17/scikit-learn,zaxtax/scikit-learn,Fireblend/scikit-learn,mojoboss/scikit-learn,jmschrei/scikit-learn,mikebenfield/scikit-learn,sergeyf/scikit-learn,NelisVerhoef/scikit-learn,BiaDarkia/scikit-learn,qifeigit/scikit-learn,ningchi/scikit-learn,icdishb/scikit-learn,CVML/scikit-learn,manashmndl/scikit-learn,simon-pepin/scikit-learn,mattilyra/scikit-learn,equialgo/scikit-learn,nvoron23/scikit-learn,466152112/scikit-learn,shangwuhencc/scikit-learn,q1ang/scikit-learn,AlexRobson/scikit-learn,beepee14/scikit-learn,larsmans/scikit-learn,kevin-intel/scikit-learn,liangz0707/scikit-learn,jm-begon/scikit-learn,IssamLaradji/scikit-learn,cl4rke/scikit-learn,AlexandreAbraham/scikit-learn,pythonvietnam/scikit-learn,jakobworldpeace/scikit-learn,imaculate/scikit-learn,pv/scikit-learn,dsullivan7/scikit-learn,betatim/scikit-learn,mfjb/scikit-learn,voxlol/scikit-learn,treycausey/scikit-learn,scikit-learn/scikit-learn,mjudsp/Tsallis,jblackburne/scikit-learn,PatrickOReilly/scikit-learn,aetilley/scikit-learn,ngoix/OCRF,costypetrisor/scikit-learn,pkruskal/scikit-learn,altairpearl/scikit-learn,tosolveit/scikit-learn,giorgiop/scikit-learn,RayMick/scikit-learn,PatrickOReilly/scikit-learn,eg-zhang/scikit-learn,nelson-liu/scikit-learn,alexeyum/scikit-learn,sanketloke/scikit-learn,spallavolu/scikit-learn,vigilv/scikit-learn,kagayakidan/scikit-learn,rishikksh20/scikit-learn,jseabold/scikit-learn,nesterione/scikit-learn,JsNoNo/scikit-learn,ishanic/scikit-learn,hlin117/scikit-learn,untom/scikit-learn,sgenoud/scikit-learn,davidgbe/scikit-learn,ankurankan/scikit-learn,djgagne/scikit-learn,jzt5132/scikit-learn,jmetzen/scikit-learn,BiaDarkia/scikit-learn,IndraVikas/scikit-learn,stylianos-kampakis/scikit-learn,kmike/scikit-learn,bthirion/scikit-learn,mxjl620/scikit-learn,ndingwall/scikit-learn,jpautom/scikit-learn,arabenjamin/scikit-learn,kmike/scikit-learn,RomainBrault/scikit-learn,ogrisel/scikit-learn,elkingtonmcb/scikit-learn,Myasuka/scikit-learn,ZenDevelopmentSystems/scikit-learn,466152112/scikit-learn,adamgreenhall/scikit-learn,xwolf12/scikit-learn,hainm/scikit-learn,sumspr/scikit-learn,nvoron23/scikit-learn,mikebenfield/scikit-learn,pv/scikit-learn,PatrickOReilly/scikit-learn,manashmndl/scikit-learn,sonnyhu/scikit-learn,MohammedWasim/scikit-learn,Clyde-fare/scikit-learn,iismd17/scikit-learn,quheng
/scikit-learn,jpautom/scikit-learn,treycausey/scikit-learn,rajat1994/scikit-learn,pianomania/scikit-learn,joshloyal/scikit-learn,ky822/scikit-learn,zuku1985/scikit-learn,marcocaccin/scikit-learn,jpautom/scikit-learn,ngoix/OCRF,michigraber/scikit-learn,eg-zhang/scikit-learn,Myasuka/scikit-learn,amueller/scikit-learn,thientu/scikit-learn,fengzhyuan/scikit-learn,HolgerPeters/scikit-learn,alexeyum/scikit-learn,f3r/scikit-learn,shangwuhencc/scikit-learn,kevin-intel/scikit-learn,samuel1208/scikit-learn,raghavrv/scikit-learn,Jimmy-Morzaria/scikit-learn,RomainBrault/scikit-learn,vshtanko/scikit-learn,DSLituiev/scikit-learn,ilyes14/scikit-learn,davidgbe/scikit-learn,mjgrav2001/scikit-learn,glennq/scikit-learn,ivannz/scikit-learn,dsullivan7/scikit-learn,yyjiang/scikit-learn,kagayakidan/scikit-learn,henrykironde/scikit-learn,heli522/scikit-learn,zaxtax/scikit-learn,Myasuka/scikit-learn,rahul-c1/scikit-learn,ChanChiChoi/scikit-learn,fzalkow/scikit-learn,zuku1985/scikit-learn,larsmans/scikit-learn,russel1237/scikit-learn,Myasuka/scikit-learn,IshankGulati/scikit-learn,ChanChiChoi/scikit-learn,hrjn/scikit-learn,macks22/scikit-learn,walterreade/scikit-learn,carrillo/scikit-learn,IndraVikas/scikit-learn,mlyundin/scikit-learn,mjgrav2001/scikit-learn,zorroblue/scikit-learn,AlexanderFabisch/scikit-learn,billy-inn/scikit-learn,dsquareindia/scikit-learn,wanggang3333/scikit-learn,anirudhjayaraman/scikit-learn,ilo10/scikit-learn,btabibian/scikit-learn,kaichogami/scikit-learn,walterreade/scikit-learn,hlin117/scikit-learn,Aasmi/scikit-learn,akionakamura/scikit-learn,ClimbsRocks/scikit-learn,appapantula/scikit-learn,yonglehou/scikit-learn,toastedcornflakes/scikit-learn,ldirer/scikit-learn,treycausey/scikit-learn,fabianp/scikit-learn,3manuek/scikit-learn,waterponey/scikit-learn,lazywei/scikit-learn,dingocuster/scikit-learn,AIML/scikit-learn,shusenl/scikit-learn,kjung/scikit-learn,bikong2/scikit-learn,lenovor/scikit-learn,rrohan/scikit-learn,mrshu/scikit-learn,RomainBrault/scikit-learn,mhdella/scikit-learn,rexshihaoren/scikit-learn,zaxtax/scikit-learn,abhishekkrthakur/scikit-learn,marcocaccin/scikit-learn,henrykironde/scikit-learn,AnasGhrab/scikit-learn,pv/scikit-learn,meduz/scikit-learn,fredhusser/scikit-learn,pkruskal/scikit-learn,Obus/scikit-learn,IndraVikas/scikit-learn,ClimbsRocks/scikit-learn,henridwyer/scikit-learn,nmayorov/scikit-learn,arahuja/scikit-learn,eickenberg/scikit-learn,lazywei/scikit-learn,abimannans/scikit-learn,manhhomienbienthuy/scikit-learn,imaculate/scikit-learn,chrisburr/scikit-learn,pkruskal/scikit-learn,robbymeals/scikit-learn,MechCoder/scikit-learn,MatthieuBizien/scikit-learn,sgenoud/scikit-learn,ankurankan/scikit-learn,depet/scikit-learn,jzt5132/scikit-learn,sinhrks/scikit-learn,waterponey/scikit-learn,jlegendary/scikit-learn,bthirion/scikit-learn,kjung/scikit-learn,NelisVerhoef/scikit-learn,xubenben/scikit-learn,HolgerPeters/scikit-learn,sgenoud/scikit-learn,0asa/scikit-learn,mwv/scikit-learn,lbishal/scikit-learn,cybernet14/scikit-learn,Vimos/scikit-learn,jm-begon/scikit-learn,anirudhjayaraman/scikit-learn,pnedunuri/scikit-learn,yonglehou/scikit-learn,macks22/scikit-learn,vivekmishra1991/scikit-learn,yask123/scikit-learn,vshtanko/scikit-learn,mattgiguere/scikit-learn,betatim/scikit-learn,bigdataelephants/scikit-learn,lazywei/scikit-learn,fengzhyuan/scikit-learn,ElDeveloper/scikit-learn,Achuth17/scikit-learn,f3r/scikit-learn,siutanwong/scikit-learn,nikitasingh981/scikit-learn,RomainBrault/scikit-learn,xwolf12/scikit-learn,PatrickChrist/scikit-learn,Vimos/scikit-learn,bhargav/scikit-learn,vi
vekmishra1991/scikit-learn,thilbern/scikit-learn,liyu1990/sklearn,IshankGulati/scikit-learn,xubenben/scikit-learn,ky822/scikit-learn,Barmaley-exe/scikit-learn,trankmichael/scikit-learn,ldirer/scikit-learn,ngoix/OCRF,olologin/scikit-learn,frank-tancf/scikit-learn,jayflo/scikit-learn,costypetrisor/scikit-learn,Garrett-R/scikit-learn,abhishekgahlot/scikit-learn,fyffyt/scikit-learn,shikhardb/scikit-learn,btabibian/scikit-learn,trungnt13/scikit-learn,devanshdalal/scikit-learn,lenovor/scikit-learn,evgchz/scikit-learn,cainiaocome/scikit-learn,kylerbrown/scikit-learn,petosegan/scikit-learn,krez13/scikit-learn,0x0all/scikit-learn,tmhm/scikit-learn,voxlol/scikit-learn,xuewei4d/scikit-learn,nelson-liu/scikit-learn,B3AU/waveTree,rsivapr/scikit-learn,Clyde-fare/scikit-learn,glouppe/scikit-learn,ElDeveloper/scikit-learn,potash/scikit-learn,YinongLong/scikit-learn,aabadie/scikit-learn,cdegroc/scikit-learn,eg-zhang/scikit-learn,untom/scikit-learn,0x0all/scikit-learn,bikong2/scikit-learn,lin-credible/scikit-learn,djgagne/scikit-learn,RPGOne/scikit-learn,wzbozon/scikit-learn,aminert/scikit-learn,zorroblue/scikit-learn,rahul-c1/scikit-learn,jorge2703/scikit-learn,depet/scikit-learn,liyu1990/sklearn,nesterione/scikit-learn,RPGOne/scikit-learn,Sentient07/scikit-learn,appapantula/scikit-learn,alvarofierroclavero/scikit-learn,mugizico/scikit-learn,billy-inn/scikit-learn,arahuja/scikit-learn,aabadie/scikit-learn,YinongLong/scikit-learn,meduz/scikit-learn,florian-f/sklearn,ChanderG/scikit-learn,loli/semisupervisedforests,ChanChiChoi/scikit-learn,Barmaley-exe/scikit-learn,jkarnows/scikit-learn,hrjn/scikit-learn,nhejazi/scikit-learn,roxyboy/scikit-learn,trankmichael/scikit-learn,andaag/scikit-learn,h2educ/scikit-learn,shenzebang/scikit-learn,ltiao/scikit-learn,xzh86/scikit-learn,shenzebang/scikit-learn,bhargav/scikit-learn,larsmans/scikit-learn,appapantula/scikit-learn,zorojean/scikit-learn,mattgiguere/scikit-learn,abhishekkrthakur/scikit-learn,tomlof/scikit-learn,ankurankan/scikit-learn,davidgbe/scikit-learn,cybernet14/scikit-learn,shahankhatch/scikit-learn,dsullivan7/scikit-learn,procoder317/scikit-learn,rishikksh20/scikit-learn,YinongLong/scikit-learn,DSLituiev/scikit-learn,NunoEdgarGub1/scikit-learn,xiaoxiamii/scikit-learn,jorik041/scikit-learn,pypot/scikit-learn,idlead/scikit-learn,Titan-C/scikit-learn,UNR-AERIAL/scikit-learn,abhishekgahlot/scikit-learn,mhdella/scikit-learn,glouppe/scikit-learn,aewhatley/scikit-learn,plissonf/scikit-learn,zaxtax/scikit-learn,roxyboy/scikit-learn,olologin/scikit-learn,heli522/scikit-learn,nhejazi/scikit-learn,ahoyosid/scikit-learn,vinayak-mehta/scikit-learn,ilo10/scikit-learn,mattgiguere/scikit-learn,MartinDelzant/scikit-learn,moutai/scikit-learn,arjoly/scikit-learn,nesterione/scikit-learn,jaidevd/scikit-learn,elkingtonmcb/scikit-learn,ycaihua/scikit-learn,ilyes14/scikit-learn,ilo10/scikit-learn,TomDLT/scikit-learn,belltailjp/scikit-learn,ivannz/scikit-learn,LohithBlaze/scikit-learn,LiaoPan/scikit-learn,r-mart/scikit-learn,mfjb/scikit-learn,aminert/scikit-learn,hdmetor/scikit-learn,Akshay0724/scikit-learn,potash/scikit-learn,kaichogami/scikit-learn,trankmichael/scikit-learn,hdmetor/scikit-learn,jseabold/scikit-learn,ilyes14/scikit-learn,xavierwu/scikit-learn,CforED/Machine-Learning,anurag313/scikit-learn,MartinDelzant/scikit-learn,shahankhatch/scikit-learn,RayMick/scikit-learn,hainm/scikit-learn,lucidfrontier45/scikit-learn,gotomypc/scikit-learn,lucidfrontier45/scikit-learn,Akshay0724/scikit-learn,harshaneelhg/scikit-learn,loli/sklearn-ensembletrees,robbymeals/scikit-learn,Mohamm
edWasim/scikit-learn,ngoix/OCRF,clemkoa/scikit-learn,Lawrence-Liu/scikit-learn,espg/scikit-learn,Windy-Ground/scikit-learn,zhenv5/scikit-learn,mattgiguere/scikit-learn,Garrett-R/scikit-learn,h2educ/scikit-learn,waterponey/scikit-learn,evgchz/scikit-learn,fzalkow/scikit-learn,RachitKansal/scikit-learn,treycausey/scikit-learn,mwv/scikit-learn,ltiao/scikit-learn,aewhatley/scikit-learn,hsuantien/scikit-learn,jm-begon/scikit-learn,kevin-intel/scikit-learn,rahuldhote/scikit-learn,fredhusser/scikit-learn,belltailjp/scikit-learn,manashmndl/scikit-learn,xyguo/scikit-learn,larsmans/scikit-learn,glouppe/scikit-learn,alexeyum/scikit-learn,PatrickChrist/scikit-learn,smartscheduling/scikit-learn-categorical-tree,yanlend/scikit-learn,hitszxp/scikit-learn,mattilyra/scikit-learn,OshynSong/scikit-learn,quheng/scikit-learn,wlamond/scikit-learn,RachitKansal/scikit-learn,PrashntS/scikit-learn,yunfeilu/scikit-learn,zihua/scikit-learn,nomadcube/scikit-learn,alexsavio/scikit-learn,jakirkham/scikit-learn,smartscheduling/scikit-learn-categorical-tree,moutai/scikit-learn,akionakamura/scikit-learn,lesteve/scikit-learn,Garrett-R/scikit-learn,jmetzen/scikit-learn,Windy-Ground/scikit-learn,AlexanderFabisch/scikit-learn,jayflo/scikit-learn,petosegan/scikit-learn,chrisburr/scikit-learn,qifeigit/scikit-learn,potash/scikit-learn,xzh86/scikit-learn,sergeyf/scikit-learn,mrshu/scikit-learn,fabioticconi/scikit-learn,Fireblend/scikit-learn,victorbergelin/scikit-learn,rajat1994/scikit-learn,tosolveit/scikit-learn,fabianp/scikit-learn,jjx02230808/project0223,sanketloke/scikit-learn,RPGOne/scikit-learn,andrewnc/scikit-learn,PrashntS/scikit-learn,xavierwu/scikit-learn,JPFrancoia/scikit-learn,zorroblue/scikit-learn,anntzer/scikit-learn,DonBeo/scikit-learn,theoryno3/scikit-learn,JeanKossaifi/scikit-learn,andaag/scikit-learn,mugizico/scikit-learn,MartinSavc/scikit-learn,abimannans/scikit-learn,JosmanPS/scikit-learn,mjudsp/Tsallis,bikong2/scikit-learn,ssaeger/scikit-learn,deepesch/scikit-learn,ishanic/scikit-learn,nikitasingh981/scikit-learn,ahoyosid/scikit-learn,mugizico/scikit-learn,bnaul/scikit-learn,cl4rke/scikit-learn,tomlof/scikit-learn,jakobworldpeace/scikit-learn,massmutual/scikit-learn,Windy-Ground/scikit-learn,henridwyer/scikit-learn,toastedcornflakes/scikit-learn,walterreade/scikit-learn,jakirkham/scikit-learn,nikitasingh981/scikit-learn,dhruv13J/scikit-learn,LiaoPan/scikit-learn,jmschrei/scikit-learn,fengzhyuan/scikit-learn,hdmetor/scikit-learn,mjudsp/Tsallis,chrsrds/scikit-learn,themrmax/scikit-learn,f3r/scikit-learn,jereze/scikit-learn,spallavolu/scikit-learn,florian-f/sklearn,zuku1985/scikit-learn,JosmanPS/scikit-learn,pratapvardhan/scikit-learn,scikit-learn/scikit-learn,ZenDevelopmentSystems/scikit-learn,phdowling/scikit-learn,nikitasingh981/scikit-learn,rexshihaoren/scikit-learn,IshankGulati/scikit-learn,pypot/scikit-learn,f3r/scikit-learn,tdhopper/scikit-learn,wazeerzulfikar/scikit-learn,JosmanPS/scikit-learn,glemaitre/scikit-learn,zihua/scikit-learn,bnaul/scikit-learn,themrmax/scikit-learn,btabibian/scikit-learn,Nyker510/scikit-learn,jorge2703/scikit-learn,espg/scikit-learn,raghavrv/scikit-learn,rohanp/scikit-learn,rvraghav93/scikit-learn,ephes/scikit-learn,nmayorov/scikit-learn,lesteve/scikit-learn,pianomania/scikit-learn,aabadie/scikit-learn,harshaneelhg/scikit-learn,JPFrancoia/scikit-learn,ahoyosid/scikit-learn,fbagirov/scikit-learn,ycaihua/scikit-learn,rajat1994/scikit-learn,shyamalschandra/scikit-learn,heli522/scikit-learn,yanlend/scikit-learn,AlexanderFabisch/scikit-learn,pythonvietnam/scikit-learn,wanggang3333/s
cikit-learn,justincassidy/scikit-learn,tdhopper/scikit-learn,joernhees/scikit-learn,costypetrisor/scikit-learn,ilo10/scikit-learn,0x0all/scikit-learn,Garrett-R/scikit-learn,sonnyhu/scikit-learn,loli/sklearn-ensembletrees,sinhrks/scikit-learn,Titan-C/scikit-learn,fabianp/scikit-learn,jzt5132/scikit-learn,jaidevd/scikit-learn,trungnt13/scikit-learn,wzbozon/scikit-learn,B3AU/waveTree,shangwuhencc/scikit-learn,aflaxman/scikit-learn,xzh86/scikit-learn,cauchycui/scikit-learn,hlin117/scikit-learn,h2educ/scikit-learn,thientu/scikit-learn,yunfeilu/scikit-learn,pypot/scikit-learn,zorojean/scikit-learn,CforED/Machine-Learning,Vimos/scikit-learn,giorgiop/scikit-learn,IshankGulati/scikit-learn,theoryno3/scikit-learn,dingocuster/scikit-learn,0asa/scikit-learn,PatrickChrist/scikit-learn,glennq/scikit-learn,rsivapr/scikit-learn,mayblue9/scikit-learn,r-mart/scikit-learn,zihua/scikit-learn,jakobworldpeace/scikit-learn,sinhrks/scikit-learn,meduz/scikit-learn,tawsifkhan/scikit-learn,aetilley/scikit-learn,MartinSavc/scikit-learn,adamgreenhall/scikit-learn,ogrisel/scikit-learn,poryfly/scikit-learn,ankurankan/scikit-learn,yanlend/scikit-learn,altairpearl/scikit-learn,etkirsch/scikit-learn,poryfly/scikit-learn,smartscheduling/scikit-learn-categorical-tree,abhishekkrthakur/scikit-learn,hugobowne/scikit-learn,kaichogami/scikit-learn,robin-lai/scikit-learn,russel1237/scikit-learn,arahuja/scikit-learn,vermouthmjl/scikit-learn,AIML/scikit-learn,ChanderG/scikit-learn,shyamalschandra/scikit-learn,mxjl620/scikit-learn,huobaowangxi/scikit-learn,madjelan/scikit-learn,vivekmishra1991/scikit-learn,moutai/scikit-learn,Vimos/scikit-learn,MechCoder/scikit-learn,JeanKossaifi/scikit-learn,phdowling/scikit-learn,ogrisel/scikit-learn,xiaoxiamii/scikit-learn,q1ang/scikit-learn,huobaowangxi/scikit-learn,Clyde-fare/scikit-learn,mattilyra/scikit-learn,robin-lai/scikit-learn,nrhine1/scikit-learn,harshaneelhg/scikit-learn,MartinDelzant/scikit-learn,vermouthmjl/scikit-learn,JsNoNo/scikit-learn,mehdidc/scikit-learn,Adai0808/scikit-learn,tmhm/scikit-learn,Nyker510/scikit-learn,roxyboy/scikit-learn,dingocuster/scikit-learn,bigdataelephants/scikit-learn,idlead/scikit-learn,h2educ/scikit-learn,NunoEdgarGub1/scikit-learn,equialgo/scikit-learn,marcocaccin/scikit-learn,IndraVikas/scikit-learn,toastedcornflakes/scikit-learn,jmetzen/scikit-learn,kagayakidan/scikit-learn,loli/semisupervisedforests,mblondel/scikit-learn,lucidfrontier45/scikit-learn,smartscheduling/scikit-learn-categorical-tree,henridwyer/scikit-learn,ndingwall/scikit-learn,mhdella/scikit-learn,mhue/scikit-learn,thientu/scikit-learn,r-mart/scikit-learn,jakirkham/scikit-learn,terkkila/scikit-learn,JeanKossaifi/scikit-learn,vortex-ape/scikit-learn,LohithBlaze/scikit-learn,manhhomienbienthuy/scikit-learn,shahankhatch/scikit-learn,bthirion/scikit-learn,lin-credible/scikit-learn,adamgreenhall/scikit-learn,Srisai85/scikit-learn,Sentient07/scikit-learn,terkkila/scikit-learn,ashhher3/scikit-learn,ldirer/scikit-learn,MechCoder/scikit-learn,justincassidy/scikit-learn,Fireblend/scikit-learn,fredhusser/scikit-learn,ZENGXH/scikit-learn,tawsifkhan/scikit-learn,LohithBlaze/scikit-learn,AlexandreAbraham/scikit-learn,manhhomienbienthuy/scikit-learn,belltailjp/scikit-learn,xiaoxiamii/scikit-learn,tmhm/scikit-learn,TomDLT/scikit-learn,glennq/scikit-learn,luo66/scikit-learn,liberatorqjw/scikit-learn,elkingtonmcb/scikit-learn,hitszxp/scikit-learn,fyffyt/scikit-learn,xwolf12/scikit-learn,0asa/scikit-learn,hsiaoyi0504/scikit-learn,aflaxman/scikit-learn,shangwuhencc/scikit-learn,3manuek/scikit-learn,nomadcube
/scikit-learn,tosolveit/scikit-learn,Windy-Ground/scikit-learn,raghavrv/scikit-learn,rsivapr/scikit-learn,imaculate/scikit-learn,glemaitre/scikit-learn,aflaxman/scikit-learn,yyjiang/scikit-learn,ssaeger/scikit-learn,ZenDevelopmentSystems/scikit-learn,giorgiop/scikit-learn,ElDeveloper/scikit-learn,Achuth17/scikit-learn,meduz/scikit-learn,fyffyt/scikit-learn,ashhher3/scikit-learn,arjoly/scikit-learn,lesteve/scikit-learn,fabianp/scikit-learn,rajat1994/scikit-learn,procoder317/scikit-learn,Titan-C/scikit-learn,adamgreenhall/scikit-learn,cwu2011/scikit-learn,ZENGXH/scikit-learn,yonglehou/scikit-learn,alvarofierroclavero/scikit-learn,NunoEdgarGub1/scikit-learn,ishanic/scikit-learn,ElDeveloper/scikit-learn,jjx02230808/project0223,MohammedWasim/scikit-learn,chrsrds/scikit-learn,anurag313/scikit-learn,giorgiop/scikit-learn,alvarofierroclavero/scikit-learn,rrohan/scikit-learn,quheng/scikit-learn,yanlend/scikit-learn,yunfeilu/scikit-learn,evgchz/scikit-learn,pythonvietnam/scikit-learn,mjudsp/Tsallis,hsuantien/scikit-learn,luo66/scikit-learn,saiwing-yeung/scikit-learn,ogrisel/scikit-learn,CforED/Machine-Learning,loli/sklearn-ensembletrees,xuewei4d/scikit-learn,Djabbz/scikit-learn,liangz0707/scikit-learn,DSLituiev/scikit-learn,siutanwong/scikit-learn,ycaihua/scikit-learn,vshtanko/scikit-learn,altairpearl/scikit-learn,pianomania/scikit-learn,evgchz/scikit-learn,qifeigit/scikit-learn,gotomypc/scikit-learn,mojoboss/scikit-learn,sergeyf/scikit-learn,murali-munna/scikit-learn,DonBeo/scikit-learn,dsquareindia/scikit-learn,lbishal/scikit-learn,mxjl620/scikit-learn,jjx02230808/project0223,jaidevd/scikit-learn,MatthieuBizien/scikit-learn,jereze/scikit-learn,nomadcube/scikit-learn,liyu1990/sklearn,eickenberg/scikit-learn,andrewnc/scikit-learn,AlexRobson/scikit-learn,kmike/scikit-learn,PatrickChrist/scikit-learn,zorojean/scikit-learn,glouppe/scikit-learn,mikebenfield/scikit-learn
|
BUG: Make sure that joblib does get installed.
|
# -*- coding: utf-8 -*-
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('externals',parent_package,top_path)
config.add_subpackage('joblib')
config.add_subpackage('joblib/test')
return config
|
<commit_before><commit_msg>BUG: Make sure that joblib does get installed.<commit_after>
|
# -*- coding: utf-8 -*-
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('externals',parent_package,top_path)
config.add_subpackage('joblib')
config.add_subpackage('joblib/test')
return config
|
BUG: Make sure that joblib does get installed.# -*- coding: utf-8 -*-
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('externals',parent_package,top_path)
config.add_subpackage('joblib')
config.add_subpackage('joblib/test')
return config
|
<commit_before><commit_msg>BUG: Make sure that joblib does get installed.<commit_after># -*- coding: utf-8 -*-
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('externals',parent_package,top_path)
config.add_subpackage('joblib')
config.add_subpackage('joblib/test')
return config
|
|
612407d887ade72d7718b62c7a325821059bd9bc
|
scripts/restore_file.py
|
scripts/restore_file.py
|
# -*- coding: utf-8 -*-
"""Restore a deleted StoredFileNode. If the file was reuploaded, renames the file
to <filename> (restored).<ext>. For example, README.rst would be renamed to README (restored).rst.
python -m scripts.restore_file 123ab --dry
python -m scripts.restore_file 123ab
"""
import sys
import os
import logging
from modularodm.exceptions import KeyExistsException
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.models import Guid
from website.files.models.base import TrashedFileNode
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def rename_file(file_node):
name, ext = os.path.splitext(file_node.name)
new_name = ''.join([name, ' (restored)', ext])
logger.info('Renaming {} to {}'.format(file_node.name, new_name))
file_node.name = new_name
name, ext = os.path.splitext(file_node.materialized_path)
new_mpath = ''.join([name, ' (restored)', ext])
logger.info('Changing materialized_path from {} to {}'.format(file_node.materialized_path, new_mpath))
file_node.materialized_path = new_mpath
file_node.save()
def restore_file(guid):
guid_obj = Guid.load(guid)
trashed_file_node = guid_obj.referent
assert isinstance(trashed_file_node, TrashedFileNode), 'Guid does not point to a trashedfilenode'
logger.info('Loaded trashedfilenode {}'.format(trashed_file_node._id))
try:
logger.info('Calling restore()')
trashed_file_node.restore()
except KeyExistsException: # File with same name exists; user most likely re-uploaded file
        logger.warning('File with name {} exists. Renaming...'.format(trashed_file_node.name))
rename_file(trashed_file_node)
logger.info('Calling restore()')
trashed_file_node.restore()
return True
def main():
init_app(routes=False)
guid = sys.argv[1]
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
with TokuTransaction():
restore_file(guid)
if dry:
raise RuntimeError('Dry run - rolling back transaction')
if __name__ == "__main__":
main()
|
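To make the renaming rule concrete, a small illustration (not from the
commit) using a stand-in object with only the attributes rename_file touches,
so no database is needed; FakeNode is a hypothetical substitute for
TrashedFileNode.

class FakeNode(object):
    def __init__(self, name, materialized_path):
        self.name = name
        self.materialized_path = materialized_path

    def save(self):
        pass  # no-op; the real model would persist to the database

node = FakeNode('README.rst', '/docs/README.rst')
rename_file(node)
print(node.name)               # README (restored).rst
print(node.materialized_path)  # /docs/README (restored).rst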
Add script to restore deleted files
|
Add script to restore deleted files
|
Python
|
apache-2.0
|
rdhyee/osf.io,icereval/osf.io,alexschiller/osf.io,felliott/osf.io,emetsger/osf.io,saradbowman/osf.io,DanielSBrown/osf.io,Nesiehr/osf.io,Johnetordoff/osf.io,abought/osf.io,RomanZWang/osf.io,aaxelb/osf.io,wearpants/osf.io,sloria/osf.io,HalcyonChimera/osf.io,mattclark/osf.io,mluke93/osf.io,sloria/osf.io,crcresearch/osf.io,mfraezz/osf.io,Nesiehr/osf.io,mfraezz/osf.io,monikagrabowska/osf.io,caseyrollins/osf.io,zachjanicki/osf.io,TomHeatwole/osf.io,adlius/osf.io,brianjgeiger/osf.io,amyshi188/osf.io,chennan47/osf.io,cwisecarver/osf.io,cslzchen/osf.io,CenterForOpenScience/osf.io,samchrisinger/osf.io,RomanZWang/osf.io,alexschiller/osf.io,Nesiehr/osf.io,SSJohns/osf.io,kch8qx/osf.io,zachjanicki/osf.io,mluke93/osf.io,pattisdr/osf.io,aaxelb/osf.io,alexschiller/osf.io,binoculars/osf.io,leb2dg/osf.io,icereval/osf.io,mluo613/osf.io,abought/osf.io,emetsger/osf.io,TomBaxter/osf.io,Nesiehr/osf.io,hmoco/osf.io,baylee-d/osf.io,erinspace/osf.io,caseyrollins/osf.io,baylee-d/osf.io,DanielSBrown/osf.io,acshi/osf.io,hmoco/osf.io,cwisecarver/osf.io,zachjanicki/osf.io,kwierman/osf.io,chrisseto/osf.io,SSJohns/osf.io,cslzchen/osf.io,mluo613/osf.io,CenterForOpenScience/osf.io,laurenrevere/osf.io,chrisseto/osf.io,jnayak1/osf.io,RomanZWang/osf.io,leb2dg/osf.io,RomanZWang/osf.io,DanielSBrown/osf.io,hmoco/osf.io,monikagrabowska/osf.io,mattclark/osf.io,cslzchen/osf.io,monikagrabowska/osf.io,monikagrabowska/osf.io,chennan47/osf.io,acshi/osf.io,mluo613/osf.io,kwierman/osf.io,binoculars/osf.io,sloria/osf.io,acshi/osf.io,TomHeatwole/osf.io,laurenrevere/osf.io,rdhyee/osf.io,hmoco/osf.io,pattisdr/osf.io,cwisecarver/osf.io,kwierman/osf.io,SSJohns/osf.io,wearpants/osf.io,RomanZWang/osf.io,HalcyonChimera/osf.io,erinspace/osf.io,mluo613/osf.io,kch8qx/osf.io,caneruguz/osf.io,leb2dg/osf.io,aaxelb/osf.io,amyshi188/osf.io,adlius/osf.io,DanielSBrown/osf.io,caseyrollins/osf.io,felliott/osf.io,amyshi188/osf.io,TomHeatwole/osf.io,jnayak1/osf.io,jnayak1/osf.io,zachjanicki/osf.io,jnayak1/osf.io,zamattiac/osf.io,brianjgeiger/osf.io,crcresearch/osf.io,brianjgeiger/osf.io,emetsger/osf.io,alexschiller/osf.io,TomBaxter/osf.io,felliott/osf.io,caneruguz/osf.io,samchrisinger/osf.io,baylee-d/osf.io,wearpants/osf.io,icereval/osf.io,caneruguz/osf.io,SSJohns/osf.io,Johnetordoff/osf.io,kch8qx/osf.io,binoculars/osf.io,leb2dg/osf.io,zamattiac/osf.io,mluke93/osf.io,chrisseto/osf.io,felliott/osf.io,monikagrabowska/osf.io,brianjgeiger/osf.io,wearpants/osf.io,kch8qx/osf.io,TomHeatwole/osf.io,chrisseto/osf.io,samchrisinger/osf.io,saradbowman/osf.io,emetsger/osf.io,rdhyee/osf.io,alexschiller/osf.io,abought/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,adlius/osf.io,cslzchen/osf.io,rdhyee/osf.io,mluke93/osf.io,HalcyonChimera/osf.io,pattisdr/osf.io,acshi/osf.io,mfraezz/osf.io,amyshi188/osf.io,adlius/osf.io,abought/osf.io,chennan47/osf.io,kch8qx/osf.io,erinspace/osf.io,Johnetordoff/osf.io,zamattiac/osf.io,mluo613/osf.io,acshi/osf.io,HalcyonChimera/osf.io,crcresearch/osf.io,samchrisinger/osf.io,laurenrevere/osf.io,mfraezz/osf.io,zamattiac/osf.io,TomBaxter/osf.io,kwierman/osf.io,cwisecarver/osf.io,caneruguz/osf.io,mattclark/osf.io
|
Add script to restore deleted files
|
# -*- coding: utf-8 -*-
"""Restore a deleted StoredFileNode. If the file was reuploaded, renames the file
to <filename> (restored).<ext>. For example, README.rst would be renamed to README (restored).rst.
python -m scripts.restore_file 123ab --dry
python -m scripts.restore_file 123ab
"""
import sys
import os
import logging
from modularodm.exceptions import KeyExistsException
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.models import Guid
from website.files.models.base import TrashedFileNode
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def rename_file(file_node):
name, ext = os.path.splitext(file_node.name)
new_name = ''.join([name, ' (restored)', ext])
logger.info('Renaming {} to {}'.format(file_node.name, new_name))
file_node.name = new_name
name, ext = os.path.splitext(file_node.materialized_path)
new_mpath = ''.join([name, ' (restored)', ext])
logger.info('Changing materialized_path from {} to {}'.format(file_node.materialized_path, new_mpath))
file_node.materialized_path = new_mpath
file_node.save()
def restore_file(guid):
guid_obj = Guid.load(guid)
trashed_file_node = guid_obj.referent
assert isinstance(trashed_file_node, TrashedFileNode), 'Guid does not point to a trashedfilenode'
logger.info('Loaded trashedfilenode {}'.format(trashed_file_node._id))
try:
logger.info('Calling restore()')
trashed_file_node.restore()
except KeyExistsException: # File with same name exists; user most likely re-uploaded file
        logger.warning('File with name {} exists. Renaming...'.format(trashed_file_node.name))
rename_file(trashed_file_node)
logger.info('Calling restore()')
trashed_file_node.restore()
return True
def main():
init_app(routes=False)
guid = sys.argv[1]
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
with TokuTransaction():
restore_file(guid)
if dry:
raise RuntimeError('Dry run - rolling back transaction')
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to restore deleted files<commit_after>
|
# -*- coding: utf-8 -*-
"""Restore a deleted StoredFileNode. If the file was reuploaded, renames the file
to <filename> (restored).<ext>. For example, README.rst would be renamed to README (restored).rst.
python -m scripts.restore_file 123ab --dry
python -m scripts.restore_file 123ab
"""
import sys
import os
import logging
from modularodm.exceptions import KeyExistsException
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.models import Guid
from website.files.models.base import TrashedFileNode
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def rename_file(file_node):
name, ext = os.path.splitext(file_node.name)
new_name = ''.join([name, ' (restored)', ext])
logger.info('Renaming {} to {}'.format(file_node.name, new_name))
file_node.name = new_name
name, ext = os.path.splitext(file_node.materialized_path)
new_mpath = ''.join([name, ' (restored)', ext])
logger.info('Changing materialized_path from {} to {}'.format(file_node.materialized_path, new_mpath))
file_node.materialized_path = new_mpath
file_node.save()
def restore_file(guid):
guid_obj = Guid.load(guid)
trashed_file_node = guid_obj.referent
assert isinstance(trashed_file_node, TrashedFileNode), 'Guid does not point to a trashedfilenode'
logger.info('Loaded trashedfilenode {}'.format(trashed_file_node._id))
try:
logger.info('Calling restore()')
trashed_file_node.restore()
except KeyExistsException: # File with same name exists; user most likely re-uploaded file
        logger.warning('File with name {} exists. Renaming...'.format(trashed_file_node.name))
rename_file(trashed_file_node)
logger.info('Calling restore()')
trashed_file_node.restore()
return True
def main():
init_app(routes=False)
guid = sys.argv[1]
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
with TokuTransaction():
restore_file(guid)
if dry:
raise RuntimeError('Dry run - rolling back transaction')
if __name__ == "__main__":
main()
|
Add script to restore deleted files# -*- coding: utf-8 -*-
"""Restore a deleted StoredFileNode. If the file was reuploaded, renames the file
to <filename> (restored).<ext>. For example, README.rst would be renamed to README (restored).rst.
python -m scripts.restore_file 123ab --dry
python -m scripts.restore_file 123ab
"""
import sys
import os
import logging
from modularodm.exceptions import KeyExistsException
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.models import Guid
from website.files.models.base import TrashedFileNode
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def rename_file(file_node):
name, ext = os.path.splitext(file_node.name)
new_name = ''.join([name, ' (restored)', ext])
logger.info('Renaming {} to {}'.format(file_node.name, new_name))
file_node.name = new_name
name, ext = os.path.splitext(file_node.materialized_path)
new_mpath = ''.join([name, ' (restored)', ext])
logger.info('Changing materialized_path from {} to {}'.format(file_node.materialized_path, new_mpath))
file_node.materialized_path = new_mpath
file_node.save()
def restore_file(guid):
guid_obj = Guid.load(guid)
trashed_file_node = guid_obj.referent
assert isinstance(trashed_file_node, TrashedFileNode), 'Guid does not point to a trashedfilenode'
logger.info('Loaded trashedfilenode {}'.format(trashed_file_node._id))
try:
logger.info('Calling restore()')
trashed_file_node.restore()
except KeyExistsException: # File with same name exists; user most likely re-uploaded file
logger.warning('File with name {} exists. Renaming...'.format(trashed_file_node.name))
rename_file(trashed_file_node)
logger.info('Calling restore()')
trashed_file_node.restore()
return True
def main():
init_app(routes=False)
guid = sys.argv[1]
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
with TokuTransaction():
restore_file(guid)
if dry:
raise RuntimeError('Dry run - rolling back transaction')
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to restore deleted files<commit_after># -*- coding: utf-8 -*-
"""Restore a deleted StoredFileNode. If the file was reuploaded, renames the file
to <filename> (restored).<ext>. For example, README.rst would be renamed to README (restored).rst.
python -m scripts.restore_file 123ab --dry
python -m scripts.restore_file 123ab
"""
import sys
import os
import logging
from modularodm.exceptions import KeyExistsException
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.models import Guid
from website.files.models.base import TrashedFileNode
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def rename_file(file_node):
name, ext = os.path.splitext(file_node.name)
new_name = ''.join([name, ' (restored)', ext])
logger.info('Renaming {} to {}'.format(file_node.name, new_name))
file_node.name = new_name
name, ext = os.path.splitext(file_node.materialized_path)
new_mpath = ''.join([name, ' (restored)', ext])
logger.info('Changing materialized_path from {} to {}'.format(file_node.materialized_path, new_mpath))
file_node.materialized_path = new_mpath
file_node.save()
def restore_file(guid):
guid_obj = Guid.load(guid)
trashed_file_node = guid_obj.referent
assert isinstance(trashed_file_node, TrashedFileNode), 'Guid does not point to a trashedfilenode'
logger.info('Loaded trashedfilenode {}'.format(trashed_file_node._id))
try:
logger.info('Calling restore()')
trashed_file_node.restore()
except KeyExistsException: # File with same name exists; user most likely re-uploaded file
logger.warning('File with name {} exists. Renaming...'.format(trashed_file_node.name))
rename_file(trashed_file_node)
logger.info('Calling restore()')
trashed_file_node.restore()
return True
def main():
init_app(routes=False)
guid = sys.argv[1]
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
with TokuTransaction():
restore_file(guid)
if dry:
raise RuntimeError('Dry run - rolling back transaction')
if __name__ == "__main__":
main()
|
|
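# Editor's note: a minimal sketch (not part of the recorded commit) of the
# rename behavior used by rename_file above; the file names are hypothetical.
import os

def restored_name(name):
    # Mirrors rename_file: insert " (restored)" before the final suffix.
    base, ext = os.path.splitext(name)
    return ''.join([base, ' (restored)', ext])

assert restored_name('README.rst') == 'README (restored).rst'
# splitext only strips the last suffix, so compound extensions split there:
assert restored_name('data.tar.gz') == 'data.tar (restored).gz'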
d1cf10c0ad29f310a90d9ada7af4f9098344183a
|
src/data/retrieve_ned_files.py
|
src/data/retrieve_ned_files.py
|
# Copyright 2016 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script retrieves the NED ArcFloat files from the National Elevation
# Map at 1 arc-second (~30m) resolution. It writes them into the data/ned
# directory. If the .zip files already exist, the script will only
# download files if they are newer than the local versions.
import ftputil
import os
import re
# Retrieve the desired NED zip files from the USGS FTP site.
def FindArcFloatFilenames(usgs):
files = usgs.listdir('vdelivery/Datasets/Staged/Elevation/1/GridFloat/')
print 'Found %d files in USGS ftp dir' % len(files)
matches = []
for f in files:
if re.match('USGS_NED_1_[ns]\d{1,3}[ew]\d{1,3}_GridFloat.zip$', f):
matches.append(f)
if re.match('[ns]\d{1,3}[ew]\d{1,3}.zip$', f):
matches.append(f)
print 'Found %d matching elevation tiles in USGS ftp dir' % len(matches)
return matches
# Fetch via FTP all the qualifying NED 1-arc-second tiles. Writes them
# to the current directory.
def RetrieveElevationTiles():
ned = ftputil.FTPHost('rockyftp.cr.usgs.gov', 'anonymous', '')
files = FindArcFloatFilenames(ned)
for f in files:
print 'Downloading %s' % f
ned.download_if_newer('vdelivery/Datasets/Staged/Elevation/1/GridFloat/' + f, f)
ned.close()
# Find the directory of this script.
dir = os.path.dirname(os.path.realpath(__file__))
rootDir = os.path.dirname(os.path.dirname(dir))
dest = os.path.join(os.path.join(rootDir, 'data'), 'ned')
print 'Retrieving USGS 1-arc-second tiles to dir=%s' % dest
if not os.path.exists(dest):
os.makedirs(dest)
os.chdir(dest)
RetrieveElevationTiles()
|
Add a script to retrieve NED elevation map files.
|
Add a script to retrieve NED elevation map files.
Retrieves base tileset zip files and updates. According to the USGS
NED website, the longer filenames are updates to the base tileset.
These are frequently replacing existing tiles, but may also augment
the dataset by filling in gaps in the coverage.
The coverage is currently (fall 2016) over the continental US, HI,
and many US territories. There are some missing areas in AK.
|
Python
|
apache-2.0
|
Wireless-Innovation-Forum/Spectrum-Access-System,krlinus/Spectrum-Access-System,Wireless-Innovation-Forum/Spectrum-Access-System,gregbillock/Spectrum-Access-System,gregbillock/Spectrum-Access-System,gregbillock/Spectrum-Access-System,Wireless-Innovation-Forum/Spectrum-Access-System,Wireless-Innovation-Forum/Spectrum-Access-System,krlinus/Spectrum-Access-System,krlinus/Spectrum-Access-System,krlinus/Spectrum-Access-System,gregbillock/Spectrum-Access-System
|
Add a script to retrieve NED elevation map files.
Retrieves base tileset zip files and updates. According to the USGS
NED website, the longer filenames are updates to the base tileset.
These are frequently replacing existing tiles, but may also augment
the dataset by filling in gaps in the coverage.
The coverage is currently (fall 2016) over the continental US, HI,
and many US territories. There are some missing areas in AK.
|
# Copyright 2016 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script retrieves the NED ArcFloat files from the National Elevation
# Map at 1 arc-second (~30m) resolution. It writes them into the data/ned
# directory. If the .zip files already exist, the script will only
# download files if they are newer than the local versions.
import ftputil
import os
import re
# Retrieve the desired NED zip files from the USGS FTP site.
def FindArcFloatFilenames(usgs):
files = usgs.listdir('vdelivery/Datasets/Staged/Elevation/1/GridFloat/')
print 'Found %d files in USGS ftp dir' % len(files)
matches = []
for f in files:
if re.match('USGS_NED_1_[ns]\d{1,3}[ew]\d{1,3}_GridFloat.zip$', f):
matches.append(f)
if re.match('[ns]\d{1,3}[ew]\d{1,3}.zip$', f):
matches.append(f)
print 'Found %d matching elevation tiles in USGS ftp dir' % len(matches)
return matches
# Fetch via FTP all the qualifying NED 1-arc-second tiles. Writes them
# to the current directory.
def RetrieveElevationTiles():
ned = ftputil.FTPHost('rockyftp.cr.usgs.gov', 'anonymous', '')
files = FindArcFloatFilenames(ned)
for f in files:
print 'Downloading %s' % f
ned.download_if_newer('vdelivery/Datasets/Staged/Elevation/1/GridFloat/' + f, f)
ned.close()
# Find the directory of this script.
dir = os.path.dirname(os.path.realpath(__file__))
rootDir = os.path.dirname(os.path.dirname(dir))
dest = os.path.join(os.path.join(rootDir, 'data'), 'ned')
print 'Retrieving USGS 1-arc-second tiles to dir=%s' % dest
if not os.path.exists(dest):
os.makedirs(dest)
os.chdir(dest)
RetrieveElevationTiles()
|
<commit_before><commit_msg>Add a script to retrieve NED elevation map files.
Retrieves base tileset zip files and updates. According to the USGS
NED website, the longer filenames are updates to the base tileset.
These are frequently replacing existing tiles, but may also augment
the dataset by filling in gaps in the coverage.
The coverage is currently (fall 2016) over the continental US, HI,
and many US territories. There are some missing areas in AK.<commit_after>
|
# Copyright 2016 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script retrieves the NED ArcFloat files from the National Elevation
# Map at 1 arc-second (~30m) resolution. It writes them into the data/ned
# directory. If the .zip files already exist, the script will only
# download files if they are newer than the local versions.
import ftputil
import os
import re
# Retrieve the desired NED zip files from the USGS FTP site.
def FindArcFloatFilenames(usgs):
files = usgs.listdir('vdelivery/Datasets/Staged/Elevation/1/GridFloat/')
print 'Found %d files in USGS ftp dir' % len(files)
matches = []
for f in files:
if re.match('USGS_NED_1_[ns]\d{1,3}[ew]\d{1,3}_GridFloat.zip$', f):
matches.append(f)
if re.match('[ns]\d{1,3}[ew]\d{1,3}.zip$', f):
matches.append(f)
print 'Found %d matching elevation tiles in USGS ftp dir' % len(matches)
return matches
# Fetch via FTP all the qualifying NED 1-arc-second tiles. Writes them
# to the current directory.
def RetrieveElevationTiles():
ned = ftputil.FTPHost('rockyftp.cr.usgs.gov', 'anonymous', '')
files = FindArcFloatFilenames(ned)
for f in files:
print 'Downloading %s' % f
ned.download_if_newer('vdelivery/Datasets/Staged/Elevation/1/GridFloat/' + f, f)
ned.close()
# Find the directory of this script.
dir = os.path.dirname(os.path.realpath(__file__))
rootDir = os.path.dirname(os.path.dirname(dir))
dest = os.path.join(os.path.join(rootDir, 'data'), 'ned')
print 'Retrieving USGS 1-arc-second tiles to dir=%s' % dest
if not os.path.exists(dest):
os.makedirs(dest)
os.chdir(dest)
RetrieveElevationTiles()
|
Add a script to retrieve NED elevation map files.
Retrieves base tileset zip files and updates. According to the USGS
NED website, the longer filenames are updates to the base tileset.
These are frequently replacing existing tiles, but may also augment
the dataset by filling in gaps in the coverage.
The coverage is currently (fall 2016) over the continental US, HI,
and many US territories. There are some missing areas in AK.# Copyright 2016 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script retrieves the NED ArcFloat files from the National Elevation
# Map at 1 arc-second (~30m) resolution. It writes them into the data/ned
# directory. If the .zip files already exist, the script will only
# download files if they are newer than the local versions.
import ftputil
import os
import re
# Retrieve the desired NED zip files from the USGS FTP site.
def FindArcFloatFilenames(usgs):
files = usgs.listdir('vdelivery/Datasets/Staged/Elevation/1/GridFloat/')
print 'Found %d files in USGS ftp dir' % len(files)
matches = []
for f in files:
if re.match('USGS_NED_1_[ns]\d{1,3}[ew]\d{1,3}_GridFloat.zip$', f):
matches.append(f)
if re.match('[ns]\d{1,3}[ew]\d{1,3}.zip$', f):
matches.append(f)
print 'Found %d matching elevation tiles in USGS ftp dir' % len(matches)
return matches
# Fetch via FTP all the qualifying NED 1-arc-second tiles. Writes them
# to the current directory.
def RetrieveElevationTiles():
ned = ftputil.FTPHost('rockyftp.cr.usgs.gov', 'anonymous', '')
files = FindArcFloatFilenames(ned)
for f in files:
print 'Downloading %s' % f
ned.download_if_newer('vdelivery/Datasets/Staged/Elevation/1/GridFloat/' + f, f)
ned.close()
# Find the directory of this script.
dir = os.path.dirname(os.path.realpath(__file__))
rootDir = os.path.dirname(os.path.dirname(dir))
dest = os.path.join(os.path.join(rootDir, 'data'), 'ned')
print 'Retrieving USGS 1-arc-second tiles to dir=%s' % dest
if not os.path.exists(dest):
os.makedirs(dest)
os.chdir(dest)
RetrieveElevationTiles()
|
<commit_before><commit_msg>Add a script to retrieve NED elevation map files.
Retrieves base tileset zip files and updates. According to the USGS
NED website, the longer filenames are updates to the base tileset.
These are frequently replacing existing tiles, but may also augment
the dataset by filling in gaps in the coverage.
The coverage is currently (fall 2016) over the continental US, HI,
and many US territories. There are some missing areas in AK.<commit_after># Copyright 2016 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script retrieves the NED ArcFloat files from the National Elevation
# Map at 1 arc-second (~30m) resolution. It writes them into the data/ned
# directory. If the .zip files already exist, the script will only
# download files if they are newer than the local versions.
import ftputil
import os
import re
# Retrieve the desired NED zip files from the USGS FTP site.
def FindArcFloatFilenames(usgs):
files = usgs.listdir('vdelivery/Datasets/Staged/Elevation/1/GridFloat/')
print 'Found %d files in USGS ftp dir' % len(files)
matches = []
for f in files:
if re.match('USGS_NED_1_[ns]\d{1,3}[ew]\d{1,3}_GridFloat.zip$', f):
matches.append(f)
if re.match('[ns]\d{1,3}[ew]\d{1,3}.zip$', f):
matches.append(f)
print 'Found %d matching elevation tiles in USGS ftp dir' % len(matches)
return matches
# Fetch via FTP all the qualifying NED 1-arc-second tiles. Writes them
# to the current directory.
def RetrieveElevationTiles():
ned = ftputil.FTPHost('rockyftp.cr.usgs.gov', 'anonymous', '')
files = FindArcFloatFilenames(ned)
for f in files:
print 'Downloading %s' % f
ned.download_if_newer('vdelivery/Datasets/Staged/Elevation/1/GridFloat/' + f, f)
ned.close()
# Find the directory of this script.
dir = os.path.dirname(os.path.realpath(__file__))
rootDir = os.path.dirname(os.path.dirname(dir))
dest = os.path.join(os.path.join(rootDir, 'data'), 'ned')
print 'Retrieving USGS 1-arc-second tiles to dir=%s' % dest
if not os.path.exists(dest):
os.makedirs(dest)
os.chdir(dest)
RetrieveElevationTiles()
|
|
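# Editor's note: a hedged demonstration (not part of the recorded commit) of
# which filenames the two patterns in FindArcFloatFilenames accept; the names
# are hypothetical but follow the base/updated schemes the message describes.
import re

BASE = re.compile(r'[ns]\d{1,3}[ew]\d{1,3}.zip$')                         # base tileset
UPDATE = re.compile(r'USGS_NED_1_[ns]\d{1,3}[ew]\d{1,3}_GridFloat.zip$')  # updated tiles

assert BASE.match('n40w105.zip')
assert UPDATE.match('USGS_NED_1_n40w105_GridFloat.zip')
assert BASE.match('n40w105.zip.md5') is None  # '$' anchors the match at the end
# Note the dot before 'zip' is unescaped, so it matches any character, not just '.'.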
0144fda08af4513fb03be59b4c3dc0a13373a806
|
via/views/blocker.py
|
via/views/blocker.py
|
"""View decorators to integrate with checkmate's API."""
import logging
from checkmatelib import CheckmateClient, CheckmateException
from pyramid.httpexceptions import HTTPTemporaryRedirect
logger = logging.getLogger(__name__)
def checkmate_block(view, url_param="url", allow_all=True):
"""Intended to be used as a decorator for pyramid views.
The view must accept a url as a query param.
:param url_param: name of the query param that contains the URL to check
:param allow_all: Check against checkmate's allow list (True) or not.
"""
def view_wrapper(context, request):
checkmate = CheckmateClient(request.registry.settings["checkmate_url"])
url = request.params[url_param]
blocked = None
try:
blocked = checkmate.check_url(url, allow_all=allow_all)
except CheckmateException:
logger.exception("Failed to check url against checkmate")
if blocked:
return HTTPTemporaryRedirect(location=blocked.presentation_url)
return view(context, request)
return view_wrapper
|
Add decorator to test url against checkmate
|
Add decorator to test url against checkmate
|
Python
|
bsd-2-clause
|
hypothesis/via,hypothesis/via,hypothesis/via
|
Add decorator to test url against checkmate
|
"""View decorators to integrate with checkmate's API."""
import logging
from checkmatelib import CheckmateClient, CheckmateException
from pyramid.httpexceptions import HTTPTemporaryRedirect
logger = logging.getLogger(__name__)
def checkmate_block(view, url_param="url", allow_all=True):
"""Intended to be used as a decorator for pyramid views.
The view must accept a url as a query param.
:param url_param: name of the query param that contains the URL to check
:param allow_all: Check against checkmate's allow list (True) or not.
"""
def view_wrapper(context, request):
checkmate = CheckmateClient(request.registry.settings["checkmate_url"])
url = request.params[url_param]
blocked = None
try:
blocked = checkmate.check_url(url, allow_all=allow_all)
except CheckmateException:
logger.exception("Failed to check url against checkmate")
if blocked:
return HTTPTemporaryRedirect(location=blocked.presentation_url)
return view(context, request)
return view_wrapper
|
<commit_before><commit_msg>Add decorator to test url against checkmate<commit_after>
|
"""View decorators to integrate with checkmate's API."""
import logging
from checkmatelib import CheckmateClient, CheckmateException
from pyramid.httpexceptions import HTTPTemporaryRedirect
logger = logging.getLogger(__name__)
def checkmate_block(view, url_param="url", allow_all=True):
"""Intended to be used as a decorator for pyramid views.
The view must accept a url as a query param.
:param url_param: name of the query param that contains the URL to check
:param allow_all: Check against checkmate's allow list (True) or not.
"""
def view_wrapper(context, request):
checkmate = CheckmateClient(request.registry.settings["checkmate_url"])
url = request.params[url_param]
blocked = None
try:
blocked = checkmate.check_url(url, allow_all=allow_all)
except CheckmateException:
logger.exception("Failed to check url against checkmate")
if blocked:
return HTTPTemporaryRedirect(location=blocked.presentation_url)
return view(context, request)
return view_wrapper
|
Add decorator to test url against checkmate"""View decorators to integrate with checkmate's API."""
import logging
from checkmatelib import CheckmateClient, CheckmateException
from pyramid.httpexceptions import HTTPTemporaryRedirect
logger = logging.getLogger(__name__)
def checkmate_block(view, url_param="url", allow_all=True):
"""Intended to be used as a decorator for pyramid views.
The view must accept a url as a query param.
:param url_param: name of the query param that contains the URL to check
:param allow_all: Check against checkmate's allow list (True) or not.
"""
def view_wrapper(context, request):
checkmate = CheckmateClient(request.registry.settings["checkmate_url"])
url = request.params[url_param]
blocked = None
try:
blocked = checkmate.check_url(url, allow_all=allow_all)
except CheckmateException:
logger.exception("Failed to check url against checkmate")
if blocked:
return HTTPTemporaryRedirect(location=blocked.presentation_url)
return view(context, request)
return view_wrapper
|
<commit_before><commit_msg>Add decorator to test url against checkmate<commit_after>"""View decorators to integrate with checkmate's API."""
import logging
from checkmatelib import CheckmateClient, CheckmateException
from pyramid.httpexceptions import HTTPTemporaryRedirect
logger = logging.getLogger(__name__)
def checkmate_block(view, url_param="url", allow_all=True):
"""Intended to be used as a decorator for pyramid views.
The view must accept a url as a query param.
:param url_param: name of the query param that contains the URL to check
:param allow_all: Check against checkmate's allow list (True) or not.
"""
def view_wrapper(context, request):
checkmate = CheckmateClient(request.registry.settings["checkmate_url"])
url = request.params[url_param]
blocked = None
try:
blocked = checkmate.check_url(url, allow_all=allow_all)
except CheckmateException:
logger.exception("Failed to check url against checkmate")
if blocked:
return HTTPTemporaryRedirect(location=blocked.presentation_url)
return view(context, request)
return view_wrapper
|
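# Editor's note: a hedged usage sketch (not part of the recorded commit).
# raw_view is a hypothetical Pyramid view; checkmate_block is the decorator
# defined in the record above.
import functools
from pyramid.response import Response

def raw_view(context, request):
    # Reached only when checkmate does not block request.params["url"].
    return Response('ok')

guarded_view = checkmate_block(raw_view)  # equivalent to bare @checkmate_block
# checkmate_block is a plain decorator, not a decorator factory, so non-default
# options need partial application rather than @checkmate_block(allow_all=False):
strict_view = functools.partial(checkmate_block, allow_all=False)(raw_view)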
|
b6cabee2b7697b532e5ba625123e904ffd838d6e
|
data_analysis/analyze_results.py
|
data_analysis/analyze_results.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Usage: analyze_results.py <result_data>
"""
from docopt import docopt
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
# Useful way to run the algorithm over multiple files
#
# for file in `ls ~/Desktop/complete_results/*/*/*/*.csv | grep prediction.csv | sort`; do ./analyze_results.py $file; done;
#
if __name__ == "__main__":
arguments = docopt(__doc__)
file_path = arguments["<result_data>"]
results = pd.read_csv(file_path)
# Calculate some metrics
mse = mean_squared_error(results["incidence"].fillna(0), results["prediction"])
r2 = r2_score(results["incidence"].fillna(0), results["prediction"])
pcc = np.corrcoef(results["incidence"].fillna(0), results["prediction"], rowvar=False)[0][1]
print(file_path +","+ str(mse)+ "," +str(r2)+","+ str(pcc))
|
Add simple script to print some metrics.
|
[data_analysis] Add simple script to print some metrics.
|
Python
|
mit
|
geektoni/Influenza-Like-Illness-Predictor,geektoni/Influenza-Like-Illness-Predictor
|
[data_analysis] Add simple script to print some metrics.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Usage: analyze_results.py <result_data>
"""
from docopt import docopt
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
# Useful way to run the algorithm over multiple files
#
# for file in `ls ~/Desktop/complete_results/*/*/*/*.csv | grep prediction.csv | sort`; do ./analyze_results.py $file; done;
#
if __name__ == "__main__":
arguments = docopt(__doc__)
file_path = arguments["<result_data>"]
results = pd.read_csv(file_path)
# Calculate some metrics
mse = mean_squared_error(results["incidence"].fillna(0), results["prediction"])
r2 = r2_score(results["incidence"].fillna(0), results["prediction"])
pcc = np.corrcoef(results["incidence"].fillna(0), results["prediction"], rowvar=False)[0][1]
print(file_path +","+ str(mse)+ "," +str(r2)+","+ str(pcc))
|
<commit_before><commit_msg>[data_analysis] Add simple script to print some metrics.<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Usage: analyze_results.py <result_data>
"""
from docopt import docopt
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
# Useful way to run the algorithm over multiple files
#
# for file in `ls ~/Desktop/complete_results/*/*/*/*.csv | grep prediction.csv | sort`; do ./analyze_results.py $file; done;
#
if __name__ == "__main__":
arguments = docopt(__doc__)
file_path = arguments["<result_data>"]
results = pd.read_csv(file_path)
# Calculate some metrics
mse = mean_squared_error(results["incidence"].fillna(0), results["prediction"])
r2 = r2_score(results["incidence"].fillna(0), results["prediction"])
pcc = np.corrcoef(results["incidence"].fillna(0), results["prediction"], rowvar=False)[0][1]
print(file_path +","+ str(mse)+ "," +str(r2)+","+ str(pcc))
|
[data_analysis] Add simple script to print some metrics.#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Usage: analyze_results.py <result_data>
"""
from docopt import docopt
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
# Useful way to run the algorithm over multiple files
#
# for file in `ls ~/Desktop/complete_results/*/*/*/*.csv | grep prediction.csv | sort`; do ./analyze_results.py $file; done;
#
if __name__ == "__main__":
arguments = docopt(__doc__)
file_path = arguments["<result_data>"]
results = pd.read_csv(file_path)
# Calculate some metrics
mse = mean_squared_error(results["incidence"].fillna(0), results["prediction"])
r2 = r2_score(results["incidence"].fillna(0), results["prediction"])
pcc = np.corrcoef(results["incidence"].fillna(0), results["prediction"], rowvar=False)[0][1]
print(file_path +","+ str(mse)+ "," +str(r2)+","+ str(pcc))
|
<commit_before><commit_msg>[data_analysis] Add simple script to print some metrics.<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Usage: analyze_results.py <result_data>
"""
from docopt import docopt
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
# Useful way to run the algorithm over multiple files
#
# for file in `ls ~/Desktop/complete_results/*/*/*/*.csv | grep prediction.csv | sort`; do ./analyze_results.py $file; done;
#
if __name__ == "__main__":
arguments = docopt(__doc__)
file_path = arguments["<result_data>"]
results = pd.read_csv(file_path)
# Calculate some metrics
mse = mean_squared_error(results["incidence"].fillna(0), results["prediction"])
r2 = r2_score(results["incidence"].fillna(0), results["prediction"])
pcc = np.corrcoef(results["incidence"].fillna(0), results["prediction"], rowvar=False)[0][1]
print(file_path +","+ str(mse)+ "," +str(r2)+","+ str(pcc))
|
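# Editor's note: a toy illustration (not part of the recorded commit) of the
# three metrics computed above; the numbers are made up. For two 1-D inputs,
# numpy treats each array as one variable, so the script's rowvar=False flag
# is effectively a no-op.
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score

y_true = np.array([1.0, 2.0, 3.0, 4.0])
y_pred = np.array([1.1, 1.9, 3.2, 3.8])

mse = mean_squared_error(y_true, y_pred)   # mean of squared residuals
r2 = r2_score(y_true, y_pred)              # 1 - SS_res / SS_tot
pcc = np.corrcoef(y_true, y_pred)[0][1]    # Pearson correlation coefficient
print(','.join(str(m) for m in (mse, r2, pcc)))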
|
4c91252a47b8c3e096897d994df1fc5e4d6c9a9f
|
databroker/tests/test_queries.py
|
databroker/tests/test_queries.py
|
import pickle
import pytest
from ..queries import TimeRange
def test_time_range():
empty = TimeRange()
assert empty == dict(empty) == empty.query == {}
assert TimeRange(**empty.kwargs) == empty
since = TimeRange(since='2020')
expected = {'time': {'$gte': 1577854800.0}}
assert since == dict(since) == since.query == expected
assert TimeRange(**since.kwargs) == since
until = TimeRange(until='2020')
expected = {'time': {'$lt': 1577854800.0}}
assert until == dict(until) == until.query == expected
assert TimeRange(**until.kwargs) == until
both = TimeRange(since='2020', until='2021')
expected = {'time': {'$gte': 1577854800.0, '$lt': 1609477200.0}}
assert both == dict(both) == both.query == expected
assert TimeRange(**both.kwargs) == both
with pytest.raises(ValueError):
# since must not be greater than until
TimeRange(since='2021', until='2020')
with_tz = TimeRange(since='2020-01-01 9:52', timezone='Europe/Amsterdam')
expected = {'time': {'$gte': 1577868720.0}}
assert with_tz == dict(with_tz) == with_tz.query == expected
assert TimeRange(**with_tz.kwargs) == with_tz
def test_replace():
"Test the Query.replace() method using TimeRange."
original = TimeRange(since='2020', until='2021')
clone = original.replace()
assert original == clone
replaced = original.replace(since='1999')
assert replaced != original
def test_pickle():
"Ensure that query objects are pickle-able."
q = TimeRange(since='2020-01-01 9:52')
serialized = pickle.dumps(q)
deserialized = pickle.loads(serialized)
assert q == deserialized
|
Add tests for query objects.
|
Add tests for query objects.
|
Python
|
bsd-3-clause
|
ericdill/databroker,ericdill/databroker
|
Add tests for query objects.
|
import pickle
import pytest
from ..queries import TimeRange
def test_time_range():
empty = TimeRange()
assert empty == dict(empty) == empty.query == {}
assert TimeRange(**empty.kwargs) == empty
since = TimeRange(since='2020')
expected = {'time': {'$gte': 1577854800.0}}
assert since == dict(since) == since.query == expected
assert TimeRange(**since.kwargs) == since
until = TimeRange(until='2020')
expected = {'time': {'$lt': 1577854800.0}}
assert until == dict(until) == until.query == expected
assert TimeRange(**until.kwargs) == until
both = TimeRange(since='2020', until='2021')
expected = {'time': {'$gte': 1577854800.0, '$lt': 1609477200.0}}
assert both == dict(both) == both.query == expected
assert TimeRange(**both.kwargs) == both
with pytest.raises(ValueError):
# since must not be greater than until
TimeRange(since='2021', until='2020')
with_tz = TimeRange(since='2020-01-01 9:52', timezone='Europe/Amsterdam')
expected = {'time': {'$gte': 1577868720.0}}
assert with_tz == dict(with_tz) == with_tz.query == expected
assert TimeRange(**with_tz.kwargs) == with_tz
def test_replace():
"Test the Query.replace() method using TimeRange."
original = TimeRange(since='2020', until='2021')
clone = original.replace()
assert original == clone
replaced = original.replace(since='1999')
assert replaced != original
def test_pickle():
"Ensure that query objects are pickle-able."
q = TimeRange(since='2020-01-01 9:52')
serialized = pickle.dumps(q)
deserialized = pickle.loads(serialized)
assert q == deserialized
|
<commit_before><commit_msg>Add tests for query objects.<commit_after>
|
import pickle
import pytest
from ..queries import TimeRange
def test_time_range():
empty = TimeRange()
assert empty == dict(empty) == empty.query == {}
assert TimeRange(**empty.kwargs) == empty
since = TimeRange(since='2020')
expected = {'time': {'$gte': 1577854800.0}}
assert since == dict(since) == since.query == expected
assert TimeRange(**since.kwargs) == since
until = TimeRange(until='2020')
expected = {'time': {'$lt': 1577854800.0}}
assert until == dict(until) == until.query == expected
assert TimeRange(**until.kwargs) == until
both = TimeRange(since='2020', until='2021')
expected = {'time': {'$gte': 1577854800.0, '$lt': 1609477200.0}}
assert both == dict(both) == both.query == expected
assert TimeRange(**both.kwargs) == both
with pytest.raises(ValueError):
# since must not be greater than until
TimeRange(since='2021', until='2020')
with_tz = TimeRange(since='2020-01-01 9:52', timezone='Europe/Amsterdam')
expected = {'time': {'$gte': 1577868720.0}}
assert with_tz == dict(with_tz) == with_tz.query == expected
assert TimeRange(**with_tz.kwargs) == with_tz
def test_replace():
"Test the Query.replace() method using TimeRange."
original = TimeRange(since='2020', until='2021')
clone = original.replace()
assert original == clone
replaced = original.replace(since='1999')
assert replaced != original
def test_pickle():
"Ensure that query objects are pickle-able."
q = TimeRange(since='2020-01-01 9:52')
serialized = pickle.dumps(q)
deserialized = pickle.loads(serialized)
assert q == deserialized
|
Add tests for query objects.import pickle
import pytest
from ..queries import TimeRange
def test_time_range():
empty = TimeRange()
assert empty == dict(empty) == empty.query == {}
assert TimeRange(**empty.kwargs) == empty
since = TimeRange(since='2020')
expected = {'time': {'$gte': 1577854800.0}}
assert since == dict(since) == since.query == expected
assert TimeRange(**since.kwargs) == since
until = TimeRange(until='2020')
expected = {'time': {'$lt': 1577854800.0}}
assert until == dict(until) == until.query == expected
assert TimeRange(**until.kwargs) == until
both = TimeRange(since='2020', until='2021')
expected = {'time': {'$gte': 1577854800.0, '$lt': 1609477200.0}}
assert both == dict(both) == both.query == expected
assert TimeRange(**both.kwargs) == both
with pytest.raises(ValueError):
# since must not be greater than until
TimeRange(since='2021', until='2020')
with_tz = TimeRange(since='2020-01-01 9:52', timezone='Europe/Amsterdam')
expected = {'time': {'$gte': 1577868720.0}}
assert with_tz == dict(with_tz) == with_tz.query == expected
assert TimeRange(**with_tz.kwargs) == with_tz
def test_replace():
"Test the Query.replace() method using TimeRange."
original = TimeRange(since='2020', until='2021')
clone = original.replace()
assert original == clone
replaced = original.replace(since='1999')
assert replaced != original
def test_pickle():
"Ensure that query objects are pickle-able."
q = TimeRange(since='2020-01-01 9:52')
serialized = pickle.dumps(q)
deserialized = pickle.loads(serialized)
assert q == deserialized
|
<commit_before><commit_msg>Add tests for query objects.<commit_after>import pickle
import pytest
from ..queries import TimeRange
def test_time_range():
empty = TimeRange()
assert empty == dict(empty) == empty.query == {}
assert TimeRange(**empty.kwargs) == empty
since = TimeRange(since='2020')
expected = {'time': {'$gte': 1577854800.0}}
assert since == dict(since) == since.query == expected
assert TimeRange(**since.kwargs) == since
until = TimeRange(until='2020')
expected = {'time': {'$lt': 1577854800.0}}
assert until == dict(until) == until.query == expected
assert TimeRange(**until.kwargs) == until
both = TimeRange(since='2020', until='2021')
expected = {'time': {'$gte': 1577854800.0, '$lt': 1609477200.0}}
assert both == dict(both) == both.query == expected
assert TimeRange(**both.kwargs) == both
with pytest.raises(ValueError):
# since must not be greater than until
TimeRange(since='2021', until='2020')
with_tz = TimeRange(since='2020-01-01 9:52', timezone='Europe/Amsterdam')
expected = {'time': {'$gte': 1577868720.0}}
assert with_tz == dict(with_tz) == with_tz.query == expected
assert TimeRange(**with_tz.kwargs) == with_tz
def test_replace():
"Test the Query.replace() method using TimeRange."
original = TimeRange(since='2020', until='2021')
clone = original.replace()
assert original == clone
replaced = original.replace(since='1999')
assert replaced != original
def test_pickle():
"Ensure that query objects are pickle-able."
q = TimeRange(since='2020-01-01 9:52')
serialized = pickle.dumps(q)
deserialized = pickle.loads(serialized)
assert q == deserialized
|
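# Editor's note: the expected dicts asserted above are ordinary MongoDB filter
# documents; a hedged sketch of how one would be consumed ("runs" below is a
# hypothetical pymongo collection handle, not part of the recorded commit).
query = {'time': {'$gte': 1577854800.0, '$lt': 1609477200.0}}
# runs.find(query)  # would select documents whose epoch 'time' falls in 2020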
|
3639f1677199da5df01930d3f2a806582e208aad
|
test/test_bad_hostgroup.py
|
test/test_bad_hostgroup.py
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Sebastien Coavoux, s.coavoux@free.frc
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class TestBadHostGroupConf(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_bad_hg_conf.cfg')
def test_bad_conf(self):
self.assert_(not self.conf.conf_is_correct)
self.assert_(self.any_log_match("itemgroup::.* as hostgroup, got unknown member BADMEMBERHG"))
self.assert_(not self.any_log_match("itemgroup::.* as servicegroup, got unknown member BADMEMBERHG"))
if __name__ == '__main__':
unittest.main()
|
Test - Add test for fix
|
Enh: Test - Add test for fix
|
Python
|
agpl-3.0
|
tal-nino/shinken,mohierf/shinken,staute/shinken_package,dfranco/shinken,titilambert/alignak,naparuba/shinken,lets-software/shinken,savoirfairelinux/shinken,Aimage/shinken,rednach/krill,KerkhoffTechnologies/shinken,tal-nino/shinken,rledisez/shinken,savoirfairelinux/shinken,Simage/shinken,peeyush-tm/shinken,peeyush-tm/shinken,KerkhoffTechnologies/shinken,fpeyre/shinken,Aimage/shinken,mohierf/shinken,peeyush-tm/shinken,peeyush-tm/shinken,KerkhoffTechnologies/shinken,Aimage/shinken,KerkhoffTechnologies/shinken,kaji-project/shinken,mohierf/shinken,Simage/shinken,ddurieux/alignak,ddurieux/alignak,Simage/shinken,kaji-project/shinken,gst/alignak,Aimage/shinken,naparuba/shinken,claneys/shinken,tal-nino/shinken,staute/shinken_package,Simage/shinken,tal-nino/shinken,dfranco/shinken,staute/shinken_deb,kaji-project/shinken,rledisez/shinken,ddurieux/alignak,fpeyre/shinken,claneys/shinken,titilambert/alignak,staute/shinken_package,lets-software/shinken,mohierf/shinken,staute/shinken_package,tal-nino/shinken,staute/shinken_deb,mohierf/shinken,geektophe/shinken,fpeyre/shinken,Alignak-monitoring/alignak,claneys/shinken,claneys/shinken,naparuba/shinken,staute/shinken_deb,ddurieux/alignak,titilambert/alignak,peeyush-tm/shinken,fpeyre/shinken,geektophe/shinken,peeyush-tm/shinken,kaji-project/shinken,staute/shinken_package,mohierf/shinken,kaji-project/shinken,naparuba/shinken,rledisez/shinken,dfranco/shinken,fpeyre/shinken,staute/shinken_deb,savoirfairelinux/shinken,savoirfairelinux/shinken,KerkhoffTechnologies/shinken,dfranco/shinken,staute/shinken_package,titilambert/alignak,KerkhoffTechnologies/shinken,gst/alignak,gst/alignak,geektophe/shinken,dfranco/shinken,rednach/krill,fpeyre/shinken,rledisez/shinken,savoirfairelinux/shinken,Alignak-monitoring/alignak,Aimage/shinken,rledisez/shinken,Simage/shinken,claneys/shinken,lets-software/shinken,Aimage/shinken,geektophe/shinken,rednach/krill,ddurieux/alignak,lets-software/shinken,geektophe/shinken,dfranco/shinken,rednach/krill,naparuba/shinken,rednach/krill,claneys/shinken,naparuba/shinken,gst/alignak,kaji-project/shinken,rednach/krill,tal-nino/shinken,lets-software/shinken,rledisez/shinken,staute/shinken_deb,geektophe/shinken,kaji-project/shinken,ddurieux/alignak,staute/shinken_deb,savoirfairelinux/shinken,Simage/shinken,lets-software/shinken
|
Enh: Test - Add test for fix
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Sebastien Coavoux, s.coavoux@free.frc
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class TestBadHostGroupConf(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_bad_hg_conf.cfg')
def test_bad_conf(self):
self.assert_(not self.conf.conf_is_correct)
self.assert_(self.any_log_match("itemgroup::.* as hostgroup, got unknown member BADMEMBERHG"))
self.assert_(not self.any_log_match("itemgroup::.* as servicegroup, got unknown member BADMEMBERHG"))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Enh: Test - Add test for fix<commit_after>
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Sebastien Coavoux, s.coavoux@free.frc
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class TestBadHostGroupConf(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_bad_hg_conf.cfg')
def test_bad_conf(self):
self.assert_(not self.conf.conf_is_correct)
self.assert_(self.any_log_match("itemgroup::.* as hostgroup, got unknown member BADMEMBERHG"))
self.assert_(not self.any_log_match("itemgroup::.* as servicegroup, got unknown member BADMEMBERHG"))
if __name__ == '__main__':
unittest.main()
|
Enh: Test - Add test for fix#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Sebastien Coavoux, s.coavoux@free.frc
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class TestBadHostGroupConf(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_bad_hg_conf.cfg')
def test_bad_conf(self):
self.assert_(not self.conf.conf_is_correct)
self.assert_(self.any_log_match("itemgroup::.* as hostgroup, got unknown member BADMEMBERHG"))
self.assert_(not self.any_log_match("itemgroup::.* as servicegroup, got unknown member BADMEMBERHG"))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Enh: Test - Add test for fix<commit_after>#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Sebastien Coavoux, s.coavoux@free.frc
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class TestBadHostGroupConf(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_bad_hg_conf.cfg')
def test_bad_conf(self):
self.assert_(not self.conf.conf_is_correct)
self.assert_(self.any_log_match("itemgroup::.* as hostgroup, got unknown member BADMEMBERHG"))
self.assert_(not self.any_log_match("itemgroup::.* as servicegroup, got unknown member BADMEMBERHG"))
if __name__ == '__main__':
unittest.main()
|
|
ff1d6d56cfb04cd382739b1399826c124fb7b31a
|
find-bug.py
|
find-bug.py
|
#!/usr/bin/env python
import argparse, os, tempfile, sys
from replace_imports import include_imports
parser = argparse.ArgumentParser(description='Attempt to create a small file which reproduces a bug found in a large development.')
parser.add_argument('bug_file', metavar='BUGGY_FILE', type=argparse.FileType('r'),
help='a .v file which displays the bug')
parser.add_argument('output_file', metavar='OUT_FILE', type=str,
help='a .v file which will hold intermediate results, as well as the final reduced file')
parser.add_argument('temp_file', metavar='TEMP_FILE', nargs='?', type=str, default='',
help='a .v file which will be used to build up intermediate files while they are being tested')
parser.add_argument('--verbose', '-v', dest='verbose',
action='store_const', const=True, default=False,
help='display some extra information')
parser.add_argument('--fast', dest='fast',
action='store_const', const=True, default=False,
help='Use a faster method for combining imports')
def write_to_file(file_name, contents):
try:
with open(file_name, 'w', encoding='UTF-8') as f:
f.write(contents)
except TypeError:
with open(file_name, 'w') as f:
f.write(contents)
if __name__ == '__main__':
args = parser.parse_args()
bug_file_name = args.bug_file.name
output_file_name = args.output_file
temp_file_name = args.temp_file
verbose = args.verbose
fast = args.fast
if bug_file_name[-2:] != '.v':
print('Error: BUGGY_FILE must end in .v (value: %s)' % bug_file_name)
sys.exit(1)
if output_file_name[-2:] != '.v':
print('Error: OUT_FILE must end in .v (value: %s)' % output_file_name)
sys.exit(1)
if temp_file_name == '':
temp_file = tempfile.NamedTemporaryFile(suffix='.v', dir='.', delete=False)
temp_file_name = temp_file.name
temp_file.close()
if temp_file_name[-2:] != '.v':
print('Error: TEMP_FILE must end in .v (value: %s)' % temp_file_name)
sys.exit(1)
print('First, I will attempt to inline all of the inputs in %s, and store the result in %s...' % (bug_file_name, output_file_name))
inlined_contents = include_imports(bug_file_name, verbose=verbose, fast=fast)
if inlined_contents:
write_to_file(output_file_name, inlined_contents)
else:
print('Failed to inline inputs.')
sys.exit(1)
print('Now, I will attempt to coq the file, and find the error...')
if os.path.exists(temp_file_name):
os.remove(temp_file_name)
|
Write a top-level file to wrap the bug-finding process
|
Write a top-level file to wrap the bug-finding process
|
Python
|
mit
|
JasonGross/coq-tools,JasonGross/coq-tools
|
Write a top-level file to wrap the bug-finding process
|
#!/usr/bin/env python
import argparse, os, tempfile, sys
from replace_imports import include_imports
parser = argparse.ArgumentParser(description='Attempt to create a small file which reproduces a bug found in a large development.')
parser.add_argument('bug_file', metavar='BUGGY_FILE', type=argparse.FileType('r'),
help='a .v file which displays the bug')
parser.add_argument('output_file', metavar='OUT_FILE', type=str,
help='a .v file which will hold intermediate results, as well as the final reduced file')
parser.add_argument('temp_file', metavar='TEMP_FILE', nargs='?', type=str, default='',
help='a .v file which will be used to build up intermediate files while they are being tested')
parser.add_argument('--verbose', '-v', dest='verbose',
action='store_const', const=True, default=False,
help='display some extra information')
parser.add_argument('--fast', dest='fast',
action='store_const', const=True, default=False,
help='Use a faster method for combining imports')
def write_to_file(file_name, contents):
try:
with open(file_name, 'w', encoding='UTF-8') as f:
f.write(contents)
except TypeError:
with open(file_name, 'w') as f:
f.write(contents)
if __name__ == '__main__':
args = parser.parse_args()
bug_file_name = args.bug_file.name
output_file_name = args.output_file
temp_file_name = args.temp_file
verbose = args.verbose
fast = args.fast
if bug_file_name[-2:] != '.v':
print('Error: BUGGY_FILE must end in .v (value: %s)' % bug_file_name)
sys.exit(1)
if output_file_name[-2:] != '.v':
print('Error: OUT_FILE must end in .v (value: %s)' % output_file_name)
sys.exit(1)
if temp_file_name == '':
temp_file = tempfile.NamedTemporaryFile(suffix='.v', dir='.', delete=False)
temp_file_name = temp_file.name
temp_file.close()
if temp_file_name[-2:] != '.v':
print('Error: TEMP_FILE must end in .v (value: %s)' % temp_file_name)
sys.exit(1)
print('First, I will attempt to inline all of the inputs in %s, and store the result in %s...' % (bug_file_name, output_file_name))
inlined_contents = include_imports(bug_file_name, verbose=verbose, fast=fast)
if inlined_contents:
write_to_file(output_file_name, inlined_contents)
else:
print('Failed to inline inputs.')
sys.exit(1)
print('Now, I will attempt to coq the file, and find the error...')
if os.path.exists(temp_file_name):
os.remove(temp_file_name)
|
<commit_before><commit_msg>Write a top-level file to wrap the bug-finding process<commit_after>
|
#!/usr/bin/env python
import argparse, os, tempfile, sys
from replace_imports import include_imports
parser = argparse.ArgumentParser(description='Attempt to create a small file which reproduces a bug found in a large development.')
parser.add_argument('bug_file', metavar='BUGGY_FILE', type=argparse.FileType('r'),
help='a .v file which displays the bug')
parser.add_argument('output_file', metavar='OUT_FILE', type=str,
help='a .v file which will hold intermediate results, as well as the final reduced file')
parser.add_argument('temp_file', metavar='TEMP_FILE', nargs='?', type=str, default='',
help='a .v file which will be used to build up intermediate files while they are being tested')
parser.add_argument('--verbose', '-v', dest='verbose',
action='store_const', const=True, default=False,
help='display some extra information')
parser.add_argument('--fast', dest='fast',
action='store_const', const=True, default=False,
help='Use a faster method for combining imports')
def write_to_file(file_name, contents):
try:
with open(file_name, 'w', encoding='UTF-8') as f:
f.write(contents)
except TypeError:
with open(file_name, 'w') as f:
f.write(contents)
if __name__ == '__main__':
args = parser.parse_args()
bug_file_name = args.bug_file.name
output_file_name = args.output_file
temp_file_name = args.temp_file
verbose = args.verbose
fast = args.fast
if bug_file_name[-2:] != '.v':
print('Error: BUGGY_FILE must end in .v (value: %s)' % bug_file_name)
sys.exit(1)
if output_file_name[-2:] != '.v':
print('Error: OUT_FILE must end in .v (value: %s)' % output_file_name)
sys.exit(1)
if temp_file_name == '':
temp_file = tempfile.NamedTemporaryFile(suffix='.v', dir='.', delete=False)
temp_file_name = temp_file.name
temp_file.close()
if temp_file_name[-2:] != '.v':
print('Error: TEMP_FILE must end in .v (value: %s)' % temp_file_name)
sys.exit(1)
print('First, I will attempt to inline all of the inputs in %s, and store the result in %s...' % (bug_file_name, output_file_name))
inlined_contents = include_imports(bug_file_name, verbose=verbose, fast=fast)
if inlined_contents:
write_to_file(output_file_name, inlined_contents)
else:
print('Failed to inline inputs.')
sys.exit(1)
print('Now, I will attempt to coq the file, and find the error...')
if os.path.exists(temp_file_name):
os.remove(temp_file_name)
|
Write a top-level file to wrap the bug-finding process#!/usr/bin/env python
import argparse, os, tempfile, sys
from replace_imports import include_imports
parser = argparse.ArgumentParser(description='Attempt to create a small file which reproduces a bug found in a large development.')
parser.add_argument('bug_file', metavar='BUGGY_FILE', type=argparse.FileType('r'),
help='a .v file which displays the bug')
parser.add_argument('output_file', metavar='OUT_FILE', type=str,
help='a .v file which will hold intermediate results, as well as the final reduced file')
parser.add_argument('temp_file', metavar='TEMP_FILE', nargs='?', type=str, default='',
help='a .v file which will be used to build up intermediate files while they are being tested')
parser.add_argument('--verbose', '-v', dest='verbose',
action='store_const', const=True, default=False,
help='display some extra information')
parser.add_argument('--fast', dest='fast',
action='store_const', const=True, default=False,
help='Use a faster method for combining imports')
def write_to_file(file_name, contents):
try:
with open(file_name, 'w', encoding='UTF-8') as f:
f.write(contents)
except TypeError:
with open(file_name, 'w') as f:
f.write(contents)
if __name__ == '__main__':
args = parser.parse_args()
bug_file_name = args.bug_file.name
output_file_name = args.output_file
temp_file_name = args.temp_file
verbose = args.verbose
fast = args.fast
if bug_file_name[-2:] != '.v':
print('Error: BUGGY_FILE must end in .v (value: %s)' % bug_file_name)
sys.exit(1)
if output_file_name[-2:] != '.v':
print('Error: OUT_FILE must end in .v (value: %s)' % output_file_name)
sys.exit(1)
if temp_file_name == '':
temp_file = tempfile.NamedTemporaryFile(suffix='.v', dir='.', delete=False)
temp_file_name = temp_file.name
temp_file.close()
if temp_file_name[-2:] != '.v':
print('Error: TEMP_FILE must end in .v (value: %s)' % temp_file_name)
sys.exit(1)
print('First, I will attempt to inline all of the inputs in %s, and store the result in %s...' % (bug_file_name, output_file_name))
inlined_contents = include_imports(bug_file_name, verbose=verbose, fast=fast)
if inlined_contents:
write_to_file(output_file_name, inlined_contents)
else:
print('Failed to inline inputs.')
sys.exit(1)
print('Now, I will attempt to coq the file, and find the error...')
if os.path.exists(temp_file_name):
os.remove(temp_file_name)
|
<commit_before><commit_msg>Write a top-level file to wrap the bug-finding process<commit_after>#!/usr/bin/env python
import argparse, os, tempfile, sys
from replace_imports import include_imports
parser = argparse.ArgumentParser(description='Attempt to create a small file which reproduces a bug found in a large development.')
parser.add_argument('bug_file', metavar='BUGGY_FILE', type=argparse.FileType('r'),
help='a .v file which displays the bug')
parser.add_argument('output_file', metavar='OUT_FILE', type=str,
help='a .v file which will hold intermediate results, as well as the final reduced file')
parser.add_argument('temp_file', metavar='TEMP_FILE', nargs='?', type=str, default='',
help='a .v file which will be used to build up intermediate files while they are being tested')
parser.add_argument('--verbose', '-v', dest='verbose',
action='store_const', const=True, default=False,
help='display some extra information')
parser.add_argument('--fast', dest='fast',
action='store_const', const=True, default=False,
help='Use a faster method for combining imports')
def write_to_file(file_name, contents):
try:
with open(file_name, 'w', encoding='UTF-8') as f:
f.write(contents)
except TypeError:
with open(file_name, 'w') as f:
f.write(contents)
if __name__ == '__main__':
args = parser.parse_args()
bug_file_name = args.bug_file.name
output_file_name = args.output_file
temp_file_name = args.temp_file
verbose = args.verbose
fast = args.fast
if bug_file_name[-2:] != '.v':
print('Error: BUGGY_FILE must end in .v (value: %s)' % bug_file_name)
sys.exit(1)
if output_file_name[-2:] != '.v':
print('Error: OUT_FILE must end in .v (value: %s)' % output_file_name)
sys.exit(1)
if temp_file_name == '':
temp_file = tempfile.NamedTemporaryFile(suffix='.v', dir='.', delete=False)
temp_file_name = temp_file.name
temp_file.close()
if temp_file_name[-2:] != '.v':
print('Error: TEMP_FILE must end in .v (value: %s)' % temp_file_name)
sys.exit(1)
print('First, I will attempt to inline all of the inputs in %s, and store the result in %s...' % (bug_file_name, output_file_name))
inlined_contents = include_imports(bug_file_name, verbose=verbose, fast=fast)
if inlined_contents:
write_to_file(output_file_name, inlined_contents)
else:
print('Failed to inline inputs.')
sys.exit(1)
print('Now, I will attempt to coq the file, and find the error...')
if os.path.exists(temp_file_name):
os.remove(temp_file_name)
|
|
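# Editor's note: the TypeError fallback in write_to_file above exists because
# Python 2's built-in open() rejects the "encoding" keyword. A hedged
# alternative (not part of the recorded commit) that works unchanged on both
# major versions is io.open; the path below is hypothetical.
import io

with io.open('out.v', 'w', encoding='UTF-8') as f:
    # io.open accepts the encoding keyword on Python 2.6+ and Python 3 alike.
    f.write(u'(* placeholder contents *)')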
77dad0076c177244e58d13a0593a6091f2c90340
|
scikits/statsmodels/examples/tsa/ex_dates.py
|
scikits/statsmodels/examples/tsa/ex_dates.py
|
import scikits.statsmodels.api as sm
import numpy as np
import pandas
# Getting started
# ---------------
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the year.
# We can use scikits.timeseries and datetime to create this array.
import datetime
import scikits.timeseries as ts
dates = ts.date_array(start_date=1700, length=len(data.endog), freq='A')
# To make an array of datetime types, we need an integer array of ordinals
#.. from datetime import datetime
#.. dt_dates = dates.toordinal().astype(int)
#.. dt_dates = np.asarray([datetime.fromordinal(i) for i in dt_dates])
dt_dates = dates.tolist()
# Using Pandas
# ------------
# Make a pandas TimeSeries or DataFrame
endog = pandas.Series(data.endog, index=dt_dates)
# and instantiate the model
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
# Let's do some out-of-sample prediction
pred = pandas_ar_res.predict(start='2005', end='2015')
print pred
# Using explicit dates
# --------------------
ar_model = sm.tsa.AR(data.endog, dates=dt_dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print pred
# This just returns a regular array, but since the model has date information
# attached, you can get the prediction dates in a roundabout way.
print ar_res._data.predict_dates
# This attribute only exists if predict has been called. It holds the dates
# associated with the last call to predict.
#..TODO: should this be attached to the results instance?
# Using scikits.timeseries
# ------------------------
ts_data = ts.time_series(data.endog, dates=dates)
ts_ar_model = sm.tsa.AR(ts_data, freq='A')
ts_ar_res = ts_ar_model.fit(maxlag=9)
# Using Larry
# -----------
import la
larr = la.larry(data.endog, [dt_dates])
la_ar_model = sm.tsa.AR(larr, freq='A')
la_ar_res = la_ar_model.fit(maxlag=9)
|
Add examples for using dates with TSA models
|
ENH: Add examples for using dates with TSA models
|
Python
|
bsd-3-clause
|
josef-pkt/statsmodels,hainm/statsmodels,huongttlan/statsmodels,musically-ut/statsmodels,wzbozon/statsmodels,DonBeo/statsmodels,bavardage/statsmodels,Averroes/statsmodels,jseabold/statsmodels,kiyoto/statsmodels,bert9bert/statsmodels,gef756/statsmodels,wzbozon/statsmodels,wwf5067/statsmodels,wesm/statsmodels,bsipocz/statsmodels,phobson/statsmodels,hainm/statsmodels,kiyoto/statsmodels,DonBeo/statsmodels,rgommers/statsmodels,josef-pkt/statsmodels,edhuckle/statsmodels,bashtage/statsmodels,bzero/statsmodels,bzero/statsmodels,gef756/statsmodels,waynenilsen/statsmodels,yarikoptic/pystatsmodels,wzbozon/statsmodels,cbmoore/statsmodels,yl565/statsmodels,wdurhamh/statsmodels,DonBeo/statsmodels,phobson/statsmodels,rgommers/statsmodels,josef-pkt/statsmodels,ChadFulton/statsmodels,wesm/statsmodels,wdurhamh/statsmodels,bert9bert/statsmodels,bavardage/statsmodels,huongttlan/statsmodels,jseabold/statsmodels,nguyentu1602/statsmodels,bavardage/statsmodels,nvoron23/statsmodels,nvoron23/statsmodels,cbmoore/statsmodels,alekz112/statsmodels,bsipocz/statsmodels,Averroes/statsmodels,bashtage/statsmodels,bert9bert/statsmodels,alekz112/statsmodels,bashtage/statsmodels,kiyoto/statsmodels,pprett/statsmodels,rgommers/statsmodels,bzero/statsmodels,adammenges/statsmodels,ChadFulton/statsmodels,wwf5067/statsmodels,wdurhamh/statsmodels,statsmodels/statsmodels,jstoxrocky/statsmodels,waynenilsen/statsmodels,pprett/statsmodels,gef756/statsmodels,wkfwkf/statsmodels,YihaoLu/statsmodels,waynenilsen/statsmodels,wdurhamh/statsmodels,detrout/debian-statsmodels,bavardage/statsmodels,edhuckle/statsmodels,jseabold/statsmodels,adammenges/statsmodels,bsipocz/statsmodels,pprett/statsmodels,yarikoptic/pystatsmodels,bzero/statsmodels,edhuckle/statsmodels,wwf5067/statsmodels,pprett/statsmodels,yl565/statsmodels,wkfwkf/statsmodels,bsipocz/statsmodels,bert9bert/statsmodels,cbmoore/statsmodels,saketkc/statsmodels,huongttlan/statsmodels,ChadFulton/statsmodels,musically-ut/statsmodels,statsmodels/statsmodels,astocko/statsmodels,edhuckle/statsmodels,jseabold/statsmodels,detrout/debian-statsmodels,kiyoto/statsmodels,phobson/statsmodels,kiyoto/statsmodels,jstoxrocky/statsmodels,josef-pkt/statsmodels,jstoxrocky/statsmodels,bert9bert/statsmodels,edhuckle/statsmodels,detrout/debian-statsmodels,yl565/statsmodels,YihaoLu/statsmodels,hlin117/statsmodels,adammenges/statsmodels,ChadFulton/statsmodels,statsmodels/statsmodels,gef756/statsmodels,astocko/statsmodels,saketkc/statsmodels,jseabold/statsmodels,nvoron23/statsmodels,wesm/statsmodels,bzero/statsmodels,wdurhamh/statsmodels,astocko/statsmodels,YihaoLu/statsmodels,bashtage/statsmodels,wkfwkf/statsmodels,josef-pkt/statsmodels,hlin117/statsmodels,saketkc/statsmodels,hlin117/statsmodels,detrout/debian-statsmodels,statsmodels/statsmodels,ChadFulton/statsmodels,yl565/statsmodels,yarikoptic/pystatsmodels,Averroes/statsmodels,DonBeo/statsmodels,Averroes/statsmodels,hainm/statsmodels,nguyentu1602/statsmodels
|
ENH: Add examples for using dates with TSA models
|
import scikits.statsmodels.api as sm
import numpy as np
import pandas
# Getting started
# ---------------
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the year.
# We can use scikits.timeseries and datetime to create this array.
import datetime
import scikits.timeseries as ts
dates = ts.date_array(start_date=1700, length=len(data.endog), freq='A')
# To make an array of datetime types, we need an integer array of ordinals
#.. from datetime import datetime
#.. dt_dates = dates.toordinal().astype(int)
#.. dt_dates = np.asarray([datetime.fromordinal(i) for i in dt_dates])
dt_dates = dates.tolist()
# Using Pandas
# ------------
# Make a pandas TimeSeries or DataFrame
endog = pandas.Series(data.endog, index=dt_dates)
# and instantiate the model
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
# Let's do some out-of-sample prediction
pred = pandas_ar_res.predict(start='2005', end='2015')
print pred
# Using explicit dates
# --------------------
ar_model = sm.tsa.AR(data.endog, dates=dt_dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print pred
# This just returns a regular array, but since the model has date information
# attached, you can get the prediction dates in a roundabout way.
print ar_res._data.predict_dates
# This attribute only exists if predict has been called. It holds the dates
# associated with the last call to predict.
#..TODO: should this be attached to the results instance?
# Using scikits.timeseries
# ------------------------
ts_data = ts.time_series(data.endog, dates=dates)
ts_ar_model = sm.tsa.AR(ts_data, freq='A')
ts_ar_res = ts_ar_model.fit(maxlag=9)
# Using Larry
# -----------
import la
larr = la.larry(data.endog, [dt_dates])
la_ar_model = sm.tsa.AR(larr, freq='A')
la_ar_res = la_ar_model.fit(maxlag=9)
|
<commit_before><commit_msg>ENH: Add examples for using dates with TSA models<commit_after>
|
import scikits.statsmodels.api as sm
import numpy as np
import pandas
# Getting started
# ---------------
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the year.
# We can use scikits.timeseries and datetime to create this array.
import datetime
import scikits.timeseries as ts
dates = ts.date_array(start_date=1700, length=len(data.endog), freq='A')
# To make an array of datetime types, we need an integer array of ordinals
#.. from datetime import datetime
#.. dt_dates = dates.toordinal().astype(int)
#.. dt_dates = np.asarray([datetime.fromordinal(i) for i in dt_dates])
dt_dates = dates.tolist()
# Using Pandas
# ------------
# Make a pandas TimeSeries or DataFrame
endog = pandas.Series(data.endog, index=dt_dates)
# and instantiate the model
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
# Let's do some out-of-sample prediction
pred = pandas_ar_res.predict(start='2005', end='2015')
print pred
# Using explicit dates
# --------------------
ar_model = sm.tsa.AR(data.endog, dates=dt_dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print pred
# This just returns a regular array, but since the model has date information
# attached, you can get the prediction dates in a roundabout way.
print ar_res._data.predict_dates
# This attribute only exists if predict has been called. It holds the dates
# associated with the last call to predict.
#..TODO: should this be attached to the results instance?
# Using scikits.timeseries
# ------------------------
ts_data = ts.time_series(data.endog, dates=dates)
ts_ar_model = sm.tsa.AR(ts_data, freq='A')
ts_ar_res = ts_ar_model.fit(maxlag=9)
# Using Larry
# -----------
import la
larr = la.larry(data.endog, [dt_dates])
la_ar_model = sm.tsa.AR(larr, freq='A')
la_ar_res = la_ar_model.fit(maxlag=9)
|
ENH: Add examples for using dates with TSA modelsimport scikits.statsmodels.api as sm
import numpy as np
import pandas
# Getting started
# ---------------
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the year.
# We can use scikits.timeseries and datetime to create this array.
import datetime
import scikits.timeseries as ts
dates = ts.date_array(start_date=1700, length=len(data.endog), freq='A')
# To make an array of datetime types, we need an integer array of ordinals
#.. from datetime import datetime
#.. dt_dates = dates.toordinal().astype(int)
#.. dt_dates = np.asarray([datetime.fromordinal(i) for i in dt_dates])
dt_dates = dates.tolist()
# Using Pandas
# ------------
# Make a pandas TimeSeries or DataFrame
endog = pandas.Series(data.endog, index=dt_dates)
# and instantiate the model
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
# Let's do some out-of-sample prediction
pred = pandas_ar_res.predict(start='2005', end='2015')
print pred
# Using explicit dates
# --------------------
ar_model = sm.tsa.AR(data.endog, dates=dt_dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print pred
# This just returns a regular array, but since the model has date information
# attached, you can get the prediction dates in a roundabout way.
print ar_res._data.predict_dates
# This attribute only exists if predict has been called. It holds the dates
# associated with the last call to predict.
#..TODO: should this be attached to the results instance?
# Using scikits.timeseries
# ------------------------
ts_data = ts.time_series(data.endog, dates=dates)
ts_ar_model = sm.tsa.AR(ts_data, freq='A')
ts_ar_res = ts_ar_model.fit(maxlag=9)
# Using Larry
# -----------
import la
larr = la.larry(data.endog, [dt_dates])
la_ar_model = sm.tsa.AR(larr, freq='A')
la_ar_res = la_ar_model.fit(maxlag=9)
|
<commit_before><commit_msg>ENH: Add examples for using dates with TSA models<commit_after>import scikits.statsmodels.api as sm
import numpy as np
import pandas
# Getting started
# ---------------
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the year.
# We can use scikits.timeseries and datetime to create this array.
import datetime
import scikits.timeseries as ts
dates = ts.date_array(start_date=1700, length=len(data.endog), freq='A')
# To make an array of datetime types, we need an integer array of ordinals
#.. from datetime import datetime
#.. dt_dates = dates.toordinal().astype(int)
#.. dt_dates = np.asarray([datetime.fromordinal(i) for i in dt_dates])
dt_dates = dates.tolist()
# Using Pandas
# ------------
# Make a pandas TimeSeries or DataFrame
endog = pandas.Series(data.endog, index=dt_dates)
# and instantiate the model
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
# Let's do some out-of-sample prediction
pred = pandas_ar_res.predict(start='2005', end='2015')
print pred
# Using explicit dates
# --------------------
ar_model = sm.tsa.AR(data.endog, dates=dt_dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print pred
# This just returns a regular array, but since the model has date information
# attached, you can get the prediction dates in a roundabout way.
print ar_res._data.predict_dates
# This attribute only exists if predict has been called. It holds the dates
# associated with the last call to predict.
#..TODO: should this be attached to the results instance?
# Using scikits.timeseries
# ------------------------
ts_data = ts.time_series(data.endog, dates=dates)
ts_ar_model = sm.tsa.AR(ts_data, freq='A')
ts_ar_res = ts_ar_model.fit(maxlag=9)
# Using Larry
# -----------
import la
larr = la.larry(data.endog, [dt_dates])
la_ar_model = sm.tsa.AR(larr, freq='A')
la_ar_res = la_ar_model.fit(maxlag=9)
|
|
5b23140f2b9e6f24ddf6162cb0b110640bc3b203
|
Primes/pollard_rho.py
|
Primes/pollard_rho.py
|
import random
def gcd( a, b):
if(b == 0): return a;
return gcd(b, a % b);
def pollardRho(N):
if N%2==0:
return 2
x = random.randint(1, N-1)
y = x
c = random.randint(1, N-1)
g = 1
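    # Floyd's cycle detection: x advances one step, y two steps per iteration;
    # a collision modulo a hidden factor of N surfaces as gcd(|x - y|, N) > 1.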
while g==1:
x = ((x*x)%N+c)%N
y = ((y*y)%N+c)%N
y = ((y*y)%N+c)%N
g = gcd(abs(x-y),N)
return g
print(pollardRho(10967535067))
|
Add implementation pollardrho in python.
|
Add implementation pollardrho in python.
|
Python
|
mit
|
xdanielsb/Marathon-book,xdanielsb/Marathon-book,xdanielsb/Marathon-book,xdanielsb/Marathon-book,xdanielsb/Marathon-book
|
Add implementation pollardrho in python.
|
import random
def gcd( a, b):
if(b == 0): return a;
return gcd(b, a % b);
def pollardRho(N):
if N%2==0:
return 2
x = random.randint(1, N-1)
y = x
c = random.randint(1, N-1)
g = 1
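    # Floyd's cycle detection: x advances one step, y two steps per iteration;
    # a collision modulo a hidden factor of N surfaces as gcd(|x - y|, N) > 1.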
while g==1:
x = ((x*x)%N+c)%N
y = ((y*y)%N+c)%N
y = ((y*y)%N+c)%N
g = gcd(abs(x-y),N)
return g
print(pollardRho(10967535067))
|
<commit_before><commit_msg>Add implementation pollardrho in python.<commit_after>
|
import random
def gcd( a, b):
if(b == 0): return a;
return gcd(b, a % b);
def pollardRho(N):
if N%2==0:
return 2
x = random.randint(1, N-1)
y = x
c = random.randint(1, N-1)
g = 1
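    # Floyd's cycle detection: x advances one step, y two steps per iteration;
    # a collision modulo a hidden factor of N surfaces as gcd(|x - y|, N) > 1.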
while g==1:
x = ((x*x)%N+c)%N
y = ((y*y)%N+c)%N
y = ((y*y)%N+c)%N
g = gcd(abs(x-y),N)
return g
print(pollardRho(10967535067))
|
Add implementation pollardrho in python.import random
def gcd( a, b):
if(b == 0): return a;
return gcd(b, a % b);
def pollardRho(N):
if N%2==0:
return 2
x = random.randint(1, N-1)
y = x
c = random.randint(1, N-1)
g = 1
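    # Floyd's cycle detection: x advances one step, y two steps per iteration;
    # a collision modulo a hidden factor of N surfaces as gcd(|x - y|, N) > 1.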
while g==1:
x = ((x*x)%N+c)%N
y = ((y*y)%N+c)%N
y = ((y*y)%N+c)%N
g = gcd(abs(x-y),N)
return g
print(pollardRho(10967535067))
|
<commit_before><commit_msg>Add implementation pollardrho in python.<commit_after>import random
def gcd( a, b):
if(b == 0): return a;
return gcd(b, a % b);
def pollardRho(N):
if N%2==0:
return 2
x = random.randint(1, N-1)
y = x
c = random.randint(1, N-1)
g = 1
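    # Floyd's cycle detection: x advances one step, y two steps per iteration;
    # a collision modulo a hidden factor of N surfaces as gcd(|x - y|, N) > 1.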
while g==1:
x = ((x*x)%N+c)%N
y = ((y*y)%N+c)%N
y = ((y*y)%N+c)%N
g = gcd(abs(x-y),N)
return g
print(pollardRho(10967535067))
|
|
cc1f60fc7b97ab7daa5bc26699667898b4c683cd
|
tests/relative_url_test.py
|
tests/relative_url_test.py
|
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
# uses the high level interface
import curl
import unittest
from . import app
from . import runwsgi
from . import util
setup_module, teardown_module = runwsgi.app_runner_setup((app.app, 8380))
class RelativeUrlTest(unittest.TestCase):
def setUp(self):
self.curl = curl.Curl('http://localhost:8380/')
def tearDown(self):
self.curl.close()
def test_get_relative(self):
self.curl.get('/success')
self.assertEqual('success', self.curl.body())
|
Test coverage for relative urls in curl module
|
Test coverage for relative urls in curl module
|
Python
|
lgpl-2.1
|
p/pycurl-archived,pycurl/pycurl,pycurl/pycurl,p/pycurl-archived,pycurl/pycurl,p/pycurl-archived
|
Test coverage for relative urls in curl module
|
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
# uses the high level interface
import curl
import unittest
from . import app
from . import runwsgi
from . import util
setup_module, teardown_module = runwsgi.app_runner_setup((app.app, 8380))
class RelativeUrlTest(unittest.TestCase):
def setUp(self):
self.curl = curl.Curl('http://localhost:8380/')
def tearDown(self):
self.curl.close()
def test_get_relative(self):
self.curl.get('/success')
self.assertEqual('success', self.curl.body())
|
<commit_before><commit_msg>Test coverage for relative urls in curl module<commit_after>
|
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
# uses the high level interface
import curl
import unittest
from . import app
from . import runwsgi
from . import util
setup_module, teardown_module = runwsgi.app_runner_setup((app.app, 8380))
class RelativeUrlTest(unittest.TestCase):
def setUp(self):
self.curl = curl.Curl('http://localhost:8380/')
def tearDown(self):
self.curl.close()
def test_get_relative(self):
self.curl.get('/success')
self.assertEqual('success', self.curl.body())
|
Test coverage for relative urls in curl module#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
# uses the high level interface
import curl
import unittest
from . import app
from . import runwsgi
from . import util
setup_module, teardown_module = runwsgi.app_runner_setup((app.app, 8380))
class RelativeUrlTest(unittest.TestCase):
def setUp(self):
self.curl = curl.Curl('http://localhost:8380/')
def tearDown(self):
self.curl.close()
def test_get_relative(self):
self.curl.get('/success')
self.assertEqual('success', self.curl.body())
|
<commit_before><commit_msg>Test coverage for relative urls in curl module<commit_after>#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
# uses the high level interface
import curl
import unittest
from . import app
from . import runwsgi
from . import util
setup_module, teardown_module = runwsgi.app_runner_setup((app.app, 8380))
class RelativeUrlTest(unittest.TestCase):
def setUp(self):
self.curl = curl.Curl('http://localhost:8380/')
def tearDown(self):
self.curl.close()
def test_get_relative(self):
self.curl.get('/success')
self.assertEqual('success', self.curl.body())
|
|
b3f4dc3b1799edc9ba1260f09fda90916be43ecd
|
get_file.py
|
get_file.py
|
#!/usr/bin/env python
"""
Download file from Thingiverse.com
"""
import argparse
import os.path
import requests
from thingiverse_crawler import get_download_link
from download_model import download_single_file
def parse_args():
parser = argparse.ArgumentParser(__doc__);
parser.add_argument("--output-dir", help="output directory", default="./");
parser.add_argument("file_id");
return parser.parse_args();
def main():
args = parse_args();
link = get_download_link(args.file_id);
__, ext = os.path.splitext(link);
output_file = "{}{}".format(args.file_id, ext.lower());
output_file = os.path.join(args.output_dir, output_file);
download_single_file([output_file, link]);
if __name__ == "__main__":
main();
|
Add script to download single file from file_id.
|
Add script to download single file from file_id.
|
Python
|
mit
|
qnzhou/ThingiverseCrawler
|
Add script to download single file from file_id.
|
#!/usr/bin/env python
"""
Download file from Thingiverse.com
"""
import argparse
import os.path
import requests
from thingiverse_crawler import get_download_link
from download_model import download_single_file
def parse_args():
parser = argparse.ArgumentParser(__doc__);
parser.add_argument("--output-dir", help="output directory", default="./");
parser.add_argument("file_id");
return parser.parse_args();
def main():
args = parse_args();
link = get_download_link(args.file_id);
__, ext = os.path.splitext(link);
output_file = "{}{}".format(args.file_id, ext.lower());
output_file = os.path.join(args.output_dir, output_file);
download_single_file([output_file, link]);
if __name__ == "__main__":
main();
|
<commit_before><commit_msg>Add script to download single file from file_id.<commit_after>
|
#!/usr/bin/env python
"""
Download file from Thingiverse.com
"""
import argparse
import os.path
import requests
from thingiverse_crawler import get_download_link
from download_model import download_single_file
def parse_args():
parser = argparse.ArgumentParser(__doc__);
parser.add_argument("--output-dir", help="output directory", default="./");
parser.add_argument("file_id");
return parser.parse_args();
def main():
args = parse_args();
link = get_download_link(args.file_id);
__, ext = os.path.splitext(link);
output_file = "{}{}".format(args.file_id, ext.lower());
output_file = os.path.join(args.output_dir, output_file);
download_single_file([output_file, link]);
if __name__ == "__main__":
main();
|
Add script to download single file from file_id.#!/usr/bin/env python
"""
Download file from Thingiverse.com
"""
import argparse
import os.path
import requests
from thingiverse_crawler import get_download_link
from download_model import download_single_file
def parse_args():
parser = argparse.ArgumentParser(__doc__);
parser.add_argument("--output-dir", help="output directory", default="./");
parser.add_argument("file_id");
return parser.parse_args();
def main():
args = parse_args();
link = get_download_link(args.file_id);
__, ext = os.path.splitext(link);
output_file = "{}{}".format(args.file_id, ext.lower());
output_file = os.path.join(args.output_dir, output_file);
download_single_file([output_file, link]);
if __name__ == "__main__":
main();
|
<commit_before><commit_msg>Add script to download single file from file_id.<commit_after>#!/usr/bin/env python
"""
Download file from Thingiverse.com
"""
import argparse
import os.path
import requests
from thingiverse_crawler import get_download_link
from download_model import download_single_file
def parse_args():
parser = argparse.ArgumentParser(__doc__);
parser.add_argument("--output-dir", help="output directory", default="./");
parser.add_argument("file_id");
return parser.parse_args();
def main():
args = parse_args();
link = get_download_link(args.file_id);
__, ext = os.path.splitext(link);
output_file = "{}{}".format(args.file_id, ext.lower());
output_file = os.path.join(args.output_dir, output_file);
download_single_file([output_file, link]);
if __name__ == "__main__":
main();
|
|
5cf0e4369a62e89e34dbe5ea862a7fb5342cc32e
|
python/merge-kml-files/merge-kml-files.py
|
python/merge-kml-files/merge-kml-files.py
|
#!/usr/bin/env python
import sys
import lxml.etree
def main():
if len(sys.argv) < 3:
sys.stderr.write('ERROR: Must provide at least 2 KML files to merge\n')
sys.exit('Usage: {} FILE1 FILE2 ...'.format(sys.argv[0]))
first_kml_root = lxml.etree.parse(sys.argv[1]).getroot()
first_kml_ns = first_kml_root.nsmap[None]
first_kml_document = first_kml_root.find('{{{}}}Document'.format(
first_kml_ns))
for filename in sys.argv[2:]:
kml_root = lxml.etree.parse(filename).getroot()
kml_ns = kml_root.nsmap[None]
kml_document = kml_root.find('{{{}}}Document'.format(kml_ns))
# Add the Document node's child elements to the first KML file
for element in kml_document.iterchildren():
first_kml_document.append(element)
print(lxml.etree.tostring(
first_kml_root,
encoding='utf-8',
xml_declaration=True,
pretty_print=True,
# .decode('utf-8') is required for Python 3
).decode('utf-8'))
if __name__ == '__main__':
main()
|
Add script for merging KML files
|
Add script for merging KML files
|
Python
|
mit
|
bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile
|
Add script for merging KML files
|
#!/usr/bin/env python
import sys
import lxml.etree
def main():
if len(sys.argv) < 3:
sys.stderr.write('ERROR: Must provide at least 2 KML files to merge\n')
sys.exit('Usage: {} FILE1 FILE2 ...'.format(sys.argv[0]))
first_kml_root = lxml.etree.parse(sys.argv[1]).getroot()
first_kml_ns = first_kml_root.nsmap[None]
first_kml_document = first_kml_root.find('{{{}}}Document'.format(
first_kml_ns))
for filename in sys.argv[2:]:
kml_root = lxml.etree.parse(filename).getroot()
kml_ns = kml_root.nsmap[None]
kml_document = kml_root.find('{{{}}}Document'.format(kml_ns))
# Add the Document node's child elements to the first KML file
for element in kml_document.iterchildren():
first_kml_document.append(element)
print(lxml.etree.tostring(
first_kml_root,
encoding='utf-8',
xml_declaration=True,
pretty_print=True,
# .decode('utf-8') is required for Python 3
).decode('utf-8'))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for merging KML files<commit_after>
|
#!/usr/bin/env python
import sys
import lxml.etree
def main():
if len(sys.argv) < 3:
sys.stderr.write('ERROR: Must provide at least 2 KML files to merge\n')
sys.exit('Usage: {} FILE1 FILE2 ...'.format(sys.argv[0]))
first_kml_root = lxml.etree.parse(sys.argv[1]).getroot()
first_kml_ns = first_kml_root.nsmap[None]
first_kml_document = first_kml_root.find('{{{}}}Document'.format(
first_kml_ns))
for filename in sys.argv[2:]:
kml_root = lxml.etree.parse(filename).getroot()
kml_ns = kml_root.nsmap[None]
kml_document = kml_root.find('{{{}}}Document'.format(kml_ns))
# Add the Document node's child elements to the first KML file
for element in kml_document.iterchildren():
first_kml_document.append(element)
print(lxml.etree.tostring(
first_kml_root,
encoding='utf-8',
xml_declaration=True,
pretty_print=True,
# .decode('utf-8') is required for Python 3
).decode('utf-8'))
if __name__ == '__main__':
main()
|
Add script for merging KML files#!/usr/bin/env python
import sys
import lxml.etree
def main():
if len(sys.argv) < 3:
sys.stderr.write('ERROR: Must provide at least 2 KML files to merge\n')
sys.exit('Usage: {} FILE1 FILE2 ...'.format(sys.argv[0]))
first_kml_root = lxml.etree.parse(sys.argv[1]).getroot()
first_kml_ns = first_kml_root.nsmap[None]
first_kml_document = first_kml_root.find('{{{}}}Document'.format(
first_kml_ns))
for filename in sys.argv[2:]:
kml_root = lxml.etree.parse(filename).getroot()
kml_ns = kml_root.nsmap[None]
kml_document = kml_root.find('{{{}}}Document'.format(kml_ns))
# Add the Document node's child elements to the first KML file
for element in kml_document.iterchildren():
first_kml_document.append(element)
print(lxml.etree.tostring(
first_kml_root,
encoding='utf-8',
xml_declaration=True,
pretty_print=True,
# .decode('utf-8') is required for Python 3
).decode('utf-8'))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for merging KML files<commit_after>#!/usr/bin/env python
import sys
import lxml.etree
def main():
if len(sys.argv) < 3:
sys.stderr.write('ERROR: Must provide at least 2 KML files to merge\n')
sys.exit('Usage: {} FILE1 FILE2 ...'.format(sys.argv[0]))
first_kml_root = lxml.etree.parse(sys.argv[1]).getroot()
first_kml_ns = first_kml_root.nsmap[None]
first_kml_document = first_kml_root.find('{{{}}}Document'.format(
first_kml_ns))
for filename in sys.argv[2:]:
kml_root = lxml.etree.parse(filename).getroot()
kml_ns = kml_root.nsmap[None]
kml_document = kml_root.find('{{{}}}Document'.format(kml_ns))
# Add the Document node's child elements to the first KML file
for element in kml_document.iterchildren():
first_kml_document.append(element)
print(lxml.etree.tostring(
first_kml_root,
encoding='utf-8',
xml_declaration=True,
pretty_print=True,
# .decode('utf-8') is required for Python 3
).decode('utf-8'))
if __name__ == '__main__':
main()
|
|
dfa76a4ad4a15e4068135b5f82ef5a00763c4b57
|
open_humans/models.py
|
open_humans/models.py
|
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
class Profile(models.Model):
user = models.OneToOneField(User)
about_me = models.TextField()
@receiver(post_save, sender=User, dispatch_uid='create_profile')
def cb_create_profile(sender, instance, created, raw, **kwargs):
"""
Create an account for the newly created user.
"""
# If we're loading a user via a fixture then `raw` will be true and in that
# case we won't want to create a Profile to go with it
if created and not raw:
Profile.objects.create(user=instance)
|
Add Profile model and post-save hook
|
Add Profile model and post-save hook
|
Python
|
mit
|
OpenHumans/open-humans,OpenHumans/open-humans,OpenHumans/open-humans,PersonalGenomesOrg/open-humans,PersonalGenomesOrg/open-humans,OpenHumans/open-humans,PersonalGenomesOrg/open-humans,PersonalGenomesOrg/open-humans
|
Add Profile model and post-save hook
|
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
class Profile(models.Model):
user = models.OneToOneField(User)
about_me = models.TextField()
@receiver(post_save, sender=User, dispatch_uid='create_profile')
def cb_create_profile(sender, instance, created, raw, **kwargs):
"""
Create an account for the newly created user.
"""
# If we're loading a user via a fixture then `raw` will be true and in that
# case we won't want to create a Profile to go with it
if created and not raw:
Profile.objects.create(user=instance)
|
<commit_before><commit_msg>Add Profile model and post-save hook<commit_after>
|
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
class Profile(models.Model):
user = models.OneToOneField(User)
about_me = models.TextField()
@receiver(post_save, sender=User, dispatch_uid='create_profile')
def cb_create_profile(sender, instance, created, raw, **kwargs):
"""
Create an account for the newly created user.
"""
# If we're loading a user via a fixture then `raw` will be true and in that
# case we won't want to create a Profile to go with it
if created and not raw:
Profile.objects.create(user=instance)
|
Add Profile model and post-save hookfrom django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
class Profile(models.Model):
user = models.OneToOneField(User)
about_me = models.TextField()
@receiver(post_save, sender=User, dispatch_uid='create_profile')
def cb_create_profile(sender, instance, created, raw, **kwargs):
"""
Create an account for the newly created user.
"""
# If we're loading a user via a fixture then `raw` will be true and in that
# case we won't want to create a Profile to go with it
if created and not raw:
Profile.objects.create(user=instance)
|
<commit_before><commit_msg>Add Profile model and post-save hook<commit_after>from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
class Profile(models.Model):
user = models.OneToOneField(User)
about_me = models.TextField()
@receiver(post_save, sender=User, dispatch_uid='create_profile')
def cb_create_profile(sender, instance, created, raw, **kwargs):
"""
Create an account for the newly created user.
"""
# If we're loading a user via a fixture then `raw` will be true and in that
# case we won't want to create a Profile to go with it
if created and not raw:
Profile.objects.create(user=instance)
|
|
2f9bd2a08160724144f0db00994213bdd2a741f5
|
proselint/checks/wallace/tense_present.py
|
proselint/checks/wallace/tense_present.py
|
# -*- coding: utf-8 -*-
"""DFW201: Tense present.
---
layout: post
error_code: MAU103
source: DFW's Tense Present
source_url: http://bit.ly/1c85lgR
title: Tense present
date: 2014-06-10 12:31:19
categories: writing
---
Archaism.
"""
from proselint.tools import memoize
import re
@memoize
def check(text):
err = "DFW201"
msg = u"'{}'."
illogics = [
u"up to \d{1,3}% ?[-\u2014\u2013]{0,3} ?(?:or|and) more\W?",
"between you and I",
"on accident",
"somewhat of a",
"all it's own",
"reason is because",
"audible to the ear",
"in regards to",
"would of",
"and so",
"i ?(?:feel|am feeling|am|'m|'m feeling) nauseous",
]
errors = []
for i in illogics:
for m in re.finditer(i, text, flags=re.UNICODE | re.IGNORECASE):
txt = m.group(0).strip()
errors.append((m.start(), m.end(), err, msg.format(txt)))
return errors
|
Add some examples from tense present
|
Add some examples from tense present
|
Python
|
bsd-3-clause
|
jstewmon/proselint,amperser/proselint,jstewmon/proselint,amperser/proselint,amperser/proselint,jstewmon/proselint,amperser/proselint,amperser/proselint
|
Add some examples from tense present
|
# -*- coding: utf-8 -*-
"""DFW201: Tense present.
---
layout: post
error_code: MAU103
source: DFW's Tense Present
source_url: http://bit.ly/1c85lgR
title: Tense present
date: 2014-06-10 12:31:19
categories: writing
---
Archaism.
"""
from proselint.tools import memoize
import re
@memoize
def check(text):
err = "DFW201"
msg = u"'{}'."
illogics = [
u"up to \d{1,3}% ?[-\u2014\u2013]{0,3} ?(?:or|and) more\W?",
"between you and I",
"on accident",
"somewhat of a",
"all it's own",
"reason is because",
"audible to the ear",
"in regards to",
"would of",
"and so",
"i ?(?:feel|am feeling|am|'m|'m feeling) nauseous",
]
errors = []
for i in illogics:
for m in re.finditer(i, text, flags=re.UNICODE | re.IGNORECASE):
txt = m.group(0).strip()
errors.append((m.start(), m.end(), err, msg.format(txt)))
return errors
|
<commit_before><commit_msg>Add some examples from tense present<commit_after>
|
# -*- coding: utf-8 -*-
"""DFW201: Tense present.
---
layout: post
error_code: MAU103
source: DFW's Tense Present
source_url: http://bit.ly/1c85lgR
title: Tense present
date: 2014-06-10 12:31:19
categories: writing
---
Archaism.
"""
from proselint.tools import memoize
import re
@memoize
def check(text):
err = "DFW201"
msg = u"'{}'."
illogics = [
u"up to \d{1,3}% ?[-\u2014\u2013]{0,3} ?(?:or|and) more\W?",
"between you and I",
"on accident",
"somewhat of a",
"all it's own",
"reason is because",
"audible to the ear",
"in regards to",
"would of",
"and so",
"i ?(?:feel|am feeling|am|'m|'m feeling) nauseous",
]
errors = []
for i in illogics:
for m in re.finditer(i, text, flags=re.UNICODE | re.IGNORECASE):
txt = m.group(0).strip()
errors.append((m.start(), m.end(), err, msg.format(txt)))
return errors
|
Add some examples from tense present# -*- coding: utf-8 -*-
"""DFW201: Tense present.
---
layout: post
error_code: MAU103
source: DFW's Tense Present
source_url: http://bit.ly/1c85lgR
title: Tense present
date: 2014-06-10 12:31:19
categories: writing
---
Archaism.
"""
from proselint.tools import memoize
import re
@memoize
def check(text):
err = "DFW201"
msg = u"'{}'."
illogics = [
u"up to \d{1,3}% ?[-\u2014\u2013]{0,3} ?(?:or|and) more\W?",
"between you and I",
"on accident",
"somewhat of a",
"all it's own",
"reason is because",
"audible to the ear",
"in regards to",
"would of",
"and so",
"i ?(?:feel|am feeling|am|'m|'m feeling) nauseous",
]
errors = []
for i in illogics:
for m in re.finditer(i, text, flags=re.UNICODE | re.IGNORECASE):
txt = m.group(0).strip()
errors.append((m.start(), m.end(), err, msg.format(txt)))
return errors
|
<commit_before><commit_msg>Add some examples from tense present<commit_after># -*- coding: utf-8 -*-
"""DFW201: Tense present.
---
layout: post
error_code: MAU103
source: DFW's Tense Present
source_url: http://bit.ly/1c85lgR
title: Tense present
date: 2014-06-10 12:31:19
categories: writing
---
Archaism.
"""
from proselint.tools import memoize
import re
@memoize
def check(text):
err = "DFW201"
msg = u"'{}'."
illogics = [
u"up to \d{1,3}% ?[-\u2014\u2013]{0,3} ?(?:or|and) more\W?",
"between you and I",
"on accident",
"somewhat of a",
"all it's own",
"reason is because",
"audible to the ear",
"in regards to",
"would of",
"and so",
"i ?(?:feel|am feeling|am|'m|'m feeling) nauseous",
]
errors = []
for i in illogics:
for m in re.finditer(i, text, flags=re.UNICODE | re.IGNORECASE):
txt = m.group(0).strip()
errors.append((m.start(), m.end(), err, msg.format(txt)))
return errors
|
|
81cbf67fb637cf69bb47d43024eb9c860ae472e9
|
test/test_borg.py
|
test/test_borg.py
|
import pytest
from borg_summon import borg
def test_hook_no_command():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.hook({})
assert str(excinfo.value) == 'The "command" option is required for hooks.'
def test_borg_invalid_log_level():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.init({'log_level': 'null', 'location': 'location'}, 'remote', 'repo')
assert 'not a legal log level' in str(excinfo.value)
def test_borg_no_location():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.init({}, 'remote', 'repo')
assert str(excinfo.value).startswith('No location specified for remote')
def test_borg_invalid_encryption():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.init({'encryption': 'caesar', 'location': 'location'}, 'remote', 'repo')
assert 'is not a valid encryption mode' in str(excinfo.value)
def test_borg_create_no_paths():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.create({'location': 'location'}, 'remote', 'repo_name', 'archive_name')
assert str(excinfo.value).startswith('There are no existing paths to backup')
|
Add tests for errors in borg.py
|
Add tests for errors in borg.py
|
Python
|
mit
|
grensjo/borg-summon
|
Add tests for errors in borg.py
|
import pytest
from borg_summon import borg
def test_hook_no_command():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.hook({})
assert str(excinfo.value) == 'The "command" option is required for hooks.'
def test_borg_invalid_log_level():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.init({'log_level': 'null', 'location': 'location'}, 'remote', 'repo')
assert 'not a legal log level' in str(excinfo.value)
def test_borg_no_location():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.init({}, 'remote', 'repo')
assert str(excinfo.value).startswith('No location specified for remote')
def test_borg_invalid_encryption():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.init({'encryption': 'caesar', 'location': 'location'}, 'remote', 'repo')
assert 'is not a valid encryption mode' in str(excinfo.value)
def test_borg_create_no_paths():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.create({'location': 'location'}, 'remote', 'repo_name', 'archive_name')
assert str(excinfo.value).startswith('There are no existing paths to backup')
|
<commit_before><commit_msg>Add tests for errors in borg.py<commit_after>
|
import pytest
from borg_summon import borg
def test_hook_no_command():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.hook({})
assert str(excinfo.value) == 'The "command" option is required for hooks.'
def test_borg_invalid_log_level():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.init({'log_level': 'null', 'location': 'location'}, 'remote', 'repo')
assert 'not a legal log level' in str(excinfo.value)
def test_borg_no_location():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.init({}, 'remote', 'repo')
assert str(excinfo.value).startswith('No location specified for remote')
def test_borg_invalid_encryption():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.init({'encryption': 'caesar', 'location': 'location'}, 'remote', 'repo')
assert 'is not a valid encryption mode' in str(excinfo.value)
def test_borg_create_no_paths():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.create({'location': 'location'}, 'remote', 'repo_name', 'archive_name')
assert str(excinfo.value).startswith('There are no existing paths to backup')
|
Add tests for errors in borg.pyimport pytest
from borg_summon import borg
def test_hook_no_command():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.hook({})
assert str(excinfo.value) == 'The "command" option is required for hooks.'
def test_borg_invalid_log_level():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.init({'log_level': 'null', 'location': 'location'}, 'remote', 'repo')
assert 'not a legal log level' in str(excinfo.value)
def test_borg_no_location():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.init({}, 'remote', 'repo')
assert str(excinfo.value).startswith('No location specified for remote')
def test_borg_invalid_encryption():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.init({'encryption': 'caesar', 'location': 'location'}, 'remote', 'repo')
assert 'is not a valid encryption mode' in str(excinfo.value)
def test_borg_create_no_paths():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.create({'location': 'location'}, 'remote', 'repo_name', 'archive_name')
assert str(excinfo.value).startswith('There are no existing paths to backup')
|
<commit_before><commit_msg>Add tests for errors in borg.py<commit_after>import pytest
from borg_summon import borg
def test_hook_no_command():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.hook({})
assert str(excinfo.value) == 'The "command" option is required for hooks.'
def test_borg_invalid_log_level():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.init({'log_level': 'null', 'location': 'location'}, 'remote', 'repo')
assert 'not a legal log level' in str(excinfo.value)
def test_borg_no_location():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.init({}, 'remote', 'repo')
assert str(excinfo.value).startswith('No location specified for remote')
def test_borg_invalid_encryption():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.init({'encryption': 'caesar', 'location': 'location'}, 'remote', 'repo')
assert 'is not a valid encryption mode' in str(excinfo.value)
def test_borg_create_no_paths():
with pytest.raises(borg.InvalidConfigError) as excinfo:
borg.create({'location': 'location'}, 'remote', 'repo_name', 'archive_name')
assert str(excinfo.value).startswith('There are no existing paths to backup')
|
|
713658c3dfa2ff70511057868d44647e5f67d065
|
tests/model/test_range.py
|
tests/model/test_range.py
|
# Copyright (c) 2021. The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details
from datetime import datetime, timedelta
import pytz
from sqlalchemy import Column, literal, cast, TEXT
from sqlalchemy.future import select
from sqlalchemy.orm import Session
from pycroft.helpers.interval import open, closed, openclosed, closedopen
from pycroft.model import session
from pycroft.model.base import IntegerIdModel
from pycroft.model.types import TsTzRange
from tests import SQLAlchemyTestCase
class TestTable(IntegerIdModel):
value = Column(TsTzRange)
NOW = datetime.utcnow().replace(tzinfo=pytz.utc)
class TestTsTzRange(SQLAlchemyTestCase):
session: Session = session.session
def test_select_as_text(self):
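        # PostgreSQL renders a fully unbounded tstzrange as '(,)' in text form.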
stmt = select(cast(literal(open(None, None), TsTzRange), TEXT))
assert self.session.scalar(stmt) == '(,)'
def test_declarative_insert_and_select(self):
for interval in [
open(NOW, NOW + timedelta(days=1)),
closed(NOW, NOW + timedelta(days=1)),
open(NOW, NOW + timedelta(days=1)),
open(None, None),
open(None, NOW),
openclosed(None, NOW),
closedopen(None, NOW),
]:
with self.subTest(interval=interval):
mem = TestTable(value=interval)
self.session.add(mem)
self.session.commit()
assert mem.value == interval
def test_literal_select(self):
for interval in [
open(NOW, NOW + timedelta(days=1)),
]:
with self.subTest(interval=interval):
stmt = select(cast(literal(interval, TsTzRange), TsTzRange))
assert self.session.scalar(stmt) == interval
|
Add unittest for `TsTzRange` (de-)serialization of `Interval`s
|
Add unittest for `TsTzRange` (de-)serialization of `Interval`s
Refs #160
|
Python
|
apache-2.0
|
agdsn/pycroft,agdsn/pycroft,agdsn/pycroft,agdsn/pycroft,agdsn/pycroft
|
Add unittest for `TsTzRange` (de-)serialization of `Interval`s
Refs #160
|
# Copyright (c) 2021. The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details
from datetime import datetime, timedelta
import pytz
from sqlalchemy import Column, literal, cast, TEXT
from sqlalchemy.future import select
from sqlalchemy.orm import Session
from pycroft.helpers.interval import open, closed, openclosed, closedopen
from pycroft.model import session
from pycroft.model.base import IntegerIdModel
from pycroft.model.types import TsTzRange
from tests import SQLAlchemyTestCase
class TestTable(IntegerIdModel):
value = Column(TsTzRange)
NOW = datetime.utcnow().replace(tzinfo=pytz.utc)
class TestTsTzRange(SQLAlchemyTestCase):
session: Session = session.session
def test_select_as_text(self):
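        # PostgreSQL renders a fully unbounded tstzrange as '(,)' in text form.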
stmt = select(cast(literal(open(None, None), TsTzRange), TEXT))
assert self.session.scalar(stmt) == '(,)'
def test_declarative_insert_and_select(self):
for interval in [
open(NOW, NOW + timedelta(days=1)),
closed(NOW, NOW + timedelta(days=1)),
open(NOW, NOW + timedelta(days=1)),
open(None, None),
open(None, NOW),
openclosed(None, NOW),
closedopen(None, NOW),
]:
with self.subTest(interval=interval):
mem = TestTable(value=interval)
self.session.add(mem)
self.session.commit()
assert mem.value == interval
def test_literal_select(self):
for interval in [
open(NOW, NOW + timedelta(days=1)),
]:
with self.subTest(interval=interval):
stmt = select(cast(literal(interval, TsTzRange), TsTzRange))
assert self.session.scalar(stmt) == interval
|
<commit_before><commit_msg>Add unittest for `TsTzRange` (de-)serialization of `Interval`s
Refs #160<commit_after>
|
# Copyright (c) 2021. The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details
from datetime import datetime, timedelta
import pytz
from sqlalchemy import Column, literal, cast, TEXT
from sqlalchemy.future import select
from sqlalchemy.orm import Session
from pycroft.helpers.interval import open, closed, openclosed, closedopen
from pycroft.model import session
from pycroft.model.base import IntegerIdModel
from pycroft.model.types import TsTzRange
from tests import SQLAlchemyTestCase
class TestTable(IntegerIdModel):
value = Column(TsTzRange)
NOW = datetime.utcnow().replace(tzinfo=pytz.utc)
class TestTsTzRange(SQLAlchemyTestCase):
session: Session = session.session
def test_select_as_text(self):
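        # PostgreSQL renders a fully unbounded tstzrange as '(,)' in text form.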
stmt = select(cast(literal(open(None, None), TsTzRange), TEXT))
assert self.session.scalar(stmt) == '(,)'
def test_declarative_insert_and_select(self):
for interval in [
open(NOW, NOW + timedelta(days=1)),
closed(NOW, NOW + timedelta(days=1)),
open(NOW, NOW + timedelta(days=1)),
open(None, None),
open(None, NOW),
openclosed(None, NOW),
closedopen(None, NOW),
]:
with self.subTest(interval=interval):
mem = TestTable(value=interval)
self.session.add(mem)
self.session.commit()
assert mem.value == interval
def test_literal_select(self):
for interval in [
open(NOW, NOW + timedelta(days=1)),
]:
with self.subTest(interval=interval):
stmt = select(cast(literal(interval, TsTzRange), TsTzRange))
assert self.session.scalar(stmt) == interval
|
Add unittest for `TsTzRange` (de-)serialization of `Interval`s
Refs #160# Copyright (c) 2021. The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details
from datetime import datetime, timedelta
import pytz
from sqlalchemy import Column, literal, cast, TEXT
from sqlalchemy.future import select
from sqlalchemy.orm import Session
from pycroft.helpers.interval import open, closed, openclosed, closedopen
from pycroft.model import session
from pycroft.model.base import IntegerIdModel
from pycroft.model.types import TsTzRange
from tests import SQLAlchemyTestCase
class TestTable(IntegerIdModel):
value = Column(TsTzRange)
NOW = datetime.utcnow().replace(tzinfo=pytz.utc)
class TestTsTzRange(SQLAlchemyTestCase):
session: Session = session.session
def test_select_as_text(self):
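        # PostgreSQL renders a fully unbounded tstzrange as '(,)' in text form.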
stmt = select(cast(literal(open(None, None), TsTzRange), TEXT))
assert self.session.scalar(stmt) == '(,)'
def test_declarative_insert_and_select(self):
for interval in [
open(NOW, NOW + timedelta(days=1)),
closed(NOW, NOW + timedelta(days=1)),
open(NOW, NOW + timedelta(days=1)),
open(None, None),
open(None, NOW),
openclosed(None, NOW),
closedopen(None, NOW),
]:
with self.subTest(interval=interval):
mem = TestTable(value=interval)
self.session.add(mem)
self.session.commit()
assert mem.value == interval
def test_literal_select(self):
for interval in [
open(NOW, NOW + timedelta(days=1)),
]:
with self.subTest(interval=interval):
stmt = select(cast(literal(interval, TsTzRange), TsTzRange))
assert self.session.scalar(stmt) == interval
|
<commit_before><commit_msg>Add unittest for `TsTzRange` (de-)serialization of `Interval`s
Refs #160<commit_after># Copyright (c) 2021. The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details
from datetime import datetime, timedelta
import pytz
from sqlalchemy import Column, literal, cast, TEXT
from sqlalchemy.future import select
from sqlalchemy.orm import Session
from pycroft.helpers.interval import open, closed, openclosed, closedopen
from pycroft.model import session
from pycroft.model.base import IntegerIdModel
from pycroft.model.types import TsTzRange
from tests import SQLAlchemyTestCase
class TestTable(IntegerIdModel):
value = Column(TsTzRange)
NOW = datetime.utcnow().replace(tzinfo=pytz.utc)
class TestTsTzRange(SQLAlchemyTestCase):
session: Session = session.session
def test_select_as_text(self):
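        # PostgreSQL renders a fully unbounded tstzrange as '(,)' in text form.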
stmt = select(cast(literal(open(None, None), TsTzRange), TEXT))
assert self.session.scalar(stmt) == '(,)'
def test_declarative_insert_and_select(self):
for interval in [
open(NOW, NOW + timedelta(days=1)),
closed(NOW, NOW + timedelta(days=1)),
open(NOW, NOW + timedelta(days=1)),
open(None, None),
open(None, NOW),
openclosed(None, NOW),
closedopen(None, NOW),
]:
with self.subTest(interval=interval):
mem = TestTable(value=interval)
self.session.add(mem)
self.session.commit()
assert mem.value == interval
def test_literal_select(self):
for interval in [
open(NOW, NOW + timedelta(days=1)),
]:
with self.subTest(interval=interval):
stmt = select(cast(literal(interval, TsTzRange), TsTzRange))
assert self.session.scalar(stmt) == interval
|
|
400ebfd601cad411c22f7e7b483351b8d45e876e
|
tests/unit/test_component_dependencies.py
|
tests/unit/test_component_dependencies.py
|
#!/usr/bin/env python
#
# test_component_dependencies.py:
# unit tests for vyconf.components.dependencies
#
# Copyright (C) 2014 VyOS Development Group <maintainers@vyos.net>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
import unittest
import vyconf.components.dependencies as deps
def sort_inner(l):
    # Sort each inner list in place and return the outer list so the
    # sorted structures can be compared directly in assertEqual.
    for i in l:
        i.sort()
    return l
class TestComponentDependencies(unittest.TestCase):
def test_valid_one_per_list(self):
data = {"foo": [], "bar": ["foo"]}
deplist = deps.DependencyList(data).get_dependencies()
self.assertEqual(deplist, [['foo'], ['bar']])
def test_valid_multiple_per_list(self):
data = {"foo": [], "bar": [], "baz": ["foo", "bar"]}
deplist = deps.DependencyList(data).get_dependencies()
self.assertEqual(sort_inner(deplist),
sort_inner([["foo", "bar"], ["baz"]]))
def test_invalid_missing_dep(self):
data = {"foo": ["bar"]}
self.assertRaises(deps.DependencyError,
deps.DependencyList, data)
def test_invalid_loop(self):
data = {"foo": ["bar"], "bar": ["baz"], "baz": ["foo"]}
self.assertRaises(deps.DependencyError,
deps.DependencyList, data)
|
Add unit tests for dependency sorting module.
|
Add unit tests for dependency sorting module.
|
Python
|
lgpl-2.1
|
vyos-legacy/vyconfd,vyos-legacy/vyconfd
|
Add unit tests for dependency sorting module.
|
#!/usr/bin/env python
#
# test_component_dependencies.py:
# unit tests for vyconf.components.dependencies
#
# Copyright (C) 2014 VyOS Development Group <maintainers@vyos.net>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
import unittest
import vyconf.components.dependencies as deps
def sort_inner(l):
    # Sort each inner list in place and return the outer list so the
    # sorted structures can be compared directly in assertEqual.
    for i in l:
        i.sort()
    return l
class TestComponentDependencies(unittest.TestCase):
def test_valid_one_per_list(self):
data = {"foo": [], "bar": ["foo"]}
deplist = deps.DependencyList(data).get_dependencies()
self.assertEqual(deplist, [['foo'], ['bar']])
def test_valid_multiple_per_list(self):
data = {"foo": [], "bar": [], "baz": ["foo", "bar"]}
deplist = deps.DependencyList(data).get_dependencies()
self.assertEqual(sort_inner(deplist),
sort_inner([["foo", "bar"], ["baz"]]))
def test_invalid_missing_dep(self):
data = {"foo": ["bar"]}
self.assertRaises(deps.DependencyError,
deps.DependencyList, data)
def test_invalid_loop(self):
data = {"foo": ["bar"], "bar": ["baz"], "baz": ["foo"]}
self.assertRaises(deps.DependencyError,
deps.DependencyList, data)
|
<commit_before><commit_msg>Add unit tests for dependency sorting module.<commit_after>
|
#!/usr/bin/env python
#
# test_component_dependencies.py:
# unit tests for vyconf.components.dependencies
#
# Copyright (C) 2014 VyOS Development Group <maintainers@vyos.net>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
import unittest
import vyconf.components.dependencies as deps
def sort_inner(l):
    # Sort each inner list in place and return the outer list so the
    # sorted structures can be compared directly in assertEqual.
    for i in l:
        i.sort()
    return l
class TestComponentDependencies(unittest.TestCase):
def test_valid_one_per_list(self):
data = {"foo": [], "bar": ["foo"]}
deplist = deps.DependencyList(data).get_dependencies()
self.assertEqual(deplist, [['foo'], ['bar']])
def test_valid_multiple_per_list(self):
data = {"foo": [], "bar": [], "baz": ["foo", "bar"]}
deplist = deps.DependencyList(data).get_dependencies()
self.assertEqual(sort_inner(deplist),
sort_inner([["foo", "bar"], ["baz"]]))
def test_invalid_missing_dep(self):
data = {"foo": ["bar"]}
self.assertRaises(deps.DependencyError,
deps.DependencyList, data)
def test_invalid_loop(self):
data = {"foo": ["bar"], "bar": ["baz"], "baz": ["foo"]}
self.assertRaises(deps.DependencyError,
deps.DependencyList, data)
|
Add unit tests for dependency sorting module.#!/usr/bin/env python
#
# test_component_dependencies.py:
# unit tests for vyconf.components.dependencies
#
# Copyright (C) 2014 VyOS Development Group <maintainers@vyos.net>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
import unittest
import vyconf.components.dependencies as deps
def sort_inner(l):
for i in l:
i.sort()
class TestComponentDependencies(unittest.TestCase):
def test_valid_one_per_list(self):
data = {"foo": [], "bar": ["foo"]}
deplist = deps.DependencyList(data).get_dependencies()
self.assertEqual(deplist, [['foo'], ['bar']])
def test_valid_multiple_per_list(self):
data = {"foo": [], "bar": [], "baz": ["foo", "bar"]}
deplist = deps.DependencyList(data).get_dependencies()
self.assertEqual(sort_inner(deplist),
sort_inner([["foo", "bar"], ["baz"]]))
def test_invalid_missing_dep(self):
data = {"foo": ["bar"]}
self.assertRaises(deps.DependencyError,
deps.DependencyList, data)
def test_invalid_loop(self):
data = {"foo": ["bar"], "bar": ["baz"], "baz": ["foo"]}
self.assertRaises(deps.DependencyError,
deps.DependencyList, data)
|
<commit_before><commit_msg>Add unit tests for dependency sorting module.<commit_after>#!/usr/bin/env python
#
# test_component_dependencies.py:
# unit tests for vyconf.components.dependencies
#
# Copyright (C) 2014 VyOS Development Group <maintainers@vyos.net>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
import unittest
import vyconf.components.dependencies as deps
def sort_inner(l):
for i in l:
i.sort()
class TestComponentDependencies(unittest.TestCase):
def test_valid_one_per_list(self):
data = {"foo": [], "bar": ["foo"]}
deplist = deps.DependencyList(data).get_dependencies()
self.assertEqual(deplist, [['foo'], ['bar']])
def test_valid_multiple_per_list(self):
data = {"foo": [], "bar": [], "baz": ["foo", "bar"]}
deplist = deps.DependencyList(data).get_dependencies()
self.assertEqual(sort_inner(deplist),
sort_inner([["foo", "bar"], ["baz"]]))
def test_invalid_missing_dep(self):
data = {"foo": ["bar"]}
self.assertRaises(deps.DependencyError,
deps.DependencyList, data)
def test_invalid_loop(self):
data = {"foo": ["bar"], "bar": ["baz"], "baz": ["foo"]}
self.assertRaises(deps.DependencyError,
deps.DependencyList, data)
|
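A note on the sort_inner helper in the record above: list.sort() sorts in
place and returns None, so sort_inner also returns None, and the assertion
assertEqual(sort_inner(deplist), sort_inner([...])) compares None with None
and passes regardless of the data. A minimal corrected sketch of the same
helper:

def sort_inner(l):
    # Sort each inner list in place, then return the outer list so that
    # assertEqual compares the sorted lists rather than two None values.
    for i in l:
        i.sort()
    return l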
|
a81acb590f6d3a2fd701c6d223607661964c9152
|
app/access_control.py
|
app/access_control.py
|
from functools import wraps
from flask import flash, redirect, url_for, session
from app import views
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash("Please login to continue.", "danger")
return redirect(url_for("login"))
return decorated_function
|
Create a decorator `login_required` for access control on pages for logged-in users.
|
Create a decorator `login_required` for access control on pages for logged-in users.
|
Python
|
mit
|
alchermd/flask-todo-app,alchermd/flask-todo-app
|
Create a decorator `login_required` for access control on pages for logged-in users.
|
from functools import wraps
from flask import flash, redirect, url_for, session
from app import views
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash("Please login to continue.", "danger")
return redirect(url_for("login"))
return decorated_function
|
<commit_before><commit_msg>Create a decorator `login_required` for access control on pages for logged-in users.<commit_after>
|
from functools import wraps
from flask import flash, redirect, url_for, session
from app import views
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash("Please login to continue.", "danger")
return redirect(url_for("login"))
return decorated_function
|
Create a decorator `login_required` for access control on pages for logged-in users.from functools import wraps
from flask import flash, redirect, url_for, session
from app import views
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash("Please login to continue.", "danger")
return redirect(url_for("login"))
return decorated_function
|
<commit_before><commit_msg>Create a decorator `login_required` for access control on pages for logged-in users.<commit_after>from functools import wraps
from flask import flash, redirect, url_for, session
from app import views
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash("Please login to continue.", "danger")
return redirect(url_for("login"))
return decorated_function
|
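A minimal usage sketch of the login_required decorator above; the Flask app
and the /dashboard route are hypothetical, only the decorator itself comes
from the record:

from flask import Flask

app = Flask(__name__)

@app.route('/dashboard')
@login_required
def dashboard():
    # Reached only when 'logged_in' is present in the session; otherwise
    # the decorator flashes a warning and redirects to the login view.
    return 'Welcome back!'

Because the decorator applies functools.wraps, each wrapped view keeps its
original __name__, so Flask endpoint registration and url_for keep working
when several views are decorated this way.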
|
a6091420faffc490ad721fbf86bd7b76002d5f16
|
examples/demo/image_plot_origin_and_orientation.py
|
examples/demo/image_plot_origin_and_orientation.py
|
"""
Demonstration of altering a plot's origin and orientation.
The origin parameter sets a plot's default origin to the specified corner
of the plot window. These positions have the following behavior:
* 'left' : index increases left to right
* 'right' : index increases right to left
* 'top' : index increases top to bottom
* 'bottom' : index increases bottom to top
The orientation parameter switches the x- and y-axes. Alternatively, you can
think of this as a transpose about the origin.
"""
# Major library imports
from scipy.misc import lena
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import UItem, Group, View
# Chaco imports
from chaco.api import ArrayPlotData, GridContainer, Plot
from chaco.tools.api import PanTool, ZoomTool
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(
Group(
UItem('plot', editor=ComponentEditor(size=(1000, 500))),
orientation = "vertical"
),
resizable=True, title="Demo of image origin and orientation"
)
def _plot_default(self):
# Create a GridContainer to hold all of our plots: 2 rows, 4 columns:
container = GridContainer(fill_padding=True,
bgcolor="lightgray", use_backbuffer=True,
shape=(2, 4))
arrangements = [('top left', 'h'),
('top right', 'h'),
('top left', 'v'),
('top right', 'v'),
('bottom left', 'h'),
('bottom right', 'h'),
('bottom left', 'v'),
('bottom right', 'v')]
orientation_name = {'h': 'horizontal', 'v': 'vertical'}
pd = ArrayPlotData(image=lena())
        # Create an image plot for each (origin, orientation) pair and add it to the container
for origin, orientation in arrangements:
plot = Plot(pd, default_origin=origin, orientation=orientation)
plot.img_plot('image')
# Attach some tools to the plot
plot.tools.append(PanTool(plot))
zoom = ZoomTool(plot, tool_mode="box", always_on=False)
plot.overlays.append(zoom)
title = '{}, {}'
plot.title = title.format(orientation_name[orientation],
origin.replace(' ', '-'))
# Add to the grid container
container.add(plot)
return container
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
|
Add demo of origin and orientation values
|
Add demo of origin and orientation values
|
Python
|
bsd-3-clause
|
tommy-u/chaco,tommy-u/chaco,tommy-u/chaco
|
Add demo of origin and orientation values
|
"""
Demonstration of altering a plot's origin and orientation.
The origin parameter sets a plot's default origin to the specified corner
of the plot window. These positions have the following behavior:
* 'left' : index increases left to right
* 'right' : index increases right to left
* 'top' : index increases top to bottom
* 'bottom' : index increases bottom to top
The orientation parameter switches the x- and y-axes. Alternatively, you can
think of this as a transpose about the origin.
"""
# Major library imports
from scipy.misc import lena
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import UItem, Group, View
# Chaco imports
from chaco.api import ArrayPlotData, GridContainer, Plot
from chaco.tools.api import PanTool, ZoomTool
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(
Group(
UItem('plot', editor=ComponentEditor(size=(1000, 500))),
orientation = "vertical"
),
resizable=True, title="Demo of image origin and orientation"
)
def _plot_default(self):
# Create a GridContainer to hold all of our plots: 2 rows, 4 columns:
container = GridContainer(fill_padding=True,
bgcolor="lightgray", use_backbuffer=True,
shape=(2, 4))
arrangements = [('top left', 'h'),
('top right', 'h'),
('top left', 'v'),
('top right', 'v'),
('bottom left', 'h'),
('bottom right', 'h'),
('bottom left', 'v'),
('bottom right', 'v')]
orientation_name = {'h': 'horizontal', 'v': 'vertical'}
pd = ArrayPlotData(image=lena())
        # Create an image plot for each (origin, orientation) pair and add it to the container
for origin, orientation in arrangements:
plot = Plot(pd, default_origin=origin, orientation=orientation)
plot.img_plot('image')
# Attach some tools to the plot
plot.tools.append(PanTool(plot))
zoom = ZoomTool(plot, tool_mode="box", always_on=False)
plot.overlays.append(zoom)
title = '{}, {}'
plot.title = title.format(orientation_name[orientation],
origin.replace(' ', '-'))
# Add to the grid container
container.add(plot)
return container
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
|
<commit_before><commit_msg>Add demo of origin and orientation values<commit_after>
|
"""
Demonstration of altering a plot's origin and orientation.
The origin parameter sets a plot's default origin to the specified corner
of the plot window. These positions have the following behavior:
* 'left' : index increases left to right
* 'right' : index increases right to left
* 'top' : index increases top to bottom
* 'bottom' : index increases bottom to top
The orientation parameter switches the x- and y-axes. Alternatively, you can
think of this as a transpose about the origin.
"""
# Major library imports
from scipy.misc import lena
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import UItem, Group, View
# Chaco imports
from chaco.api import ArrayPlotData, GridContainer, Plot
from chaco.tools.api import PanTool, ZoomTool
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(
Group(
UItem('plot', editor=ComponentEditor(size=(1000, 500))),
orientation = "vertical"
),
resizable=True, title="Demo of image origin and orientation"
)
def _plot_default(self):
# Create a GridContainer to hold all of our plots: 2 rows, 4 columns:
container = GridContainer(fill_padding=True,
bgcolor="lightgray", use_backbuffer=True,
shape=(2, 4))
arrangements = [('top left', 'h'),
('top right', 'h'),
('top left', 'v'),
('top right', 'v'),
('bottom left', 'h'),
('bottom right', 'h'),
('bottom left', 'v'),
('bottom right', 'v')]
orientation_name = {'h': 'horizontal', 'v': 'vertical'}
pd = ArrayPlotData(image=lena())
        # Create an image plot for each (origin, orientation) pair and add it to the container
for origin, orientation in arrangements:
plot = Plot(pd, default_origin=origin, orientation=orientation)
plot.img_plot('image')
# Attach some tools to the plot
plot.tools.append(PanTool(plot))
zoom = ZoomTool(plot, tool_mode="box", always_on=False)
plot.overlays.append(zoom)
title = '{}, {}'
plot.title = title.format(orientation_name[orientation],
origin.replace(' ', '-'))
# Add to the grid container
container.add(plot)
return container
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
|
Add demo of origin and orientation values"""
Demonstration of altering a plot's origin and orientation.
The origin parameter sets a plot's default origin to the specified corner
of the plot window. These positions have the following behavior:
* 'left' : index increases left to right
* 'right' : index increases right to left
* 'top' : index increases top to bottom
* 'bottom' : index increases bottom to top
The orientation parameter switches the x- and y-axes. Alternatively, you can
think of this as a transpose about the origin.
"""
# Major library imports
from scipy.misc import lena
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import UItem, Group, View
# Chaco imports
from chaco.api import ArrayPlotData, GridContainer, Plot
from chaco.tools.api import PanTool, ZoomTool
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(
Group(
UItem('plot', editor=ComponentEditor(size=(1000, 500))),
orientation = "vertical"
),
resizable=True, title="Demo of image origin and orientation"
)
def _plot_default(self):
# Create a GridContainer to hold all of our plots: 2 rows, 4 columns:
container = GridContainer(fill_padding=True,
bgcolor="lightgray", use_backbuffer=True,
shape=(2, 4))
arrangements = [('top left', 'h'),
('top right', 'h'),
('top left', 'v'),
('top right', 'v'),
('bottom left', 'h'),
('bottom right', 'h'),
('bottom left', 'v'),
('bottom right', 'v')]
orientation_name = {'h': 'horizontal', 'v': 'vertical'}
pd = ArrayPlotData(image=lena())
        # Create an image plot for each (origin, orientation) pair and add it to the container
for origin, orientation in arrangements:
plot = Plot(pd, default_origin=origin, orientation=orientation)
plot.img_plot('image')
# Attach some tools to the plot
plot.tools.append(PanTool(plot))
zoom = ZoomTool(plot, tool_mode="box", always_on=False)
plot.overlays.append(zoom)
title = '{}, {}'
plot.title = title.format(orientation_name[orientation],
origin.replace(' ', '-'))
# Add to the grid container
container.add(plot)
return container
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
|
<commit_before><commit_msg>Add demo of origin and orientation values<commit_after>"""
Demonstration of altering a plot's origin and orientation.
The origin parameter sets a plot's default origin to the specified corner
of the plot window. These positions have the following behavior:
* 'left' : index increases left to right
* 'right' : index increases right to left
* 'top' : index increases top to bottom
* 'bottom' : index increases bottom to top
The orientation parameter switches the x- and y-axes. Alternatively, you can
think of this as a transpose about the origin.
"""
# Major library imports
from scipy.misc import lena
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import UItem, Group, View
# Chaco imports
from chaco.api import ArrayPlotData, GridContainer, Plot
from chaco.tools.api import PanTool, ZoomTool
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(
Group(
UItem('plot', editor=ComponentEditor(size=(1000, 500))),
orientation = "vertical"
),
resizable=True, title="Demo of image origin and orientation"
)
def _plot_default(self):
# Create a GridContainer to hold all of our plots: 2 rows, 4 columns:
container = GridContainer(fill_padding=True,
bgcolor="lightgray", use_backbuffer=True,
shape=(2, 4))
arrangements = [('top left', 'h'),
('top right', 'h'),
('top left', 'v'),
('top right', 'v'),
('bottom left', 'h'),
('bottom right', 'h'),
('bottom left', 'v'),
('bottom right', 'v')]
orientation_name = {'h': 'horizontal', 'v': 'vertical'}
pd = ArrayPlotData(image=lena())
        # Create an image plot for each (origin, orientation) pair and add it to the container
for origin, orientation in arrangements:
plot = Plot(pd, default_origin=origin, orientation=orientation)
plot.img_plot('image')
# Attach some tools to the plot
plot.tools.append(PanTool(plot))
zoom = ZoomTool(plot, tool_mode="box", always_on=False)
plot.overlays.append(zoom)
title = '{}, {}'
plot.title = title.format(orientation_name[orientation],
origin.replace(' ', '-'))
# Add to the grid container
container.add(plot)
return container
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
|
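The hand-written arrangements list above enumerates all eight combinations
of origin corner and orientation. A compact equivalent using
itertools.product, assuming the exact ordering of the original list only
matters for where each plot lands in the 2x4 grid:

from itertools import product

origins = ['top left', 'top right', 'bottom left', 'bottom right']
# Yields the same eight (origin, orientation) pairs, though in a different
# order than the hand-written list, which was laid out row by row.
arrangements = list(product(origins, ['h', 'v']))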
|
fd81e6e0d968ffd3e5aaf81827ad42c9ca0dcb15
|
tests/perf_concrete_execution.py
|
tests/perf_concrete_execution.py
|
# Performance tests on concrete code execution without invoking Unicorn engine
import os
import time
import logging
import angr
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests'))
def test_tight_loop(arch):
b = angr.Project(os.path.join(test_location, arch, "perf_tight_loops"), auto_load_libs=False)
simgr = b.factory.simgr()
# logging.getLogger('angr.sim_manager').setLevel(logging.INFO)
start = time.time()
simgr.explore()
elapsed = time.time() - start
print("Elapsed %f sec" % elapsed)
print(simgr)
if __name__ == "__main__":
test_tight_loop("x86_64")
|
Add a performance test for concrete execution.
|
Add a performance test for concrete execution.
|
Python
|
bsd-2-clause
|
schieb/angr,iamahuman/angr,iamahuman/angr,schieb/angr,angr/angr,angr/angr,iamahuman/angr,schieb/angr,angr/angr
|
Add a performance test for concrete execution.
|
# Performance tests on concrete code execution without invoking Unicorn engine
import os
import time
import logging
import angr
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests'))
def test_tight_loop(arch):
b = angr.Project(os.path.join(test_location, arch, "perf_tight_loops"), auto_load_libs=False)
simgr = b.factory.simgr()
# logging.getLogger('angr.sim_manager').setLevel(logging.INFO)
start = time.time()
simgr.explore()
elapsed = time.time() - start
print("Elapsed %f sec" % elapsed)
print(simgr)
if __name__ == "__main__":
test_tight_loop("x86_64")
|
<commit_before><commit_msg>Add a performance test for concrete execution.<commit_after>
|
# Performance tests on concrete code execution without invoking Unicorn engine
import os
import time
import logging
import angr
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests'))
def test_tight_loop(arch):
b = angr.Project(os.path.join(test_location, arch, "perf_tight_loops"), auto_load_libs=False)
simgr = b.factory.simgr()
# logging.getLogger('angr.sim_manager').setLevel(logging.INFO)
start = time.time()
simgr.explore()
elapsed = time.time() - start
print("Elapsed %f sec" % elapsed)
print(simgr)
if __name__ == "__main__":
test_tight_loop("x86_64")
|
Add a performance test for concrete execution.
# Performance tests on concrete code execution without invoking Unicorn engine
import os
import time
import logging
import angr
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests'))
def test_tight_loop(arch):
b = angr.Project(os.path.join(test_location, arch, "perf_tight_loops"), auto_load_libs=False)
simgr = b.factory.simgr()
# logging.getLogger('angr.sim_manager').setLevel(logging.INFO)
start = time.time()
simgr.explore()
elapsed = time.time() - start
print("Elapsed %f sec" % elapsed)
print(simgr)
if __name__ == "__main__":
test_tight_loop("x86_64")
|
<commit_before><commit_msg>Add a performance test for concrete execution.<commit_after>
# Performance tests on concrete code execution without invoking Unicorn engine
import os
import time
import logging
import angr
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests'))
def test_tight_loop(arch):
b = angr.Project(os.path.join(test_location, arch, "perf_tight_loops"), auto_load_libs=False)
simgr = b.factory.simgr()
# logging.getLogger('angr.sim_manager').setLevel(logging.INFO)
start = time.time()
simgr.explore()
elapsed = time.time() - start
print("Elapsed %f sec" % elapsed)
print(simgr)
if __name__ == "__main__":
test_tight_loop("x86_64")
|
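An aside on the timing pattern above: for benchmarks like this,
time.perf_counter() is generally a better clock than time.time(), since it
is monotonic and has higher resolution; the explore() call itself would be
unchanged:

import time

start = time.perf_counter()
# simgr.explore() would run here, exactly as in the test above
elapsed = time.perf_counter() - start
print('Elapsed %f sec' % elapsed)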
|
a37ec297e01316981ab3976366adc54ad00d5f51
|
pulses-to-data.py
|
pulses-to-data.py
|
import pyaudio
import struct
import math
import time
"""
Constants and definitions
"""
RATE = 44100
INPUT_BLOCK_TIME = 0.01 # seconds
INPUT_FRAMES_PER_BLOCK = int(RATE * INPUT_BLOCK_TIME)
PULSE_THRESHOLD = 0.6
MIN_SECONDS_BETWEEN_PULSES = 0.28
def get_rms(block):
# RMS amplitude is defined as the square root of the
# mean over time of the square of the amplitude.
# so we need to convert this string of bytes into
# a string of 16-bit samples...
# we will get one short out for each
# two chars in the string.
count = len(block)/2
format = "%dh"%(count)
shorts = struct.unpack( format, block )
# iterate over the block.
sum_squares = 0.0
for sample in shorts:
# sample is a signed short in +/- 32768.
# normalize it to 1.0
n = sample * (1.0/32768.0)
sum_squares += n*n
return math.sqrt( sum_squares / count )
def is_pulse(pulse_block):
global last_pulse_timestamp
amplitude = get_rms(pulse_block)
if amplitude > PULSE_THRESHOLD:
if get_current_timestamp() - last_pulse_timestamp >= MIN_SECONDS_BETWEEN_PULSES:
return True
return False
def get_current_timestamp():
    return time.time()
def get_pulse_stream(pyaudio_instance):
return pyaudio_instance.open(
format = pyaudio.paInt16,
channels = 2,
rate = RATE,
input = True,
frames_per_buffer = INPUT_FRAMES_PER_BLOCK
)
def listen_for_pulses(pulse_stream):
global last_pulse_timestamp, pulse_counter
while True:
try:
pulse_block = pulse_stream.read(INPUT_FRAMES_PER_BLOCK)
if is_pulse(pulse_block):
last_pulse_timestamp = get_current_timestamp()
pulse_counter += 1
print 'pulse: ', pulse_counter
except IOError, e:
print( "Error recording: ", e)
"""
Program
"""
pAudio = pyaudio.PyAudio()
pulse_stream = get_pulse_stream(pAudio)
last_pulse_timestamp = 0
pulse_counter = 0
listen_for_pulses(pulse_stream)
|
Add initial code for pulse counting
|
Add initial code for pulse counting
Brief intro: my stationary bicycle *had* a measurement monitor with
speed, traveled kms, etc. However, it broke down. As my stationary bike
is quite old, I was not able to find a replacement (a good thing). I
noticed that the cable going from the rotor to the monitoring device has
a 3.5mm jack ending. I put it into the microphone slot on my computer
and started pedalling. I noticed that after each finished circle (round,
or cadence, or whatever it's called), it made a beeping or *pulsing* sound.
Long story short, I made a simple Python program that monitors the
microphone and tries to detect the *pulses*. If it detects a pulse, it
simply prints out "pulse". When you know the distance you traveled in
each *pulse*, you can calculate almost anything (speed, distance, etc).
I'm going to create a simple React-based UI for stationary bicycles
based on this discovery.
In this commit, I'm adding a simple python program to detect the pulses
from the stationary bicycle.
|
Python
|
mit
|
lamosty/exercise-bike-ui,lamosty/exercise-bike-ui
|
Add initial code for pulse counting
Brief intro: my stationary bicycle *had* a measurement monitor with
speed, traveled kms, etc. However, it broke down. As my stationary bike
is quite old, I was not able to find a replacement (a good thing). I
noticed that the cable going from the rotor to the monitoring device has
a 3.5mm jack ending. I put it into the microphone slot on my computer
and started pedalling. I noticed that after each finished circle (round,
or cadence, or whatever it's called), it made a beeping or *pulsing* sound.
Long story short, I made a simple Python program that monitors the
microphone and tries to detect the *pulses*. If it detects a pulse, it
simply prints out "pulse". When you know the distance you traveled in
each *pulse*, you can calculate almost anything (speed, distance, etc).
I'm going to create a simple React-based UI for stationary bicycles
based on this discovery.
In this commit, I'm adding a simple python program to detect the pulses
from the stationary bicycle.
|
import pyaudio
import struct
import math
import time
"""
Constants and definitions
"""
RATE = 44100
INPUT_BLOCK_TIME = 0.01 # seconds
INPUT_FRAMES_PER_BLOCK = int(RATE * INPUT_BLOCK_TIME)
PULSE_THRESHOLD = 0.6
MIN_SECONDS_BETWEEN_PULSES = 0.28
def get_rms(block):
# RMS amplitude is defined as the square root of the
# mean over time of the square of the amplitude.
# so we need to convert this string of bytes into
# a string of 16-bit samples...
# we will get one short out for each
# two chars in the string.
count = len(block)/2
format = "%dh"%(count)
shorts = struct.unpack( format, block )
# iterate over the block.
sum_squares = 0.0
for sample in shorts:
# sample is a signed short in +/- 32768.
# normalize it to 1.0
n = sample * (1.0/32768.0)
sum_squares += n*n
return math.sqrt( sum_squares / count )
def is_pulse(pulse_block):
global last_pulse_timestamp
amplitude = get_rms(pulse_block)
if amplitude > PULSE_THRESHOLD:
if get_current_timestamp() - last_pulse_timestamp >= MIN_SECONDS_BETWEEN_PULSES:
return True
return False
def get_current_timestamp():
    return time.time()
def get_pulse_stream(pyaudio_instance):
return pyaudio_instance.open(
format = pyaudio.paInt16,
channels = 2,
rate = RATE,
input = True,
frames_per_buffer = INPUT_FRAMES_PER_BLOCK
)
def listen_for_pulses(pulse_stream):
global last_pulse_timestamp, pulse_counter
while True:
try:
pulse_block = pulse_stream.read(INPUT_FRAMES_PER_BLOCK)
if is_pulse(pulse_block):
last_pulse_timestamp = get_current_timestamp()
pulse_counter += 1
print 'pulse: ', pulse_counter
except IOError, e:
print( "Error recording: ", e)
"""
Program
"""
pAudio = pyaudio.PyAudio()
pulse_stream = get_pulse_stream(pAudio)
last_pulse_timestamp = 0
pulse_counter = 0
listen_for_pulses(pulse_stream)
|
<commit_before><commit_msg>Add initial code for pulse counting
Brief intro: my stationary bicycle *had* a measurement monitor with
speed, traveled kms, etc. However, it broke down. As my stationary bike
is quite old, I was not able to find a replacement (a good thing). I
noticed that the cable going from the rotor to the monitoring device has
a 3.5mm jack ending. I put it into the microphone slot on my computer
and started pedalling. I noticed that after each finished circle (round,
or cadence, or whatever it's called), it made a beeping or *pulsing* sound.
Long story short, I made a simple Python program that monitors the
microphone and tries to detect the *pulses*. If it detects a pulse, it
simply prints out "pulse". When you know the distance you traveled in
each *pulse*, you can calculate almost anything (speed, distance, etc).
I'm going to create a simple React-based UI for stationary bicycles
based on this discovery.
In this commit, I'm adding a simple python program to detect the pulses
from the stationary bicycle.<commit_after>
|
import pyaudio
import struct
import math
import time
"""
Constants and definitions
"""
RATE = 44100
INPUT_BLOCK_TIME = 0.01 # seconds
INPUT_FRAMES_PER_BLOCK = int(RATE * INPUT_BLOCK_TIME)
PULSE_THRESHOLD = 0.6
MIN_SECONDS_BETWEEN_PULSES = 0.28
def get_rms(block):
# RMS amplitude is defined as the square root of the
# mean over time of the square of the amplitude.
# so we need to convert this string of bytes into
# a string of 16-bit samples...
# we will get one short out for each
# two chars in the string.
count = len(block)/2
format = "%dh"%(count)
shorts = struct.unpack( format, block )
# iterate over the block.
sum_squares = 0.0
for sample in shorts:
# sample is a signed short in +/- 32768.
# normalize it to 1.0
n = sample * (1.0/32768.0)
sum_squares += n*n
return math.sqrt( sum_squares / count )
def is_pulse(pulse_block):
global last_pulse_timestamp
amplitude = get_rms(pulse_block)
if amplitude > PULSE_THRESHOLD:
if get_current_timestamp() - last_pulse_timestamp >= MIN_SECONDS_BETWEEN_PULSES:
return True
return False
def get_current_timestamp():
    return time.time()
def get_pulse_stream(pyaudio_instance):
return pyaudio_instance.open(
format = pyaudio.paInt16,
channels = 2,
rate = RATE,
input = True,
frames_per_buffer = INPUT_FRAMES_PER_BLOCK
)
def listen_for_pulses(pulse_stream):
global last_pulse_timestamp, pulse_counter
while True:
try:
pulse_block = pulse_stream.read(INPUT_FRAMES_PER_BLOCK)
if is_pulse(pulse_block):
last_pulse_timestamp = get_current_timestamp()
pulse_counter += 1
print 'pulse: ', pulse_counter
except IOError, e:
print( "Error recording: ", e)
"""
Program
"""
pAudio = pyaudio.PyAudio()
pulse_stream = get_pulse_stream(pAudio)
last_pulse_timestamp = 0
pulse_counter = 0
listen_for_pulses(pulse_stream)
|
Add initial code for pulse counting
Brief intro: my stationary bicycle *had* a measurement monitor with
speed, traveled kms, etc. However, it broke down. As my stationary bike
is quite old, I was not able to find a replacement (a good thing). I
noticed that the cable going from the rotor to the monitoring device has
a 3.5mm jack ending. I put it into the microphone slot on my computer
and started pedalling. I noticed that after each finished circle (round,
or cadence, or whatever it's called), it made a beeping or *pulsing* sound.
Long story short, I made a simple Python program that monitors the
microphone and tries to detect the *pulses*. If it detects a pulse, it
simply prints out "pulse". When you know the distance you traveled in
each *pulse*, you can calculate almost anything (speed, distance, etc).
I'm going to create a simple React-based UI for stationary bicycles
based on this discovery.
In this commit, I'm adding a simple python program to detect the pulses
from the stationary bicycle.import pyaudio
import struct
import math
import time
"""
Constants and definitions
"""
RATE = 44100
INPUT_BLOCK_TIME = 0.01 # seconds
INPUT_FRAMES_PER_BLOCK = int(RATE * INPUT_BLOCK_TIME)
PULSE_THRESHOLD = 0.6
MIN_SECONDS_BETWEEN_PULSES = 0.28
def get_rms(block):
# RMS amplitude is defined as the square root of the
# mean over time of the square of the amplitude.
# so we need to convert this string of bytes into
# a string of 16-bit samples...
# we will get one short out for each
# two chars in the string.
count = len(block)/2
format = "%dh"%(count)
shorts = struct.unpack( format, block )
# iterate over the block.
sum_squares = 0.0
for sample in shorts:
# sample is a signed short in +/- 32768.
# normalize it to 1.0
n = sample * (1.0/32768.0)
sum_squares += n*n
return math.sqrt( sum_squares / count )
def is_pulse(pulse_block):
global last_pulse_timestamp
amplitude = get_rms(pulse_block)
if amplitude > PULSE_THRESHOLD:
if get_current_timestamp() - last_pulse_timestamp >= MIN_SECONDS_BETWEEN_PULSES:
return True
return False
def get_current_timestamp():
    return time.time()
def get_pulse_stream(pyaudio_instance):
return pyaudio_instance.open(
format = pyaudio.paInt16,
channels = 2,
rate = RATE,
input = True,
frames_per_buffer = INPUT_FRAMES_PER_BLOCK
)
def listen_for_pulses(pulse_stream):
global last_pulse_timestamp, pulse_counter
while True:
try:
pulse_block = pulse_stream.read(INPUT_FRAMES_PER_BLOCK)
if is_pulse(pulse_block):
last_pulse_timestamp = get_current_timestamp()
pulse_counter += 1
print 'pulse: ', pulse_counter
except IOError, e:
print( "Error recording: ", e)
"""
Program
"""
pAudio = pyaudio.PyAudio()
pulse_stream = get_pulse_stream(pAudio)
last_pulse_timestamp = 0
pulse_counter = 0
listen_for_pulses(pulse_stream)
|
<commit_before><commit_msg>Add initial code for pulse counting
Brief intro: my stationary bicycle *had* a measurement monitor with
speed, traveled kms, etc. However, it broke down. As my stationary bike
is quite old, I was not able to find a replacement (a good thing). I
noticed that the cable going from the rotor to the monitoring device has
a 3.5mm jack ending. I put it into the microphone slot on my computer
and started pedalling. I noticed that after each finished circle (round,
or cadence, or whatever it's called), it made a beeping or *pulsing* sound.
Long story short, I made a simple Python program that monitors the
microphone and tries to detect the *pulses*. If it detects a pulse, it
simply prints out "pulse". When you know the distance you traveled in
each *pulse*, you can calculate almost anything (speed, distance, etc).
I'm going to create a simple React-based UI for stationary bicycles
based on this discovery.
In this commit, I'm adding a simple python program to detect the pulses
form the stationary bicycle.<commit_after>import pyaudio
import struct
import math
import time
"""
Constants and definitions
"""
RATE = 44100
INPUT_BLOCK_TIME = 0.01 # seconds
INPUT_FRAMES_PER_BLOCK = int(RATE * INPUT_BLOCK_TIME)
PULSE_THRESHOLD = 0.6
MIN_SECONDS_BETWEEN_PULSES = 0.28
def get_rms(block):
# RMS amplitude is defined as the square root of the
# mean over time of the square of the amplitude.
# so we need to convert this string of bytes into
# a string of 16-bit samples...
# we will get one short out for each
# two chars in the string.
count = len(block)/2
format = "%dh"%(count)
shorts = struct.unpack( format, block )
# iterate over the block.
sum_squares = 0.0
for sample in shorts:
# sample is a signed short in +/- 32768.
# normalize it to 1.0
n = sample * (1.0/32768.0)
sum_squares += n*n
return math.sqrt( sum_squares / count )
def is_pulse(pulse_block):
global last_pulse_timestamp
amplitude = get_rms(pulse_block)
if amplitude > PULSE_THRESHOLD:
if get_current_timestamp() - last_pulse_timestamp >= MIN_SECONDS_BETWEEN_PULSES:
return True
return False
def get_current_timestamp():
return time.time();
def get_pulse_stream(pyaudio_instance):
return pyaudio_instance.open(
format = pyaudio.paInt16,
channels = 2,
rate = RATE,
input = True,
frames_per_buffer = INPUT_FRAMES_PER_BLOCK
)
def listen_for_pulses(pulse_stream):
global last_pulse_timestamp, pulse_counter
while True:
try:
pulse_block = pulse_stream.read(INPUT_FRAMES_PER_BLOCK)
if is_pulse(pulse_block):
last_pulse_timestamp = get_current_timestamp()
pulse_counter += 1
print 'pulse: ', pulse_counter
except IOError, e:
print( "Error recording: ", e)
"""
Program
"""
pAudio = pyaudio.PyAudio()
pulse_stream = get_pulse_stream(pAudio)
last_pulse_timestamp = 0
pulse_counter = 0
listen_for_pulses(pulse_stream)
|
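The commit message points out that once the distance covered per pulse is
known, speed and distance follow. A minimal sketch of that conversion,
where METERS_PER_PULSE is an assumed value that depends on the particular
bicycle:

METERS_PER_PULSE = 4.0  # assumption: distance covered per detected pulse

def speed_kmh(seconds_between_pulses):
    # meters per second converted to kilometers per hour
    return (METERS_PER_PULSE / seconds_between_pulses) * 3.6

print(speed_kmh(1.0))  # one pulse per second -> 14.4 km/h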
|
8790e50d71c6c24b3728f2d8f1d5c94c491abc90
|
indra/tests/test_belief_engine.py
|
indra/tests/test_belief_engine.py
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.statements import *
from indra.belief import BeliefEngine
ev1 = Evidence(source_api='reach')
ev2 = Evidence(source_api='trips')
ev3 = Evidence(source_api='assertion')
def test_prior_prob_one():
be = BeliefEngine()
prob = 1 - (be.prior_probs['rand']['reach'] +
be.prior_probs['syst']['reach'])
st = Phosphorylation(None, Agent('a'), evidence=[ev1])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == prob)
def test_prior_prob_two_same():
be = BeliefEngine()
prob = 1 - (be.prior_probs['rand']['reach']**2 +
be.prior_probs['syst']['reach'])
st = Phosphorylation(None, Agent('a'), evidence=[ev1, ev1])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == prob)
def test_prior_prob_two_different():
be = BeliefEngine()
prob = 1 - (be.prior_probs['rand']['reach'] +
be.prior_probs['syst']['reach']) * \
(be.prior_probs['rand']['trips'] +
be.prior_probs['syst']['trips'])
st = Phosphorylation(None, Agent('a'), evidence=[ev1, ev2])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == prob)
def test_prior_prob_one_two():
be = BeliefEngine()
prob = 1 - (be.prior_probs['rand']['reach']**2 +
be.prior_probs['syst']['reach']) * \
(be.prior_probs['rand']['trips'] +
be.prior_probs['syst']['trips'])
st = Phosphorylation(None, Agent('a'), evidence=[ev1, ev1, ev2])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == prob)
def test_prior_prob_assertion():
be = BeliefEngine()
st = Phosphorylation(None, Agent('a'), evidence=[ev1, ev1, ev2, ev3])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == 1)
'''
def test_hierarchy_probs1():
be = BeliefEngine()
st1 = Phosphorylation(None, Agent('a'), evidence=[ev1])
st2 = Phosphorylation(None, Agent('a'), evidence=[ev2], supports=[st1])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == 1)
'''
|
Add first round of tests
|
Add first round of tests
|
Python
|
bsd-2-clause
|
sorgerlab/belpy,jmuhlich/indra,johnbachman/belpy,sorgerlab/indra,johnbachman/indra,johnbachman/belpy,bgyori/indra,sorgerlab/indra,pvtodorov/indra,sorgerlab/indra,johnbachman/belpy,johnbachman/indra,jmuhlich/indra,pvtodorov/indra,pvtodorov/indra,bgyori/indra,bgyori/indra,jmuhlich/indra,johnbachman/indra,pvtodorov/indra,sorgerlab/belpy,sorgerlab/belpy
|
Add first round of tests
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.statements import *
from indra.belief import BeliefEngine
ev1 = Evidence(source_api='reach')
ev2 = Evidence(source_api='trips')
ev3 = Evidence(source_api='assertion')
def test_prior_prob_one():
be = BeliefEngine()
prob = 1 - (be.prior_probs['rand']['reach'] +
be.prior_probs['syst']['reach'])
st = Phosphorylation(None, Agent('a'), evidence=[ev1])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == prob)
def test_prior_prob_two_same():
be = BeliefEngine()
prob = 1 - (be.prior_probs['rand']['reach']**2 +
be.prior_probs['syst']['reach'])
st = Phosphorylation(None, Agent('a'), evidence=[ev1, ev1])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == prob)
def test_prior_prob_two_different():
be = BeliefEngine()
prob = 1 - (be.prior_probs['rand']['reach'] +
be.prior_probs['syst']['reach']) * \
(be.prior_probs['rand']['trips'] +
be.prior_probs['syst']['trips'])
st = Phosphorylation(None, Agent('a'), evidence=[ev1, ev2])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == prob)
def test_prior_prob_one_two():
be = BeliefEngine()
prob = 1 - (be.prior_probs['rand']['reach']**2 +
be.prior_probs['syst']['reach']) * \
(be.prior_probs['rand']['trips'] +
be.prior_probs['syst']['trips'])
st = Phosphorylation(None, Agent('a'), evidence=[ev1, ev1, ev2])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == prob)
def test_prior_prob_assertion():
be = BeliefEngine()
st = Phosphorylation(None, Agent('a'), evidence=[ev1, ev1, ev2, ev3])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == 1)
'''
def test_hierarchy_probs1():
be = BeliefEngine()
st1 = Phosphorylation(None, Agent('a'), evidence=[ev1])
st2 = Phosphorylation(None, Agent('a'), evidence=[ev2], supports=[st1])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == 1)
'''
|
<commit_before><commit_msg>Add first round of tests<commit_after>
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.statements import *
from indra.belief import BeliefEngine
ev1 = Evidence(source_api='reach')
ev2 = Evidence(source_api='trips')
ev3 = Evidence(source_api='assertion')
def test_prior_prob_one():
be = BeliefEngine()
prob = 1 - (be.prior_probs['rand']['reach'] +
be.prior_probs['syst']['reach'])
st = Phosphorylation(None, Agent('a'), evidence=[ev1])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == prob)
def test_prior_prob_two_same():
be = BeliefEngine()
prob = 1 - (be.prior_probs['rand']['reach']**2 +
be.prior_probs['syst']['reach'])
st = Phosphorylation(None, Agent('a'), evidence=[ev1, ev1])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == prob)
def test_prior_prob_two_different():
be = BeliefEngine()
prob = 1 - (be.prior_probs['rand']['reach'] +
be.prior_probs['syst']['reach']) * \
(be.prior_probs['rand']['trips'] +
be.prior_probs['syst']['trips'])
st = Phosphorylation(None, Agent('a'), evidence=[ev1, ev2])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == prob)
def test_prior_prob_one_two():
be = BeliefEngine()
prob = 1 - (be.prior_probs['rand']['reach']**2 +
be.prior_probs['syst']['reach']) * \
(be.prior_probs['rand']['trips'] +
be.prior_probs['syst']['trips'])
st = Phosphorylation(None, Agent('a'), evidence=[ev1, ev1, ev2])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == prob)
def test_prior_prob_assertion():
be = BeliefEngine()
st = Phosphorylation(None, Agent('a'), evidence=[ev1, ev1, ev2, ev3])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == 1)
'''
def test_hierarchy_probs1():
be = BeliefEngine()
st1 = Phosphorylation(None, Agent('a'), evidence=[ev1])
st2 = Phosphorylation(None, Agent('a'), evidence=[ev2], supports=[st1])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == 1)
'''
|
Add first round of testsfrom __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.statements import *
from indra.belief import BeliefEngine
ev1 = Evidence(source_api='reach')
ev2 = Evidence(source_api='trips')
ev3 = Evidence(source_api='assertion')
def test_prior_prob_one():
be = BeliefEngine()
prob = 1 - (be.prior_probs['rand']['reach'] +
be.prior_probs['syst']['reach'])
st = Phosphorylation(None, Agent('a'), evidence=[ev1])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == prob)
def test_prior_prob_two_same():
be = BeliefEngine()
prob = 1 - (be.prior_probs['rand']['reach']**2 +
be.prior_probs['syst']['reach'])
st = Phosphorylation(None, Agent('a'), evidence=[ev1, ev1])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == prob)
def test_prior_prob_two_different():
be = BeliefEngine()
prob = 1 - (be.prior_probs['rand']['reach'] +
be.prior_probs['syst']['reach']) * \
(be.prior_probs['rand']['trips'] +
be.prior_probs['syst']['trips'])
st = Phosphorylation(None, Agent('a'), evidence=[ev1, ev2])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == prob)
def test_prior_prob_one_two():
be = BeliefEngine()
prob = 1 - (be.prior_probs['rand']['reach']**2 +
be.prior_probs['syst']['reach']) * \
(be.prior_probs['rand']['trips'] +
be.prior_probs['syst']['trips'])
st = Phosphorylation(None, Agent('a'), evidence=[ev1, ev1, ev2])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == prob)
def test_prior_prob_assertion():
be = BeliefEngine()
st = Phosphorylation(None, Agent('a'), evidence=[ev1, ev1, ev2, ev3])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == 1)
'''
def test_hierarchy_probs1():
be = BeliefEngine()
st1 = Phosphorylation(None, Agent('a'), evidence=[ev1])
st2 = Phosphorylation(None, Agent('a'), evidence=[ev2], supports=[st1])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == 1)
'''
|
<commit_before><commit_msg>Add first round of tests<commit_after>from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.statements import *
from indra.belief import BeliefEngine
ev1 = Evidence(source_api='reach')
ev2 = Evidence(source_api='trips')
ev3 = Evidence(source_api='assertion')
def test_prior_prob_one():
be = BeliefEngine()
prob = 1 - (be.prior_probs['rand']['reach'] +
be.prior_probs['syst']['reach'])
st = Phosphorylation(None, Agent('a'), evidence=[ev1])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == prob)
def test_prior_prob_two_same():
be = BeliefEngine()
prob = 1 - (be.prior_probs['rand']['reach']**2 +
be.prior_probs['syst']['reach'])
st = Phosphorylation(None, Agent('a'), evidence=[ev1, ev1])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == prob)
def test_prior_prob_two_different():
be = BeliefEngine()
prob = 1 - (be.prior_probs['rand']['reach'] +
be.prior_probs['syst']['reach']) * \
(be.prior_probs['rand']['trips'] +
be.prior_probs['syst']['trips'])
st = Phosphorylation(None, Agent('a'), evidence=[ev1, ev2])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == prob)
def test_prior_prob_one_two():
be = BeliefEngine()
prob = 1 - (be.prior_probs['rand']['reach']**2 +
be.prior_probs['syst']['reach']) * \
(be.prior_probs['rand']['trips'] +
be.prior_probs['syst']['trips'])
st = Phosphorylation(None, Agent('a'), evidence=[ev1, ev1, ev2])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == prob)
def test_prior_prob_assertion():
be = BeliefEngine()
st = Phosphorylation(None, Agent('a'), evidence=[ev1, ev1, ev2, ev3])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == 1)
'''
def test_hierarchy_probs1():
be = BeliefEngine()
st1 = Phosphorylation(None, Agent('a'), evidence=[ev1])
st2 = Phosphorylation(None, Agent('a'), evidence=[ev2], supports=[st1])
assert(st.belief == 1)
be.set_prior_probs([st])
assert(st.belief == 1)
'''
|
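The expected values in the tests above all follow one formula: for a source
with n pieces of evidence, the chance the statement is wrong is
rand**n + syst, and independent sources multiply. A sketch of that
computation; the numeric priors below are illustrative, not the engine's
actual defaults:

prior_probs = {'rand': {'reach': 0.05, 'trips': 0.05},
               'syst': {'reach': 0.05, 'trips': 0.05}}

def prior_belief(evidence_counts):
    # evidence_counts maps source_api -> number of evidences from it
    p_wrong = 1.0
    for source, n in evidence_counts.items():
        p_wrong *= prior_probs['rand'][source] ** n + prior_probs['syst'][source]
    return 1 - p_wrong

# Mirrors the structure of test_prior_prob_one_two: two reach evidences
# plus one trips evidence.
print(prior_belief({'reach': 2, 'trips': 1}))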
|
19e220698978a5cfe8129d1f207d390d1a343c1a
|
st2client/tests/unit/test_action_alias.py
|
st2client/tests/unit/test_action_alias.py
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import mock
import logging
from tests import base
from st2client import shell
from st2client import models
from st2client.utils import httpclient
MOCK_MATCH_AND_EXECUTE_RESULT = {
"results": [
{
"execution": {
"id": "mock-id",
},
"actionalias": {
"ref": "mock-ref"
}
}
]
}
class ActionAliasCommandTestCase(base.BaseCLITestCase):
def __init__(self, *args, **kwargs):
super(ActionAliasCommandTestCase, self).__init__(*args, **kwargs)
self.shell = shell.Shell()
@mock.patch.object(
httpclient.HTTPClient, 'post',
mock.MagicMock(return_value=base.FakeResponse(json.dumps(MOCK_MATCH_AND_EXECUTE_RESULT), 200, 'OK')))
def test_match_and_execute(self):
ret = self.shell.run(['action-alias', 'execute', "run whoami on localhost"])
self.assertEqual(ret, 0)
expected_args = {
'command': 'run whoami on localhost',
'user': '',
'source_channel': 'cli'
}
httpclient.HTTPClient.post.assert_called_with('/aliasexecution/match_and_execute', expected_args)
mock_stdout = self.stdout.getvalue()
self.assertTrue("Matching Action-alias: 'mock-ref'" in mock_stdout)
self.assertTrue("st2 execution get mock-id" in mock_stdout)
|
Add a direct st2 CLI test case for st2 alias-execution execute command.
|
Add a direct st2 CLI test case for st2 alias-execution execute command.
|
Python
|
apache-2.0
|
StackStorm/st2,Plexxi/st2,Plexxi/st2,nzlosh/st2,StackStorm/st2,nzlosh/st2,StackStorm/st2,Plexxi/st2,Plexxi/st2,nzlosh/st2,nzlosh/st2,StackStorm/st2
|
Add a direct st2 CLI test case for st2 alias-execution execute command.
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import mock
import logging
from tests import base
from st2client import shell
from st2client import models
from st2client.utils import httpclient
MOCK_MATCH_AND_EXECUTE_RESULT = {
"results": [
{
"execution": {
"id": "mock-id",
},
"actionalias": {
"ref": "mock-ref"
}
}
]
}
class ActionAliasCommandTestCase(base.BaseCLITestCase):
def __init__(self, *args, **kwargs):
super(ActionAliasCommandTestCase, self).__init__(*args, **kwargs)
self.shell = shell.Shell()
@mock.patch.object(
httpclient.HTTPClient, 'post',
mock.MagicMock(return_value=base.FakeResponse(json.dumps(MOCK_MATCH_AND_EXECUTE_RESULT), 200, 'OK')))
def test_match_and_execute(self):
ret = self.shell.run(['action-alias', 'execute', "run whoami on localhost"])
self.assertEqual(ret, 0)
expected_args = {
'command': 'run whoami on localhost',
'user': '',
'source_channel': 'cli'
}
httpclient.HTTPClient.post.assert_called_with('/aliasexecution/match_and_execute', expected_args)
mock_stdout = self.stdout.getvalue()
self.assertTrue("Matching Action-alias: 'mock-ref'" in mock_stdout)
self.assertTrue("st2 execution get mock-id" in mock_stdout)
|
<commit_before><commit_msg>Add a direct st2 CLI test case for st2 alias-execution execute command.<commit_after>
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import mock
import logging
from tests import base
from st2client import shell
from st2client import models
from st2client.utils import httpclient
MOCK_MATCH_AND_EXECUTE_RESULT = {
"results": [
{
"execution": {
"id": "mock-id",
},
"actionalias": {
"ref": "mock-ref"
}
}
]
}
class ActionAliasCommandTestCase(base.BaseCLITestCase):
def __init__(self, *args, **kwargs):
super(ActionAliasCommandTestCase, self).__init__(*args, **kwargs)
self.shell = shell.Shell()
@mock.patch.object(
httpclient.HTTPClient, 'post',
mock.MagicMock(return_value=base.FakeResponse(json.dumps(MOCK_MATCH_AND_EXECUTE_RESULT), 200, 'OK')))
def test_match_and_execute(self):
ret = self.shell.run(['action-alias', 'execute', "run whoami on localhost"])
self.assertEqual(ret, 0)
expected_args = {
'command': 'run whoami on localhost',
'user': '',
'source_channel': 'cli'
}
httpclient.HTTPClient.post.assert_called_with('/aliasexecution/match_and_execute', expected_args)
mock_stdout = self.stdout.getvalue()
self.assertTrue("Matching Action-alias: 'mock-ref'" in mock_stdout)
self.assertTrue("st2 execution get mock-id" in mock_stdout)
|
Add a direct st2 CLI test case for st2 alias-execution execute command.# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import mock
import logging
from tests import base
from st2client import shell
from st2client import models
from st2client.utils import httpclient
MOCK_MATCH_AND_EXECUTE_RESULT = {
"results": [
{
"execution": {
"id": "mock-id",
},
"actionalias": {
"ref": "mock-ref"
}
}
]
}
class ActionAliasCommandTestCase(base.BaseCLITestCase):
def __init__(self, *args, **kwargs):
super(ActionAliasCommandTestCase, self).__init__(*args, **kwargs)
self.shell = shell.Shell()
@mock.patch.object(
httpclient.HTTPClient, 'post',
mock.MagicMock(return_value=base.FakeResponse(json.dumps(MOCK_MATCH_AND_EXECUTE_RESULT), 200, 'OK')))
def test_match_and_execute(self):
ret = self.shell.run(['action-alias', 'execute', "run whoami on localhost"])
self.assertEqual(ret, 0)
expected_args = {
'command': 'run whoami on localhost',
'user': '',
'source_channel': 'cli'
}
httpclient.HTTPClient.post.assert_called_with('/aliasexecution/match_and_execute', expected_args)
mock_stdout = self.stdout.getvalue()
self.assertTrue("Matching Action-alias: 'mock-ref'" in mock_stdout)
self.assertTrue("st2 execution get mock-id" in mock_stdout)
|
<commit_before><commit_msg>Add a direct st2 CLI test case for st2 alias-execution execute command.<commit_after># Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import mock
import logging
from tests import base
from st2client import shell
from st2client import models
from st2client.utils import httpclient
MOCK_MATCH_AND_EXECUTE_RESULT = {
"results": [
{
"execution": {
"id": "mock-id",
},
"actionalias": {
"ref": "mock-ref"
}
}
]
}
class ActionAliasCommandTestCase(base.BaseCLITestCase):
def __init__(self, *args, **kwargs):
super(ActionAliasCommandTestCase, self).__init__(*args, **kwargs)
self.shell = shell.Shell()
@mock.patch.object(
httpclient.HTTPClient, 'post',
mock.MagicMock(return_value=base.FakeResponse(json.dumps(MOCK_MATCH_AND_EXECUTE_RESULT), 200, 'OK')))
def test_match_and_execute(self):
ret = self.shell.run(['action-alias', 'execute', "run whoami on localhost"])
self.assertEqual(ret, 0)
expected_args = {
'command': 'run whoami on localhost',
'user': '',
'source_channel': 'cli'
}
httpclient.HTTPClient.post.assert_called_with('/aliasexecution/match_and_execute', expected_args)
mock_stdout = self.stdout.getvalue()
self.assertTrue("Matching Action-alias: 'mock-ref'" in mock_stdout)
self.assertTrue("st2 execution get mock-id" in mock_stdout)
|
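The assertions in the test above pin both the endpoint path and the
payload, so the CLI invocation is roughly equivalent to this direct
request; the base URL is an assumption, while the path and body are taken
straight from the test:

import requests

API_BASE = 'http://127.0.0.1:9101'  # assumed host and port

resp = requests.post(
    API_BASE + '/aliasexecution/match_and_execute',
    json={'command': 'run whoami on localhost',
          'user': '',
          'source_channel': 'cli'},
)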
|
80eb53ffeb51b3a6707c9714a0ed7acf7228b017
|
php5_fpm.py
|
php5_fpm.py
|
#!/usr/bin/env python
#
# igcollect - PHP5 FPM
#
# This is the data collector for the PHP5 FPM status page. It makes an
# HTTP request to get the page and formats the output. All the numeric
# values of the requested pool are printed.
#
# Copyright (c) 2016, InnoGames GmbH
#
from __future__ import print_function
import urllib2
import socket
import time
from argparse import ArgumentParser
def parse_args():
parser = ArgumentParser()
parser.add_argument('--prefix', default='servers.{hostname}.software.php5_fpm')
parser.add_argument('--host', default='localhost')
parser.add_argument('--address')
parser.add_argument('--location', default='/fpm-status')
parser.add_argument('--pool', default='www')
return vars(parser.parse_args())
def main(prefix, host, location, pool, address=None):
"""The main program"""
url = 'http://' + (address or host) + location
request = urllib2.Request(url, headers={'Host': host})
response = urllib2.urlopen(request)
hostname = socket.gethostname().replace('.', '_')
now = str(int(time.time()))
prefix = prefix.format(hostname=hostname)
pool_found = False
for line in response.readlines():
key, value = line.split(':', 1)
key = key.replace(' ', '_')
value = value.strip()
if key == 'pool':
pool_found = value == pool
if pool_found and value.isdigit():
print(prefix + '.' + key, value.strip(), now)
if __name__ == '__main__':
main(**parse_args())
|
Add plugin for PHP5 FPM
|
Add plugin for PHP5 FPM
|
Python
|
mit
|
innogames/igcollect
|
Add plugin for PHP5 FPM
|
#!/usr/bin/env python
#
# igcollect - PHP5 FPM
#
# This is the data collector for the PHP5 FPM status page. It makes an
# HTTP request to get the page and formats the output. All the numeric
# values of the requested pool are printed.
#
# Copyright (c) 2016, InnoGames GmbH
#
from __future__ import print_function
import urllib2
import socket
import time
from argparse import ArgumentParser
def parse_args():
parser = ArgumentParser()
parser.add_argument('--prefix', default='servers.{hostname}.software.php5_fpm')
parser.add_argument('--host', default='localhost')
parser.add_argument('--address')
parser.add_argument('--location', default='/fpm-status')
parser.add_argument('--pool', default='www')
return vars(parser.parse_args())
def main(prefix, host, location, pool, address=None):
"""The main program"""
url = 'http://' + (address or host) + location
request = urllib2.Request(url, headers={'Host': host})
response = urllib2.urlopen(request)
hostname = socket.gethostname().replace('.', '_')
now = str(int(time.time()))
prefix = prefix.format(hostname=hostname)
pool_found = False
for line in response.readlines():
key, value = line.split(':', 1)
key = key.replace(' ', '_')
value = value.strip()
if key == 'pool':
pool_found = value == pool
if pool_found and value.isdigit():
print(prefix + '.' + key, value.strip(), now)
if __name__ == '__main__':
main(**parse_args())
|
<commit_before><commit_msg>Add plugin for PHP5 FPM<commit_after>
|
#!/usr/bin/env python
#
# igcollect - PHP5 FPM
#
# This is the data collector for the PHP5 FPM status page. It makes an
# HTTP request to get the page and formats the output. All the numeric
# values of the requested pool are printed.
#
# Copyright (c) 2016, InnoGames GmbH
#
from __future__ import print_function
import urllib2
import socket
import time
from argparse import ArgumentParser
def parse_args():
parser = ArgumentParser()
parser.add_argument('--prefix', default='servers.{hostname}.software.php5_fpm')
parser.add_argument('--host', default='localhost')
parser.add_argument('--address')
parser.add_argument('--location', default='/fpm-status')
parser.add_argument('--pool', default='www')
return vars(parser.parse_args())
def main(prefix, host, location, pool, address=None):
"""The main program"""
url = 'http://' + (address or host) + location
request = urllib2.Request(url, headers={'Host': host})
response = urllib2.urlopen(request)
hostname = socket.gethostname().replace('.', '_')
now = str(int(time.time()))
prefix = prefix.format(hostname=hostname)
pool_found = False
for line in response.readlines():
key, value = line.split(':', 1)
key = key.replace(' ', '_')
value = value.strip()
if key == 'pool':
pool_found = value == pool
if pool_found and value.isdigit():
print(prefix + '.' + key, value.strip(), now)
if __name__ == '__main__':
main(**parse_args())
|
Add plugin for PHP5 FPM#!/usr/bin/env python
#
# igcollect - PHP5 FPM
#
# This is the data collector for the PHP5 FPM status page. It makes an
# HTTP request to get the page and formats the output. All the numeric
# values of the requested pool are printed.
#
# Copyright (c) 2016, InnoGames GmbH
#
from __future__ import print_function
import urllib2
import socket
import time
from argparse import ArgumentParser
def parse_args():
parser = ArgumentParser()
parser.add_argument('--prefix', default='servers.{hostname}.software.php5_fpm')
parser.add_argument('--host', default='localhost')
parser.add_argument('--address')
parser.add_argument('--location', default='/fpm-status')
parser.add_argument('--pool', default='www')
return vars(parser.parse_args())
def main(prefix, host, location, pool, address=None):
"""The main program"""
url = 'http://' + (address or host) + location
request = urllib2.Request(url, headers={'Host': host})
response = urllib2.urlopen(request)
hostname = socket.gethostname().replace('.', '_')
now = str(int(time.time()))
prefix = prefix.format(hostname=hostname)
pool_found = False
for line in response.readlines():
key, value = line.split(':', 1)
key = key.replace(' ', '_')
value = value.strip()
if key == 'pool':
pool_found = value == pool
if pool_found and value.isdigit():
print(prefix + '.' + key, value.strip(), now)
if __name__ == '__main__':
main(**parse_args())
|
<commit_before><commit_msg>Add plugin for PHP5 FPM<commit_after>#!/usr/bin/env python
#
# igcollect - PHP5 FPM
#
# This is the data collector for the PHP5 FPM status page. It makes an
# HTTP request to get the page and formats the output. All the numeric
# values of the requested pool are printed.
#
# Copyright (c) 2016, InnoGames GmbH
#
from __future__ import print_function
import urllib2
import socket
import time
from argparse import ArgumentParser
def parse_args():
parser = ArgumentParser()
parser.add_argument('--prefix', default='servers.{hostname}.software.php5_fpm')
parser.add_argument('--host', default='localhost')
parser.add_argument('--address')
parser.add_argument('--location', default='/fpm-status')
parser.add_argument('--pool', default='www')
return vars(parser.parse_args())
def main(prefix, host, location, pool, address=None):
"""The main program"""
url = 'http://' + (address or host) + location
request = urllib2.Request(url, headers={'Host': host})
response = urllib2.urlopen(request)
hostname = socket.gethostname().replace('.', '_')
now = str(int(time.time()))
prefix = prefix.format(hostname=hostname)
pool_found = False
for line in response.readlines():
key, value = line.split(':', 1)
key = key.replace(' ', '_')
value = value.strip()
if key == 'pool':
pool_found = value == pool
if pool_found and value.isdigit():
print(prefix + '.' + key, value.strip(), now)
if __name__ == '__main__':
main(**parse_args())
|
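Aside: the collector's key/value parsing can be sanity-checked without a live FPM socket by feeding it a canned status page; the SAMPLE text below is an assumption about the usual fpm-status layout:

from __future__ import print_function

# Exercise the collector's 'key: value' parsing against canned input.
SAMPLE = '''pool:                 www
process manager:      dynamic
accepted conn:        12073
active processes:     2
'''

for line in SAMPLE.splitlines():
    key, value = line.split(':', 1)
    key = key.replace(' ', '_')        # 'accepted conn' -> 'accepted_conn'
    value = value.strip()
    if value.isdigit():
        # Graphite plaintext protocol: <metric path> <value> <timestamp>
        print('php5_fpm.' + key, value, 1470000000)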
|
54caac00ed4b6f302ee3dc4466711e0737f18352
|
xpathwebdriver_tests/wipe_alerts.py
|
xpathwebdriver_tests/wipe_alerts.py
|
from xpathwebdriver_tests.test_XpathBrowser import WebUnitTestBase
import unittest
class TestXpathBrowserWipeAlerts(WebUnitTestBase):
def test_wipe_alerts(self):
from selenium.common.exceptions import UnexpectedAlertPresentException
body = '''
<script type="text/javascript">
alert('Example alert');
</script>
'''
try:
with self.create_html('test_wipe_alerts', body) as path:
self.get_local_page(path)
except UnexpectedAlertPresentException:
self.browser.wipe_alerts()
else:
self.fail('No alert wiped')
if __name__ == "__main__":
unittest.main()
|
Fix failing test in Codeship
|
Fix failing test in Codeship
|
Python
|
mit
|
joaduo/xpathwebdriver,joaduo/xpathwebdriver
|
Fix failing test in Codeship
|
from xpathwebdriver_tests.test_XpathBrowser import WebUnitTestBase
import unittest
class TestXpathBrowserWipeAlerts(WebUnitTestBase):
def test_wipe_alerts(self):
from selenium.common.exceptions import UnexpectedAlertPresentException
body = '''
<script type="text/javascript">
alert('Example alert');
</script>
'''
try:
with self.create_html('test_wipe_alerts', body) as path:
self.get_local_page(path)
except UnexpectedAlertPresentException:
self.browser.wipe_alerts()
else:
self.fail('No alert wiped')
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Fix failing test in Codeship<commit_after>
|
from xpathwebdriver_tests.test_XpathBrowser import WebUnitTestBase
import unittest
class TestXpathBrowserWipeAlerts(WebUnitTestBase):
def test_wipe_alerts(self):
from selenium.common.exceptions import UnexpectedAlertPresentException
body = '''
<script type="text/javascript">
alert('Example alert');
</script>
'''
try:
with self.create_html('test_wipe_alerts', body) as path:
self.get_local_page(path)
except UnexpectedAlertPresentException:
self.browser.wipe_alerts()
else:
self.fail('No alert wiped')
if __name__ == "__main__":
unittest.main()
|
Fix failing test in Codeshipfrom xpathwebdriver_tests.test_XpathBrowser import WebUnitTestBase
import unittest
class TestXpathBrowserWipeAlerts(WebUnitTestBase):
def test_wipe_alerts(self):
from selenium.common.exceptions import UnexpectedAlertPresentException
body = '''
<script type="text/javascript">
alert('Example alert');
</script>
'''
try:
with self.create_html('test_wipe_alerts', body) as path:
self.get_local_page(path)
except UnexpectedAlertPresentException:
self.browser.wipe_alerts()
else:
self.fail('No alert wiped')
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Fix failing test in Codeship<commit_after>from xpathwebdriver_tests.test_XpathBrowser import WebUnitTestBase
import unittest
class TestXpathBrowserWipeAlerts(WebUnitTestBase):
def test_wipe_alerts(self):
from selenium.common.exceptions import UnexpectedAlertPresentException
body = '''
<script type="text/javascript">
alert('Example alert');
</script>
'''
try:
with self.create_html('test_wipe_alerts', body) as path:
self.get_local_page(path)
except UnexpectedAlertPresentException:
self.browser.wipe_alerts()
else:
self.fail('No alert wiped')
if __name__ == "__main__":
unittest.main()
|
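Aside: the wipe_alerts helper the test exercises is typically a loop over switch_to.alert; this is a hedged sketch of one plausible implementation, not the actual xpathwebdriver code:

from selenium.common.exceptions import NoAlertPresentException


def wipe_alerts(driver, max_alerts=10):
    """Dismiss any JavaScript alerts pending on the current page."""
    for _ in range(max_alerts):
        try:
            driver.switch_to.alert.accept()   # accept() also clears confirm()/prompt()
        except NoAlertPresentException:
            break                             # nothing left to dismiss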
|
801165049f9536840dd226c1790d41178dd6c812
|
rdfalchemy/py3compat.py
|
rdfalchemy/py3compat.py
|
"""
Utility functions and objects to ease Python 3 compatibility.
Contributed to rdflib 3 by Thomas Kluyver, re-used here.
"""
import sys
try:
from functools import wraps
except ImportError:
# No-op wraps decorator
def wraps(f):
def dec(newf): return newf
return dec
def cast_bytes(s, enc='utf-8'):
if isinstance(s, unicode):
return s.encode(enc)
return s
PY3 = (sys.version_info[0] >= 3)
def _modify_str_or_docstring(str_change_func):
@wraps(str_change_func)
def wrapper(func_or_str):
if isinstance(func_or_str, str):
func = None
doc = func_or_str
else:
func = func_or_str
doc = func.__doc__
doc = str_change_func(doc)
if func:
func.__doc__ = doc
return func
return doc
return wrapper
if PY3:
# Python 3:
# ---------
def b(s):
return s.encode('ascii')
bytestype = bytes
# Abstract u'abc' syntax:
@_modify_str_or_docstring
def format_doctest_out(s):
"""Python 2 version
"%(u)s'abc'" --> "'abc'"
"%(b)s'abc'" --> "b'abc'"
"55%(L)s" --> "55"
Accepts a string or a function, so it can be used as a decorator."""
return s % {'u':'', 'b':'b', 'L':''}
def type_cmp(a, b):
"""Python 2 style comparison based on type"""
ta, tb = type(a).__name__, type(b).__name__
# Ugly hack: some tests rely on tuple sorting before unicode, and I
# don't know if that's important. Better retain it for now.
if ta == 'str':
ta = 'unicode'
if tb == 'str':
tb = 'unicode'
# return 1 if ta > tb else -1 if ta < tb else 0
if ta > tb:
return 1
elif ta < tb:
return -1
else:
return 0
else:
# Python 2
# --------
def b(s):
return s
bytestype = str
# Abstract u'abc' syntax:
@_modify_str_or_docstring
def format_doctest_out(s):
"""Python 2 version
"%(u)s'abc'" --> "u'abc'"
"%(b)s'abc'" --> "'abc'"
"55%(L)s" --> "55L"
Accepts a string or a function, so it can be used as a decorator."""
return s % {'u':'u', 'b':'', 'L':'L'}
def type_cmp(a, b):
# return 1 if a > b else -1 if a < b else 0
if a > b:
return 1
elif a < b:
return -1
else:
return 0
|
Copy Thomas Kluyver's Py3 compat from rdflib
|
Copy Thomas Kluyver's Py3 compat from rdflib
|
Python
|
mit
|
gjhiggins/RDFAlchemy
|
Copy Thomas Kluyver's Py3 compat from rdflib
|
"""
Utility functions and objects to ease Python 3 compatibility.
Contributed to rdflib 3 by Thomas Kluyver, re-used here.
"""
import sys
try:
from functools import wraps
except ImportError:
# No-op wraps decorator
def wraps(f):
def dec(newf): return newf
return dec
def cast_bytes(s, enc='utf-8'):
if isinstance(s, unicode):
return s.encode(enc)
return s
PY3 = (sys.version_info[0] >= 3)
def _modify_str_or_docstring(str_change_func):
@wraps(str_change_func)
def wrapper(func_or_str):
if isinstance(func_or_str, str):
func = None
doc = func_or_str
else:
func = func_or_str
doc = func.__doc__
doc = str_change_func(doc)
if func:
func.__doc__ = doc
return func
return doc
return wrapper
if PY3:
# Python 3:
# ---------
def b(s):
return s.encode('ascii')
bytestype = bytes
# Abstract u'abc' syntax:
@_modify_str_or_docstring
def format_doctest_out(s):
"""Python 2 version
"%(u)s'abc'" --> "'abc'"
"%(b)s'abc'" --> "b'abc'"
"55%(L)s" --> "55"
Accepts a string or a function, so it can be used as a decorator."""
return s % {'u':'', 'b':'b', 'L':''}
def type_cmp(a, b):
"""Python 2 style comparison based on type"""
ta, tb = type(a).__name__, type(b).__name__
# Ugly hack: some tests rely on tuple sorting before unicode, and I
# don't know if that's important. Better retain it for now.
if ta == 'str':
ta = 'unicode'
if tb == 'str':
tb = 'unicode'
# return 1 if ta > tb else -1 if ta < tb else 0
if ta > tb:
return 1
elif ta < tb:
return -1
else:
return 0
else:
# Python 2
# --------
def b(s):
return s
bytestype = str
# Abstract u'abc' syntax:
@_modify_str_or_docstring
def format_doctest_out(s):
"""Python 2 version
"%(u)s'abc'" --> "u'abc'"
"%(b)s'abc'" --> "'abc'"
"55%(L)s" --> "55L"
Accepts a string or a function, so it can be used as a decorator."""
return s % {'u':'u', 'b':'', 'L':'L'}
def type_cmp(a, b):
# return 1 if a > b else -1 if a < b else 0
if a > b:
return 1
elif a < b:
return -1
else:
return 0
|
<commit_before><commit_msg>Copy Thomas Kluyver's Py3 compat from rdflib<commit_after>
|
"""
Utility functions and objects to ease Python 3 compatibility.
Contributed to rdflib 3 by Thomas Kluyver, re-used here.
"""
import sys
try:
from functools import wraps
except ImportError:
# No-op wraps decorator
def wraps(f):
def dec(newf): return newf
return dec
def cast_bytes(s, enc='utf-8'):
if isinstance(s, unicode):
return s.encode(enc)
return s
PY3 = (sys.version_info[0] >= 3)
def _modify_str_or_docstring(str_change_func):
@wraps(str_change_func)
def wrapper(func_or_str):
if isinstance(func_or_str, str):
func = None
doc = func_or_str
else:
func = func_or_str
doc = func.__doc__
doc = str_change_func(doc)
if func:
func.__doc__ = doc
return func
return doc
return wrapper
if PY3:
# Python 3:
# ---------
def b(s):
return s.encode('ascii')
bytestype = bytes
# Abstract u'abc' syntax:
@_modify_str_or_docstring
def format_doctest_out(s):
"""Python 2 version
"%(u)s'abc'" --> "'abc'"
"%(b)s'abc'" --> "b'abc'"
"55%(L)s" --> "55"
Accepts a string or a function, so it can be used as a decorator."""
return s % {'u':'', 'b':'b', 'L':''}
def type_cmp(a, b):
"""Python 2 style comparison based on type"""
ta, tb = type(a).__name__, type(b).__name__
# Ugly hack: some tests rely on tuple sorting before unicode, and I
# don't know if that's important. Better retain it for now.
if ta == 'str':
ta = 'unicode'
if tb == 'str':
tb = 'unicode'
# return 1 if ta > tb else -1 if ta < tb else 0
if ta > tb:
return 1
elif ta < tb:
return -1
else:
return 0
else:
# Python 2
# --------
def b(s):
return s
bytestype = str
# Abstract u'abc' syntax:
@_modify_str_or_docstring
def format_doctest_out(s):
"""Python 2 version
"%(u)s'abc'" --> "u'abc'"
"%(b)s'abc'" --> "'abc'"
"55%(L)s" --> "55L"
Accepts a string or a function, so it can be used as a decorator."""
return s % {'u':'u', 'b':'', 'L':'L'}
def type_cmp(a, b):
# return 1 if a > b else -1 if a < b else 0
if a > b:
return 1
elif a < b:
return -1
else:
return 0
|
Copy Thomas Kluyver's Py3 compat from rdflib"""
Utility functions and objects to ease Python 3 compatibility.
Contributed to rdflib 3 by Thomas Kluyver, re-used here.
"""
import sys
try:
from functools import wraps
except ImportError:
# No-op wraps decorator
def wraps(f):
def dec(newf): return newf
return dec
def cast_bytes(s, enc='utf-8'):
if isinstance(s, unicode):
return s.encode(enc)
return s
PY3 = (sys.version_info[0] >= 3)
def _modify_str_or_docstring(str_change_func):
@wraps(str_change_func)
def wrapper(func_or_str):
if isinstance(func_or_str, str):
func = None
doc = func_or_str
else:
func = func_or_str
doc = func.__doc__
doc = str_change_func(doc)
if func:
func.__doc__ = doc
return func
return doc
return wrapper
if PY3:
# Python 3:
# ---------
def b(s):
return s.encode('ascii')
bytestype = bytes
# Abstract u'abc' syntax:
@_modify_str_or_docstring
def format_doctest_out(s):
"""Python 2 version
"%(u)s'abc'" --> "'abc'"
"%(b)s'abc'" --> "b'abc'"
"55%(L)s" --> "55"
Accepts a string or a function, so it can be used as a decorator."""
return s % {'u':'', 'b':'b', 'L':''}
def type_cmp(a, b):
"""Python 2 style comparison based on type"""
ta, tb = type(a).__name__, type(b).__name__
# Ugly hack: some tests rely on tuple sorting before unicode, and I
# don't know if that's important. Better retain it for now.
if ta == 'str':
ta = 'unicode'
if tb == 'str':
tb = 'unicode'
# return 1 if ta > tb else -1 if ta < tb else 0
if ta > tb:
return 1
elif ta < tb:
return -1
else:
return 0
else:
# Python 2
# --------
def b(s):
return s
bytestype = str
# Abstract u'abc' syntax:
@_modify_str_or_docstring
def format_doctest_out(s):
"""Python 2 version
"%(u)s'abc'" --> "u'abc'"
"%(b)s'abc'" --> "'abc'"
"55%(L)s" --> "55L"
Accepts a string or a function, so it can be used as a decorator."""
return s % {'u':'u', 'b':'', 'L':'L'}
def type_cmp(a, b):
# return 1 if a > b else -1 if a < b else 0
if a > b:
return 1
elif a < b:
return -1
else:
return 0
|
<commit_before><commit_msg>Copy Thomas Kluyver's Py3 compat from rdflib<commit_after>"""
Utility functions and objects to ease Python 3 compatibility.
Contributed to rdflib 3 by Thomas Kluyver, re-used here.
"""
import sys
try:
from functools import wraps
except ImportError:
# No-op wraps decorator
def wraps(f):
def dec(newf): return newf
return dec
def cast_bytes(s, enc='utf-8'):
if isinstance(s, unicode):
return s.encode(enc)
return s
PY3 = (sys.version_info[0] >= 3)
def _modify_str_or_docstring(str_change_func):
@wraps(str_change_func)
def wrapper(func_or_str):
if isinstance(func_or_str, str):
func = None
doc = func_or_str
else:
func = func_or_str
doc = func.__doc__
doc = str_change_func(doc)
if func:
func.__doc__ = doc
return func
return doc
return wrapper
if PY3:
# Python 3:
# ---------
def b(s):
return s.encode('ascii')
bytestype = bytes
# Abstract u'abc' syntax:
@_modify_str_or_docstring
def format_doctest_out(s):
"""Python 2 version
"%(u)s'abc'" --> "'abc'"
"%(b)s'abc'" --> "b'abc'"
"55%(L)s" --> "55"
Accepts a string or a function, so it can be used as a decorator."""
return s % {'u':'', 'b':'b', 'L':''}
def type_cmp(a, b):
"""Python 2 style comparison based on type"""
ta, tb = type(a).__name__, type(b).__name__
# Ugly hack: some tests rely on tuple sorting before unicode, and I
# don't know if that's important. Better retain it for now.
if ta == 'str':
ta = 'unicode'
if tb == 'str':
tb = 'unicode'
# return 1 if ta > tb else -1 if ta < tb else 0
if ta > tb:
return 1
elif ta < tb:
return -1
else:
return 0
else:
# Python 2
# --------
def b(s):
return s
bytestype = str
# Abstract u'abc' syntax:
@_modify_str_or_docstring
def format_doctest_out(s):
"""Python 2 version
"%(u)s'abc'" --> "u'abc'"
"%(b)s'abc'" --> "'abc'"
"55%(L)s" --> "55L"
Accepts a string or a function, so it can be used as a decorator."""
return s % {'u':'u', 'b':'', 'L':'L'}
def type_cmp(a, b):
# return 1 if a > b else -1 if a < b else 0
if a > b:
return 1
elif a < b:
return -1
else:
return 0
|
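Usage note: format_doctest_out decorates functions whose docstrings carry %(u)s / %(b)s / %(L)s placeholders, so one doctest source renders correctly under either interpreter. A small illustrative use (assuming the module imports as rdfalchemy.py3compat):

from rdfalchemy.py3compat import format_doctest_out


@format_doctest_out
def greet():
    """Return a text greeting.

    >>> greet() == %(u)s'hi'
    True
    """
    return u'hi'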
|
5fa28bdbd6605939c92508245ca4d3e6a2fbbaaf
|
tests/test_ctrl_proc.py
|
tests/test_ctrl_proc.py
|
#!/usr/bin/env python
import time
from unittest import main, TestCase
import zmq
from neurokernel.tools.comm import get_random_port
from neurokernel.ctrl_proc import ControlledProcess
class test_ctrl_proc(TestCase):
def setUp(self):
ctx = zmq.Context()
self.sock = ctx.socket(zmq.ROUTER)
self.port = self.sock.bind_to_random_port('tcp://*')
self.proc = ControlledProcess(self.port, 'proc')
def tearDown(self):
self.proc.terminate()
def test_ctrl_quit(self):
self.proc.start()
time.sleep(1)
self.sock.send_multipart([self.proc.id, 'quit'])
time.sleep(1)
assert not self.proc.is_alive()
if __name__ == '__main__':
main()
|
Add test of zmq-based control mechanism for ControlledProcess class.
|
Add test of zmq-based control mechanism for ControlledProcess class.
|
Python
|
bsd-3-clause
|
cerrno/neurokernel
|
Add test of zmq-based control mechanism for ControlledProcess class.
|
#!/usr/bin/env python
import time
from unittest import main, TestCase
import zmq
from neurokernel.tools.comm import get_random_port
from neurokernel.ctrl_proc import ControlledProcess
class test_ctrl_proc(TestCase):
def setUp(self):
ctx = zmq.Context()
self.sock = ctx.socket(zmq.ROUTER)
self.port = self.sock.bind_to_random_port('tcp://*')
self.proc = ControlledProcess(self.port, 'proc')
def tearDown(self):
self.proc.terminate()
def test_ctrl_quit(self):
self.proc.start()
time.sleep(1)
self.sock.send_multipart([self.proc.id, 'quit'])
time.sleep(1)
assert not self.proc.is_alive()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add test of zmq-based control mechanism for ControlledProcess class.<commit_after>
|
#!/usr/bin/env python
import time
from unittest import main, TestCase
import zmq
from neurokernel.tools.comm import get_random_port
from neurokernel.ctrl_proc import ControlledProcess
class test_ctrl_proc(TestCase):
def setUp(self):
ctx = zmq.Context()
self.sock = ctx.socket(zmq.ROUTER)
self.port = self.sock.bind_to_random_port('tcp://*')
self.proc = ControlledProcess(self.port, 'proc')
def tearDown(self):
self.proc.terminate()
def test_ctrl_quit(self):
self.proc.start()
time.sleep(1)
self.sock.send_multipart([self.proc.id, 'quit'])
time.sleep(1)
assert not self.proc.is_alive()
if __name__ == '__main__':
main()
|
Add test of zmq-based control mechanism for ControlledProcess class.#!/usr/bin/env python
import time
from unittest import main, TestCase
import zmq
from neurokernel.tools.comm import get_random_port
from neurokernel.ctrl_proc import ControlledProcess
class test_ctrl_proc(TestCase):
def setUp(self):
ctx = zmq.Context()
self.sock = ctx.socket(zmq.ROUTER)
self.port = self.sock.bind_to_random_port('tcp://*')
self.proc = ControlledProcess(self.port, 'proc')
def tearDown(self):
self.proc.terminate()
def test_ctrl_quit(self):
self.proc.start()
time.sleep(1)
self.sock.send_multipart([self.proc.id, 'quit'])
time.sleep(1)
assert not self.proc.is_alive()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add test of zmq-based control mechanism for ControlledProcess class.<commit_after>#!/usr/bin/env python
import time
from unittest import main, TestCase
import zmq
from neurokernel.tools.comm import get_random_port
from neurokernel.ctrl_proc import ControlledProcess
class test_ctrl_proc(TestCase):
def setUp(self):
ctx = zmq.Context()
self.sock = ctx.socket(zmq.ROUTER)
self.port = self.sock.bind_to_random_port('tcp://*')
self.proc = ControlledProcess(self.port, 'proc')
def tearDown(self):
self.proc.terminate()
def test_ctrl_quit(self):
self.proc.start()
time.sleep(1)
self.sock.send_multipart([self.proc.id, 'quit'])
time.sleep(1)
assert not self.proc.is_alive()
if __name__ == '__main__':
main()
|
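Aside: the process side of this handshake — a DEALER socket whose identity matches proc.id, polling for 'quit' — reduces to roughly this sketch (the real ControlledProcess has more machinery; the class below is invented for illustration, py2-style to match the record):

import multiprocessing

import zmq


class MiniControlledProcess(multiprocessing.Process):
    """Toy process that exits when 'quit' arrives over ZeroMQ."""
    def __init__(self, port, proc_id):
        super(MiniControlledProcess, self).__init__()
        self.port = port
        self.id = proc_id

    def run(self):
        sock = zmq.Context().socket(zmq.DEALER)
        sock.setsockopt(zmq.IDENTITY, self.id)           # ROUTER routes on this
        sock.connect('tcp://localhost:%s' % self.port)
        while sock.recv() != 'quit':
            pass                                         # ignore other messages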
|
f8d3e9466de6cafca7c1a493194bb3944c58c75b
|
scripts/set_hostname.py
|
scripts/set_hostname.py
|
#!/usr/bin/env python
import subprocess
from utils import file_templates
from utils.validation import is_valid_hostname
def main():
user_input = raw_input("Want to change the hostname?: Y/N")
if user_input == 'Y':
new_hostname = ''
while new_hostname == '':
user_input = raw_input("Enter a new hostname: ")
if is_valid_hostname(user_input):
new_hostname = user_input
update_file('/etc/hosts', new_hostname)
update_file('/etc/hostname', new_hostname)
subprocess.call(['/etc/init.d/hostname.sh'])
else:
print("Skipping hostname update...")
def update_file(path, new_hostname):
template_name = path.split('/')[-1]
new_file_data = file_templates.build(template_name, [new_hostname])
with open(path, 'w') as f:
f.write(new_file_data)
if __name__ == '__main__':
main()
|
Add script to update hostname
|
Add script to update hostname
|
Python
|
mit
|
projectweekend/Pi-Setup,projectweekend/Pi-Setup
|
Add script to update hostname
|
#!/usr/bin/env python
import subprocess
from utils import file_templates
from utils.validation import is_valid_hostname
def main():
user_input = raw_input("Want to change the hostname?: Y/N")
if user_input == 'Y':
new_hostname = ''
while new_hostname == '':
user_input = raw_input("Enter a new hostname: ")
if is_valid_hostname(user_input):
new_hostname = user_input
update_file('/etc/hosts', new_hostname)
update_file('/etc/hostname', new_hostname)
subprocess.call(['/etc/init.d/hostname.sh'])
else:
print("Skipping hostname update...")
def update_file(path, new_hostname):
template_name = path.split('/')[-1]
new_file_data = file_templates.build(template_name, [new_hostname])
with open(path, 'w') as f:
f.write(new_file_data)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to update hostname<commit_after>
|
#!/usr/bin/env python
import subprocess
from utils import file_templates
from utils.validation import is_valid_hostname
def main():
user_input = raw_input("Want to change the hostname?: Y/N")
if user_input == 'Y':
new_hostname = ''
while new_hostname == '':
user_input = raw_input("Enter a new hostname: ")
if is_valid_hostname(user_input):
new_hostname = user_input
update_file('/etc/hosts', new_hostname)
update_file('/etc/hostname', new_hostname)
subprocess.call(['/etc/init.d/hostname.sh'])
else:
print("Skipping hostname update...")
def update_file(path, new_hostname):
template_name = path.split('/')[-1]
new_file_data = file_templates.build(template_name, [new_hostname])
with open(path, 'w') as f:
f.write(new_file_data)
if __name__ == '__main__':
main()
|
Add script to update hostname#!/usr/bin/env python
import subprocess
from utils import file_templates
from utils.validation import is_valid_hostname
def main():
user_input = raw_input("Want to change the hostname?: Y/N")
if user_input == 'Y':
new_hostname = ''
while new_hostname == '':
user_input = raw_input("Enter a new hostname: ")
if is_valid_hostname(user_input):
new_hostname = user_input
update_file('/etc/hosts', new_hostname)
update_file('/etc/hostname', new_hostname)
subprocess.call(['/etc/init.d/hostname.sh'])
else:
print("Skipping hostname update...")
def update_file(path, new_hostname):
template_name = path.split('/')[-1]
new_file_data = file_templates.build(template_name, [new_hostname])
with open(path, 'w') as f:
f.write(new_file_data)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to update hostname<commit_after>#!/usr/bin/env python
import subprocess
from utils import file_templates
from utils.validation import is_valid_hostname
def main():
user_input = raw_input("Want to change the hostname?: Y/N")
if user_input == 'Y':
new_hostname = ''
while new_hostname == '':
user_input = raw_input("Enter a new hostname: ")
if is_valid_hostname(user_input):
new_hostname = user_input
update_file('/etc/hosts', new_hostname)
update_file('/etc/hostname', new_hostname)
subprocess.call(['/etc/init.d/hostname.sh'])
else:
print("Skipping hostname update...")
def update_file(path, new_hostname):
template_name = path.split('/')[-1]
new_file_data = file_templates.build(template_name, [new_hostname])
with open(path, 'w') as f:
f.write(new_file_data)
if __name__ == '__main__':
main()
|
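Aside: is_valid_hostname is imported but not shown in the record; an RFC 1123-style check commonly looks like this sketch:

import re

_LABEL = re.compile(r'^(?!-)[A-Za-z0-9-]{1,63}(?<!-)$')


def is_valid_hostname(hostname):
    """Labels of 1-63 alphanumerics/hyphens, not edge-hyphenated, <=255 chars total."""
    if not hostname or len(hostname) > 255:
        return False
    return all(_LABEL.match(label) for label in hostname.rstrip('.').split('.'))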
|
2acb7e7da11aabe6b9c30f6197492d4150e4f496
|
test/test_words.py
|
test/test_words.py
|
import unittest
import numpy as np
import numpy.testing as test
from word2gauss.words import Vocabulary, iter_pairs
DTYPE = np.float32
class TestIterPairs(unittest.TestCase):
def test_iter_pairs(self):
np.random.seed(1234)
vocab = Vocabulary({'zero': 0, 'one': 1, 'two': 2})
actual = list(iter_pairs(['zero one two', 'one two zero'],
vocab, batch_size=2, nsamples=1))
expected = np.array([[0, 1, 0, 2, 0],
[0, 1, 1, 1, 1],
[0, 2, 0, 0, 0],
[0, 2, 0, 2, 1],
[1, 2, 1, 0, 0],
[1, 2, 1, 2, 1],
[1, 2, 1, 1, 0],
[1, 2, 1, 2, 1],
[1, 0, 1, 2, 0],
[1, 0, 2, 0, 1],
[2, 0, 2, 2, 0],
[2, 0, 0, 0, 1]], dtype=DTYPE)
self.assertEqual(len(actual), 1)
self.assertTrue(np.all(actual[0] == expected))
if __name__ == '__main__':
unittest.main()
|
Add a test for iter_pairs
|
Add a test for iter_pairs
|
Python
|
mit
|
seomoz/word2gauss,seomoz/word2gauss
|
Add a test for iter_pairs
|
import unittest
import numpy as np
import numpy.testing as test
from word2gauss.words import Vocabulary, iter_pairs
DTYPE = np.float32
class TestIterPairs(unittest.TestCase):
def test_iter_pairs(self):
np.random.seed(1234)
vocab = Vocabulary({'zero': 0, 'one': 1, 'two': 2})
actual = list(iter_pairs(['zero one two', 'one two zero'],
vocab, batch_size=2, nsamples=1))
expected = np.array([[0, 1, 0, 2, 0],
[0, 1, 1, 1, 1],
[0, 2, 0, 0, 0],
[0, 2, 0, 2, 1],
[1, 2, 1, 0, 0],
[1, 2, 1, 2, 1],
[1, 2, 1, 1, 0],
[1, 2, 1, 2, 1],
[1, 0, 1, 2, 0],
[1, 0, 2, 0, 1],
[2, 0, 2, 2, 0],
[2, 0, 0, 0, 1]], dtype=DTYPE)
self.assertEqual(len(actual), 1)
self.assertTrue(np.all(actual[0] == expected))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a test for iter_pairs<commit_after>
|
import unittest
import numpy as np
import numpy.testing as test
from word2gauss.words import Vocabulary, iter_pairs
DTYPE = np.float32
class TestIterPairs(unittest.TestCase):
def test_iter_pairs(self):
np.random.seed(1234)
vocab = Vocabulary({'zero': 0, 'one': 1, 'two': 2})
actual = list(iter_pairs(['zero one two', 'one two zero'],
vocab, batch_size=2, nsamples=1))
expected = np.array([[0, 1, 0, 2, 0],
[0, 1, 1, 1, 1],
[0, 2, 0, 0, 0],
[0, 2, 0, 2, 1],
[1, 2, 1, 0, 0],
[1, 2, 1, 2, 1],
[1, 2, 1, 1, 0],
[1, 2, 1, 2, 1],
[1, 0, 1, 2, 0],
[1, 0, 2, 0, 1],
[2, 0, 2, 2, 0],
[2, 0, 0, 0, 1]], dtype=DTYPE)
self.assertEqual(len(actual), 1)
self.assertTrue(np.all(actual[0] == expected))
if __name__ == '__main__':
unittest.main()
|
Add a test for iter_pairs
import unittest
import numpy as np
import numpy.testing as test
from word2gauss.words import Vocabulary, iter_pairs
DTYPE = np.float32
class TestIterPairs(unittest.TestCase):
def test_iter_pairs(self):
np.random.seed(1234)
vocab = Vocabulary({'zero': 0, 'one': 1, 'two': 2})
actual = list(iter_pairs(['zero one two', 'one two zero'],
vocab, batch_size=2, nsamples=1))
expected = np.array([[0, 1, 0, 2, 0],
[0, 1, 1, 1, 1],
[0, 2, 0, 0, 0],
[0, 2, 0, 2, 1],
[1, 2, 1, 0, 0],
[1, 2, 1, 2, 1],
[1, 2, 1, 1, 0],
[1, 2, 1, 2, 1],
[1, 0, 1, 2, 0],
[1, 0, 2, 0, 1],
[2, 0, 2, 2, 0],
[2, 0, 0, 0, 1]], dtype=DTYPE)
self.assertEqual(len(actual), 1)
self.assertTrue(np.all(actual[0] == expected))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a test for iter_pairs<commit_after>
import unittest
import numpy as np
import numpy.testing as test
from word2gauss.words import Vocabulary, iter_pairs
DTYPE = np.float32
class TestIterPairs(unittest.TestCase):
def test_iter_pairs(self):
np.random.seed(1234)
vocab = Vocabulary({'zero': 0, 'one': 1, 'two': 2})
actual = list(iter_pairs(['zero one two', 'one two zero'],
vocab, batch_size=2, nsamples=1))
expected = np.array([[0, 1, 0, 2, 0],
[0, 1, 1, 1, 1],
[0, 2, 0, 0, 0],
[0, 2, 0, 2, 1],
[1, 2, 1, 0, 0],
[1, 2, 1, 2, 1],
[1, 2, 1, 1, 0],
[1, 2, 1, 2, 1],
[1, 0, 1, 2, 0],
[1, 0, 2, 0, 1],
[2, 0, 2, 2, 0],
[2, 0, 0, 0, 1]], dtype=DTYPE)
self.assertEqual(len(actual), 1)
self.assertTrue(np.all(actual[0] == expected))
if __name__ == '__main__':
unittest.main()
|
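Aside: the fixed expected array only holds because np.random.seed(1234) makes the negative-sampling stream deterministic; the same mechanism in isolation:

import numpy as np

np.random.seed(1234)
first = np.random.randint(0, 3, size=5)

np.random.seed(1234)              # re-seeding replays the identical stream
second = np.random.randint(0, 3, size=5)

assert (first == second).all()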
|
e91c1964a8996bba73751e1bd1679133594bb264
|
tests/run_tests.py
|
tests/run_tests.py
|
# -*- coding: utf-8 -*-
import sys
import os
from openslides.__main__ import main as openslides_main
from openslides.utils.main import setup_django_settings_module
def main():
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
setup_django_settings_module(os.path.join(os.path.dirname(__file__), 'settings.py'))
sys.argv.insert(1, 'django')
sys.argv.insert(2, 'test')
return openslides_main()
if __name__ == '__main__':
main()
|
Add script to run the tests.
|
Add script to run the tests.
|
Python
|
mit
|
OpenSlides/openslides-protocol,OpenSlides/openslides-protocol,OpenSlides/openslides-protocol
|
Add script to run the tests.
|
# -*- coding: utf-8 -*-
import sys
import os
from openslides.__main__ import main as openslides_main
from openslides.utils.main import setup_django_settings_module
def main():
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
setup_django_settings_module(os.path.join(os.path.dirname(__file__), 'settings.py'))
sys.argv.insert(1, 'django')
sys.argv.insert(2, 'test')
return openslides_main()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to run the tests.<commit_after>
|
# -*- coding: utf-8 -*-
import sys
import os
from openslides.__main__ import main as openslides_main
from openslides.utils.main import setup_django_settings_module
def main():
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
setup_django_settings_module(os.path.join(os.path.dirname(__file__), 'settings.py'))
sys.argv.insert(1, 'django')
sys.argv.insert(2, 'test')
return openslides_main()
if __name__ == '__main__':
main()
|
Add script to run the tests.# -*- coding: utf-8 -*-
import sys
import os
from openslides.__main__ import main as openslides_main
from openslides.utils.main import setup_django_settings_module
def main():
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
setup_django_settings_module(os.path.join(os.path.dirname(__file__), 'settings.py'))
sys.argv.insert(1, 'django')
sys.argv.insert(2, 'test')
return openslides_main()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to run the tests.<commit_after># -*- coding: utf-8 -*-
import sys
import os
from openslides.__main__ import main as openslides_main
from openslides.utils.main import setup_django_settings_module
def main():
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
setup_django_settings_module(os.path.join(os.path.dirname(__file__), 'settings.py'))
sys.argv.insert(1, 'django')
sys.argv.insert(2, 'test')
return openslides_main()
if __name__ == '__main__':
main()
|
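Aside: stripped of OpenSlides specifics, setup_django_settings_module amounts to putting the settings file's directory on the import path and exporting its module name; a hedged sketch:

import os
import sys


def point_django_at(settings_path):
    """Make Django load the given settings file (illustrative helper)."""
    sys.path.insert(0, os.path.dirname(os.path.abspath(settings_path)))
    module_name = os.path.splitext(os.path.basename(settings_path))[0]
    os.environ['DJANGO_SETTINGS_MODULE'] = module_name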
|
0f9d7027494232ef9c2791939776fc62f81c7835
|
senlin/policies/base.py
|
senlin/policies/base.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class PolicyBase(object):
'''
Base class for policies.
'''
def __init__(self, name, type_name, **kwargs):
self.name = name
self.type_name = type_name
self.cooldown = 0
self.enabled = True
self.uuid = None
self.spec = {}
def pre_op(self, cluster_id, action):
'''
Force all subclasses to implement an operation that will be invoked
before an action.
'''
return NotImplemented
def post_op(self, cluster_id, action):
'''
Force all subclasses to implement an operation that will be performed
after an action.
'''
return NotImplemented
def to_dict(self):
pb_dict = {
'name': self.name,
'type': self.type_name,
'uuid': self.uuid,
'spec': self.spec,
'cooldown': self.cooldown,
}
return pb_dict
@classmethod
def from_dict(self, **kwargs):
pb = PolicyBase(**kwargs)
return pb
|
Add pre_op and post_op methods to PolicyBase
|
Add pre_op and post_op methods to PolicyBase
For all policies to be enforced, they may need hooks that perform some
operations on the targeted clusters.
|
Python
|
apache-2.0
|
tengqm/senlin,openstack/senlin,openstack/senlin,Alzon/senlin,tengqm/senlin,tengqm/senlin-container,openstack/senlin,tengqm/senlin-container,Alzon/senlin,stackforge/senlin,stackforge/senlin
|
Add pre_op and post_op methods to PolicyBase
For all policies to be enforced, they may need hooks that perform some
operations on the targeted clusters.
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class PolicyBase(object):
'''
Base class for policies.
'''
def __init__(self, name, type_name, **kwargs):
self.name = name
self.type_name = type_name
self.cooldown = 0
self.enabled = True
self.uuid = None
self.spec = {}
def pre_op(self, cluster_id, action):
'''
Force all subclasses to implement an operation that will be invoked
before an action.
'''
return NotImplemented
def post_op(self, cluster_id, action):
'''
Force all subclasses to implement an operation that will be performed
after an action.
'''
return NotImplemented
def to_dict(self):
pb_dict = {
'name': self.name,
'type': self.type_name,
'uuid': self.uuid,
'spec': self.spec,
'cooldown': self.cooldown,
}
return pb_dict
@classmethod
def from_dict(self, **kwargs):
pb = PolicyBase(**kwargs)
return pb
|
<commit_before><commit_msg>Add pre_op and post_op methods to PolicyBase
For all policies to be enforced, they may need hooks that perform some
operations on the targeted clusters.<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class PolicyBase(object):
'''
Base class for policies.
'''
def __init__(self, name, type_name, **kwargs):
self.name = name
self.type_name = type_name
self.cooldown = 0
self.enabled = True
self.uuid = None
self.spec = {}
def pre_op(self, cluster_id, action):
'''
Force all subclasses to implement an operation that will be invoked
before an action.
'''
return NotImplemented
def post_op(self, cluster_id, action):
'''
Force all subclasses to implement an operation that will be performed
after an action.
'''
return NotImplemented
def to_dict(self):
pb_dict = {
'name': self.name,
'type': self.type_name,
'uuid': self.uuid,
'spec': self.spec,
'cooldown': self.cooldown,
}
return pb_dict
@classmethod
def from_dict(self, **kwargs):
pb = PolicyBase(**kwargs)
return pb
|
Add pre_op and post_op methods to PolicyBase
For all policies to be enforced, they may need hooks that perform some
operations on the targeted clusters.# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class PolicyBase(object):
'''
Base class for policies.
'''
def __init__(self, name, type_name, **kwargs):
self.name = name
self.type_name = type_name
self.cooldown = 0
self.enabled = True
self.uuid = None
self.spec = {}
def pre_op(self, cluster_id, action):
'''
Force all subclasses to implement an operation that will be invoked
before an action.
'''
return NotImplemented
def post_op(self, cluster_id, action):
'''
Force all subclasses to implement an operation that will be performed
after an action.
'''
return NotImplemented
def to_dict(self):
pb_dict = {
'name': self.name,
'type': self.type_name,
'uuid': self.uuid,
'spec': self.spec,
'cooldown': self.cooldown,
}
return pb_dict
@classmethod
def from_dict(self, **kwargs):
pb = PolicyBase(**kwargs)
return pb
|
<commit_before><commit_msg>Add pre_op and post_op methods to PolicyBase
For all policies to be enforced, they may need a hooks that perform some
operations on the targeted clusters.<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class PolicyBase(object):
'''
Base class for policies.
'''
def __init__(self, name, type_name, **kwargs):
self.name = name
self.type_name = type_name
self.cooldown = 0
self.enabled = True
self.uuid = None
self.spec = {}
def pre_op(self, cluster_id, action):
'''
Force all subclasses to implement an operation that will be invoked
before an action.
'''
return NotImplemented
def post_op(self, cluster_id, action):
'''
Force all subclasses to implement an operation that will be performed
after an action.
'''
return NotImplemented
def to_dict(self):
pb_dict = {
'name': self.name,
'type': self.type_name,
'uuid': self.uuid,
'spec': self.spec,
'cooldown': self.cooldown,
}
return pb_dict
@classmethod
def from_dict(self, **kwargs):
pb = PolicyBase(**kwargs)
return pb
|
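Aside: concrete policies are expected to override both hooks (the base class returns NotImplemented); a minimal illustrative subclass, with names invented for the example:

class NoOpPolicy(PolicyBase):
    '''Example policy whose hooks merely record what they were asked to do.'''
    def __init__(self, name, **kwargs):
        super(NoOpPolicy, self).__init__(name, 'NoOpPolicy', **kwargs)
        self.seen = []

    def pre_op(self, cluster_id, action):
        self.seen.append(('pre', cluster_id, action))

    def post_op(self, cluster_id, action):
        self.seen.append(('post', cluster_id, action))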
|
b694c0da84f1a3f6b27b9d4a07c9f5cb2116d831
|
utils/sign_file.py
|
utils/sign_file.py
|
#! /usr/bin/env python
"""
A script that creates signed Python files.
Distributing detached signatures is boring.
"""
from __future__ import print_function
import os
import argparse
import subprocess
def get_file_encoding(filename):
"""
Get the file encoding for the file with the given filename
"""
with open(filename, 'rb') as fp:
# The encoding is usually specified on the second line
txt = fp.read().splitlines()[1]
txt = txt.decode('utf-8')
if 'encoding' in txt:
encoding = txt.split()[-1]
else:
encoding = 'utf-8' # default
return str(encoding)
def sign_file_and_get_sig(filename, encoding):
"""
Sign the file and get the signature
"""
cmd = 'gpg -bass {}'.format(filename)
ret = subprocess.Popen(cmd, shell=True).wait()
print ('-> %r' % cmd)
if ret:
raise ValueError('Could not sign the file!')
with open('{}.asc'.format(filename), 'rb') as fp:
sig = fp.read()
try:
os.remove('{}.asc'.format(filename))
except OSError:
pass
sig = sig.decode(encoding)
sig = sig.replace('\r', '').replace('\n', '\\n')
sig = sig.encode(encoding)
return sig
def sign_original_file(filename, encoding):
"""
Sign the original file
"""
sig = sign_file_and_get_sig(filename, encoding)
with open(filename, 'ab') as outfile:
outfile.write('#'.encode(encoding))
outfile.write(sig)
outfile.write('\n'.encode(encoding))
def create_signed_file(filename, encoding):
"""
Create a signed file
"""
sig = sign_file_and_get_sig(filename, encoding)
name, extension = os.path.splitext(filename)
new_file_name = '{}_signed{}'.format(name, extension)
with open(new_file_name, 'wb') as outfile, \
open(filename, 'rb') as infile:
txt = infile.read()
outfile.write(txt)
outfile.write('#'.encode(encoding))
outfile.write(sig)
outfile.write('\n'.encode(encoding))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('filenames', action='store', nargs='+',
help='Files you wish to sign')
parser.add_argument('--overwrite', action='store_true',
dest='overwrite', default=False,
help='Overwrite the original file'
' (sign the original file)')
opts = parser.parse_args()
return opts
if __name__ == '__main__':
opts = parse_args()
for filename in opts.filenames:
encoding = get_file_encoding(filename)
if opts.overwrite:
sign_original_file(filename, encoding)
else:
create_signed_file(filename, encoding)
|
Write a generic script signing utility
|
Write a generic script signing utility
Signed-off-by: Thomas Nagy <d86e025e60d31fa2452cf443d0fed051ea678233@gmail.com>
|
Python
|
agpl-3.0
|
hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR
|
Write a generic script signing utility
Signed-off-by: Thomas Nagy <d86e025e60d31fa2452cf443d0fed051ea678233@gmail.com>
|
#! /usr/bin/env python
"""
A script that creates signed Python files.
Distributing detached signatures is boring.
"""
from __future__ import print_function
import os
import argparse
import subprocess
def get_file_encoding(filename):
"""
Get the file encoding for the file with the given filename
"""
with open(filename, 'rb') as fp:
# The encoding is usually specified on the second line
txt = fp.read().splitlines()[1]
txt = txt.decode('utf-8')
if 'encoding' in txt:
encoding = txt.split()[-1]
else:
encoding = 'utf-8' # default
return str(encoding)
def sign_file_and_get_sig(filename, encoding):
"""
Sign the file and get the signature
"""
cmd = 'gpg -bass {}'.format(filename)
ret = subprocess.Popen(cmd, shell=True).wait()
print ('-> %r' % cmd)
if ret:
raise ValueError('Could not sign the file!')
with open('{}.asc'.format(filename), 'rb') as fp:
sig = fp.read()
try:
os.remove('{}.asc'.format(filename))
except OSError:
pass
sig = sig.decode(encoding)
sig = sig.replace('\r', '').replace('\n', '\\n')
sig = sig.encode(encoding)
return sig
def sign_original_file(filename, encoding):
"""
Sign the original file
"""
sig = sign_file_and_get_sig(filename, encoding)
with open(filename, 'ab') as outfile:
outfile.write('#'.encode(encoding))
outfile.write(sig)
outfile.write('\n'.encode(encoding))
def create_signed_file(filename, encoding):
"""
Create a signed file
"""
sig = sign_file_and_get_sig(filename, encoding)
name, extension = os.path.splitext(filename)
new_file_name = '{}_signed{}'.format(name, extension)
with open(new_file_name, 'wb') as outfile, \
open(filename, 'rb') as infile:
txt = infile.read()
outfile.write(txt)
outfile.write('#'.encode(encoding))
outfile.write(sig)
outfile.write('\n'.encode(encoding))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('filenames', action='store', nargs='+',
help='Files you wish to sign')
parser.add_argument('--overwrite', action='store_true',
dest='overwrite', default=False,
help='Overwrite the original file'
' (sign the original file)')
opts = parser.parse_args()
return opts
if __name__ == '__main__':
opts = parse_args()
for filename in opts.filenames:
encoding = get_file_encoding(filename)
if opts.overwrite:
sign_original_file(filename, encoding)
else:
create_signed_file(filename, encoding)
|
<commit_before><commit_msg>Write a generic script signing utility
Signed-off-by: Thomas Nagy <d86e025e60d31fa2452cf443d0fed051ea678233@gmail.com><commit_after>
|
#! /usr/bin/env python
"""
A script that creates signed Python files.
Distributing detached signatures is boring.
"""
from __future__ import print_function
import os
import argparse
import subprocess
def get_file_encoding(filename):
"""
Get the file encoding for the file with the given filename
"""
with open(filename, 'rb') as fp:
# The encoding is usually specified on the second line
txt = fp.read().splitlines()[1]
txt = txt.decode('utf-8')
if 'encoding' in txt:
encoding = txt.split()[-1]
else:
encoding = 'utf-8' # default
return str(encoding)
def sign_file_and_get_sig(filename, encoding):
"""
Sign the file and get the signature
"""
cmd = 'gpg -bass {}'.format(filename)
ret = subprocess.Popen(cmd, shell=True).wait()
print ('-> %r' % cmd)
if ret:
raise ValueError('Could not sign the file!')
with open('{}.asc'.format(filename), 'rb') as fp:
sig = fp.read()
try:
os.remove('{}.asc'.format(filename))
except OSError:
pass
sig = sig.decode(encoding)
sig = sig.replace('\r', '').replace('\n', '\\n')
sig = sig.encode(encoding)
return sig
def sign_original_file(filename, encoding):
"""
Sign the original file
"""
sig = sign_file_and_get_sig(filename, encoding)
with open(filename, 'ab') as outfile:
outfile.write('#'.encode(encoding))
outfile.write(sig)
outfile.write('\n'.encode(encoding))
def create_signed_file(filename, encoding):
"""
Create a signed file
"""
sig = sign_file_and_get_sig(filename, encoding)
name, extension = os.path.splitext(filename)
new_file_name = '{}_signed{}'.format(name, extension)
with open(new_file_name, 'wb') as outfile, \
open(filename, 'rb') as infile:
txt = infile.read()
outfile.write(txt)
outfile.write('#'.encode(encoding))
outfile.write(sig)
outfile.write('\n'.encode(encoding))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('filenames', action='store', nargs='+',
help='Files you wish to sign')
parser.add_argument('--overwrite', action='store_true',
dest='overwrite', default=False,
help='Overwrite the original file'
' (sign the original file)')
opts = parser.parse_args()
return opts
if __name__ == '__main__':
opts = parse_args()
for filename in opts.filenames:
encoding = get_file_encoding(filename)
if opts.overwrite:
sign_original_file(filename, encoding)
else:
create_signed_file(filename, encoding)
|
Write a generic script signing utility
Signed-off-by: Thomas Nagy <d86e025e60d31fa2452cf443d0fed051ea678233@gmail.com>#! /usr/bin/env python
"""
A script that creates signed Python files.
Distributing detached signatures is boring.
"""
from __future__ import print_function
import os
import argparse
import subprocess
def get_file_encoding(filename):
"""
Get the file encoding for the file with the given filename
"""
with open(filename, 'rb') as fp:
# The encoding is usually specified on the second line
txt = fp.read().splitlines()[1]
txt = txt.decode('utf-8')
if 'encoding' in txt:
encoding = txt.split()[-1]
else:
encoding = 'utf-8' # default
return str(encoding)
def sign_file_and_get_sig(filename, encoding):
"""
Sign the file and get the signature
"""
cmd = 'gpg -bass {}'.format(filename)
ret = subprocess.Popen(cmd, shell=True).wait()
print ('-> %r' % cmd)
if ret:
raise ValueError('Could not sign the file!')
with open('{}.asc'.format(filename), 'rb') as fp:
sig = fp.read()
try:
os.remove('{}.asc'.format(filename))
except OSError:
pass
sig = sig.decode(encoding)
sig = sig.replace('\r', '').replace('\n', '\\n')
sig = sig.encode(encoding)
return sig
def sign_original_file(filename, encoding):
"""
Sign the original file
"""
sig = sign_file_and_get_sig(filename, encoding)
with open(filename, 'ab') as outfile:
outfile.write('#'.encode(encoding))
outfile.write(sig)
outfile.write('\n'.encode(encoding))
def create_signed_file(filename, encoding):
"""
Create a signed file
"""
sig = sign_file_and_get_sig(filename, encoding)
name, extension = os.path.splitext(filename)
new_file_name = '{}_signed{}'.format(name, extension)
with open(new_file_name, 'wb') as outfile, \
open(filename, 'rb') as infile:
txt = infile.read()
outfile.write(txt)
outfile.write('#'.encode(encoding))
outfile.write(sig)
outfile.write('\n'.encode(encoding))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('filenames', action='store', nargs='+',
help='Files you wish to sign')
parser.add_argument('--overwrite', action='store_true',
dest='overwrite', default=False,
help='Overwrite the original file'
' (sign the original file)')
opts = parser.parse_args()
return opts
if __name__ == '__main__':
opts = parse_args()
for filename in opts.filenames:
encoding = get_file_encoding(filename)
if opts.overwrite:
sign_original_file(filename, encoding)
else:
create_signed_file(filename, encoding)
|
<commit_before><commit_msg>Write a generic script signing utility
Signed-off-by: Thomas Nagy <d86e025e60d31fa2452cf443d0fed051ea678233@gmail.com><commit_after>#! /usr/bin/env python
"""
A script that creates signed Python files.
Distributing detached signatures is boring.
"""
from __future__ import print_function
import os
import argparse
import subprocess
def get_file_encoding(filename):
"""
Get the file encoding for the file with the given filename
"""
with open(filename, 'rb') as fp:
# The encoding is usually specified on the second line
txt = fp.read().splitlines()[1]
txt = txt.decode('utf-8')
if 'encoding' in txt:
encoding = txt.split()[-1]
else:
encoding = 'utf-8' # default
return str(encoding)
def sign_file_and_get_sig(filename, encoding):
"""
Sign the file and get the signature
"""
cmd = 'gpg -bass {}'.format(filename)
ret = subprocess.Popen(cmd, shell=True).wait()
print ('-> %r' % cmd)
if ret:
raise ValueError('Could not sign the file!')
with open('{}.asc'.format(filename), 'rb') as fp:
sig = fp.read()
try:
os.remove('{}.asc'.format(filename))
except OSError:
pass
sig = sig.decode(encoding)
sig = sig.replace('\r', '').replace('\n', '\\n')
sig = sig.encode(encoding)
return sig
def sign_original_file(filename, encoding):
"""
Sign the original file
"""
sig = sign_file_and_get_sig(filename, encoding)
with open(filename, 'ab') as outfile:
outfile.write('#'.encode(encoding))
outfile.write(sig)
outfile.write('\n'.encode(encoding))
def create_signed_file(filename, encoding):
"""
Create a signed file
"""
sig = sign_file_and_get_sig(filename, encoding)
name, extension = os.path.splitext(filename)
new_file_name = '{}_signed{}'.format(name, extension)
with open(new_file_name, 'wb') as outfile, \
open(filename, 'rb') as infile:
txt = infile.read()
outfile.write(txt)
outfile.write('#'.encode(encoding))
outfile.write(sig)
outfile.write('\n'.encode(encoding))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('filenames', action='store', nargs='+',
help='Files you wish to sign')
parser.add_argument('--overwrite', action='store_true',
dest='overwrite', default=False,
help='Overwrite the original file'
' (sign the original file)')
opts = parser.parse_args()
return opts
if __name__ == '__main__':
opts = parse_args()
for filename in opts.filenames:
encoding = get_file_encoding(filename)
if opts.overwrite:
sign_original_file(filename, encoding)
else:
create_signed_file(filename, encoding)
|
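Design note: 'gpg -bass {}'.format(filename) with shell=True breaks on filenames containing spaces and is shell-injection-prone; the argument-list form avoids both. A hedged alternative for the signing call:

import subprocess


def sign_detached(filename):
    """Write <filename>.asc; raises CalledProcessError if gpg fails."""
    subprocess.check_call(['gpg', '-bass', filename])   # no shell involved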
|
a6d1e508d30b1c8c8f4ee07ee60b8841160259cc
|
scripts/threadtest.py
|
scripts/threadtest.py
|
#!/usr/bin/env python
# This is a basic script that will check that
# multithreaded behavior works as expected.
# The idea is to spin up a multithreaded server
# with views that use flask-shelve, and then to
# spawn multi threaded clients.
import os
import time
import threading
from urllib2 import urlopen
from werkzeug.serving import make_server
import flask
from flask.ext import shelve
NUM_CLIENTS = 20
NUM_REQUESTS = 50
app = flask.Flask('test-flask-shelve')
app.debug = True
app.config["SHELVE_FILENAME"] = 'demodb'
shelve.init_app(app)
def make_requests(num_requests):
for i in xrange(num_requests):
urlopen('http://localhost:5000/incr/').read()
@app.route('/incr/')
def setkey():
db = shelve.get_shelve('c')
if 'counter' not in db:
db['counter'] = 0
current = db['counter']
time.sleep(0.01)
db['counter'] = current + 1
return str(db['counter']) + '\n'
@app.route('/count/')
def getkey():
db = shelve.get_shelve('r')
time.sleep(0.01)
return str(db['counter']) + '\n'
@app.route('/reset/')
def reset():
db = shelve.get_shelve('c')
time.sleep(0.01)
db['counter'] = 0
return '0\n'
if __name__ == '__main__':
server = make_server('127.0.0.1', 5000, app, threaded=True)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
print "Starting server"
server_thread.start()
time.sleep(2)
clients = []
for i in xrange(NUM_CLIENTS):
t = threading.Thread(target=make_requests,
kwargs={'num_requests': NUM_REQUESTS})
clients.append(t)
urlopen('http://localhost:5000/reset/').read()
print "Starting clients"
start = time.time()
for client in clients:
client.start()
for client in clients:
client.join()
end = time.time()
val = int(urlopen('http://localhost:5000/count/').read().strip())
print "Expected:", NUM_CLIENTS * NUM_REQUESTS
print "Actual :", val
print "Time : %.2f" % (end - start)
os.unlink(app.config["SHELVE_FILENAME"])
os.unlink(app.config["SHELVE_LOCKFILE"])
|
Add script to verify locking behavior
|
Add script to verify locking behavior
Basically, start up a multithreaded flask server and throw
a bunch of multithreaded clients at it. Make sure the
numbers at the end make sense.
|
Python
|
bsd-3-clause
|
jamesls/flask-shelve
|
Add script to verify locking behavior
Basically, start up a multithreaded flask server and throw
a bunch of multithreaded clients at it. Make sure the
numbers at the end make sense.
|
#!/usr/bin/env python
# This is a basic script that will check that
# multithreaded behavior works as expected.
# The idea is to spin up a multithreaded server
# with views that use flask-shelve, and then to
# spawn multi threaded clients.
import os
import time
import threading
from urllib2 import urlopen
from werkzeug.serving import make_server
import flask
from flask.ext import shelve
NUM_CLIENTS = 20
NUM_REQUESTS = 50
app = flask.Flask('test-flask-shelve')
app.debug = True
app.config["SHELVE_FILENAME"] = 'demodb'
shelve.init_app(app)
def make_requests(num_requests):
for i in xrange(num_requests):
urlopen('http://localhost:5000/incr/').read()
@app.route('/incr/')
def setkey():
db = shelve.get_shelve('c')
if 'counter' not in db:
db['counter'] = 0
current = db['counter']
time.sleep(0.01)
db['counter'] = current + 1
return str(db['counter']) + '\n'
@app.route('/count/')
def getkey():
db = shelve.get_shelve('r')
time.sleep(0.01)
return str(db['counter']) + '\n'
@app.route('/reset/')
def reset():
db = shelve.get_shelve('c')
time.sleep(0.01)
db['counter'] = 0
return '0\n'
if __name__ == '__main__':
server = make_server('127.0.0.1', 5000, app, threaded=True)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
print "Starting server"
server_thread.start()
time.sleep(2)
clients = []
for i in xrange(NUM_CLIENTS):
t = threading.Thread(target=make_requests,
kwargs={'num_requests': NUM_REQUESTS})
clients.append(t)
urlopen('http://localhost:5000/reset/').read()
print "Starting clients"
start = time.time()
for client in clients:
client.start()
for client in clients:
client.join()
end = time.time()
val = int(urlopen('http://localhost:5000/count/').read().strip())
print "Expected:", NUM_CLIENTS * NUM_REQUESTS
print "Actual :", val
print "Time : %.2f" % (end - start)
os.unlink(app.config["SHELVE_FILENAME"])
os.unlink(app.config["SHELVE_LOCKFILE"])
|
<commit_before><commit_msg>Add script to verify locking behavior
Basically, start up a multithreaded flask server and throw
a bunch of multithreaded clients at it. Make sure the
numbers at the end make sense.<commit_after>
|
#!/usr/bin/env python
# This is a basic script that will check that
# multithreaded behavior works as expected.
# The idea is to spin up a multithreaded server
# with views that use flask-shelve, and then to
# spawn multithreaded clients
import os
import time
import threading
from urllib2 import urlopen
from werkzeug.serving import make_server
import flask
from flask.ext import shelve
NUM_CLIENTS = 20
NUM_REQUESTS = 50
app = flask.Flask('test-flask-shelve')
app.debug = True
app.config["SHELVE_FILENAME"] = 'demodb'
shelve.init_app(app)
def make_requests(num_requests):
for i in xrange(num_requests):
urlopen('http://localhost:5000/incr/').read()
@app.route('/incr/')
def setkey():
db = shelve.get_shelve('c')
if 'counter' not in db:
db['counter'] = 0
current = db['counter']
time.sleep(0.01)
db['counter'] = current + 1
return str(db['counter']) + '\n'
@app.route('/count/')
def getkey():
db = shelve.get_shelve('r')
time.sleep(0.01)
return str(db['counter']) + '\n'
@app.route('/reset/')
def reset():
db = shelve.get_shelve('c')
time.sleep(0.01)
db['counter'] = 0
return '0\n'
if __name__ == '__main__':
server = make_server('127.0.0.1', 5000, app, threaded=True)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
print "Starting server"
server_thread.start()
time.sleep(2)
clients = []
for i in xrange(NUM_CLIENTS):
t = threading.Thread(target=make_requests,
kwargs={'num_requests': NUM_REQUESTS})
clients.append(t)
urlopen('http://localhost:5000/reset/').read()
print "Starting clients"
start = time.time()
for client in clients:
client.start()
for client in clients:
client.join()
end = time.time()
val = int(urlopen('http://localhost:5000/count/').read().strip())
print "Expected:", NUM_CLIENTS * NUM_REQUESTS
print "Actual :", val
print "Time : %.2f" % (end - start)
os.unlink(app.config["SHELVE_FILENAME"])
os.unlink(app.config["SHELVE_LOCKFILE"])
|
Add script to verify locking behavior
Basically, start up a multithreaded flask server and throw
a bunch of multithreaded clients at it. Make sure the
numbers at the end make sense.#!/usr/bin/env python
# This is a basic script that will check that
# multithreaded behavior works as expected.
# The idea is to spin up a multithreaded server
# with views that use flask-shelve, and then to
# spawn multithreaded clients
import os
import time
import threading
from urllib2 import urlopen
from werkzeug.serving import make_server
import flask
from flask.ext import shelve
NUM_CLIENTS = 20
NUM_REQUESTS = 50
app = flask.Flask('test-flask-shelve')
app.debug = True
app.config["SHELVE_FILENAME"] = 'demodb'
shelve.init_app(app)
def make_requests(num_requests):
for i in xrange(num_requests):
urlopen('http://localhost:5000/incr/').read()
@app.route('/incr/')
def setkey():
db = shelve.get_shelve('c')
if 'counter' not in db:
db['counter'] = 0
current = db['counter']
time.sleep(0.01)
db['counter'] = current + 1
return str(db['counter']) + '\n'
@app.route('/count/')
def getkey():
db = shelve.get_shelve('r')
time.sleep(0.01)
return str(db['counter']) + '\n'
@app.route('/reset/')
def reset():
db = shelve.get_shelve('c')
time.sleep(0.01)
db['counter'] = 0
return '0\n'
if __name__ == '__main__':
server = make_server('127.0.0.1', 5000, app, threaded=True)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
print "Starting server"
server_thread.start()
time.sleep(2)
clients = []
for i in xrange(NUM_CLIENTS):
t = threading.Thread(target=make_requests,
kwargs={'num_requests': NUM_REQUESTS})
clients.append(t)
urlopen('http://localhost:5000/reset/').read()
print "Starting clients"
start = time.time()
for client in clients:
client.start()
for client in clients:
client.join()
end = time.time()
val = int(urlopen('http://localhost:5000/count/').read().strip())
print "Expected:", NUM_CLIENTS * NUM_REQUESTS
print "Actual :", val
print "Time : %.2f" % (end - start)
os.unlink(app.config["SHELVE_FILENAME"])
os.unlink(app.config["SHELVE_LOCKFILE"])
|
<commit_before><commit_msg>Add script to verify locking behavior
Basically, start up a multithreaded flask server and throw
a bunch of multithreaded clients at it. Make sure the
numbers at the end make sense.<commit_after>#!/usr/bin/env python
# This is a basic script that will check that
# multithreaded behavior works as expected.
# The idea is to spin up a multithreaded server
# with views that use flask-shelve, and then to
# spawn multithreaded clients
import os
import time
import threading
from urllib2 import urlopen
from werkzeug.serving import make_server
import flask
from flask.ext import shelve
NUM_CLIENTS = 20
NUM_REQUESTS = 50
app = flask.Flask('test-flask-shelve')
app.debug = True
app.config["SHELVE_FILENAME"] = 'demodb'
shelve.init_app(app)
def make_requests(num_requests):
for i in xrange(num_requests):
urlopen('http://localhost:5000/incr/').read()
@app.route('/incr/')
def setkey():
db = shelve.get_shelve('c')
if 'counter' not in db:
db['counter'] = 0
current = db['counter']
time.sleep(0.01)
db['counter'] = current + 1
return str(db['counter']) + '\n'
@app.route('/count/')
def getkey():
db = shelve.get_shelve('r')
time.sleep(0.01)
return str(db['counter']) + '\n'
@app.route('/reset/')
def reset():
db = shelve.get_shelve('c')
time.sleep(0.01)
db['counter'] = 0
return '0\n'
if __name__ == '__main__':
server = make_server('127.0.0.1', 5000, app, threaded=True)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
print "Starting server"
server_thread.start()
time.sleep(2)
clients = []
for i in xrange(NUM_CLIENTS):
t = threading.Thread(target=make_requests,
kwargs={'num_requests': NUM_REQUESTS})
clients.append(t)
urlopen('http://localhost:5000/reset/').read()
print "Starting clients"
start = time.time()
for client in clients:
client.start()
for client in clients:
client.join()
end = time.time()
val = int(urlopen('http://localhost:5000/count/').read().strip())
print "Expected:", NUM_CLIENTS * NUM_REQUESTS
print "Actual :", val
print "Time : %.2f" % (end - start)
os.unlink(app.config["SHELVE_FILENAME"])
os.unlink(app.config["SHELVE_LOCKFILE"])
|
|
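For reference, the invariant this load test exercises — that concurrent increments are never lost — reduces to taking a shared lock for readers and an exclusive lock for writers around each shelve open. A minimal stdlib sketch of that pattern (illustrative only; locked_shelve and the lock-file handling here are assumptions, not flask-shelve's actual internals):

import fcntl
import shelve
from contextlib import contextmanager

@contextmanager
def locked_shelve(filename, lockfile, flag='r'):
    """Open a shelve under a POSIX file lock (assumes a POSIX platform)."""
    # Readers share a lock; any writable flag takes an exclusive one.
    mode = fcntl.LOCK_SH if flag == 'r' else fcntl.LOCK_EX
    with open(lockfile, 'w') as lf:
        fcntl.flock(lf, mode)  # blocks until the lock is granted
        db = shelve.open(filename, flag)
        try:
            yield db
        finally:
            db.close()  # flush to disk before releasing the lock
            fcntl.flock(lf, fcntl.LOCK_UN)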
b4dfc06e55cf1f8e0e6d4e0bb31efb35614ea57a
|
gntp/test/test_config.py
|
gntp/test/test_config.py
|
# Test when we don't have a config file
# Since we don't know if the user will have a config file or not when
# running this test, we will move it out of the way and then move it back
# when we're done
import os
from gntp.test import GNTPTestCase
from gntp.config import GrowlNotifier
ORIGINAL_CONFIG = os.path.expanduser('~/.gntp')
BACKUP_CONFIG = os.path.expanduser('~/.gntp.backup')
class ConfigTests(GNTPTestCase):
def setUp(self):
if os.path.exists(ORIGINAL_CONFIG):
os.rename(ORIGINAL_CONFIG, BACKUP_CONFIG)
self.growl = GrowlNotifier('GNTP unittest', ['Testing'])
self.growl.register()
def test_missing_config(self):
self.assertTrue(self._notify(description='No config file test'))
def tearDown(self):
if os.path.exists(BACKUP_CONFIG):
os.rename(BACKUP_CONFIG, ORIGINAL_CONFIG)
|
Add test for config file
|
Add test for config file
|
Python
|
mit
|
kfdm/gntp
|
Add test for config file
|
# Test when we don't have a config file
# Since we don't know if the user will have a config file or not when
# running this test, we will move it out of the way and then move it back
# when we're done
import os
from gntp.test import GNTPTestCase
from gntp.config import GrowlNotifier
ORIGINAL_CONFIG = os.path.expanduser('~/.gntp')
BACKUP_CONFIG = os.path.expanduser('~/.gntp.backup')
class ConfigTests(GNTPTestCase):
def setUp(self):
if os.path.exists(ORIGINAL_CONFIG):
os.rename(ORIGINAL_CONFIG, BACKUP_CONFIG)
self.growl = GrowlNotifier('GNTP unittest', ['Testing'])
self.growl.register()
def test_missing_config(self):
self.assertTrue(self._notify(description='No config file test'))
def tearDown(self):
if os.path.exists(BACKUP_CONFIG):
os.rename(BACKUP_CONFIG, ORIGINAL_CONFIG)
|
<commit_before><commit_msg>Add test for config file<commit_after>
|
# Test when we don't have a config file
# Since we don't know if the user will have a config file or not when
# running this test, we will move it out of the way and then move it back
# when we're done
import os
from gntp.test import GNTPTestCase
from gntp.config import GrowlNotifier
ORIGINAL_CONFIG = os.path.expanduser('~/.gntp')
BACKUP_CONFIG = os.path.expanduser('~/.gntp.backup')
class ConfigTests(GNTPTestCase):
def setUp(self):
if os.path.exists(ORIGINAL_CONFIG):
os.rename(ORIGINAL_CONFIG, BACKUP_CONFIG)
self.growl = GrowlNotifier('GNTP unittest', ['Testing'])
self.growl.register()
def test_missing_config(self):
self.assertTrue(self._notify(description='No config file test'))
def tearDown(self):
if os.path.exists(BACKUP_CONFIG):
os.rename(BACKUP_CONFIG, ORIGINAL_CONFIG)
|
Add test for config file# Test when we don't have a config file
# Since we don't know if the user will have a config file or not when
# running this test, we will move it out of the way and then move it back
# when we're done
import os
from gntp.test import GNTPTestCase
from gntp.config import GrowlNotifier
ORIGINAL_CONFIG = os.path.expanduser('~/.gntp')
BACKUP_CONFIG = os.path.expanduser('~/.gntp.backup')
class ConfigTests(GNTPTestCase):
def setUp(self):
if os.path.exists(ORIGINAL_CONFIG):
os.rename(ORIGINAL_CONFIG, BACKUP_CONFIG)
self.growl = GrowlNotifier('GNTP unittest', ['Testing'])
self.growl.register()
def test_missing_config(self):
self.assertTrue(self._notify(description='No config file test'))
def tearDown(self):
if os.path.exists(BACKUP_CONFIG):
os.rename(BACKUP_CONFIG, ORIGINAL_CONFIG)
|
<commit_before><commit_msg>Add test for config file<commit_after># Test when we don't have a config file
# Since we don't know if the user will have a config file or not when
# running this test, we will move it out of the way and then move it back
# when we're done
import os
from gntp.test import GNTPTestCase
from gntp.config import GrowlNotifier
ORIGINAL_CONFIG = os.path.expanduser('~/.gntp')
BACKUP_CONFIG = os.path.expanduser('~/.gntp.backup')
class ConfigTests(GNTPTestCase):
def setUp(self):
if os.path.exists(ORIGINAL_CONFIG):
os.rename(ORIGINAL_CONFIG, BACKUP_CONFIG)
self.growl = GrowlNotifier('GNTP unittest', ['Testing'])
self.growl.register()
def test_missing_config(self):
self.assertTrue(self._notify(description='No config file test'))
def tearDown(self):
if os.path.exists(BACKUP_CONFIG):
os.rename(BACKUP_CONFIG, ORIGINAL_CONFIG)
|
|
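One fragility in the rename-based setUp/tearDown above: if registration fails partway through setUp, tearDown never runs and the user's real config stays renamed. A hedged sketch of a sturdier variant using unittest's addCleanup (not part of the gntp suite; shown only to illustrate the pattern):

import os
import unittest

class SaferConfigTests(unittest.TestCase):
    def setUp(self):
        config = os.path.expanduser('~/.gntp')
        backup = config + '.backup'
        if os.path.exists(config):
            os.rename(config, backup)
            # Cleanups run even when setUp or a test raises after this point,
            # so the original config file is always restored.
            self.addCleanup(os.rename, backup, config)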
85f437f300a076001d2efc813c40eb19d14dbbe9
|
Chapter-1/NCHS-school-song-prgm.py
|
Chapter-1/NCHS-school-song-prgm.py
|
#David Hickox
# Jan 12 17
# School Song Program
# Displays school song
#actually prints the song
print("All hail our school NCHS\nWe'll sing your praises NCHS\n\nWith all our might\nWe will always make a fight")
#waits for an enter press to exit the program
input("\nPress Enter to continue")
|
Create the school song program
|
Create the school song program
|
Python
|
mit
|
dwhickox/NCHS-Programming-1-Python-Programs
|
Create the school song program
|
#David Hickox
# Jan 12 17
# School Song Program
# Displays school song
#actually prints the song
print("All hail our school NCHS\nWe'll sing your praises NCHS\n\nWith all our might\nWe will always make a fight")
#waits for an enter press to exit the program
input("\nPress Enter to continue")
|
<commit_before><commit_msg>Create the school song program<commit_after>
|
#David Hickox
# Jan 12 17
# School Song Program
# Displays school song
#actually prints the song
print("All hail our school NCHS\nWe'll sing your praises NCHS\n\nWith all our might\nWe will always make a fight")
#waits for an enter press to exit the program
input("\nPress Enter to continue")
|
Create the school song program#David Hickox
# Jan 12 17
# School Song Program
# Displays school song
#actually prints the song
print("All hail our school NCHS\nWe'll sing your praises NCHS\n\nWith all our might\nWe will always make a fight")
#waits for an enter press to exit the program
input("\nPress Enter to continue")
|
<commit_before><commit_msg>Create the school song program<commit_after>#David Hickox
# Jan 12 17
# School Song Program
# Displays school song
#actually prints the song
print("All hail our school NCHS\nWe'll sing your praises NCHS\n\nWith all our might\nWe will always make a fight")
#waits for an enter press to exit the program
input("\nPress Enter to continue")
|
|
7de811bec7478efb8398a90f7f3c85e8427fb114
|
test_computeengine.py
|
test_computeengine.py
|
from computeengine import Computation, States
def test_basic():
def b(a):
return a + 1
def c(a):
return 2 * a
def d(b, c):
return b + c
cpu = Computation()
cpu.add_node("a")
cpu.add_node("b", b, ["a"])
cpu.add_node("c", c, ["a"])
cpu.add_node("d", d, ["b", "c"])
assert cpu.dag.node['a']['state'] == States.UNINITIALIZED
assert cpu.dag.node['b']['state'] == States.UNINITIALIZED
assert cpu.dag.node['c']['state'] == States.UNINITIALIZED
assert cpu.dag.node['d']['state'] == States.UNINITIALIZED
cpu.insert("a", 1)
assert cpu.dag.node['a']['state'] == States.UPTODATE
assert cpu.dag.node['b']['state'] == States.COMPUTABLE
assert cpu.dag.node['c']['state'] == States.COMPUTABLE
assert cpu.dag.node['d']['state'] == States.STALE
assert cpu.dag.node['a']['value'] == 1
cpu.compute_all()
assert cpu.dag.node['a']['state'] == States.UPTODATE
assert cpu.dag.node['b']['state'] == States.UPTODATE
assert cpu.dag.node['c']['state'] == States.UPTODATE
assert cpu.dag.node['d']['state'] == States.UPTODATE
assert cpu.dag.node['a']['value'] == 1
assert cpu.dag.node['b']['value'] == 2
assert cpu.dag.node['c']['value'] == 2
assert cpu.dag.node['d']['value'] == 4
cpu.insert("a", 2)
cpu.compute("b")
assert cpu.dag.node['a']['state'] == States.UPTODATE
assert cpu.dag.node['b']['state'] == States.UPTODATE
assert cpu.dag.node['c']['state'] == States.COMPUTABLE
assert cpu.dag.node['d']['state'] == States.STALE
assert cpu.dag.node['a']['value'] == 2
assert cpu.dag.node['b']['value'] == 3
assert set(cpu._get_calc_nodes("d")) == set(['c', 'd'])
|
Add simple unit test for compute engine.
|
Add simple unit test for compute engine.
|
Python
|
bsd-3-clause
|
janusassetallocation/loman
|
Add simple unit test for compute engine.
|
from computeengine import Computation, States
def test_basic():
def b(a):
return a + 1
def c(a):
return 2 * a
def d(b, c):
return b + c
cpu = Computation()
cpu.add_node("a")
cpu.add_node("b", b, ["a"])
cpu.add_node("c", c, ["a"])
cpu.add_node("d", d, ["b", "c"])
assert cpu.dag.node['a']['state'] == States.UNINITIALIZED
assert cpu.dag.node['b']['state'] == States.UNINITIALIZED
assert cpu.dag.node['c']['state'] == States.UNINITIALIZED
assert cpu.dag.node['d']['state'] == States.UNINITIALIZED
cpu.insert("a", 1)
assert cpu.dag.node['a']['state'] == States.UPTODATE
assert cpu.dag.node['b']['state'] == States.COMPUTABLE
assert cpu.dag.node['c']['state'] == States.COMPUTABLE
assert cpu.dag.node['d']['state'] == States.STALE
assert cpu.dag.node['a']['value'] == 1
cpu.compute_all()
assert cpu.dag.node['a']['state'] == States.UPTODATE
assert cpu.dag.node['b']['state'] == States.UPTODATE
assert cpu.dag.node['c']['state'] == States.UPTODATE
assert cpu.dag.node['d']['state'] == States.UPTODATE
assert cpu.dag.node['a']['value'] == 1
assert cpu.dag.node['b']['value'] == 2
assert cpu.dag.node['c']['value'] == 2
assert cpu.dag.node['d']['value'] == 4
cpu.insert("a", 2)
cpu.compute("b")
assert cpu.dag.node['a']['state'] == States.UPTODATE
assert cpu.dag.node['b']['state'] == States.UPTODATE
assert cpu.dag.node['c']['state'] == States.COMPUTABLE
assert cpu.dag.node['d']['state'] == States.STALE
assert cpu.dag.node['a']['value'] == 2
assert cpu.dag.node['b']['value'] == 3
assert set(cpu._get_calc_nodes("d")) == set(['c', 'd'])
|
<commit_before><commit_msg>Add simple unit test for compute engine.<commit_after>
|
from computeengine import Computation, States
def test_basic():
def b(a):
return a + 1
def c(a):
return 2 * a
def d(b, c):
return b + c
cpu = Computation()
cpu.add_node("a")
cpu.add_node("b", b, ["a"])
cpu.add_node("c", c, ["a"])
cpu.add_node("d", d, ["b", "c"])
assert cpu.dag.node['a']['state'] == States.UNINITIALIZED
assert cpu.dag.node['b']['state'] == States.UNINITIALIZED
assert cpu.dag.node['c']['state'] == States.UNINITIALIZED
assert cpu.dag.node['d']['state'] == States.UNINITIALIZED
cpu.insert("a", 1)
assert cpu.dag.node['a']['state'] == States.UPTODATE
assert cpu.dag.node['b']['state'] == States.COMPUTABLE
assert cpu.dag.node['c']['state'] == States.COMPUTABLE
assert cpu.dag.node['d']['state'] == States.STALE
assert cpu.dag.node['a']['value'] == 1
cpu.compute_all()
assert cpu.dag.node['a']['state'] == States.UPTODATE
assert cpu.dag.node['b']['state'] == States.UPTODATE
assert cpu.dag.node['c']['state'] == States.UPTODATE
assert cpu.dag.node['d']['state'] == States.UPTODATE
assert cpu.dag.node['a']['value'] == 1
assert cpu.dag.node['b']['value'] == 2
assert cpu.dag.node['c']['value'] == 2
assert cpu.dag.node['d']['value'] == 4
cpu.insert("a", 2)
cpu.compute("b")
assert cpu.dag.node['a']['state'] == States.UPTODATE
assert cpu.dag.node['b']['state'] == States.UPTODATE
assert cpu.dag.node['c']['state'] == States.COMPUTABLE
assert cpu.dag.node['d']['state'] == States.STALE
assert cpu.dag.node['a']['value'] == 2
assert cpu.dag.node['b']['value'] == 3
assert set(cpu._get_calc_nodes("d")) == set(['c', 'd'])
|
Add simple unit test for compute engine.from computeengine import Computation, States
def test_basic():
def b(a):
return a + 1
def c(a):
return 2 * a
def d(b, c):
return b + c
cpu = Computation()
cpu.add_node("a")
cpu.add_node("b", b, ["a"])
cpu.add_node("c", c, ["a"])
cpu.add_node("d", d, ["b", "c"])
assert cpu.dag.node['a']['state'] == States.UNINITIALIZED
assert cpu.dag.node['b']['state'] == States.UNINITIALIZED
assert cpu.dag.node['c']['state'] == States.UNINITIALIZED
assert cpu.dag.node['d']['state'] == States.UNINITIALIZED
cpu.insert("a", 1)
assert cpu.dag.node['a']['state'] == States.UPTODATE
assert cpu.dag.node['b']['state'] == States.COMPUTABLE
assert cpu.dag.node['c']['state'] == States.COMPUTABLE
assert cpu.dag.node['d']['state'] == States.STALE
assert cpu.dag.node['a']['value'] == 1
cpu.compute_all()
assert cpu.dag.node['a']['state'] == States.UPTODATE
assert cpu.dag.node['b']['state'] == States.UPTODATE
assert cpu.dag.node['c']['state'] == States.UPTODATE
assert cpu.dag.node['d']['state'] == States.UPTODATE
assert cpu.dag.node['a']['value'] == 1
assert cpu.dag.node['b']['value'] == 2
assert cpu.dag.node['c']['value'] == 2
assert cpu.dag.node['d']['value'] == 4
cpu.insert("a", 2)
cpu.compute("b")
assert cpu.dag.node['a']['state'] == States.UPTODATE
assert cpu.dag.node['b']['state'] == States.UPTODATE
assert cpu.dag.node['c']['state'] == States.COMPUTABLE
assert cpu.dag.node['d']['state'] == States.STALE
assert cpu.dag.node['a']['value'] == 2
assert cpu.dag.node['b']['value'] == 3
assert set(cpu._get_calc_nodes("d")) == set(['c', 'd'])
|
<commit_before><commit_msg>Add simple unit test for compute engine.<commit_after>from computeengine import Computation, States
def test_basic():
def b(a):
return a + 1
def c(a):
return 2 * a
def d(b, c):
return b + c
cpu = Computation()
cpu.add_node("a")
cpu.add_node("b", b, ["a"])
cpu.add_node("c", c, ["a"])
cpu.add_node("d", d, ["b", "c"])
assert cpu.dag.node['a']['state'] == States.UNINITIALIZED
assert cpu.dag.node['b']['state'] == States.UNINITIALIZED
assert cpu.dag.node['c']['state'] == States.UNINITIALIZED
assert cpu.dag.node['d']['state'] == States.UNINITIALIZED
cpu.insert("a", 1)
assert cpu.dag.node['a']['state'] == States.UPTODATE
assert cpu.dag.node['b']['state'] == States.COMPUTABLE
assert cpu.dag.node['c']['state'] == States.COMPUTABLE
assert cpu.dag.node['d']['state'] == States.STALE
assert cpu.dag.node['a']['value'] == 1
cpu.compute_all()
assert cpu.dag.node['a']['state'] == States.UPTODATE
assert cpu.dag.node['b']['state'] == States.UPTODATE
assert cpu.dag.node['c']['state'] == States.UPTODATE
assert cpu.dag.node['d']['state'] == States.UPTODATE
assert cpu.dag.node['a']['value'] == 1
assert cpu.dag.node['b']['value'] == 2
assert cpu.dag.node['c']['value'] == 2
assert cpu.dag.node['d']['value'] == 4
cpu.insert("a", 2)
cpu.compute("b")
assert cpu.dag.node['a']['state'] == States.UPTODATE
assert cpu.dag.node['b']['state'] == States.UPTODATE
assert cpu.dag.node['c']['state'] == States.COMPUTABLE
assert cpu.dag.node['d']['state'] == States.STALE
assert cpu.dag.node['a']['value'] == 2
assert cpu.dag.node['b']['value'] == 3
assert set(cpu._get_calc_nodes("d")) == set(['c', 'd'])
|
|
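The state transitions asserted in this test (inputs go UPTODATE on insert, direct children become COMPUTABLE, deeper descendants STALE) fall out of a topological-order recompute over the DAG. A minimal illustration of that core loop, independent of the computeengine API (the function name and the networkx dependency are assumptions):

import networkx as nx

def compute_all(dag):
    # dag: networkx.DiGraph whose nodes carry 'func' and 'value' attributes.
    for node in nx.topological_sort(dag):
        data = dag.nodes[node]
        if data.get('func') is None:
            continue  # input node: its value was inserted directly
        # Simplification: real engines record argument order explicitly,
        # since predecessors() makes no ordering guarantee.
        args = [dag.nodes[p]['value'] for p in dag.predecessors(node)]
        data['value'] = data['func'](*args)
        data['state'] = 'UPTODATE'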
732f0490c83253aa6504506e0271268ddc979c53
|
OIPA/iati/tests/test_activity_models.py
|
OIPA/iati/tests/test_activity_models.py
|
from django.test import TestCase
from iati.models import Activity
from iati.factory.utils import _create_test_activity
class ActivityModelTestCase(TestCase):
def test_create_activity(self):
self.assertEqual(Activity.objects.count(), 0)
_create_test_activity(id='100000', iati_identifier='100000')
self.assertIsNotNone(Activity.objects.get(pk='100000'))
def test_delete_activity(self):
activity = _create_test_activity(id='100000', iati_identifier='100000')
activity.delete()
self.assertEqual(Activity.objects.filter(pk='100000').count(), 0)
def test_delete_related_activity(self):
activity = _create_test_activity(id='100000', iati_identifier='100000')
assert activity.relatedactivity_set.count() == 1 # _create_test_activity() creates 1 related activity
# save related activity pk to query fresh object after deleting main activity
related_activity = activity.relatedactivity_set.all()[0]
related_activity_pk = related_activity.pk
# Deleting an activity also deletes RelatedActivity but not the RelatedActivity.ref_activity instance
activity.delete()
self.assertEqual(Activity.objects.filter(pk='100000').count(), 0)
self.assertEqual(Activity.objects.filter(pk=related_activity_pk).count(), 0)
|
Add unit tests for Activity model deletions
|
Add unit tests for Activity model deletions
|
Python
|
agpl-3.0
|
openaid-IATI/OIPA,zimmerman-zimmerman/OIPA,openaid-IATI/OIPA,openaid-IATI/OIPA,zimmerman-zimmerman/OIPA,zimmerman-zimmerman/OIPA,openaid-IATI/OIPA,openaid-IATI/OIPA,zimmerman-zimmerman/OIPA,zimmerman-zimmerman/OIPA
|
Add unit tests for Activity model deletions
|
from django.test import TestCase
from iati.models import Activity
from iati.factory.utils import _create_test_activity
class ActivityModelTestCase(TestCase):
def test_create_activity(self):
self.assertEqual(Activity.objects.count(), 0)
_create_test_activity(id='100000', iati_identifier='100000')
self.assertIsNotNone(Activity.objects.get(pk='100000'))
def test_delete_activity(self):
activity = _create_test_activity(id='100000', iati_identifier='100000')
activity.delete()
self.assertEqual(Activity.objects.filter(pk='100000').count(), 0)
def test_delete_related_activity(self):
activity = _create_test_activity(id='100000', iati_identifier='100000')
assert activity.relatedactivity_set.count() == 1 # _create_test_activity() creates 1 related activity
# save related activity pk to query fresh object after deleting main activity
related_activity = activity.relatedactivity_set.all()[0]
related_activity_pk = related_activity.pk
# Deleting an activity also deletes RelatedActivity but not the RelatedActivity.ref_activity instance
activity.delete()
self.assertEqual(Activity.objects.filter(pk='100000').count(), 0)
self.assertEqual(Activity.objects.filter(pk=related_activity_pk).count(), 0)
|
<commit_before><commit_msg>Add unit tests for Activity model deletions<commit_after>
|
from django.test import TestCase
from iati.models import Activity
from iati.factory.utils import _create_test_activity
class ActivityModelTestCase(TestCase):
def test_create_activity(self):
self.assertEqual(Activity.objects.count(), 0)
_create_test_activity(id='100000', iati_identifier='100000')
self.assertIsNotNone(Activity.objects.get(pk='100000'))
def test_delete_activity(self):
activity = _create_test_activity(id='100000', iati_identifier='100000')
activity.delete()
self.assertEqual(Activity.objects.filter(pk='100000').count(), 0)
def test_delete_related_activity(self):
activity = _create_test_activity(id='100000', iati_identifier='100000')
assert activity.relatedactivity_set.count() == 1 # _create_test_activity() creates 1 related activity
# save related activity pk to query fresh object after deleting main activity
related_activity = activity.relatedactivity_set.all()[0]
related_activity_pk = related_activity.pk
# Deleting an activity also deletes RelatedActivity but not the RelatedActivity.ref_activity instance
activity.delete()
self.assertEqual(Activity.objects.filter(pk='100000').count(), 0)
self.assertEqual(Activity.objects.filter(pk=related_activity_pk).count(), 0)
|
Add unit tests for Activity model deletionsfrom django.test import TestCase
from iati.models import Activity
from iati.factory.utils import _create_test_activity
class ActivityModelTestCase(TestCase):
def test_create_activity(self):
self.assertEqual(Activity.objects.count(), 0)
_create_test_activity(id='100000', iati_identifier='100000')
self.assertIsNotNone(Activity.objects.get(pk='100000'))
def test_delete_activity(self):
activity = _create_test_activity(id='100000', iati_identifier='100000')
activity.delete()
self.assertEqual(Activity.objects.filter(pk='100000').count(), 0)
def test_delete_related_activity(self):
activity = _create_test_activity(id='100000', iati_identifier='100000')
assert activity.relatedactivity_set.count() == 1 # _create_test_activity() creates 1 related activity
# save related activity pk to query fresh object after deleting main activity
related_activity = activity.relatedactivity_set.all()[0]
related_activity_pk = related_activity.pk
# Deleting an activity also deletes RelatedActivity but not the RelatedActivity.ref_activity instance
activity.delete()
self.assertEqual(Activity.objects.filter(pk='100000').count(), 0)
self.assertEqual(Activity.objects.filter(pk=related_activity_pk).count(), 0)
|
<commit_before><commit_msg>Add unit tests for Activity model deletions<commit_after>from django.test import TestCase
from iati.models import Activity
from iati.factory.utils import _create_test_activity
class ActivityModelTestCase(TestCase):
def test_create_activity(self):
self.assertEqual(Activity.objects.count(), 0)
_create_test_activity(id='100000', iati_identifier='100000')
self.assertIsNotNone(Activity.objects.get(pk='100000'))
def test_delete_activity(self):
activity = _create_test_activity(id='100000', iati_identifier='100000')
activity.delete()
self.assertEqual(Activity.objects.filter(pk='100000').count(), 0)
def test_delete_related_activity(self):
activity = _create_test_activity(id='100000', iati_identifier='100000')
assert activity.relatedactivity_set.count() == 1 # _create_test_activity() creates 1 related activity
# save related activity pk to query fresh object after deleting main activity
related_activity = activity.relatedactivity_set.all()[0]
related_activity_pk = related_activity.pk
# Deleting an activity also deletes RelatedActivity but not the RelatedActivity.ref_activity instance
activity.delete()
self.assertEqual(Activity.objects.filter(pk='100000').count(), 0)
self.assertEqual(Activity.objects.filter(pk=related_activity_pk).count(), 0)
|
|
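The deletions these tests pin down are governed by each ForeignKey's on_delete rule. A sketch of a model pair with the behavior the test comments describe — cascading to RelatedActivity rows while leaving the referenced activity untouched (field names here are assumptions, not OIPA's actual schema):

from django.db import models

class Activity(models.Model):
    iati_identifier = models.CharField(max_length=150, primary_key=True)

class RelatedActivity(models.Model):
    # CASCADE: deleting current_activity also deletes this row.
    current_activity = models.ForeignKey(
        Activity, on_delete=models.CASCADE, related_name='relatedactivity_set')
    # SET_NULL: ref_activity can be deleted without deleting this row.
    ref_activity = models.ForeignKey(
        Activity, null=True, on_delete=models.SET_NULL, related_name='+')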
106fc0f8bae7c776a8f6c7dcec2947420492d118
|
homographynet/callbacks.py
|
homographynet/callbacks.py
|
#!/usr/bin/env python
from keras.callbacks import Callback
import keras.backend as K
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
See Caffe SGD docs
"""
def __init__(self, base_lr, gamma, step_size):
super().__init__()
self._lr = base_lr
self._gamma = gamma
self._step_size = step_size
self._iteration = 1
def on_batch_begin(self, batch, logs=None):
if self._iteration % self._step_size == 0:
self._lr *= self._gamma
K.set_value(self.model.optimizer.lr, self._lr)
print('New learning rate:', self._lr)
self._iteration += 1
|
#!/usr/bin/env python
from keras.callbacks import Callback
import keras.backend as K
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
See Caffe SGD docs
"""
def __init__(self, base_lr, gamma, step_size):
super().__init__()
self._base_lr = base_lr
self._gamma = gamma
self._step_size = step_size
self._steps = 0
def on_epoch_begin(self, epoch, logs=None):
self._steps = epoch * self.params['steps']
def on_batch_begin(self, batch, logs=None):
self._steps += 1
if self._steps % self._step_size == 0:
exp = int(self._steps / self._step_size)
lr = self._base_lr * (self._gamma ** exp)
K.set_value(self.model.optimizer.lr, lr)
print('New learning rate:', lr)
|
Fix calculation of current steps when starting with epoch != 0
|
Fix calculation of current steps when starting with epoch != 0
|
Python
|
apache-2.0
|
baudm/HomographyNet
|
#!/usr/bin/env python
from keras.callbacks import Callback
import keras.backend as K
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
See Caffe SGD docs
"""
def __init__(self, base_lr, gamma, step_size):
super().__init__()
self._lr = base_lr
self._gamma = gamma
self._step_size = step_size
self._iteration = 1
def on_batch_begin(self, batch, logs=None):
if self._iteration % self._step_size == 0:
self._lr *= self._gamma
K.set_value(self.model.optimizer.lr, self._lr)
print('New learning rate:', self._lr)
self._iteration += 1
Fix calculation of current steps when starting with epoch != 0
|
#!/usr/bin/env python
from keras.callbacks import Callback
import keras.backend as K
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
See Caffe SGD docs
"""
def __init__(self, base_lr, gamma, step_size):
super().__init__()
self._base_lr = base_lr
self._gamma = gamma
self._step_size = step_size
self._steps = 0
def on_epoch_begin(self, epoch, logs=None):
self._steps = epoch * self.params['steps']
def on_batch_begin(self, batch, logs=None):
self._steps += 1
if self._steps % self._step_size == 0:
exp = int(self._steps / self._step_size)
lr = self._base_lr * (self._gamma ** exp)
K.set_value(self.model.optimizer.lr, lr)
print('New learning rate:', lr)
|
<commit_before>#!/usr/bin/env python
from keras.callbacks import Callback
import keras.backend as K
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
See Caffe SGD docs
"""
def __init__(self, base_lr, gamma, step_size):
super().__init__()
self._lr = base_lr
self._gamma = gamma
self._step_size = step_size
self._iteration = 1
def on_batch_begin(self, batch, logs=None):
if self._iteration % self._step_size == 0:
self._lr *= self._gamma
K.set_value(self.model.optimizer.lr, self._lr)
print('New learning rate:', self._lr)
self._iteration += 1
<commit_msg>Fix calculation of current steps when starting with epoch != 0<commit_after>
|
#!/usr/bin/env python
from keras.callbacks import Callback
import keras.backend as K
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
See Caffe SGD docs
"""
def __init__(self, base_lr, gamma, step_size):
super().__init__()
self._base_lr = base_lr
self._gamma = gamma
self._step_size = step_size
self._steps = 0
def on_epoch_begin(self, epoch, logs=None):
self._steps = epoch * self.params['steps']
def on_batch_begin(self, batch, logs=None):
self._steps += 1
if self._steps % self._step_size == 0:
exp = int(self._steps / self._step_size)
lr = self._base_lr * (self._gamma ** exp)
K.set_value(self.model.optimizer.lr, lr)
print('New learning rate:', lr)
|
#!/usr/bin/env python
from keras.callbacks import Callback
import keras.backend as K
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
See Caffe SGD docs
"""
def __init__(self, base_lr, gamma, step_size):
super().__init__()
self._lr = base_lr
self._gamma = gamma
self._step_size = step_size
self._iteration = 1
def on_batch_begin(self, batch, logs=None):
if self._iteration % self._step_size == 0:
self._lr *= self._gamma
K.set_value(self.model.optimizer.lr, self._lr)
print('New learning rate:', self._lr)
self._iteration += 1
Fix calculation of current steps when starting with epoch != 0#!/usr/bin/env python
from keras.callbacks import Callback
import keras.backend as K
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
See Caffe SGD docs
"""
def __init__(self, base_lr, gamma, step_size):
super().__init__()
self._base_lr = base_lr
self._gamma = gamma
self._step_size = step_size
self._steps = 0
def on_epoch_begin(self, epoch, logs=None):
self._steps = epoch * self.params['steps']
def on_batch_begin(self, batch, logs=None):
self._steps += 1
if self._steps % self._step_size == 0:
exp = int(self._steps / self._step_size)
lr = self._base_lr * (self._gamma ** exp)
K.set_value(self.model.optimizer.lr, lr)
print('New learning rate:', lr)
|
<commit_before>#!/usr/bin/env python
from keras.callbacks import Callback
import keras.backend as K
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
See Caffe SGD docs
"""
def __init__(self, base_lr, gamma, step_size):
super().__init__()
self._lr = base_lr
self._gamma = gamma
self._step_size = step_size
self._iteration = 1
def on_batch_begin(self, batch, logs=None):
if self._iteration % self._step_size == 0:
self._lr *= self._gamma
K.set_value(self.model.optimizer.lr, self._lr)
print('New learning rate:', self._lr)
self._iteration += 1
<commit_msg>Fix calculation of current steps when starting with epoch != 0<commit_after>#!/usr/bin/env python
from keras.callbacks import Callback
import keras.backend as K
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
See Caffe SGD docs
"""
def __init__(self, base_lr, gamma, step_size):
super().__init__()
self._base_lr = base_lr
self._gamma = gamma
self._step_size = step_size
self._steps = 0
def on_epoch_begin(self, epoch, logs=None):
self._steps = epoch * self.params['steps']
def on_batch_begin(self, batch, logs=None):
self._steps += 1
if self._steps % self._step_size == 0:
exp = int(self._steps / self._step_size)
lr = self._base_lr * (self._gamma ** exp)
K.set_value(self.model.optimizer.lr, lr)
print('New learning rate:', lr)
|
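The fix in this last record is worth spelling out: deriving the rate from the global step, rather than mutating it in place at each boundary, makes the schedule a pure function of the step count, so resuming at epoch != 0 lands on the correct value automatically. The closed form, mirroring Caffe's step policy (illustrative sketch):

def step_decay_lr(base_lr, gamma, step_size, global_step):
    # Depends only on how many full step_size windows have elapsed,
    # so restarting training at any step reproduces the same schedule.
    return base_lr * (gamma ** (global_step // step_size))

# Example with base_lr=0.005, gamma=0.1, step_size=30000:
assert step_decay_lr(0.005, 0.1, 30000, 29999) == 0.005
assert abs(step_decay_lr(0.005, 0.1, 30000, 30000) - 0.0005) < 1e-12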