Dataset schema: one record per commit, with the following columns.

| Column | Values |
|---|---|
| commit | string, 40 characters (commit hash) |
| old_file | string, 4–118 characters |
| new_file | string, 4–118 characters |
| old_contents | string, 0–2.94k characters |
| new_contents | string, 1–4.43k characters |
| subject | string, 15–444 characters |
| message | string, 16–3.45k characters |
| lang | string, 1 distinct value |
| license | string, 13 distinct values |
| repos | string, 5–43.2k characters |
| prompt | string, 17–4.58k characters |
| response | string, 1–4.43k characters |
| prompt_tagged | string, 58–4.62k characters |
| response_tagged | string, 1–4.43k characters |
| text | string, 132–7.29k characters |
| text_tagged | string, 173–7.33k characters |
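A minimal sketch of loading and inspecting a table like this with the Hugging Face `datasets` library; the file name below is a placeholder rather than the real location of this data.

```python
from datasets import load_dataset

# Placeholder path: point this at an actual JSON Lines export of the table above.
ds = load_dataset("json", data_files="commit_dataset.jsonl", split="train")

print(ds.column_names)              # commit, old_file, new_file, old_contents, ...
row = ds[0]
print(row["subject"])               # the commit subject line
print(row["new_contents"][:200])    # first 200 characters of the post-commit file
```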
0bd93c02ab7917d570a74cf151dfb5789c3bf174
|
scripts/remove_concepts_after.py
|
scripts/remove_concepts_after.py
|
# An entirely untested script to delete all the concepts in the
# CATMAID database for a particular project.
# Mark Longair 2010
import os
from jarray import array
from java.sql import DriverManager, Connection, SQLException, Types
# FIXME: Just hardcode the user_id and project_id for the moment
user_id = 3
project_id = 4
# Set up the JDBC connection:
try:
Class.forName("org.postgresql.Driver")
except:
IJ.log("Failed to find the postgresql driver...")
raise
catmaid_db_user = None
catmaid_db_password = None
db_login_filename = os.path.join(os.environ['HOME'],'.catmaid-db')
fp = open(db_login_filename)
for i, line in enumerate(fp):
if i == 0:
catmaid_db_user = line.strip()
elif i == 1:
catmaid_db_password = line.strip()
c = DriverManager.getConnection("jdbc:postgresql://localhost/catmaid",
catmaid_db_user,
catmaid_db_password)
def run():
# FIXME: ask in a dialog for the ID instead
first_id = 3859376
where = ' where id > %d' % (first_id,)
s = c.createStatement('delete from treenode_class_instance'+where)
s.executeQuery()
s = c.createStatement('delete from connector_class_instance'+where)
s.executeQuery()
s = c.createStatement('delete from class_instance'+where)
s.executeQuery()
s = c.createStatement('alter table treenode drop constraint treenode_parent_id_fkey')
s.executeQuery()
s = c.createStatement('delete from treenode'+where)
s.executeQuery()
s = c.createStatement('alter table only treenode add constraint treenode_parent_id_fkey foreign key (parent_id) REFERENCES treenode(id)');
s.executeQuery()
s = c.createStatement('delete from relation'+where)
s.executeQuery()
s = c.createStatement('delete from connector'+where)
s.executeQuery()
s = c.createStatement('delete from class_instance_class_instance'+where)
s.executeQuery()
|
Add a brutal script for removing concepts in bulk while testing
|
Add a brutal script for removing concepts in bulk while testing
|
Python
|
agpl-3.0
|
fzadow/CATMAID,htem/CATMAID,htem/CATMAID,htem/CATMAID,fzadow/CATMAID,fzadow/CATMAID,htem/CATMAID,fzadow/CATMAID
|
Add a brutal script for removing concepts in bulk while testing
|
# An entirely untested script to delete all the concepts in the
# CATMAID database for a particular project.
# Mark Longair 2010
import os
from jarray import array
from java.sql import DriverManager, Connection, SQLException, Types
# FIXME: Just hardcode the user_id and project_id for the moment
user_id = 3
project_id = 4
# Set up the JDBC connection:
try:
Class.forName("org.postgresql.Driver")
except:
IJ.log("Failed to find the postgresql driver...")
raise
catmaid_db_user = None
catmaid_db_password = None
db_login_filename = os.path.join(os.environ['HOME'],'.catmaid-db')
fp = open(db_login_filename)
for i, line in enumerate(fp):
if i == 0:
catmaid_db_user = line.strip()
elif i == 1:
catmaid_db_password = line.strip()
c = DriverManager.getConnection("jdbc:postgresql://localhost/catmaid",
catmaid_db_user,
catmaid_db_password)
def run():
# FIXME: ask in a dialog for the ID instead
first_id = 3859376
where = ' where id > %d' % (first_id,)
s = c.createStatement('delete from treenode_class_instance'+where)
s.executeQuery()
s = c.createStatement('delete from connector_class_instance'+where)
s.executeQuery()
s = c.createStatement('delete from class_instance'+where)
s.executeQuery()
s = c.createStatement('alter table treenode drop constraint treenode_parent_id_fkey')
s.executeQuery()
s = c.createStatement('delete from treenode'+where)
s.executeQuery()
s = c.createStatement('alter table only treenode add constraint treenode_parent_id_fkey foreign key (parent_id) REFERENCES treenode(id)');
s.executeQuery()
s = c.createStatement('delete from relation'+where)
s.executeQuery()
s = c.createStatement('delete from connector'+where)
s.executeQuery()
s = c.createStatement('delete from class_instance_class_instance'+where)
s.executeQuery()
|
<commit_before><commit_msg>Add a brutal script for removing concepts in bulk while testing<commit_after>
|
# An entirely untested script to delete all the concepts in the
# CATMAID database for a particular project.
# Mark Longair 2010
import os
from jarray import array
from java.sql import DriverManager, Connection, SQLException, Types
# FIXME: Just hardcode the user_id and project_id for the moment
user_id = 3
project_id = 4
# Set up the JDBC connection:
try:
Class.forName("org.postgresql.Driver")
except:
IJ.log("Failed to find the postgresql driver...")
raise
catmaid_db_user = None
catmaid_db_password = None
db_login_filename = os.path.join(os.environ['HOME'],'.catmaid-db')
fp = open(db_login_filename)
for i, line in enumerate(fp):
if i == 0:
catmaid_db_user = line.strip()
elif i == 1:
catmaid_db_password = line.strip()
c = DriverManager.getConnection("jdbc:postgresql://localhost/catmaid",
catmaid_db_user,
catmaid_db_password)
def run():
# FIXME: ask in a dialog for the ID instead
first_id = 3859376
where = ' where id > %d' % (first_id,)
s = c.createStatement('delete from treenode_class_instance'+where)
s.executeQuery()
s = c.createStatement('delete from connector_class_instance'+where)
s.executeQuery()
s = c.createStatement('delete from class_instance'+where)
s.executeQuery()
s = c.createStatement('alter table treenode drop constraint treenode_parent_id_fkey')
s.executeQuery()
s = c.createStatement('delete from treenode'+where)
s.executeQuery()
s = c.createStatement('alter table only treenode add constraint treenode_parent_id_fkey foreign key (parent_id) REFERENCES treenode(id)');
s.executeQuery()
s = c.createStatement('delete from relation'+where)
s.executeQuery()
s = c.createStatement('delete from connector'+where)
s.executeQuery()
s = c.createStatement('delete from class_instance_class_instance'+where)
s.executeQuery()
|
Add a brutal script for removing concepts in bulk while testing# An entirely untested script to delete all the concepts in the
# CATMAID database for a particular project.
# Mark Longair 2010
import os
from jarray import array
from java.sql import DriverManager, Connection, SQLException, Types
# FIXME: Just hardcode the user_id and project_id for the moment
user_id = 3
project_id = 4
# Set up the JDBC connection:
try:
Class.forName("org.postgresql.Driver")
except:
IJ.log("Failed to find the postgresql driver...")
raise
catmaid_db_user = None
catmaid_db_password = None
db_login_filename = os.path.join(os.environ['HOME'],'.catmaid-db')
fp = open(db_login_filename)
for i, line in enumerate(fp):
if i == 0:
catmaid_db_user = line.strip()
elif i == 1:
catmaid_db_password = line.strip()
c = DriverManager.getConnection("jdbc:postgresql://localhost/catmaid",
catmaid_db_user,
catmaid_db_password)
def run():
# FIXME: ask in a dialog for the ID instead
first_id = 3859376
where = ' where id > %d' % (first_id,)
s = c.createStatement('delete from treenode_class_instance'+where)
s.executeQuery()
s = c.createStatement('delete from connector_class_instance'+where)
s.executeQuery()
s = c.createStatement('delete from class_instance'+where)
s.executeQuery()
s = c.createStatement('alter table treenode drop constraint treenode_parent_id_fkey')
s.executeQuery()
s = c.createStatement('delete from treenode'+where)
s.executeQuery()
s = c.createStatement('alter table only treenode add constraint treenode_parent_id_fkey foreign key (parent_id) REFERENCES treenode(id)');
s.executeQuery()
s = c.createStatement('delete from relation'+where)
s.executeQuery()
s = c.createStatement('delete from connector'+where)
s.executeQuery()
s = c.createStatement('delete from class_instance_class_instance'+where)
s.executeQuery()
|
<commit_before><commit_msg>Add a brutal script for removing concepts in bulk while testing<commit_after># An entirely untested script to delete all the concepts in the
# CATMAID database for a particular project.
# Mark Longair 2010
import os
from jarray import array
from java.sql import DriverManager, Connection, SQLException, Types
# FIXME: Just hardcode the user_id and project_id for the moment
user_id = 3
project_id = 4
# Set up the JDBC connection:
try:
Class.forName("org.postgresql.Driver")
except:
IJ.log("Failed to find the postgresql driver...")
raise
catmaid_db_user = None
catmaid_db_password = None
db_login_filename = os.path.join(os.environ['HOME'],'.catmaid-db')
fp = open(db_login_filename)
for i, line in enumerate(fp):
if i == 0:
catmaid_db_user = line.strip()
elif i == 1:
catmaid_db_password = line.strip()
c = DriverManager.getConnection("jdbc:postgresql://localhost/catmaid",
catmaid_db_user,
catmaid_db_password)
def run():
# FIXME: ask in a dialog for the ID instead
first_id = 3859376
where = ' where id > %d' % (first_id,)
s = c.createStatement('delete from treenode_class_instance'+where)
s.executeQuery()
s = c.createStatement('delete from connector_class_instance'+where)
s.executeQuery()
s = c.createStatement('delete from class_instance'+where)
s.executeQuery()
s = c.createStatement('alter table treenode drop constraint treenode_parent_id_fkey')
s.executeQuery()
s = c.createStatement('delete from treenode'+where)
s.executeQuery()
s = c.createStatement('alter table only treenode add constraint treenode_parent_id_fkey foreign key (parent_id) REFERENCES treenode(id)');
s.executeQuery()
s = c.createStatement('delete from relation'+where)
s.executeQuery()
s = c.createStatement('delete from connector'+where)
s.executeQuery()
s = c.createStatement('delete from class_instance_class_instance'+where)
s.executeQuery()
|
|
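The script in this record is self-described as entirely untested, and its JDBC usage would fail as written: java.sql creates statements with no arguments, and DELETE/ALTER statements go through executeUpdate rather than executeQuery. Below is a sketch of the deletion loop using the conventional calls, assuming the same Jython setup, connection `c`, and threshold `first_id` as the script above; the foreign-key constraint handling around `treenode` is omitted.

```python
# Sketch only, not part of the commit above. Assumes the connection `c` and
# `first_id` defined in the record's script.
tables = ['treenode_class_instance', 'connector_class_instance', 'class_instance',
          'treenode', 'relation', 'connector', 'class_instance_class_instance']

where = ' where id > %d' % (first_id,)
s = c.createStatement()            # java.sql statements take no SQL at creation time
for table in tables:
    # DELETE returns a row count, not a result set, so use executeUpdate
    deleted = s.executeUpdate('delete from ' + table + where)
    print('%s: deleted %d rows' % (table, deleted))
s.close()
```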
9e388ad5b78967f87a0b3b55235bd1e19183c152
|
tests/models/spells/test_paladin_spells.py
|
tests/models/spells/test_paladin_spells.py
|
import unittest
from tests.delete_test_db import delete_test_db # module that deletes the DB :)
import database.main
from tests.create_test_db import engine, session, Base
database.main.engine = engine
database.main.session = session
database.main.Base = Base
import models.main
from models.spells.paladin_spells_template import PaladinSpellsSchema
from spells import PaladinSpell
from models.items.item_template import ItemTemplateSchema
from models.spells.spell_dots import DotSchema
from buffs import BeneficialBuff, DoT
class PaladinSpellsSchemaTests(unittest.TestCase):
def setUp(self):
"""
Test that the values in the Schema are as expected
And that the convert_to_paladin_spell_object function works
"""
self.spell_entry = 4
self.spell_name = 'Melting Strike'
self.expected_spell = PaladinSpell(name=self.spell_name, rank=1, damage1=3, damage2=0, damage3=0,
heal1=0, heal2=0, heal3=0, mana_cost=6, cooldown=3,
beneficial_effect=None, harmful_effect=None)
def test_schema_values(self):
""" Load a schema object and assert that every value is as expected"""
loaded_schema: PaladinSpellsSchema = session.query(PaladinSpellsSchema).get(self.spell_entry)
self.assertTrue(isinstance(loaded_schema.id, int))
self.assertTrue(isinstance(loaded_schema.name, str))
self.assertTrue(isinstance(loaded_schema.rank, int))
self.assertTrue(isinstance(loaded_schema.level_required, int))
self.assertTrue(isinstance(loaded_schema.damage1, int))
self.assertTrue(isinstance(loaded_schema.damage2, int))
self.assertTrue(isinstance(loaded_schema.damage3, int))
self.assertTrue(isinstance(loaded_schema.heal1, int))
self.assertTrue(isinstance(loaded_schema.heal2, int))
self.assertTrue(isinstance(loaded_schema.heal3, int))
self.assertTrue(isinstance(loaded_schema.mana_cost, int))
self.assertIsNone(loaded_schema.beneficial_effect)
self.assertTrue(isinstance(loaded_schema.harmful_effect, int))
self.assertTrue(isinstance(loaded_schema.cooldown, int))
self.assertTrue(isinstance(loaded_schema.comment, str))
self.assertIsNone(loaded_schema.buff)
self.assertTrue(isinstance(loaded_schema.dot, DotSchema))
def tearDownModule():
delete_test_db()
if __name__ == '__main__':
unittest.main()
|
Test for the PaladinSpellSchema values
|
Test for the PaladinSpellSchema values
|
Python
|
mit
|
Enether/python_wow
|
Test for the PaladinSpellSchema values
|
import unittest
from tests.delete_test_db import delete_test_db # module that deletes the DB :)
import database.main
from tests.create_test_db import engine, session, Base
database.main.engine = engine
database.main.session = session
database.main.Base = Base
import models.main
from models.spells.paladin_spells_template import PaladinSpellsSchema
from spells import PaladinSpell
from models.items.item_template import ItemTemplateSchema
from models.spells.spell_dots import DotSchema
from buffs import BeneficialBuff, DoT
class PaladinSpellsSchemaTests(unittest.TestCase):
def setUp(self):
"""
Test that the values in the Schema are as expected
And that the convert_to_paladin_spell_object function works
"""
self.spell_entry = 4
self.spell_name = 'Melting Strike'
self.expected_spell = PaladinSpell(name=self.spell_name, rank=1, damage1=3, damage2=0, damage3=0,
heal1=0, heal2=0, heal3=0, mana_cost=6, cooldown=3,
beneficial_effect=None, harmful_effect=None)
def test_schema_values(self):
""" Load a schema object and assert that every value is as expected"""
loaded_schema: PaladinSpellsSchema = session.query(PaladinSpellsSchema).get(self.spell_entry)
self.assertTrue(isinstance(loaded_schema.id, int))
self.assertTrue(isinstance(loaded_schema.name, str))
self.assertTrue(isinstance(loaded_schema.rank, int))
self.assertTrue(isinstance(loaded_schema.level_required, int))
self.assertTrue(isinstance(loaded_schema.damage1, int))
self.assertTrue(isinstance(loaded_schema.damage2, int))
self.assertTrue(isinstance(loaded_schema.damage3, int))
self.assertTrue(isinstance(loaded_schema.heal1, int))
self.assertTrue(isinstance(loaded_schema.heal2, int))
self.assertTrue(isinstance(loaded_schema.heal3, int))
self.assertTrue(isinstance(loaded_schema.mana_cost, int))
self.assertIsNone(loaded_schema.beneficial_effect)
self.assertTrue(isinstance(loaded_schema.harmful_effect, int))
self.assertTrue(isinstance(loaded_schema.cooldown, int))
self.assertTrue(isinstance(loaded_schema.comment, str))
self.assertIsNone(loaded_schema.buff)
self.assertTrue(isinstance(loaded_schema.dot, DotSchema))
def tearDownModule():
delete_test_db()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test for the PaladinSpellSchema values<commit_after>
|
import unittest
from tests.delete_test_db import delete_test_db # module that deletes the DB :)
import database.main
from tests.create_test_db import engine, session, Base
database.main.engine = engine
database.main.session = session
database.main.Base = Base
import models.main
from models.spells.paladin_spells_template import PaladinSpellsSchema
from spells import PaladinSpell
from models.items.item_template import ItemTemplateSchema
from models.spells.spell_dots import DotSchema
from buffs import BeneficialBuff, DoT
class PaladinSpellsSchemaTests(unittest.TestCase):
def setUp(self):
"""
Test that the values in the Schema are as expected
And that the convert_to_paladin_spell_object function works
"""
self.spell_entry = 4
self.spell_name = 'Melting Strike'
self.expected_spell = PaladinSpell(name=self.spell_name, rank=1, damage1=3, damage2=0, damage3=0,
heal1=0, heal2=0, heal3=0, mana_cost=6, cooldown=3,
beneficial_effect=None, harmful_effect=None)
def test_schema_values(self):
""" Load a schema object and assert that every value is as expected"""
loaded_schema: PaladinSpellsSchema = session.query(PaladinSpellsSchema).get(self.spell_entry)
self.assertTrue(isinstance(loaded_schema.id, int))
self.assertTrue(isinstance(loaded_schema.name, str))
self.assertTrue(isinstance(loaded_schema.rank, int))
self.assertTrue(isinstance(loaded_schema.level_required, int))
self.assertTrue(isinstance(loaded_schema.damage1, int))
self.assertTrue(isinstance(loaded_schema.damage2, int))
self.assertTrue(isinstance(loaded_schema.damage3, int))
self.assertTrue(isinstance(loaded_schema.heal1, int))
self.assertTrue(isinstance(loaded_schema.heal2, int))
self.assertTrue(isinstance(loaded_schema.heal3, int))
self.assertTrue(isinstance(loaded_schema.mana_cost, int))
self.assertIsNone(loaded_schema.beneficial_effect)
self.assertTrue(isinstance(loaded_schema.harmful_effect, int))
self.assertTrue(isinstance(loaded_schema.cooldown, int))
self.assertTrue(isinstance(loaded_schema.comment, str))
self.assertIsNone(loaded_schema.buff)
self.assertTrue(isinstance(loaded_schema.dot, DotSchema))
def tearDownModule():
delete_test_db()
if __name__ == '__main__':
unittest.main()
|
Test for the PaladinSpellSchema valuesimport unittest
from tests.delete_test_db import delete_test_db # module that deletes the DB :)
import database.main
from tests.create_test_db import engine, session, Base
database.main.engine = engine
database.main.session = session
database.main.Base = Base
import models.main
from models.spells.paladin_spells_template import PaladinSpellsSchema
from spells import PaladinSpell
from models.items.item_template import ItemTemplateSchema
from models.spells.spell_dots import DotSchema
from buffs import BeneficialBuff, DoT
class PaladinSpellsSchemaTests(unittest.TestCase):
def setUp(self):
"""
Test that the values in the Schema are as expected
And that the convert_to_paladin_spell_object function works
"""
self.spell_entry = 4
self.spell_name = 'Melting Strike'
self.expected_spell = PaladinSpell(name=self.spell_name, rank=1, damage1=3, damage2=0, damage3=0,
heal1=0, heal2=0, heal3=0, mana_cost=6, cooldown=3,
beneficial_effect=None, harmful_effect=None)
def test_schema_values(self):
""" Load a schema object and assert that every value is as expected"""
loaded_schema: PaladinSpellsSchema = session.query(PaladinSpellsSchema).get(self.spell_entry)
self.assertTrue(isinstance(loaded_schema.id, int))
self.assertTrue(isinstance(loaded_schema.name, str))
self.assertTrue(isinstance(loaded_schema.rank, int))
self.assertTrue(isinstance(loaded_schema.level_required, int))
self.assertTrue(isinstance(loaded_schema.damage1, int))
self.assertTrue(isinstance(loaded_schema.damage2, int))
self.assertTrue(isinstance(loaded_schema.damage3, int))
self.assertTrue(isinstance(loaded_schema.heal1, int))
self.assertTrue(isinstance(loaded_schema.heal2, int))
self.assertTrue(isinstance(loaded_schema.heal3, int))
self.assertTrue(isinstance(loaded_schema.mana_cost, int))
self.assertIsNone(loaded_schema.beneficial_effect)
self.assertTrue(isinstance(loaded_schema.harmful_effect, int))
self.assertTrue(isinstance(loaded_schema.cooldown, int))
self.assertTrue(isinstance(loaded_schema.comment, str))
self.assertIsNone(loaded_schema.buff)
self.assertTrue(isinstance(loaded_schema.dot, DotSchema))
def tearDownModule():
delete_test_db()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test for the PaladinSpellSchema values<commit_after>import unittest
from tests.delete_test_db import delete_test_db # module that deletes the DB :)
import database.main
from tests.create_test_db import engine, session, Base
database.main.engine = engine
database.main.session = session
database.main.Base = Base
import models.main
from models.spells.paladin_spells_template import PaladinSpellsSchema
from spells import PaladinSpell
from models.items.item_template import ItemTemplateSchema
from models.spells.spell_dots import DotSchema
from buffs import BeneficialBuff, DoT
class PaladinSpellsSchemaTests(unittest.TestCase):
def setUp(self):
"""
Test that the values in the Schema are as expected
And that the convert_to_paladin_spell_object function works
"""
self.spell_entry = 4
self.spell_name = 'Melting Strike'
self.expected_spell = PaladinSpell(name=self.spell_name, rank=1, damage1=3, damage2=0, damage3=0,
heal1=0, heal2=0, heal3=0, mana_cost=6, cooldown=3,
beneficial_effect=None, harmful_effect=None)
def test_schema_values(self):
""" Load a schema object and assert that every value is as expected"""
loaded_schema: PaladinSpellsSchema = session.query(PaladinSpellsSchema).get(self.spell_entry)
self.assertTrue(isinstance(loaded_schema.id, int))
self.assertTrue(isinstance(loaded_schema.name, str))
self.assertTrue(isinstance(loaded_schema.rank, int))
self.assertTrue(isinstance(loaded_schema.level_required, int))
self.assertTrue(isinstance(loaded_schema.damage1, int))
self.assertTrue(isinstance(loaded_schema.damage2, int))
self.assertTrue(isinstance(loaded_schema.damage3, int))
self.assertTrue(isinstance(loaded_schema.heal1, int))
self.assertTrue(isinstance(loaded_schema.heal2, int))
self.assertTrue(isinstance(loaded_schema.heal3, int))
self.assertTrue(isinstance(loaded_schema.mana_cost, int))
self.assertIsNone(loaded_schema.beneficial_effect)
self.assertTrue(isinstance(loaded_schema.harmful_effect, int))
self.assertTrue(isinstance(loaded_schema.cooldown, int))
self.assertTrue(isinstance(loaded_schema.comment, str))
self.assertIsNone(loaded_schema.buff)
self.assertTrue(isinstance(loaded_schema.dot, DotSchema))
def tearDownModule():
delete_test_db()
if __name__ == '__main__':
unittest.main()
|
|
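The setUp docstring above also mentions checking that convert_to_paladin_spell_object works, but no test method exercises it. A possible addition is sketched below; it assumes the schema exposes a convert_to_paladin_spell_object() method returning a PaladinSpell whose constructor arguments are stored as attributes of the same names, which is not confirmed by this record.

```python
# Hypothetical test method (would live inside PaladinSpellsSchemaTests); the
# conversion API is assumed, not shown in this record.
def test_convert_to_paladin_spell_object(self):
    loaded_schema = session.query(PaladinSpellsSchema).get(self.spell_entry)
    spell = loaded_schema.convert_to_paladin_spell_object()
    self.assertTrue(isinstance(spell, PaladinSpell))
    self.assertEqual(spell.name, self.expected_spell.name)
    self.assertEqual(spell.rank, self.expected_spell.rank)
    self.assertEqual(spell.mana_cost, self.expected_spell.mana_cost)
```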
803201baa32fb847f363b6807f92f2d0b6a51c51
|
tests/test_abort_generate_on_hook_error.py
|
tests/test_abort_generate_on_hook_error.py
|
# -*- coding: utf-8 -*-
import pytest
from cookiecutter import generate
from cookiecutter import exceptions
@pytest.mark.usefixtures('clean_system')
def test_pre_gen_hook(tmpdir):
context = {
'cookiecutter': {
"repo_dir": "foobar",
"abort_pre_gen": "yes",
"abort_post_gen": "no"
}
}
with pytest.raises(exceptions.FailedHookException):
generate.generate_files(
repo_dir='tests/hooks-abort-render',
context=context,
output_dir=str(tmpdir)
)
assert not tmpdir.join('foobar').isdir()
|
Test that an error in pre_gen_project aborts generation
|
Test that an error in pre_gen_project aborts generation
|
Python
|
bsd-3-clause
|
audreyr/cookiecutter,stevepiercy/cookiecutter,stevepiercy/cookiecutter,michaeljoseph/cookiecutter,willingc/cookiecutter,Springerle/cookiecutter,hackebrot/cookiecutter,hackebrot/cookiecutter,pjbull/cookiecutter,michaeljoseph/cookiecutter,audreyr/cookiecutter,Springerle/cookiecutter,terryjbates/cookiecutter,terryjbates/cookiecutter,dajose/cookiecutter,dajose/cookiecutter,luzfcb/cookiecutter,luzfcb/cookiecutter,pjbull/cookiecutter,willingc/cookiecutter
|
Test that an error in pre_gen_project aborts generation
|
# -*- coding: utf-8 -*-
import pytest
from cookiecutter import generate
from cookiecutter import exceptions
@pytest.mark.usefixtures('clean_system')
def test_pre_gen_hook(tmpdir):
context = {
'cookiecutter': {
"repo_dir": "foobar",
"abort_pre_gen": "yes",
"abort_post_gen": "no"
}
}
with pytest.raises(exceptions.FailedHookException):
generate.generate_files(
repo_dir='tests/hooks-abort-render',
context=context,
output_dir=str(tmpdir)
)
assert not tmpdir.join('foobar').isdir()
|
<commit_before><commit_msg>Test that an error in pre_gen_project aborts generation<commit_after>
|
# -*- coding: utf-8 -*-
import pytest
from cookiecutter import generate
from cookiecutter import exceptions
@pytest.mark.usefixtures('clean_system')
def test_pre_gen_hook(tmpdir):
context = {
'cookiecutter': {
"repo_dir": "foobar",
"abort_pre_gen": "yes",
"abort_post_gen": "no"
}
}
with pytest.raises(exceptions.FailedHookException):
generate.generate_files(
repo_dir='tests/hooks-abort-render',
context=context,
output_dir=str(tmpdir)
)
assert not tmpdir.join('foobar').isdir()
|
Test that an error in pre_gen_project aborts generation# -*- coding: utf-8 -*-
import pytest
from cookiecutter import generate
from cookiecutter import exceptions
@pytest.mark.usefixtures('clean_system')
def test_pre_gen_hook(tmpdir):
context = {
'cookiecutter': {
"repo_dir": "foobar",
"abort_pre_gen": "yes",
"abort_post_gen": "no"
}
}
with pytest.raises(exceptions.FailedHookException):
generate.generate_files(
repo_dir='tests/hooks-abort-render',
context=context,
output_dir=str(tmpdir)
)
assert not tmpdir.join('foobar').isdir()
|
<commit_before><commit_msg>Test that an error in pre_gen_project aborts generation<commit_after># -*- coding: utf-8 -*-
import pytest
from cookiecutter import generate
from cookiecutter import exceptions
@pytest.mark.usefixtures('clean_system')
def test_pre_gen_hook(tmpdir):
context = {
'cookiecutter': {
"repo_dir": "foobar",
"abort_pre_gen": "yes",
"abort_post_gen": "no"
}
}
with pytest.raises(exceptions.FailedHookException):
generate.generate_files(
repo_dir='tests/hooks-abort-render',
context=context,
output_dir=str(tmpdir)
)
assert not tmpdir.join('foobar').isdir()
|
|
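The test above expects FailedHookException when the template's pre_gen hook fails. Here is a sketch of the kind of hooks/pre_gen_project.py the tests/hooks-abort-render fixture presumably contains; the actual fixture may differ.

```python
# hooks/pre_gen_project.py (sketch). Hook files are rendered with the cookiecutter
# context before they run, so the template variable below becomes "yes" or "no".
import sys

if '{{ cookiecutter.abort_pre_gen }}' == 'yes':
    sys.exit(1)   # any non-zero exit code makes cookiecutter raise FailedHookException
```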
b079edc37cd8abb68194637ee90b9fecc51b9b98
|
corehq/apps/cachehq/tests.py
|
corehq/apps/cachehq/tests.py
|
from copy import deepcopy
from mock import patch, MagicMock
from django.test import SimpleTestCase
from dimagi.ext import couchdbkit as couch
from corehq.apps.cachehq.mixins import CachedCouchDocumentMixin
class BlogPost(CachedCouchDocumentMixin, couch.Document):
title = couch.StringProperty()
body = couch.StringProperty()
class TestCachedCouchDocumentMixin(SimpleTestCase):
@patch('dimagi.ext.couchdbkit.Document.save', MagicMock())
@patch('dimagi.ext.couchdbkit.Document.get')
def test_get(self, doc_get):
blog_post = BlogPost(title="My favorite colors", body="blue")
blog_post._id = 'idssrgglcfoyxdtrunbcae'
doc_get.return_value = deepcopy(blog_post)
blog_post.save()
blog_post.clear_caches()
# Make two `get`s and assert that only one made it to Document.get
BlogPost.get(blog_post._id)
BlogPost.get(blog_post._id)
doc_get.assert_called_once_with(blog_post._id)
# Update the doc, save, and assert that Document.get was hit again
blog_post.body = "Actually, it's purple"
blog_post.save()
BlogPost.get(blog_post._id)
self.assertEqual(doc_get.call_count, 2)
|
Add basic test for document quickcaching
|
Add basic test for document quickcaching
|
Python
|
bsd-3-clause
|
qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add basic test for document quickcaching
|
from copy import deepcopy
from mock import patch, MagicMock
from django.test import SimpleTestCase
from dimagi.ext import couchdbkit as couch
from corehq.apps.cachehq.mixins import CachedCouchDocumentMixin
class BlogPost(CachedCouchDocumentMixin, couch.Document):
title = couch.StringProperty()
body = couch.StringProperty()
class TestCachedCouchDocumentMixin(SimpleTestCase):
@patch('dimagi.ext.couchdbkit.Document.save', MagicMock())
@patch('dimagi.ext.couchdbkit.Document.get')
def test_get(self, doc_get):
blog_post = BlogPost(title="My favorite colors", body="blue")
blog_post._id = 'idssrgglcfoyxdtrunbcae'
doc_get.return_value = deepcopy(blog_post)
blog_post.save()
blog_post.clear_caches()
# Make two `get`s and assert that only one made it to Document.get
BlogPost.get(blog_post._id)
BlogPost.get(blog_post._id)
doc_get.assert_called_once_with(blog_post._id)
# Update the doc, save, and assert that Document.get was hit again
blog_post.body = "Actually, it's purple"
blog_post.save()
BlogPost.get(blog_post._id)
self.assertEqual(doc_get.call_count, 2)
|
<commit_before><commit_msg>Add basic test for document quickcaching<commit_after>
|
from copy import deepcopy
from mock import patch, MagicMock
from django.test import SimpleTestCase
from dimagi.ext import couchdbkit as couch
from corehq.apps.cachehq.mixins import CachedCouchDocumentMixin
class BlogPost(CachedCouchDocumentMixin, couch.Document):
title = couch.StringProperty()
body = couch.StringProperty()
class TestCachedCouchDocumentMixin(SimpleTestCase):
@patch('dimagi.ext.couchdbkit.Document.save', MagicMock())
@patch('dimagi.ext.couchdbkit.Document.get')
def test_get(self, doc_get):
blog_post = BlogPost(title="My favorite colors", body="blue")
blog_post._id = 'idssrgglcfoyxdtrunbcae'
doc_get.return_value = deepcopy(blog_post)
blog_post.save()
blog_post.clear_caches()
# Make two `get`s and assert that only one made it to Document.get
BlogPost.get(blog_post._id)
BlogPost.get(blog_post._id)
doc_get.assert_called_once_with(blog_post._id)
# Update the doc, save, and assert that Document.get was hit again
blog_post.body = "Actually, it's purple"
blog_post.save()
BlogPost.get(blog_post._id)
self.assertEqual(doc_get.call_count, 2)
|
Add basic test for document quickcachingfrom copy import deepcopy
from mock import patch, MagicMock
from django.test import SimpleTestCase
from dimagi.ext import couchdbkit as couch
from corehq.apps.cachehq.mixins import CachedCouchDocumentMixin
class BlogPost(CachedCouchDocumentMixin, couch.Document):
title = couch.StringProperty()
body = couch.StringProperty()
class TestCachedCouchDocumentMixin(SimpleTestCase):
@patch('dimagi.ext.couchdbkit.Document.save', MagicMock())
@patch('dimagi.ext.couchdbkit.Document.get')
def test_get(self, doc_get):
blog_post = BlogPost(title="My favorite colors", body="blue")
blog_post._id = 'idssrgglcfoyxdtrunbcae'
doc_get.return_value = deepcopy(blog_post)
blog_post.save()
blog_post.clear_caches()
# Make two `get`s and assert that only one made it to Document.get
BlogPost.get(blog_post._id)
BlogPost.get(blog_post._id)
doc_get.assert_called_once_with(blog_post._id)
# Update the doc, save, and assert that Document.get was hit again
blog_post.body = "Actually, it's purple"
blog_post.save()
BlogPost.get(blog_post._id)
self.assertEqual(doc_get.call_count, 2)
|
<commit_before><commit_msg>Add basic test for document quickcaching<commit_after>from copy import deepcopy
from mock import patch, MagicMock
from django.test import SimpleTestCase
from dimagi.ext import couchdbkit as couch
from corehq.apps.cachehq.mixins import CachedCouchDocumentMixin
class BlogPost(CachedCouchDocumentMixin, couch.Document):
title = couch.StringProperty()
body = couch.StringProperty()
class TestCachedCouchDocumentMixin(SimpleTestCase):
@patch('dimagi.ext.couchdbkit.Document.save', MagicMock())
@patch('dimagi.ext.couchdbkit.Document.get')
def test_get(self, doc_get):
blog_post = BlogPost(title="My favorite colors", body="blue")
blog_post._id = 'idssrgglcfoyxdtrunbcae'
doc_get.return_value = deepcopy(blog_post)
blog_post.save()
blog_post.clear_caches()
# Make two `get`s and assert that only one made it to Document.get
BlogPost.get(blog_post._id)
BlogPost.get(blog_post._id)
doc_get.assert_called_once_with(blog_post._id)
# Update the doc, save, and assert that Document.get was hit again
blog_post.body = "Actually, it's purple"
blog_post.save()
BlogPost.get(blog_post._id)
self.assertEqual(doc_get.call_count, 2)
|
|
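The test above asserts caching behaviour only: two get() calls reach Document.get once, and a save() invalidates the cache. A minimal in-memory stand-in with that behaviour is sketched below as an illustration; it is not the real CachedCouchDocumentMixin.

```python
# Illustration only: a dict-backed mixin with the get/save/clear_caches behaviour
# the test exercises, not the actual corehq implementation.
class SimpleCachedDocumentMixin(object):
    _cache = {}

    @classmethod
    def get(cls, doc_id):
        # Repeated reads are served from the cache, so the underlying
        # Document.get is hit only once per saved revision.
        if doc_id not in cls._cache:
            cls._cache[doc_id] = super(SimpleCachedDocumentMixin, cls).get(doc_id)
        return cls._cache[doc_id]

    def save(self, *args, **kwargs):
        result = super(SimpleCachedDocumentMixin, self).save(*args, **kwargs)
        self._cache.pop(self._id, None)   # next get() must see the new revision
        return result

    def clear_caches(self):
        self._cache.pop(self._id, None)
```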
b159433375714c67ac36e58d4323196222759f30
|
babybuddy/migrations/0003_add_refresh_help_text.py
|
babybuddy/migrations/0003_add_refresh_help_text.py
|
# Generated by Django 2.0.5 on 2018-07-15 14:16
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('babybuddy', '0002_add_settings'),
]
operations = [
migrations.AlterField(
model_name='settings',
name='dashboard_refresh_rate',
field=models.DurationField(blank=True, choices=[(None, 'disabled'), (datetime.timedelta(0, 60), '1 min.'), (datetime.timedelta(0, 120), '2 min.'), (datetime.timedelta(0, 180), '3 min.'), (datetime.timedelta(0, 240), '4 min.'), (datetime.timedelta(0, 300), '5 min.'), (datetime.timedelta(0, 600), '10 min.'), (datetime.timedelta(0, 900), '15 min.'), (datetime.timedelta(0, 1800), '30 min.')], default=datetime.timedelta(0, 60), help_text='This setting will only be used when a browser does not support refresh on focus.', null=True, verbose_name='Refresh rate'),
),
]
|
Add missing migration from 096092b.
|
Add missing migration from 096092b.
|
Python
|
bsd-2-clause
|
cdubz/babybuddy,cdubz/babybuddy,cdubz/babybuddy
|
Add missing migration from 096092b.
|
# Generated by Django 2.0.5 on 2018-07-15 14:16
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('babybuddy', '0002_add_settings'),
]
operations = [
migrations.AlterField(
model_name='settings',
name='dashboard_refresh_rate',
field=models.DurationField(blank=True, choices=[(None, 'disabled'), (datetime.timedelta(0, 60), '1 min.'), (datetime.timedelta(0, 120), '2 min.'), (datetime.timedelta(0, 180), '3 min.'), (datetime.timedelta(0, 240), '4 min.'), (datetime.timedelta(0, 300), '5 min.'), (datetime.timedelta(0, 600), '10 min.'), (datetime.timedelta(0, 900), '15 min.'), (datetime.timedelta(0, 1800), '30 min.')], default=datetime.timedelta(0, 60), help_text='This setting will only be used when a browser does not support refresh on focus.', null=True, verbose_name='Refresh rate'),
),
]
|
<commit_before><commit_msg>Add missing migration from 096092b.<commit_after>
|
# Generated by Django 2.0.5 on 2018-07-15 14:16
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('babybuddy', '0002_add_settings'),
]
operations = [
migrations.AlterField(
model_name='settings',
name='dashboard_refresh_rate',
field=models.DurationField(blank=True, choices=[(None, 'disabled'), (datetime.timedelta(0, 60), '1 min.'), (datetime.timedelta(0, 120), '2 min.'), (datetime.timedelta(0, 180), '3 min.'), (datetime.timedelta(0, 240), '4 min.'), (datetime.timedelta(0, 300), '5 min.'), (datetime.timedelta(0, 600), '10 min.'), (datetime.timedelta(0, 900), '15 min.'), (datetime.timedelta(0, 1800), '30 min.')], default=datetime.timedelta(0, 60), help_text='This setting will only be used when a browser does not support refresh on focus.', null=True, verbose_name='Refresh rate'),
),
]
|
Add missing migration from 096092b.# Generated by Django 2.0.5 on 2018-07-15 14:16
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('babybuddy', '0002_add_settings'),
]
operations = [
migrations.AlterField(
model_name='settings',
name='dashboard_refresh_rate',
field=models.DurationField(blank=True, choices=[(None, 'disabled'), (datetime.timedelta(0, 60), '1 min.'), (datetime.timedelta(0, 120), '2 min.'), (datetime.timedelta(0, 180), '3 min.'), (datetime.timedelta(0, 240), '4 min.'), (datetime.timedelta(0, 300), '5 min.'), (datetime.timedelta(0, 600), '10 min.'), (datetime.timedelta(0, 900), '15 min.'), (datetime.timedelta(0, 1800), '30 min.')], default=datetime.timedelta(0, 60), help_text='This setting will only be used when a browser does not support refresh on focus.', null=True, verbose_name='Refresh rate'),
),
]
|
<commit_before><commit_msg>Add missing migration from 096092b.<commit_after># Generated by Django 2.0.5 on 2018-07-15 14:16
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('babybuddy', '0002_add_settings'),
]
operations = [
migrations.AlterField(
model_name='settings',
name='dashboard_refresh_rate',
field=models.DurationField(blank=True, choices=[(None, 'disabled'), (datetime.timedelta(0, 60), '1 min.'), (datetime.timedelta(0, 120), '2 min.'), (datetime.timedelta(0, 180), '3 min.'), (datetime.timedelta(0, 240), '4 min.'), (datetime.timedelta(0, 300), '5 min.'), (datetime.timedelta(0, 600), '10 min.'), (datetime.timedelta(0, 900), '15 min.'), (datetime.timedelta(0, 1800), '30 min.')], default=datetime.timedelta(0, 60), help_text='This setting will only be used when a browser does not support refresh on focus.', null=True, verbose_name='Refresh rate'),
),
]
|
|
e41b79855e966977c4484efd4ad6a02475833b3e
|
code/ex4.4-tornado_with_asyncio.py
|
code/ex4.4-tornado_with_asyncio.py
|
from tornado.platform.asyncio import AsyncIOMainLoop, to_asyncio_future
from tornado.httpclient import AsyncHTTPClient
import asyncio
import time
URL = 'http://127.0.0.1:8000'
@asyncio.coroutine
def get_greetings():
http_client = AsyncHTTPClient()
response = yield from to_asyncio_future(http_client.fetch(URL))
return response.body.decode('utf-8')
if __name__ == "__main__":
AsyncIOMainLoop().install()
loop = asyncio.get_event_loop()
t1 = time.time()
texts = loop.run_until_complete(get_greetings())
print(time.time() - t1, "seconds passed")
print(texts)
loop.close()
|
Add ex4.4: tornado multiple requests with asyncio integration
|
Add ex4.4: tornado multiple requests with asyncio integration
|
Python
|
mit
|
MA3STR0/PythonAsyncWorkshop
|
Add ex4.4: tornado multiple requests with asyncio integration
|
from tornado.platform.asyncio import AsyncIOMainLoop, to_asyncio_future
from tornado.httpclient import AsyncHTTPClient
import asyncio
import time
URL = 'http://127.0.0.1:8000'
@asyncio.coroutine
def get_greetings():
http_client = AsyncHTTPClient()
response = yield from to_asyncio_future(http_client.fetch(URL))
return response.body.decode('utf-8')
if __name__ == "__main__":
AsyncIOMainLoop().install()
loop = asyncio.get_event_loop()
t1 = time.time()
texts = loop.run_until_complete(get_greetings())
print(time.time() - t1, "seconds passed")
print(texts)
loop.close()
|
<commit_before><commit_msg>Add ex4.4: tornado multiple requests with asyncio integration<commit_after>
|
from tornado.platform.asyncio import AsyncIOMainLoop, to_asyncio_future
from tornado.httpclient import AsyncHTTPClient
import asyncio
import time
URL = 'http://127.0.0.1:8000'
@asyncio.coroutine
def get_greetings():
http_client = AsyncHTTPClient()
response = yield from to_asyncio_future(http_client.fetch(URL))
return response.body.decode('utf-8')
if __name__ == "__main__":
AsyncIOMainLoop().install()
loop = asyncio.get_event_loop()
t1 = time.time()
texts = loop.run_until_complete(get_greetings())
print(time.time() - t1, "seconds passed")
print(texts)
loop.close()
|
Add ex4.4: tornado multiple requests with asyncio integrationfrom tornado.platform.asyncio import AsyncIOMainLoop, to_asyncio_future
from tornado.httpclient import AsyncHTTPClient
import asyncio
import time
URL = 'http://127.0.0.1:8000'
@asyncio.coroutine
def get_greetings():
http_client = AsyncHTTPClient()
response = yield from to_asyncio_future(http_client.fetch(URL))
return response.body.decode('utf-8')
if __name__ == "__main__":
AsyncIOMainLoop().install()
loop = asyncio.get_event_loop()
t1 = time.time()
texts = loop.run_until_complete(get_greetings())
print(time.time() - t1, "seconds passed")
print(texts)
loop.close()
|
<commit_before><commit_msg>Add ex4.4: tornado multiple requests with asyncio integration<commit_after>from tornado.platform.asyncio import AsyncIOMainLoop, to_asyncio_future
from tornado.httpclient import AsyncHTTPClient
import asyncio
import time
URL = 'http://127.0.0.1:8000'
@asyncio.coroutine
def get_greetings():
http_client = AsyncHTTPClient()
response = yield from to_asyncio_future(http_client.fetch(URL))
return response.body.decode('utf-8')
if __name__ == "__main__":
AsyncIOMainLoop().install()
loop = asyncio.get_event_loop()
t1 = time.time()
texts = loop.run_until_complete(get_greetings())
print(time.time() - t1, "seconds passed")
print(texts)
loop.close()
|
|
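The example in this record targets the Tornado 4-era asyncio bridge (AsyncIOMainLoop, to_asyncio_future, @asyncio.coroutine), all of which are deprecated or removed in current releases. Below is a sketch of the same request written against Tornado 5+ on Python 3.7+, where Tornado runs on the asyncio loop natively and fetch() can be awaited directly.

```python
import asyncio
import time

from tornado.httpclient import AsyncHTTPClient

URL = 'http://127.0.0.1:8000'

async def get_greetings():
    http_client = AsyncHTTPClient()
    response = await http_client.fetch(URL)   # Tornado futures are awaitable on 5+
    return response.body.decode('utf-8')

if __name__ == "__main__":
    t1 = time.time()
    text = asyncio.run(get_greetings())       # asyncio.run requires Python 3.7+
    print(time.time() - t1, "seconds passed")
    print(text)
```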
ac482caafe8c63de2606bb4894462f7b2e2bcb70
|
python/printbag.py
|
python/printbag.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convert a rosbag file to legacy lidar binary format.
"""
"""LIDAR datatype format is:
(
timestamp (long),
flag (bool saved as int),
accelerometer[3] (double),
gps[3] (double),
distance[LIDAR_NUM_ANGLES] (long),
)
'int' and 'long' are the same size on the raspberry pi (32 bits).
"""
import sys
import rosbag
def decode_bag(bag):
topics = ['/scan', '/flagbutton_pressed']
return [message for message in bag.read_messages(topics=topics)]
if __name__ == '__main__':
if len(sys.argv) < 2:
print(('Usage: {} <rosbag> [<outfile>] \n\n'
'Print contents of rosbag file. If <outfile> is provided, \n'
'write contents of rosbag file to <outfile> in the legacy \n'
'lidar binary format.').format(__file__))
sys.exit(1)
outfile = None
filename = sys.argv[1]
if len(sys.argv) == 3:
outfile = sys.argv[2]
with rosbag.Bag(filename) as bag:
print(decode_bag(bag))
sys.exit()
|
Add initial script to print rosbag files
|
Add initial script to print rosbag files
|
Python
|
bsd-2-clause
|
oliverlee/antlia
|
Add initial script to print rosbag files
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convert a rosbag file to legacy lidar binary format.
"""
"""LIDAR datatype format is:
(
timestamp (long),
flag (bool saved as int),
accelerometer[3] (double),
gps[3] (double),
distance[LIDAR_NUM_ANGLES] (long),
)
'int' and 'long' are the same size on the raspberry pi (32 bits).
"""
import sys
import rosbag
def decode_bag(bag):
topics = ['/scan', '/flagbutton_pressed']
return [message for message in bag.read_messages(topics=topics)]
if __name__ == '__main__':
if len(sys.argv) < 2:
print(('Usage: {} <rosbag> [<outfile>] \n\n'
'Print contents of rosbag file. If <outfile> is provided, \n'
'write contents of rosbag file to <outfile> in the legacy \n'
'lidar binary format.').format(__file__))
sys.exit(1)
outfile = None
filename = sys.argv[1]
if len(sys.argv) == 3:
outfile = sys.argv[2]
with rosbag.Bag(filename) as bag:
print(decode_bag(bag))
sys.exit()
|
<commit_before><commit_msg>Add initial script to print rosbag files<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convert a rosbag file to legacy lidar binary format.
"""
"""LIDAR datatype format is:
(
timestamp (long),
flag (bool saved as int),
accelerometer[3] (double),
gps[3] (double),
distance[LIDAR_NUM_ANGLES] (long),
)
'int' and 'long' are the same size on the raspberry pi (32 bits).
"""
import sys
import rosbag
def decode_bag(bag):
topics = ['/scan', '/flagbutton_pressed']
return [message for message in bag.read_messages(topics=topics)]
if __name__ == '__main__':
if len(sys.argv) < 2:
print(('Usage: {} <rosbag> [<outfile>] \n\n'
'Print contents of rosbag file. If <outfile> is provided, \n'
'write contents of rosbag file to <outfile> in the legacy \n'
'lidar binary format.').format(__file__))
sys.exit(1)
outfile = None
filename = sys.argv[1]
if len(sys.argv) == 3:
outfile = sys.argv[2]
with rosbag.Bag(filename) as bag:
print(decode_bag(bag))
sys.exit()
|
Add initial script to print rosbag files#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convert a rosbag file to legacy lidar binary format.
"""
"""LIDAR datatype format is:
(
timestamp (long),
flag (bool saved as int),
accelerometer[3] (double),
gps[3] (double),
distance[LIDAR_NUM_ANGLES] (long),
)
'int' and 'long' are the same size on the raspberry pi (32 bits).
"""
import sys
import rosbag
def decode_bag(bag):
topics = ['/scan', '/flagbutton_pressed']
return [message for message in bag.read_messages(topics=topics)]
if __name__ == '__main__':
if len(sys.argv) < 2:
print(('Usage: {} <rosbag> [<outfile>] \n\n'
'Print contents of rosbag file. If <outfile> is provided, \n'
'write contents of rosbag file to <outfile> in the legacy \n'
'lidar binary format.').format(__file__))
sys.exit(1)
outfile = None
filename = sys.argv[1]
if len(sys.argv) == 3:
outfile = sys.argv[2]
with rosbag.Bag(filename) as bag:
print(decode_bag(bag))
sys.exit()
|
<commit_before><commit_msg>Add initial script to print rosbag files<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convert a rosbag file to legacy lidar binary format.
"""
"""LIDAR datatype format is:
(
timestamp (long),
flag (bool saved as int),
accelerometer[3] (double),
gps[3] (double),
distance[LIDAR_NUM_ANGLES] (long),
)
'int' and 'long' are the same size on the raspberry pi (32 bits).
"""
import sys
import rosbag
def decode_bag(bag):
topics = ['/scan', '/flagbutton_pressed']
return [message for message in bag.read_messages(topics=topics)]
if __name__ == '__main__':
if len(sys.argv) < 2:
print(('Usage: {} <rosbag> [<outfile>] \n\n'
'Print contents of rosbag file. If <outfile> is provided, \n'
'write contents of rosbag file to <outfile> in the legacy \n'
'lidar binary format.').format(__file__))
sys.exit(1)
outfile = None
filename = sys.argv[1]
if len(sys.argv) == 3:
outfile = sys.argv[2]
with rosbag.Bag(filename) as bag:
print(decode_bag(bag))
sys.exit()
|
|
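The docstring in this record spells out the legacy binary record layout, but the script itself only prints messages. A sketch of packing one record with the struct module, matching the 32-bit int/long sizes the comment mentions; LIDAR_NUM_ANGLES is not defined in this file, so the value below is only a placeholder.

```python
import struct

LIDAR_NUM_ANGLES = 360  # placeholder; the real constant is not given in this record

# little-endian, standard sizes: long timestamp, int flag, 3 double accelerometer,
# 3 double gps, LIDAR_NUM_ANGLES long distances (l and i are both 32 bits here)
RECORD_FORMAT = '<li3d3d{}l'.format(LIDAR_NUM_ANGLES)

def pack_record(timestamp, flag, accelerometer, gps, distances):
    values = [timestamp, int(flag)] + list(accelerometer) + list(gps) + list(distances)
    return struct.pack(RECORD_FORMAT, *values)
```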
a2e566cc0b925f80c30602141e890cdf9b13306b
|
migrations/versions/1003fd6fc47_.py
|
migrations/versions/1003fd6fc47_.py
|
"""empty message
Revision ID: 1003fd6fc47
Revises: 1a54c4cacbe
Create Date: 2015-03-24 13:33:50.898511
"""
# revision identifiers, used by Alembic.
revision = '1003fd6fc47'
down_revision = '1a54c4cacbe'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('project', sa.Column('git_url', sa.String(length=400), nullable=True))
op.drop_column('project', 'github_url')
op.drop_column('project', 'age')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('project', sa.Column('age', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
op.add_column('project', sa.Column('github_url', sa.VARCHAR(length=400), autoincrement=False, nullable=True))
op.drop_column('project', 'git_url')
### end Alembic commands ###
|
Migrate to latest version of db.
|
Migrate to latest version of db.
|
Python
|
mit
|
PythonClutch/python-clutch,PythonClutch/python-clutch,PythonClutch/python-clutch
|
Migrate to latest version of db.
|
"""empty message
Revision ID: 1003fd6fc47
Revises: 1a54c4cacbe
Create Date: 2015-03-24 13:33:50.898511
"""
# revision identifiers, used by Alembic.
revision = '1003fd6fc47'
down_revision = '1a54c4cacbe'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('project', sa.Column('git_url', sa.String(length=400), nullable=True))
op.drop_column('project', 'github_url')
op.drop_column('project', 'age')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('project', sa.Column('age', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
op.add_column('project', sa.Column('github_url', sa.VARCHAR(length=400), autoincrement=False, nullable=True))
op.drop_column('project', 'git_url')
### end Alembic commands ###
|
<commit_before><commit_msg>Migrate to latest version of db.<commit_after>
|
"""empty message
Revision ID: 1003fd6fc47
Revises: 1a54c4cacbe
Create Date: 2015-03-24 13:33:50.898511
"""
# revision identifiers, used by Alembic.
revision = '1003fd6fc47'
down_revision = '1a54c4cacbe'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('project', sa.Column('git_url', sa.String(length=400), nullable=True))
op.drop_column('project', 'github_url')
op.drop_column('project', 'age')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('project', sa.Column('age', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
op.add_column('project', sa.Column('github_url', sa.VARCHAR(length=400), autoincrement=False, nullable=True))
op.drop_column('project', 'git_url')
### end Alembic commands ###
|
Migrate to latest version of db."""empty message
Revision ID: 1003fd6fc47
Revises: 1a54c4cacbe
Create Date: 2015-03-24 13:33:50.898511
"""
# revision identifiers, used by Alembic.
revision = '1003fd6fc47'
down_revision = '1a54c4cacbe'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('project', sa.Column('git_url', sa.String(length=400), nullable=True))
op.drop_column('project', 'github_url')
op.drop_column('project', 'age')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('project', sa.Column('age', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
op.add_column('project', sa.Column('github_url', sa.VARCHAR(length=400), autoincrement=False, nullable=True))
op.drop_column('project', 'git_url')
### end Alembic commands ###
|
<commit_before><commit_msg>Migrate to latest version of db.<commit_after>"""empty message
Revision ID: 1003fd6fc47
Revises: 1a54c4cacbe
Create Date: 2015-03-24 13:33:50.898511
"""
# revision identifiers, used by Alembic.
revision = '1003fd6fc47'
down_revision = '1a54c4cacbe'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('project', sa.Column('git_url', sa.String(length=400), nullable=True))
op.drop_column('project', 'github_url')
op.drop_column('project', 'age')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('project', sa.Column('age', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
op.add_column('project', sa.Column('github_url', sa.VARCHAR(length=400), autoincrement=False, nullable=True))
op.drop_column('project', 'git_url')
### end Alembic commands ###
|
|
4aced6fea8ff8ccd087362cb237a9f00d111d0d8
|
corehq/apps/commtrack/management/commands/toggle_locations.py
|
corehq/apps/commtrack/management/commands/toggle_locations.py
|
from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
from corehq.feature_previews import LOCATIONS
from corehq.toggles import NAMESPACE_DOMAIN
from toggle.shortcuts import update_toggle_cache, namespaced_item
from toggle.models import Toggle
class Command(BaseCommand):
def handle(self, *args, **options):
domains = Domain.get_all()
for domain in domains:
if domain.commtrack_enabled:
toggle = Toggle.get(LOCATIONS.slug)
toggle_user_key = namespaced_item(domain.name, NAMESPACE_DOMAIN)
if toggle_user_key not in toggle.enabled_users:
toggle.enabled_users.append(toggle_user_key)
toggle.save()
update_toggle_cache(LOCATIONS.slug, toggle_user_key, True)
if not domain.locations_enabled:
domain.locations_enabled = True
domain.save()
|
Add command to turn on locations flag
|
Add command to turn on locations flag
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq
|
Add command to turn on locations flag
|
from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
from corehq.feature_previews import LOCATIONS
from corehq.toggles import NAMESPACE_DOMAIN
from toggle.shortcuts import update_toggle_cache, namespaced_item
from toggle.models import Toggle
class Command(BaseCommand):
def handle(self, *args, **options):
domains = Domain.get_all()
for domain in domains:
if domain.commtrack_enabled:
toggle = Toggle.get(LOCATIONS.slug)
toggle_user_key = namespaced_item(domain.name, NAMESPACE_DOMAIN)
if toggle_user_key not in toggle.enabled_users:
toggle.enabled_users.append(toggle_user_key)
toggle.save()
update_toggle_cache(LOCATIONS.slug, toggle_user_key, True)
if not domain.locations_enabled:
domain.locations_enabled = True
domain.save()
|
<commit_before><commit_msg>Add command to turn on locations flag<commit_after>
|
from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
from corehq.feature_previews import LOCATIONS
from corehq.toggles import NAMESPACE_DOMAIN
from toggle.shortcuts import update_toggle_cache, namespaced_item
from toggle.models import Toggle
class Command(BaseCommand):
def handle(self, *args, **options):
domains = Domain.get_all()
for domain in domains:
if domain.commtrack_enabled:
toggle = Toggle.get(LOCATIONS.slug)
toggle_user_key = namespaced_item(domain.name, NAMESPACE_DOMAIN)
if toggle_user_key not in toggle.enabled_users:
toggle.enabled_users.append(toggle_user_key)
toggle.save()
update_toggle_cache(LOCATIONS.slug, toggle_user_key, True)
if not domain.locations_enabled:
domain.locations_enabled = True
domain.save()
|
Add command to turn on locations flagfrom django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
from corehq.feature_previews import LOCATIONS
from corehq.toggles import NAMESPACE_DOMAIN
from toggle.shortcuts import update_toggle_cache, namespaced_item
from toggle.models import Toggle
class Command(BaseCommand):
def handle(self, *args, **options):
domains = Domain.get_all()
for domain in domains:
if domain.commtrack_enabled:
toggle = Toggle.get(LOCATIONS.slug)
toggle_user_key = namespaced_item(domain.name, NAMESPACE_DOMAIN)
if toggle_user_key not in toggle.enabled_users:
toggle.enabled_users.append(toggle_user_key)
toggle.save()
update_toggle_cache(LOCATIONS.slug, toggle_user_key, True)
if not domain.locations_enabled:
domain.locations_enabled = True
domain.save()
|
<commit_before><commit_msg>Add command to turn on locations flag<commit_after>from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
from corehq.feature_previews import LOCATIONS
from corehq.toggles import NAMESPACE_DOMAIN
from toggle.shortcuts import update_toggle_cache, namespaced_item
from toggle.models import Toggle
class Command(BaseCommand):
def handle(self, *args, **options):
domains = Domain.get_all()
for domain in domains:
if domain.commtrack_enabled:
toggle = Toggle.get(LOCATIONS.slug)
toggle_user_key = namespaced_item(domain.name, NAMESPACE_DOMAIN)
if toggle_user_key not in toggle.enabled_users:
toggle.enabled_users.append(toggle_user_key)
toggle.save()
update_toggle_cache(LOCATIONS.slug, toggle_user_key, True)
if not domain.locations_enabled:
domain.locations_enabled = True
domain.save()
|
|
1eaab9f929dc748e57865fb4c8717158e6c47fa5
|
ureport/stats/migrations/0018_better_indexes.py
|
ureport/stats/migrations/0018_better_indexes.py
|
# Generated by Django 3.2.6 on 2021-10-13 12:37
from django.db import migrations
# language=SQL
INDEX_SQL_CONTACTACTIVITY_ORG_DATE_SCHEME_NOT_NULL = """
CREATE INDEX IF NOT EXISTS stats_contactactivity_org_id_date_scheme_not_null on stats_contactactivity (org_id, date, scheme) WHERE scheme IS NOT NULL;
"""
class Migration(migrations.Migration):
dependencies = [
("stats", "0017_better_indexes"),
]
operations = [
migrations.RunSQL(INDEX_SQL_CONTACTACTIVITY_ORG_DATE_SCHEME_NOT_NULL, ""),
]
|
Add more index on contact activities
|
Add more index on contact activities
|
Python
|
agpl-3.0
|
rapidpro/ureport,Ilhasoft/ureport,Ilhasoft/ureport,rapidpro/ureport,rapidpro/ureport,Ilhasoft/ureport,Ilhasoft/ureport,rapidpro/ureport
|
Add more index on contact activities
|
# Generated by Django 3.2.6 on 2021-10-13 12:37
from django.db import migrations
# language=SQL
INDEX_SQL_CONTACTACTIVITY_ORG_DATE_SCHEME_NOT_NULL = """
CREATE INDEX IF NOT EXISTS stats_contactactivity_org_id_date_scheme_not_null on stats_contactactivity (org_id, date, scheme) WHERE scheme IS NOT NULL;
"""
class Migration(migrations.Migration):
dependencies = [
("stats", "0017_better_indexes"),
]
operations = [
migrations.RunSQL(INDEX_SQL_CONTACTACTIVITY_ORG_DATE_SCHEME_NOT_NULL, ""),
]
|
<commit_before><commit_msg>Add more index on contact activities<commit_after>
|
# Generated by Django 3.2.6 on 2021-10-13 12:37
from django.db import migrations
# language=SQL
INDEX_SQL_CONTACTACTIVITY_ORG_DATE_SCHEME_NOT_NULL = """
CREATE INDEX IF NOT EXISTS stats_contactactivity_org_id_date_scheme_not_null on stats_contactactivity (org_id, date, scheme) WHERE scheme IS NOT NULL;
"""
class Migration(migrations.Migration):
dependencies = [
("stats", "0017_better_indexes"),
]
operations = [
migrations.RunSQL(INDEX_SQL_CONTACTACTIVITY_ORG_DATE_SCHEME_NOT_NULL, ""),
]
|
Add more index on contact activities# Generated by Django 3.2.6 on 2021-10-13 12:37
from django.db import migrations
# language=SQL
INDEX_SQL_CONTACTACTIVITY_ORG_DATE_SCHEME_NOT_NULL = """
CREATE INDEX IF NOT EXISTS stats_contactactivity_org_id_date_scheme_not_null on stats_contactactivity (org_id, date, scheme) WHERE scheme IS NOT NULL;
"""
class Migration(migrations.Migration):
dependencies = [
("stats", "0017_better_indexes"),
]
operations = [
migrations.RunSQL(INDEX_SQL_CONTACTACTIVITY_ORG_DATE_SCHEME_NOT_NULL, ""),
]
|
<commit_before><commit_msg>Add more index on contact activities<commit_after># Generated by Django 3.2.6 on 2021-10-13 12:37
from django.db import migrations
# language=SQL
INDEX_SQL_CONTACTACTIVITY_ORG_DATE_SCHEME_NOT_NULL = """
CREATE INDEX IF NOT EXISTS stats_contactactivity_org_id_date_scheme_not_null on stats_contactactivity (org_id, date, scheme) WHERE scheme IS NOT NULL;
"""
class Migration(migrations.Migration):
dependencies = [
("stats", "0017_better_indexes"),
]
operations = [
migrations.RunSQL(INDEX_SQL_CONTACTACTIVITY_ORG_DATE_SCHEME_NOT_NULL, ""),
]
|
|
95d87c541ebf82109b882daebcb5b387f0f1cdb8
|
exp/influence2/ReputationExp2.py
|
exp/influence2/ReputationExp2.py
|
import numpy
try:
ctypes.cdll.LoadLibrary("/usr/local/lib/libigraph.so")
except:
pass
import igraph
from apgl.util.PathDefaults import PathDefaults
from exp.util.IdIndexer import IdIndexer
import xml.etree.ElementTree as ET
import array
metadataDir = PathDefaults.getDataDir() + "aps/aps-dataset-metadata-2010/"
metadataFilename = metadataDir + "PRSTAB.xml"
citationsDir = PathDefaults.getDataDir() + "aps/aps-dataset-citations-2010/"
citatonsFilename = citationsDir + "citing_cited.csv"
tree = ET.parse(metadataFilename)
root = tree.getroot()
authorIndexer = IdIndexer("i")
articleIndexer = IdIndexer("i")
for child in root:
authorGroups = child.findall('authgrp')
for authorGroup in authorGroups:
authors = authorGroup.findall("author")
for author in authors:
if author.find("givenname") != None:
fullname = author.find("givenname").text
else:
fullname = ""
for middlename in author.findall("middlename"):
fullname += " " + middlename.text
fullname += " " + author.find("surname").text
authorId = fullname
articleId = child.attrib["doi"]
authorIndexer.append(authorId)
articleIndexer.append(articleId)
authorInds = authorIndexer.getArray()
articleInds = articleIndexer.getArray()
#We now need to read the citations file and add those edges
article1Inds = array.array("i")
article2Inds = array.array("i")
citationsFile = open(citatonsFilename)
citationsFile.readline()
for line in citationsFile:
vals = line.split(",")
articleId1 = vals[0].strip()
articleId2 = vals[1].strip()
#print(articleId1, articleId2)
articleIdDict = articleIndexer.getIdDict()
if articleId1 in articleIdDict and articleId2 in articleIdDict:
article1Inds.append(articleIdDict[articleId1])
article2Inds.append(articleIdDict[articleId2])
article1Inds = numpy.array(article1Inds)
article2Inds = numpy.array(article2Inds)
authorArticleEdges = numpy.c_[authorInds, articleInds]
print(authorArticleEdges)
articleArticleEdges = numpy.c_[article1Inds, article2Inds]
print(articleArticleEdges)
print(articleArticleEdges.shape)
graph = igraph.Graph()
graph.add_vertices(numpy.max(authorInds) + numpy.max(articleInds))
graph.add_edges(authorArticleEdges)
print(graph.summary())
|
Read the american physics society graph
|
Read the american physics society graph
|
Python
|
bsd-3-clause
|
charanpald/APGL
|
Read the american physics society graph
|
import numpy
try:
ctypes.cdll.LoadLibrary("/usr/local/lib/libigraph.so")
except:
pass
import igraph
from apgl.util.PathDefaults import PathDefaults
from exp.util.IdIndexer import IdIndexer
import xml.etree.ElementTree as ET
import array
metadataDir = PathDefaults.getDataDir() + "aps/aps-dataset-metadata-2010/"
metadataFilename = metadataDir + "PRSTAB.xml"
citationsDir = PathDefaults.getDataDir() + "aps/aps-dataset-citations-2010/"
citatonsFilename = citationsDir + "citing_cited.csv"
tree = ET.parse(metadataFilename)
root = tree.getroot()
authorIndexer = IdIndexer("i")
articleIndexer = IdIndexer("i")
for child in root:
authorGroups = child.findall('authgrp')
for authorGroup in authorGroups:
authors = authorGroup.findall("author")
for author in authors:
if author.find("givenname") != None:
fullname = author.find("givenname").text
else:
fullname = ""
for middlename in author.findall("middlename"):
fullname += " " + middlename.text
fullname += " " + author.find("surname").text
authorId = fullname
articleId = child.attrib["doi"]
authorIndexer.append(authorId)
articleIndexer.append(articleId)
authorInds = authorIndexer.getArray()
articleInds = articleIndexer.getArray()
#We now need to read the citations file and add those edges
article1Inds = array.array("i")
article2Inds = array.array("i")
citationsFile = open(citatonsFilename)
citationsFile.readline()
for line in citationsFile:
vals = line.split(",")
articleId1 = vals[0].strip()
articleId2 = vals[1].strip()
#print(articleId1, articleId2)
articleIdDict = articleIndexer.getIdDict()
if articleId1 in articleIdDict and articleId2 in articleIdDict:
article1Inds.append(articleIdDict[articleId1])
article2Inds.append(articleIdDict[articleId2])
article1Inds = numpy.array(article1Inds)
article2Inds = numpy.array(article2Inds)
authorArticleEdges = numpy.c_[authorInds, articleInds]
print(authorArticleEdges)
articleArticleEdges = numpy.c_[article1Inds, article2Inds]
print(articleArticleEdges)
print(articleArticleEdges.shape)
graph = igraph.Graph()
graph.add_vertices(numpy.max(authorInds) + numpy.max(articleInds))
graph.add_edges(authorArticleEdges)
print(graph.summary())
|
<commit_before><commit_msg>Read the american physics society graph <commit_after>
|
import numpy
try:
ctypes.cdll.LoadLibrary("/usr/local/lib/libigraph.so")
except:
pass
import igraph
from apgl.util.PathDefaults import PathDefaults
from exp.util.IdIndexer import IdIndexer
import xml.etree.ElementTree as ET
import array
metadataDir = PathDefaults.getDataDir() + "aps/aps-dataset-metadata-2010/"
metadataFilename = metadataDir + "PRSTAB.xml"
citationsDir = PathDefaults.getDataDir() + "aps/aps-dataset-citations-2010/"
citatonsFilename = citationsDir + "citing_cited.csv"
tree = ET.parse(metadataFilename)
root = tree.getroot()
authorIndexer = IdIndexer("i")
articleIndexer = IdIndexer("i")
for child in root:
authorGroups = child.findall('authgrp')
for authorGroup in authorGroups:
authors = authorGroup.findall("author")
for author in authors:
if author.find("givenname") != None:
fullname = author.find("givenname").text
else:
fullname = ""
for middlename in author.findall("middlename"):
fullname += " " + middlename.text
fullname += " " + author.find("surname").text
authorId = fullname
articleId = child.attrib["doi"]
authorIndexer.append(authorId)
articleIndexer.append(articleId)
authorInds = authorIndexer.getArray()
articleInds = articleIndexer.getArray()
#We now need to read the citations file and add those edges
article1Inds = array.array("i")
article2Inds = array.array("i")
citationsFile = open(citatonsFilename)
citationsFile.readline()
for line in citationsFile:
vals = line.split(",")
articleId1 = vals[0].strip()
articleId2 = vals[1].strip()
#print(articleId1, articleId2)
articleIdDict = articleIndexer.getIdDict()
if articleId1 in articleIdDict and articleId2 in articleIdDict:
article1Inds.append(articleIdDict[articleId1])
article2Inds.append(articleIdDict[articleId2])
article1Inds = numpy.array(article1Inds)
article2Inds = numpy.array(article2Inds)
authorArticleEdges = numpy.c_[authorInds, articleInds]
print(authorArticleEdges)
articleArticleEdges = numpy.c_[article1Inds, article2Inds]
print(articleArticleEdges)
print(articleArticleEdges.shape)
graph = igraph.Graph()
graph.add_vertices(numpy.max(authorInds) + numpy.max(articleInds))
graph.add_edges(authorArticleEdges)
print(graph.summary())
|
Read the american physics society graph import numpy
try:
ctypes.cdll.LoadLibrary("/usr/local/lib/libigraph.so")
except:
pass
import igraph
from apgl.util.PathDefaults import PathDefaults
from exp.util.IdIndexer import IdIndexer
import xml.etree.ElementTree as ET
import array
metadataDir = PathDefaults.getDataDir() + "aps/aps-dataset-metadata-2010/"
metadataFilename = metadataDir + "PRSTAB.xml"
citationsDir = PathDefaults.getDataDir() + "aps/aps-dataset-citations-2010/"
citatonsFilename = citationsDir + "citing_cited.csv"
tree = ET.parse(metadataFilename)
root = tree.getroot()
authorIndexer = IdIndexer("i")
articleIndexer = IdIndexer("i")
for child in root:
authorGroups = child.findall('authgrp')
for authorGroup in authorGroups:
authors = authorGroup.findall("author")
for author in authors:
if author.find("givenname") != None:
fullname = author.find("givenname").text
else:
fullname = ""
for middlename in author.findall("middlename"):
fullname += " " + middlename.text
fullname += " " + author.find("surname").text
authorId = fullname
articleId = child.attrib["doi"]
authorIndexer.append(authorId)
articleIndexer.append(articleId)
authorInds = authorIndexer.getArray()
articleInds = articleIndexer.getArray()
#We now need to read the citations file and add those edges
article1Inds = array.array("i")
article2Inds = array.array("i")
citationsFile = open(citatonsFilename)
citationsFile.readline()
for line in citationsFile:
vals = line.split(",")
articleId1 = vals[0].strip()
articleId2 = vals[1].strip()
#print(articleId1, articleId2)
articleIdDict = articleIndexer.getIdDict()
if articleId1 in articleIdDict and articleId2 in articleIdDict:
article1Inds.append(articleIdDict[articleId1])
article2Inds.append(articleIdDict[articleId2])
article1Inds = numpy.array(article1Inds)
article2Inds = numpy.array(article2Inds)
authorArticleEdges = numpy.c_[authorInds, articleInds]
print(authorArticleEdges)
articleArticleEdges = numpy.c_[article1Inds, article2Inds]
print(articleArticleEdges)
print(articleArticleEdges.shape)
graph = igraph.Graph()
graph.add_vertices(numpy.max(authorInds) + numpy.max(articleInds))
graph.add_edges(authorArticleEdges)
print(graph.summary())
|
<commit_before><commit_msg>Read the american physics society graph <commit_after>import numpy
try:
ctypes.cdll.LoadLibrary("/usr/local/lib/libigraph.so")
except:
pass
import igraph
from apgl.util.PathDefaults import PathDefaults
from exp.util.IdIndexer import IdIndexer
import xml.etree.ElementTree as ET
import array
metadataDir = PathDefaults.getDataDir() + "aps/aps-dataset-metadata-2010/"
metadataFilename = metadataDir + "PRSTAB.xml"
citationsDir = PathDefaults.getDataDir() + "aps/aps-dataset-citations-2010/"
citatonsFilename = citationsDir + "citing_cited.csv"
tree = ET.parse(metadataFilename)
root = tree.getroot()
authorIndexer = IdIndexer("i")
articleIndexer = IdIndexer("i")
for child in root:
authorGroups = child.findall('authgrp')
for authorGroup in authorGroups:
authors = authorGroup.findall("author")
for author in authors:
if author.find("givenname") != None:
fullname = author.find("givenname").text
else:
fullname = ""
for middlename in author.findall("middlename"):
fullname += " " + middlename.text
fullname += " " + author.find("surname").text
authorId = fullname
articleId = child.attrib["doi"]
authorIndexer.append(authorId)
articleIndexer.append(articleId)
authorInds = authorIndexer.getArray()
articleInds = articleIndexer.getArray()
#We now need to read the citations file and add those edges
article1Inds = array.array("i")
article2Inds = array.array("i")
citationsFile = open(citatonsFilename)
citationsFile.readline()
for line in citationsFile:
vals = line.split(",")
articleId1 = vals[0].strip()
articleId2 = vals[1].strip()
#print(articleId1, articleId2)
articleIdDict = articleIndexer.getIdDict()
if articleId1 in articleIdDict and articleId2 in articleIdDict:
article1Inds.append(articleIdDict[articleId1])
article2Inds.append(articleIdDict[articleId2])
article1Inds = numpy.array(article1Inds)
article2Inds = numpy.array(article2Inds)
authorArticleEdges = numpy.c_[authorInds, articleInds]
print(authorArticleEdges)
articleArticleEdges = numpy.c_[article1Inds, article2Inds]
print(articleArticleEdges)
print(articleArticleEdges.shape)
graph = igraph.Graph()
graph.add_vertices(numpy.max(authorInds) + numpy.max(articleInds))
graph.add_edges(authorArticleEdges)
print(graph.summary())
|
|
e2020af5ccd41f8571a2d0db4f5345ca9a8b561e
|
gmn/src/d1_gmn/app/migrations/0010_auto_20170805_0107.py
|
gmn/src/d1_gmn/app/migrations/0010_auto_20170805_0107.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-05 01:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0009_auto_20170603_0546'),
]
operations = [
migrations.CreateModel(
name='Chain',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('head_pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='chain_head_pid', to='app.IdNamespace')),
('sid', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='chain_sid', to='app.IdNamespace')),
],
),
migrations.CreateModel(
name='ChainMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('chain', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Chain')),
('pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='chainmember_pid', to='app.IdNamespace')),
],
),
migrations.RemoveField(
model_name='chainidtoseriesid',
name='head_pid',
),
migrations.RemoveField(
model_name='chainidtoseriesid',
name='sid',
),
migrations.RemoveField(
model_name='persistentidtochainid',
name='chain',
),
migrations.RemoveField(
model_name='persistentidtochainid',
name='pid',
),
migrations.DeleteModel(
name='ChainIdToSeriesID',
),
migrations.DeleteModel(
name='PersistentIdToChainID',
),
]
|
Add migration for db changes
|
Add migration for db changes
|
Python
|
apache-2.0
|
DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python
|
Add migration for db changes
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-05 01:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0009_auto_20170603_0546'),
]
operations = [
migrations.CreateModel(
name='Chain',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('head_pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='chain_head_pid', to='app.IdNamespace')),
('sid', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='chain_sid', to='app.IdNamespace')),
],
),
migrations.CreateModel(
name='ChainMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('chain', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Chain')),
('pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='chainmember_pid', to='app.IdNamespace')),
],
),
migrations.RemoveField(
model_name='chainidtoseriesid',
name='head_pid',
),
migrations.RemoveField(
model_name='chainidtoseriesid',
name='sid',
),
migrations.RemoveField(
model_name='persistentidtochainid',
name='chain',
),
migrations.RemoveField(
model_name='persistentidtochainid',
name='pid',
),
migrations.DeleteModel(
name='ChainIdToSeriesID',
),
migrations.DeleteModel(
name='PersistentIdToChainID',
),
]
|
<commit_before><commit_msg>Add migration for db changes<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-05 01:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0009_auto_20170603_0546'),
]
operations = [
migrations.CreateModel(
name='Chain',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('head_pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='chain_head_pid', to='app.IdNamespace')),
('sid', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='chain_sid', to='app.IdNamespace')),
],
),
migrations.CreateModel(
name='ChainMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('chain', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Chain')),
('pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='chainmember_pid', to='app.IdNamespace')),
],
),
migrations.RemoveField(
model_name='chainidtoseriesid',
name='head_pid',
),
migrations.RemoveField(
model_name='chainidtoseriesid',
name='sid',
),
migrations.RemoveField(
model_name='persistentidtochainid',
name='chain',
),
migrations.RemoveField(
model_name='persistentidtochainid',
name='pid',
),
migrations.DeleteModel(
name='ChainIdToSeriesID',
),
migrations.DeleteModel(
name='PersistentIdToChainID',
),
]
|
Add migration for db changes# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-05 01:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0009_auto_20170603_0546'),
]
operations = [
migrations.CreateModel(
name='Chain',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('head_pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='chain_head_pid', to='app.IdNamespace')),
('sid', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='chain_sid', to='app.IdNamespace')),
],
),
migrations.CreateModel(
name='ChainMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('chain', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Chain')),
('pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='chainmember_pid', to='app.IdNamespace')),
],
),
migrations.RemoveField(
model_name='chainidtoseriesid',
name='head_pid',
),
migrations.RemoveField(
model_name='chainidtoseriesid',
name='sid',
),
migrations.RemoveField(
model_name='persistentidtochainid',
name='chain',
),
migrations.RemoveField(
model_name='persistentidtochainid',
name='pid',
),
migrations.DeleteModel(
name='ChainIdToSeriesID',
),
migrations.DeleteModel(
name='PersistentIdToChainID',
),
]
|
<commit_before><commit_msg>Add migration for db changes<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-05 01:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0009_auto_20170603_0546'),
]
operations = [
migrations.CreateModel(
name='Chain',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('head_pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='chain_head_pid', to='app.IdNamespace')),
('sid', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='chain_sid', to='app.IdNamespace')),
],
),
migrations.CreateModel(
name='ChainMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('chain', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Chain')),
('pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='chainmember_pid', to='app.IdNamespace')),
],
),
migrations.RemoveField(
model_name='chainidtoseriesid',
name='head_pid',
),
migrations.RemoveField(
model_name='chainidtoseriesid',
name='sid',
),
migrations.RemoveField(
model_name='persistentidtochainid',
name='chain',
),
migrations.RemoveField(
model_name='persistentidtochainid',
name='pid',
),
migrations.DeleteModel(
name='ChainIdToSeriesID',
),
migrations.DeleteModel(
name='PersistentIdToChainID',
),
]
|
|
a0cd167b9f19e2a4a9d1f2a80bc3586cce15c6ab
|
gmn/src/d1_gmn/app/migrations/0019_auto_20190418_1512.py
|
gmn/src/d1_gmn/app/migrations/0019_auto_20190418_1512.py
|
# Generated by Django 2.2 on 2019-04-18 20:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0018_auto_20180901_0115'),
]
operations = [
migrations.AlterModelOptions(
name='eventlog',
options={},
),
migrations.AlterModelOptions(
name='scienceobject',
options={},
),
]
|
Add GMN DB migration to current
|
Add GMN DB migration to current
|
Python
|
apache-2.0
|
DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python
|
Add GMN DB migration to current
|
# Generated by Django 2.2 on 2019-04-18 20:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0018_auto_20180901_0115'),
]
operations = [
migrations.AlterModelOptions(
name='eventlog',
options={},
),
migrations.AlterModelOptions(
name='scienceobject',
options={},
),
]
|
<commit_before><commit_msg>Add GMN DB migration to current<commit_after>
|
# Generated by Django 2.2 on 2019-04-18 20:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0018_auto_20180901_0115'),
]
operations = [
migrations.AlterModelOptions(
name='eventlog',
options={},
),
migrations.AlterModelOptions(
name='scienceobject',
options={},
),
]
|
Add GMN DB migration to current# Generated by Django 2.2 on 2019-04-18 20:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0018_auto_20180901_0115'),
]
operations = [
migrations.AlterModelOptions(
name='eventlog',
options={},
),
migrations.AlterModelOptions(
name='scienceobject',
options={},
),
]
|
<commit_before><commit_msg>Add GMN DB migration to current<commit_after># Generated by Django 2.2 on 2019-04-18 20:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0018_auto_20180901_0115'),
]
operations = [
migrations.AlterModelOptions(
name='eventlog',
options={},
),
migrations.AlterModelOptions(
name='scienceobject',
options={},
),
]
|
|
8b467efd1f998d05da0272a284773501f0b330ff
|
djangae/tests/test_meta_queries.py
|
djangae/tests/test_meta_queries.py
|
from django.db import models
from djangae.test import TestCase
from djangae.contrib import sleuth
class MetaQueryTestModel(models.Model):
field1 = models.CharField(max_length=32)
class PrimaryKeyFilterTests(TestCase):
def test_pk_in_with_slicing(self):
i1 = MetaQueryTestModel.objects.create();
self.assertFalse(
MetaQueryTestModel.objects.filter(pk__in=[i1.pk])[9999:]
)
self.assertFalse(
MetaQueryTestModel.objects.filter(pk__in=[i1.pk])[9999:10000]
)
def test_limit_correctly_applied_per_branch(self):
MetaQueryTestModel.objects.create(field1="test")
MetaQueryTestModel.objects.create(field1="test2")
with sleuth.watch('google.appengine.api.datastore.Query.Run') as run_calls:
list(MetaQueryTestModel.objects.filter(field1__in=["test", "test2"])[:1])
self.assertEqual(1, run_calls.calls[0].kwargs['limit'])
self.assertEqual(1, run_calls.calls[1].kwargs['limit'])
with sleuth.watch('google.appengine.api.datastore.Query.Run') as run_calls:
list(MetaQueryTestModel.objects.filter(field1__in=["test", "test2"])[1:2])
self.assertEqual(2, run_calls.calls[0].kwargs['limit'])
self.assertEqual(2, run_calls.calls[1].kwargs['limit'])
|
Add a test file which was missing from a recent branch
|
Add a test file which was missing from a recent branch
|
Python
|
bsd-3-clause
|
grzes/djangae,grzes/djangae,potatolondon/djangae,grzes/djangae,potatolondon/djangae
|
Add a test file which was missing from a recent branch
|
from django.db import models
from djangae.test import TestCase
from djangae.contrib import sleuth
class MetaQueryTestModel(models.Model):
field1 = models.CharField(max_length=32)
class PrimaryKeyFilterTests(TestCase):
def test_pk_in_with_slicing(self):
i1 = MetaQueryTestModel.objects.create();
self.assertFalse(
MetaQueryTestModel.objects.filter(pk__in=[i1.pk])[9999:]
)
self.assertFalse(
MetaQueryTestModel.objects.filter(pk__in=[i1.pk])[9999:10000]
)
def test_limit_correctly_applied_per_branch(self):
MetaQueryTestModel.objects.create(field1="test")
MetaQueryTestModel.objects.create(field1="test2")
with sleuth.watch('google.appengine.api.datastore.Query.Run') as run_calls:
list(MetaQueryTestModel.objects.filter(field1__in=["test", "test2"])[:1])
self.assertEqual(1, run_calls.calls[0].kwargs['limit'])
self.assertEqual(1, run_calls.calls[1].kwargs['limit'])
with sleuth.watch('google.appengine.api.datastore.Query.Run') as run_calls:
list(MetaQueryTestModel.objects.filter(field1__in=["test", "test2"])[1:2])
self.assertEqual(2, run_calls.calls[0].kwargs['limit'])
self.assertEqual(2, run_calls.calls[1].kwargs['limit'])
|
<commit_before><commit_msg>Add a test file which was missing from a recent branch<commit_after>
|
from django.db import models
from djangae.test import TestCase
from djangae.contrib import sleuth
class MetaQueryTestModel(models.Model):
field1 = models.CharField(max_length=32)
class PrimaryKeyFilterTests(TestCase):
def test_pk_in_with_slicing(self):
i1 = MetaQueryTestModel.objects.create();
self.assertFalse(
MetaQueryTestModel.objects.filter(pk__in=[i1.pk])[9999:]
)
self.assertFalse(
MetaQueryTestModel.objects.filter(pk__in=[i1.pk])[9999:10000]
)
def test_limit_correctly_applied_per_branch(self):
MetaQueryTestModel.objects.create(field1="test")
MetaQueryTestModel.objects.create(field1="test2")
with sleuth.watch('google.appengine.api.datastore.Query.Run') as run_calls:
list(MetaQueryTestModel.objects.filter(field1__in=["test", "test2"])[:1])
self.assertEqual(1, run_calls.calls[0].kwargs['limit'])
self.assertEqual(1, run_calls.calls[1].kwargs['limit'])
with sleuth.watch('google.appengine.api.datastore.Query.Run') as run_calls:
list(MetaQueryTestModel.objects.filter(field1__in=["test", "test2"])[1:2])
self.assertEqual(2, run_calls.calls[0].kwargs['limit'])
self.assertEqual(2, run_calls.calls[1].kwargs['limit'])
|
Add a test file which was missing from a recent branchfrom django.db import models
from djangae.test import TestCase
from djangae.contrib import sleuth
class MetaQueryTestModel(models.Model):
field1 = models.CharField(max_length=32)
class PrimaryKeyFilterTests(TestCase):
def test_pk_in_with_slicing(self):
i1 = MetaQueryTestModel.objects.create();
self.assertFalse(
MetaQueryTestModel.objects.filter(pk__in=[i1.pk])[9999:]
)
self.assertFalse(
MetaQueryTestModel.objects.filter(pk__in=[i1.pk])[9999:10000]
)
def test_limit_correctly_applied_per_branch(self):
MetaQueryTestModel.objects.create(field1="test")
MetaQueryTestModel.objects.create(field1="test2")
with sleuth.watch('google.appengine.api.datastore.Query.Run') as run_calls:
list(MetaQueryTestModel.objects.filter(field1__in=["test", "test2"])[:1])
self.assertEqual(1, run_calls.calls[0].kwargs['limit'])
self.assertEqual(1, run_calls.calls[1].kwargs['limit'])
with sleuth.watch('google.appengine.api.datastore.Query.Run') as run_calls:
list(MetaQueryTestModel.objects.filter(field1__in=["test", "test2"])[1:2])
self.assertEqual(2, run_calls.calls[0].kwargs['limit'])
self.assertEqual(2, run_calls.calls[1].kwargs['limit'])
|
<commit_before><commit_msg>Add a test file which was missing from a recent branch<commit_after>from django.db import models
from djangae.test import TestCase
from djangae.contrib import sleuth
class MetaQueryTestModel(models.Model):
field1 = models.CharField(max_length=32)
class PrimaryKeyFilterTests(TestCase):
def test_pk_in_with_slicing(self):
i1 = MetaQueryTestModel.objects.create();
self.assertFalse(
MetaQueryTestModel.objects.filter(pk__in=[i1.pk])[9999:]
)
self.assertFalse(
MetaQueryTestModel.objects.filter(pk__in=[i1.pk])[9999:10000]
)
def test_limit_correctly_applied_per_branch(self):
MetaQueryTestModel.objects.create(field1="test")
MetaQueryTestModel.objects.create(field1="test2")
with sleuth.watch('google.appengine.api.datastore.Query.Run') as run_calls:
list(MetaQueryTestModel.objects.filter(field1__in=["test", "test2"])[:1])
self.assertEqual(1, run_calls.calls[0].kwargs['limit'])
self.assertEqual(1, run_calls.calls[1].kwargs['limit'])
with sleuth.watch('google.appengine.api.datastore.Query.Run') as run_calls:
list(MetaQueryTestModel.objects.filter(field1__in=["test", "test2"])[1:2])
self.assertEqual(2, run_calls.calls[0].kwargs['limit'])
self.assertEqual(2, run_calls.calls[1].kwargs['limit'])
|
|
615247c28d58fbbff40f5e4122441d77acb19003
|
notification/urls.py
|
notification/urls.py
|
from django.conf.urls import url
from link.views import LinkView, LinkReactionView, LinkCommentView
urlpatterns = [
url(r'^$', LinkView.new, name='link_new'),
url(r'^(?P<post_id>[0-9]+)/add/$', LinkView.add, name='link_add'),
url(r'^(?P<post_id>[0-9]+)/react/$', LinkReactionView.react, name='link_react'),
url(r'^(?P<post_id>[0-9]+)/unreact/$', LinkReactionView.unreact, name='link_unreact'),
url(r'^(?P<post_id>[0-9]+)/comment/$', LinkCommentView.comment, name='link_comment'),
]
|
Integrate notification app in settings and add basic structure of files
|
Integrate notification app in settings and add basic structure of files
|
Python
|
agpl-3.0
|
Fleeg/fleeg-platform,Fleeg/fleeg-platform
|
Integrate notification app in settings and add basic structure of files
|
from django.conf.urls import url
from link.views import LinkView, LinkReactionView, LinkCommentView
urlpatterns = [
url(r'^$', LinkView.new, name='link_new'),
url(r'^(?P<post_id>[0-9]+)/add/$', LinkView.add, name='link_add'),
url(r'^(?P<post_id>[0-9]+)/react/$', LinkReactionView.react, name='link_react'),
url(r'^(?P<post_id>[0-9]+)/unreact/$', LinkReactionView.unreact, name='link_unreact'),
url(r'^(?P<post_id>[0-9]+)/comment/$', LinkCommentView.comment, name='link_comment'),
]
|
<commit_before><commit_msg>Integrate notification app in settings and add basic structure of files<commit_after>
|
from django.conf.urls import url
from link.views import LinkView, LinkReactionView, LinkCommentView
urlpatterns = [
url(r'^$', LinkView.new, name='link_new'),
url(r'^(?P<post_id>[0-9]+)/add/$', LinkView.add, name='link_add'),
url(r'^(?P<post_id>[0-9]+)/react/$', LinkReactionView.react, name='link_react'),
url(r'^(?P<post_id>[0-9]+)/unreact/$', LinkReactionView.unreact, name='link_unreact'),
url(r'^(?P<post_id>[0-9]+)/comment/$', LinkCommentView.comment, name='link_comment'),
]
|
Integrate notification app in settings and add basic structure of filesfrom django.conf.urls import url
from link.views import LinkView, LinkReactionView, LinkCommentView
urlpatterns = [
url(r'^$', LinkView.new, name='link_new'),
url(r'^(?P<post_id>[0-9]+)/add/$', LinkView.add, name='link_add'),
url(r'^(?P<post_id>[0-9]+)/react/$', LinkReactionView.react, name='link_react'),
url(r'^(?P<post_id>[0-9]+)/unreact/$', LinkReactionView.unreact, name='link_unreact'),
url(r'^(?P<post_id>[0-9]+)/comment/$', LinkCommentView.comment, name='link_comment'),
]
|
<commit_before><commit_msg>Integrate notification app in settings and add basic structure of files<commit_after>from django.conf.urls import url
from link.views import LinkView, LinkReactionView, LinkCommentView
urlpatterns = [
url(r'^$', LinkView.new, name='link_new'),
url(r'^(?P<post_id>[0-9]+)/add/$', LinkView.add, name='link_add'),
url(r'^(?P<post_id>[0-9]+)/react/$', LinkReactionView.react, name='link_react'),
url(r'^(?P<post_id>[0-9]+)/unreact/$', LinkReactionView.unreact, name='link_unreact'),
url(r'^(?P<post_id>[0-9]+)/comment/$', LinkCommentView.comment, name='link_comment'),
]
|
|
6beccf0c0b4e7788403415c05ae9f31e6c0a89eb
|
tests/test_gpa.py
|
tests/test_gpa.py
|
import unittest
import numpy as np
from sklearn import datasets
from sklearn import decomposition
from sklearn.utils import estimator_checks
import prince
class TestGPA(unittest.TestCase):
# def setUp(self):
def __init__(self):
# Create a list of 2-D circles with different locations and rotations
n_shapes = 4
n_points = 12
n_dims = 2
shape_sizes = np.arange(1, n_shapes + 1)
shape_angle_offsets = 10 * np.arange(n_shapes)
shape_center_offsets = np.tile(np.arange(n_shapes), (n_dims, 1))
base_angles = np.linspace(0, 2 * np.pi, num=n_points, endpoint=False)
# Size (n_shapes, n_points)
angles = base_angles[np.newaxis, :] + shape_angle_offsets[:, np.newaxis]
# Calculate along dimensions
x = (
np.cos(angles) * shape_sizes[:, np.newaxis]
+ shape_center_offsets[0][:, np.newaxis]
)
y = (
np.sin(angles) * shape_sizes[:, np.newaxis]
+ shape_center_offsets[1][:, np.newaxis]
)
self.shapes = np.stack([x, y], axis=-1)
def test_fit(self):
gpa = prince.GPA()
self.assertIsInstance(gpa.fit(self.shapes), prince.GPA)
def test_transform(self):
gpa = prince.GPA(copy=True)
aligned_shapes = gpa.fit(self.shapes).transform(self.shapes)
self.assertIsInstance(aligned_shapes, np.ndarray)
self.assertEqual(self.shapes.shape, aligned_shapes.shape)
def test_fit_transform(self):
gpa = prince.GPA()
aligned_shapes = gpa.fit_transform(self.shapes)
self.assertIsInstance(aligned_shapes, np.ndarray)
def test_fit_transform_single(self):
"""Aligning a single shape should return the same shape."""
gpa = prince.GPA()
shapes = self.shapes.shape[0:1]
aligned_shapes = gpa.fit_transform(shapes)
np.testing.assert_array_equal(shapes, aligned_shapes)
def test_copy(self):
shapes_copy = np.copy(self.shapes)
gpa = prince.GPA(copy=True)
gpa.fit(shapes_copy)
np.testing.assert_array_equal(self.shapes, shapes_copy)
gpa = prince.GPA(copy=False)
gpa.fit(shapes_copy)
self.assertRaises(
AssertionError, np.testing.assert_array_equal, self.shapes, shapes_copy
)
def test_check_estimator(self):
estimator_checks.check_estimator(prince.GPA(as_array=True))
|
Add tests for Generalized Procrustes Analysis (GPA)
|
Add tests for Generalized Procrustes Analysis (GPA)
|
Python
|
mit
|
MaxHalford/Prince
|
Add tests for Generalized Procrustes Analysis (GPA)
|
import unittest
import numpy as np
from sklearn import datasets
from sklearn import decomposition
from sklearn.utils import estimator_checks
import prince
class TestGPA(unittest.TestCase):
# def setUp(self):
def __init__(self):
# Create a list of 2-D circles with different locations and rotations
n_shapes = 4
n_points = 12
n_dims = 2
shape_sizes = np.arange(1, n_shapes + 1)
shape_angle_offsets = 10 * np.arange(n_shapes)
shape_center_offsets = np.tile(np.arange(n_shapes), (n_dims, 1))
base_angles = np.linspace(0, 2 * np.pi, num=n_points, endpoint=False)
# Size (n_shapes, n_points)
angles = base_angles[np.newaxis, :] + shape_angle_offsets[:, np.newaxis]
# Calculate along dimensions
x = (
np.cos(angles) * shape_sizes[:, np.newaxis]
+ shape_center_offsets[0][:, np.newaxis]
)
y = (
np.sin(angles) * shape_sizes[:, np.newaxis]
+ shape_center_offsets[1][:, np.newaxis]
)
self.shapes = np.stack([x, y], axis=-1)
def test_fit(self):
gpa = prince.GPA()
self.assertIsInstance(gpa.fit(self.shapes), prince.GPA)
def test_transform(self):
gpa = prince.GPA(copy=True)
aligned_shapes = gpa.fit(self.shapes).transform(self.shapes)
self.assertIsInstance(aligned_shapes, np.ndarray)
self.assertEqual(self.shapes.shape, aligned_shapes.shape)
def test_fit_transform(self):
gpa = prince.GPA()
aligned_shapes = gpa.fit_transform(self.shapes)
self.assertIsInstance(aligned_shapes, np.ndarray)
def test_fit_transform_single(self):
"""Aligning a single shape should return the same shape."""
gpa = prince.GPA()
shapes = self.shapes.shape[0:1]
aligned_shapes = gpa.fit_transform(shapes)
np.testing.assert_array_equal(shapes, aligned_shapes)
def test_copy(self):
shapes_copy = np.copy(self.shapes)
gpa = prince.GPA(copy=True)
gpa.fit(shapes_copy)
np.testing.assert_array_equal(self.shapes, shapes_copy)
gpa = prince.GPA(copy=False)
gpa.fit(shapes_copy)
self.assertRaises(
AssertionError, np.testing.assert_array_equal, self.shapes, shapes_copy
)
def test_check_estimator(self):
estimator_checks.check_estimator(prince.GPA(as_array=True))
|
<commit_before><commit_msg>Add tests for Generalized Procrustes Analysis (GPA)<commit_after>
|
import unittest
import numpy as np
from sklearn import datasets
from sklearn import decomposition
from sklearn.utils import estimator_checks
import prince
class TestGPA(unittest.TestCase):
# def setUp(self):
def __init__(self):
# Create a list of 2-D circles with different locations and rotations
n_shapes = 4
n_points = 12
n_dims = 2
shape_sizes = np.arange(1, n_shapes + 1)
shape_angle_offsets = 10 * np.arange(n_shapes)
shape_center_offsets = np.tile(np.arange(n_shapes), (n_dims, 1))
base_angles = np.linspace(0, 2 * np.pi, num=n_points, endpoint=False)
# Size (n_shapes, n_points)
angles = base_angles[np.newaxis, :] + shape_angle_offsets[:, np.newaxis]
# Calculate along dimensions
x = (
np.cos(angles) * shape_sizes[:, np.newaxis]
+ shape_center_offsets[0][:, np.newaxis]
)
y = (
np.sin(angles) * shape_sizes[:, np.newaxis]
+ shape_center_offsets[1][:, np.newaxis]
)
self.shapes = np.stack([x, y], axis=-1)
def test_fit(self):
gpa = prince.GPA()
self.assertIsInstance(gpa.fit(self.shapes), prince.GPA)
def test_transform(self):
gpa = prince.GPA(copy=True)
aligned_shapes = gpa.fit(self.shapes).transform(self.shapes)
self.assertIsInstance(aligned_shapes, np.ndarray)
self.assertEqual(self.shapes.shape, aligned_shapes.shape)
def test_fit_transform(self):
gpa = prince.GPA()
aligned_shapes = gpa.fit_transform(self.shapes)
self.assertIsInstance(aligned_shapes, np.ndarray)
def test_fit_transform_single(self):
"""Aligning a single shape should return the same shape."""
gpa = prince.GPA()
shapes = self.shapes.shape[0:1]
aligned_shapes = gpa.fit_transform(shapes)
np.testing.assert_array_equal(shapes, aligned_shapes)
def test_copy(self):
shapes_copy = np.copy(self.shapes)
gpa = prince.GPA(copy=True)
gpa.fit(shapes_copy)
np.testing.assert_array_equal(self.shapes, shapes_copy)
gpa = prince.GPA(copy=False)
gpa.fit(shapes_copy)
self.assertRaises(
AssertionError, np.testing.assert_array_equal, self.shapes, shapes_copy
)
def test_check_estimator(self):
estimator_checks.check_estimator(prince.GPA(as_array=True))
|
Add tests for Generalized Procrustes Analysis (GPA)import unittest
import numpy as np
from sklearn import datasets
from sklearn import decomposition
from sklearn.utils import estimator_checks
import prince
class TestGPA(unittest.TestCase):
# def setUp(self):
def __init__(self):
# Create a list of 2-D circles with different locations and rotations
n_shapes = 4
n_points = 12
n_dims = 2
shape_sizes = np.arange(1, n_shapes + 1)
shape_angle_offsets = 10 * np.arange(n_shapes)
shape_center_offsets = np.tile(np.arange(n_shapes), (n_dims, 1))
base_angles = np.linspace(0, 2 * np.pi, num=n_points, endpoint=False)
# Size (n_shapes, n_points)
angles = base_angles[np.newaxis, :] + shape_angle_offsets[:, np.newaxis]
# Calculate along dimensions
x = (
np.cos(angles) * shape_sizes[:, np.newaxis]
+ shape_center_offsets[0][:, np.newaxis]
)
y = (
np.sin(angles) * shape_sizes[:, np.newaxis]
+ shape_center_offsets[1][:, np.newaxis]
)
self.shapes = np.stack([x, y], axis=-1)
def test_fit(self):
gpa = prince.GPA()
self.assertIsInstance(gpa.fit(self.shapes), prince.GPA)
def test_transform(self):
gpa = prince.GPA(copy=True)
aligned_shapes = gpa.fit(self.shapes).transform(self.shapes)
self.assertIsInstance(aligned_shapes, np.ndarray)
self.assertEqual(self.shapes.shape, aligned_shapes.shape)
def test_fit_transform(self):
gpa = prince.GPA()
aligned_shapes = gpa.fit_transform(self.shapes)
self.assertIsInstance(aligned_shapes, np.ndarray)
def test_fit_transform_single(self):
"""Aligning a single shape should return the same shape."""
gpa = prince.GPA()
shapes = self.shapes.shape[0:1]
aligned_shapes = gpa.fit_transform(shapes)
np.testing.assert_array_equal(shapes, aligned_shapes)
def test_copy(self):
shapes_copy = np.copy(self.shapes)
gpa = prince.GPA(copy=True)
gpa.fit(shapes_copy)
np.testing.assert_array_equal(self.shapes, shapes_copy)
gpa = prince.GPA(copy=False)
gpa.fit(shapes_copy)
self.assertRaises(
AssertionError, np.testing.assert_array_equal, self.shapes, shapes_copy
)
def test_check_estimator(self):
estimator_checks.check_estimator(prince.GPA(as_array=True))
|
<commit_before><commit_msg>Add tests for Generalized Procrustes Analysis (GPA)<commit_after>import unittest
import numpy as np
from sklearn import datasets
from sklearn import decomposition
from sklearn.utils import estimator_checks
import prince
class TestGPA(unittest.TestCase):
# def setUp(self):
def __init__(self):
# Create a list of 2-D circles with different locations and rotations
n_shapes = 4
n_points = 12
n_dims = 2
shape_sizes = np.arange(1, n_shapes + 1)
shape_angle_offsets = 10 * np.arange(n_shapes)
shape_center_offsets = np.tile(np.arange(n_shapes), (n_dims, 1))
base_angles = np.linspace(0, 2 * np.pi, num=n_points, endpoint=False)
# Size (n_shapes, n_points)
angles = base_angles[np.newaxis, :] + shape_angle_offsets[:, np.newaxis]
# Calculate along dimensions
x = (
np.cos(angles) * shape_sizes[:, np.newaxis]
+ shape_center_offsets[0][:, np.newaxis]
)
y = (
np.sin(angles) * shape_sizes[:, np.newaxis]
+ shape_center_offsets[1][:, np.newaxis]
)
self.shapes = np.stack([x, y], axis=-1)
def test_fit(self):
gpa = prince.GPA()
self.assertIsInstance(gpa.fit(self.shapes), prince.GPA)
def test_transform(self):
gpa = prince.GPA(copy=True)
aligned_shapes = gpa.fit(self.shapes).transform(self.shapes)
self.assertIsInstance(aligned_shapes, np.ndarray)
self.assertEqual(self.shapes.shape, aligned_shapes.shape)
def test_fit_transform(self):
gpa = prince.GPA()
aligned_shapes = gpa.fit_transform(self.shapes)
self.assertIsInstance(aligned_shapes, np.ndarray)
def test_fit_transform_single(self):
"""Aligning a single shape should return the same shape."""
gpa = prince.GPA()
shapes = self.shapes.shape[0:1]
aligned_shapes = gpa.fit_transform(shapes)
np.testing.assert_array_equal(shapes, aligned_shapes)
def test_copy(self):
shapes_copy = np.copy(self.shapes)
gpa = prince.GPA(copy=True)
gpa.fit(shapes_copy)
np.testing.assert_array_equal(self.shapes, shapes_copy)
gpa = prince.GPA(copy=False)
gpa.fit(shapes_copy)
self.assertRaises(
AssertionError, np.testing.assert_array_equal, self.shapes, shapes_copy
)
def test_check_estimator(self):
estimator_checks.check_estimator(prince.GPA(as_array=True))
|
|
0baca9564c9df7b06645f71abdda0fe3090f46a6
|
utils/lit/tests/xunit-output.py
|
utils/lit/tests/xunit-output.py
|
# Check xunit output
# RUN: %{lit} --xunit-xml-output %t.xunit.xml %{inputs}/test-data
# RUN: FileCheck < %t.xunit.xml %s
# CHECK: <?xml version="1.0" encoding="UTF-8" ?>
# CHECK: <testsuites>
# CHECK: <testsuite name='test-data' tests='1' failures='0'>
# CHECK: <testcase classname='test-data.' name='metrics.ini' time='0.00'/>
# CHECK: </testsuite>
# CHECK: </testsuites>
|
Add a test-case for lit xunit output
|
Add a test-case for lit xunit output
git-svn-id: 0ff597fd157e6f4fc38580e8d64ab130330d2411@223307 91177308-0d34-0410-b5e6-96231b3b80d8
|
Python
|
apache-2.0
|
GPUOpen-Drivers/llvm,dslab-epfl/asap,llvm-mirror/llvm,llvm-mirror/llvm,llvm-mirror/llvm,apple/swift-llvm,apple/swift-llvm,llvm-mirror/llvm,apple/swift-llvm,GPUOpen-Drivers/llvm,dslab-epfl/asap,dslab-epfl/asap,apple/swift-llvm,llvm-mirror/llvm,llvm-mirror/llvm,dslab-epfl/asap,dslab-epfl/asap,llvm-mirror/llvm,apple/swift-llvm,GPUOpen-Drivers/llvm,GPUOpen-Drivers/llvm,GPUOpen-Drivers/llvm,GPUOpen-Drivers/llvm,llvm-mirror/llvm,GPUOpen-Drivers/llvm,dslab-epfl/asap,llvm-mirror/llvm,GPUOpen-Drivers/llvm,apple/swift-llvm,apple/swift-llvm,apple/swift-llvm,dslab-epfl/asap
|
Add a test-case for lit xunit output
git-svn-id: 0ff597fd157e6f4fc38580e8d64ab130330d2411@223307 91177308-0d34-0410-b5e6-96231b3b80d8
|
# Check xunit output
# RUN: %{lit} --xunit-xml-output %t.xunit.xml %{inputs}/test-data
# RUN: FileCheck < %t.xunit.xml %s
# CHECK: <?xml version="1.0" encoding="UTF-8" ?>
# CHECK: <testsuites>
# CHECK: <testsuite name='test-data' tests='1' failures='0'>
# CHECK: <testcase classname='test-data.' name='metrics.ini' time='0.00'/>
# CHECK: </testsuite>
# CHECK: </testsuites>
|
<commit_before><commit_msg>Add a test-case for lit xunit output
git-svn-id: 0ff597fd157e6f4fc38580e8d64ab130330d2411@223307 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>
|
# Check xunit output
# RUN: %{lit} --xunit-xml-output %t.xunit.xml %{inputs}/test-data
# RUN: FileCheck < %t.xunit.xml %s
# CHECK: <?xml version="1.0" encoding="UTF-8" ?>
# CHECK: <testsuites>
# CHECK: <testsuite name='test-data' tests='1' failures='0'>
# CHECK: <testcase classname='test-data.' name='metrics.ini' time='0.00'/>
# CHECK: </testsuite>
# CHECK: </testsuites>
|
Add a test-case for lit xunit output
git-svn-id: 0ff597fd157e6f4fc38580e8d64ab130330d2411@223307 91177308-0d34-0410-b5e6-96231b3b80d8# Check xunit output
# RUN: %{lit} --xunit-xml-output %t.xunit.xml %{inputs}/test-data
# RUN: FileCheck < %t.xunit.xml %s
# CHECK: <?xml version="1.0" encoding="UTF-8" ?>
# CHECK: <testsuites>
# CHECK: <testsuite name='test-data' tests='1' failures='0'>
# CHECK: <testcase classname='test-data.' name='metrics.ini' time='0.00'/>
# CHECK: </testsuite>
# CHECK: </testsuites>
|
<commit_before><commit_msg>Add a test-case for lit xunit output
git-svn-id: 0ff597fd157e6f4fc38580e8d64ab130330d2411@223307 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after># Check xunit output
# RUN: %{lit} --xunit-xml-output %t.xunit.xml %{inputs}/test-data
# RUN: FileCheck < %t.xunit.xml %s
# CHECK: <?xml version="1.0" encoding="UTF-8" ?>
# CHECK: <testsuites>
# CHECK: <testsuite name='test-data' tests='1' failures='0'>
# CHECK: <testcase classname='test-data.' name='metrics.ini' time='0.00'/>
# CHECK: </testsuite>
# CHECK: </testsuites>
|
|
a102fb888b60454d7efbe26e4afb38a59c212769
|
p3/management/commands/delete_spam_users.py
|
p3/management/commands/delete_spam_users.py
|
# -*- coding: utf-8 -*-
""" Delete users creating by spambots.
"""
import logging as log
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from assopy import models as amodels
###
class Command(BaseCommand):
# Options
option_list = BaseCommand.option_list + (
make_option('--dry-run',
action='store_true',
dest='dry_run',
help='Do everything except delete users',
),
)
args = '<conference>'
# Dry run ?
dry_run = False
@transaction.atomic
def handle(self, *args, **options):
# Handle options
self.dry_run = options.get('dry_run', False)
        # Between June 1-4 2018, a Chinese spammer created 30k fake user
# accounts
spam_users = amodels.User.objects.filter(
            user__first_name = '้่ฏ้38ๅ',
)
print ('Found %i (potential) spam users.' % len(spam_users))
count = 0
for user in spam_users:
# Filter out users with tickets
tickets = user.tickets()
if tickets:
print ('Spam user %r has %i tickets: skipping.' % (
user.user.get_username(), len(tickets)))
continue
# Delete user and all related objects
if not self.dry_run:
user.delete()
count += 1
if count % 1000 == 0:
print ('Deleted %i spam users.' % count)
if self.dry_run:
print ('Would have deleted %i spam users.' % count)
else:
print ('Deleted %i spam users.' % count)
|
Add script to delete spam users.
|
Add script to delete spam users.
|
Python
|
bsd-2-clause
|
EuroPython/epcon,EuroPython/epcon,EuroPython/epcon,EuroPython/epcon
|
Add script to delete spam users.
|
# -*- coding: utf-8 -*-
""" Delete users creating by spambots.
"""
import logging as log
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from assopy import models as amodels
###
class Command(BaseCommand):
# Options
option_list = BaseCommand.option_list + (
make_option('--dry-run',
action='store_true',
dest='dry_run',
help='Do everything except delete users',
),
)
args = '<conference>'
# Dry run ?
dry_run = False
@transaction.atomic
def handle(self, *args, **options):
# Handle options
self.dry_run = options.get('dry_run', False)
        # Between June 1-4 2018, a Chinese spammer created 30k fake user
# accounts
spam_users = amodels.User.objects.filter(
            user__first_name = '้่ฏ้38ๅ',
)
print ('Found %i (potential) spam users.' % len(spam_users))
count = 0
for user in spam_users:
# Filter out users with tickets
tickets = user.tickets()
if tickets:
print ('Spam user %r has %i tickets: skipping.' % (
user.user.get_username(), len(tickets)))
continue
# Delete user and all related objects
if not self.dry_run:
user.delete()
count += 1
if count % 1000 == 0:
print ('Deleted %i spam users.' % count)
if self.dry_run:
print ('Would have deleted %i spam users.' % count)
else:
print ('Deleted %i spam users.' % count)
|
<commit_before><commit_msg>Add script to delete spam users.<commit_after>
|
# -*- coding: utf-8 -*-
""" Delete users creating by spambots.
"""
import logging as log
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from assopy import models as amodels
###
class Command(BaseCommand):
# Options
option_list = BaseCommand.option_list + (
make_option('--dry-run',
action='store_true',
dest='dry_run',
help='Do everything except delete users',
),
)
args = '<conference>'
# Dry run ?
dry_run = False
@transaction.atomic
def handle(self, *args, **options):
# Handle options
self.dry_run = options.get('dry_run', False)
        # Between June 1-4 2018, a Chinese spammer created 30k fake user
# accounts
spam_users = amodels.User.objects.filter(
            user__first_name = '้่ฏ้38ๅ',
)
print ('Found %i (potential) spam users.' % len(spam_users))
count = 0
for user in spam_users:
# Filter out users with tickets
tickets = user.tickets()
if tickets:
print ('Spam user %r has %i tickets: skipping.' % (
user.user.get_username(), len(tickets)))
continue
# Delete user and all related objects
if not self.dry_run:
user.delete()
count += 1
if count % 1000 == 0:
print ('Deleted %i spam users.' % count)
if self.dry_run:
print ('Would have deleted %i spam users.' % count)
else:
print ('Deleted %i spam users.' % count)
|
Add script to delete spam users.# -*- coding: utf-8 -*-
""" Delete users creating by spambots.
"""
import logging as log
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from assopy import models as amodels
###
class Command(BaseCommand):
# Options
option_list = BaseCommand.option_list + (
make_option('--dry-run',
action='store_true',
dest='dry_run',
help='Do everything except delete users',
),
)
args = '<conference>'
# Dry run ?
dry_run = False
@transaction.atomic
def handle(self, *args, **options):
# Handle options
self.dry_run = options.get('dry_run', False)
        # Between June 1-4 2018, a Chinese spammer created 30k fake user
# accounts
spam_users = amodels.User.objects.filter(
            user__first_name = '้่ฏ้38ๅ',
)
print ('Found %i (potential) spam users.' % len(spam_users))
count = 0
for user in spam_users:
# Filter out users with tickets
tickets = user.tickets()
if tickets:
print ('Spam user %r has %i tickets: skipping.' % (
user.user.get_username(), len(tickets)))
continue
# Delete user and all related objects
if not self.dry_run:
user.delete()
count += 1
if count % 1000 == 0:
print ('Deleted %i spam users.' % count)
if self.dry_run:
print ('Would have deleted %i spam users.' % count)
else:
print ('Deleted %i spam users.' % count)
|
<commit_before><commit_msg>Add script to delete spam users.<commit_after># -*- coding: utf-8 -*-
""" Delete users creating by spambots.
"""
import logging as log
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from assopy import models as amodels
###
class Command(BaseCommand):
# Options
option_list = BaseCommand.option_list + (
make_option('--dry-run',
action='store_true',
dest='dry_run',
help='Do everything except delete users',
),
)
args = '<conference>'
# Dry run ?
dry_run = False
@transaction.atomic
def handle(self, *args, **options):
# Handle options
self.dry_run = options.get('dry_run', False)
        # Between June 1-4 2018, a Chinese spammer created 30k fake user
# accounts
spam_users = amodels.User.objects.filter(
            user__first_name = '้่ฏ้38ๅ',
)
print ('Found %i (potential) spam users.' % len(spam_users))
count = 0
for user in spam_users:
# Filter out users with tickets
tickets = user.tickets()
if tickets:
print ('Spam user %r has %i tickets: skipping.' % (
user.user.get_username(), len(tickets)))
continue
# Delete user and all related objects
if not self.dry_run:
user.delete()
count += 1
if count % 1000 == 0:
print ('Deleted %i spam users.' % count)
if self.dry_run:
print ('Would have deleted %i spam users.' % count)
else:
print ('Deleted %i spam users.' % count)
|
|
1c615be1d3da720d2d0a1974808e3856cbd9d498
|
virgil_sdk/api/virgil_api.py
|
virgil_sdk/api/virgil_api.py
|
# Copyright (C) 2016 Virgil Security Inc.
#
# Lead Maintainer: Virgil Security Inc. <support@virgilsecurity.com>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from virgil_sdk.api import IdentitiesManager
from virgil_sdk.api import VirgilContext
from virgil_sdk.api.card_manager import CardManager
from virgil_sdk.api.key_manager import KeyManager
class Virgil(object):
"""The Virgil class is a high-level API that provides easy access to
Virgil Security services and allows to perform cryptographic operations by using two domain entities
VirgilKey and VirgilCard. Where the VirgilKey is an entity
that represents a user's Private key, and the VirgilCard is the entity that represents
user's identity and a Public key."""
def __init__(
self,
access_token=None, # type: str
context=None # type: VirgilContext
):
# type: (...) -> None
self.__access_token = access_token
self._context = context
self.keys = KeyManager(self.__context)
self.cards = CardManager(self.__context)
self.identities = IdentitiesManager(self.__context)
@property
def __context(self):
# type: () -> VirgilContext
"""Gets context for further use in api"""
if not self._context:
self._context = VirgilContext(self.__access_token)
return self._context
|
Create Virgil highlevel api implementation
|
[SDK-165] Create Virgil highlevel api implementation
|
Python
|
bsd-3-clause
|
VirgilSecurity/virgil-sdk-python
|
[SDK-165] Create Virgil highlevel api implementation
|
# Copyright (C) 2016 Virgil Security Inc.
#
# Lead Maintainer: Virgil Security Inc. <support@virgilsecurity.com>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from virgil_sdk.api import IdentitiesManager
from virgil_sdk.api import VirgilContext
from virgil_sdk.api.card_manager import CardManager
from virgil_sdk.api.key_manager import KeyManager
class Virgil(object):
"""The Virgil class is a high-level API that provides easy access to
Virgil Security services and allows to perform cryptographic operations by using two domain entities
VirgilKey and VirgilCard. Where the VirgilKey is an entity
that represents a user's Private key, and the VirgilCard is the entity that represents
user's identity and a Public key."""
def __init__(
self,
access_token=None, # type: str
context=None # type: VirgilContext
):
# type: (...) -> None
self.__access_token = access_token
self._context = context
self.keys = KeyManager(self.__context)
self.cards = CardManager(self.__context)
self.identities = IdentitiesManager(self.__context)
@property
def __context(self):
# type: () -> VirgilContext
"""Gets context for further use in api"""
if not self._context:
self._context = VirgilContext(self.__access_token)
return self._context
|
<commit_before><commit_msg>[SDK-165] Create Virgil highlevel api implementation<commit_after>
|
# Copyright (C) 2016 Virgil Security Inc.
#
# Lead Maintainer: Virgil Security Inc. <support@virgilsecurity.com>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from virgil_sdk.api import IdentitiesManager
from virgil_sdk.api import VirgilContext
from virgil_sdk.api.card_manager import CardManager
from virgil_sdk.api.key_manager import KeyManager
class Virgil(object):
"""The Virgil class is a high-level API that provides easy access to
Virgil Security services and allows to perform cryptographic operations by using two domain entities
VirgilKey and VirgilCard. Where the VirgilKey is an entity
that represents a user's Private key, and the VirgilCard is the entity that represents
user's identity and a Public key."""
def __init__(
self,
access_token=None, # type: str
context=None # type: VirgilContext
):
# type: (...) -> None
self.__access_token = access_token
self._context = context
self.keys = KeyManager(self.__context)
self.cards = CardManager(self.__context)
self.identities = IdentitiesManager(self.__context)
@property
def __context(self):
# type: () -> VirgilContext
"""Gets context for further use in api"""
if not self._context:
self._context = VirgilContext(self.__access_token)
return self._context
|
[SDK-165] Create Virgil highlevel api implementation# Copyright (C) 2016 Virgil Security Inc.
#
# Lead Maintainer: Virgil Security Inc. <support@virgilsecurity.com>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from virgil_sdk.api import IdentitiesManager
from virgil_sdk.api import VirgilContext
from virgil_sdk.api.card_manager import CardManager
from virgil_sdk.api.key_manager import KeyManager
class Virgil(object):
"""The Virgil class is a high-level API that provides easy access to
Virgil Security services and allows to perform cryptographic operations by using two domain entities
VirgilKey and VirgilCard. Where the VirgilKey is an entity
that represents a user's Private key, and the VirgilCard is the entity that represents
user's identity and a Public key."""
def __init__(
self,
access_token=None, # type: str
context=None # type: VirgilContext
):
# type: (...) -> None
self.__access_token = access_token
self._context = context
self.keys = KeyManager(self.__context)
self.cards = CardManager(self.__context)
self.identities = IdentitiesManager(self.__context)
@property
def __context(self):
# type: () -> VirgilContext
"""Gets context for further use in api"""
if not self._context:
self._context = VirgilContext(self.__access_token)
return self._context
|
<commit_before><commit_msg>[SDK-165] Create Virgil highlevel api implementation<commit_after># Copyright (C) 2016 Virgil Security Inc.
#
# Lead Maintainer: Virgil Security Inc. <support@virgilsecurity.com>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from virgil_sdk.api import IdentitiesManager
from virgil_sdk.api import VirgilContext
from virgil_sdk.api.card_manager import CardManager
from virgil_sdk.api.key_manager import KeyManager
class Virgil(object):
"""The Virgil class is a high-level API that provides easy access to
Virgil Security services and allows to perform cryptographic operations by using two domain entities
VirgilKey and VirgilCard. Where the VirgilKey is an entity
that represents a user's Private key, and the VirgilCard is the entity that represents
user's identity and a Public key."""
def __init__(
self,
access_token=None, # type: str
context=None # type: VirgilContext
):
# type: (...) -> None
self.__access_token = access_token
self._context = context
self.keys = KeyManager(self.__context)
self.cards = CardManager(self.__context)
self.identities = IdentitiesManager(self.__context)
@property
def __context(self):
# type: () -> VirgilContext
"""Gets context for further use in api"""
if not self._context:
self._context = VirgilContext(self.__access_token)
return self._context
|
|
6090dc1539bd0701381c73128a5ca0606adc09e4
|
tests/utils/test_ssdp.py
|
tests/utils/test_ssdp.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
'''
from __future__ import absolute_import, print_function, unicode_literals
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch)
# Import Salt libs
import salt.exceptions
import salt.state
try:
import pytest
except ImportError as err:
pytest = None
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(pytest is None, 'PyTest is missing')
class SSDPTestCase(TestCase):
'''
TestCase for SSDP-related parts.
'''
def test_ssdp_base(self):
'''
Test SSDP base class main methods.
:return:
'''
|
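The two skipIf decorators above gate the whole case on mock and pytest being importable. A
minimal illustration of the same gating pattern using only the standard library (the names are
generic and unrelated to Salt's SSDP helpers):

import unittest

try:
    import pytest
except ImportError:
    pytest = None

@unittest.skipIf(pytest is None, 'PyTest is missing')
class ExampleGatedCase(unittest.TestCase):
    def test_placeholder(self):
        # Runs only when pytest is importable; otherwise the whole class is skipped.
        self.assertTrue(True)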
Add SSDP unit test case (init)
|
Add SSDP unit test case (init)
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add SSDP unit test case (init)
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
'''
from __future__ import absolute_import, print_function, unicode_literals
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch)
# Import Salt libs
import salt.exceptions
import salt.state
try:
import pytest
except ImportError as err:
pytest = None
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(pytest is None, 'PyTest is missing')
class SSDPTestCase(TestCase):
'''
TestCase for SSDP-related parts.
'''
def test_ssdp_base(self):
'''
Test SSDP base class main methods.
:return:
'''
|
<commit_before><commit_msg>Add SSDP unit test case (init)<commit_after>
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
'''
from __future__ import absolute_import, print_function, unicode_literals
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch)
# Import Salt libs
import salt.exceptions
import salt.state
try:
import pytest
except ImportError as err:
pytest = None
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(pytest is None, 'PyTest is missing')
class SSDPTestCase(TestCase):
'''
TestCase for SSDP-related parts.
'''
def test_ssdp_base(self):
'''
Test SSDP base class main methods.
:return:
'''
|
Add SSDP unit test case (init)# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
'''
from __future__ import absolute_import, print_function, unicode_literals
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch)
# Import Salt libs
import salt.exceptions
import salt.state
try:
import pytest
except ImportError as err:
pytest = None
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(pytest is None, 'PyTest is missing')
class SSDPTestCase(TestCase):
'''
TestCase for SSDP-related parts.
'''
def test_ssdp_base(self):
'''
Test SSDP base class main methods.
:return:
'''
|
<commit_before><commit_msg>Add SSDP unit test case (init)<commit_after># -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
'''
from __future__ import absolute_import, print_function, unicode_literals
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch)
# Import Salt libs
import salt.exceptions
import salt.state
try:
import pytest
except ImportError as err:
pytest = None
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(pytest is None, 'PyTest is missing')
class SSDPTestCase(TestCase):
'''
TestCase for SSDP-related parts.
'''
def test_ssdp_base(self):
'''
Test SSDP base class main methods.
:return:
'''
|
|
f31b42ae43e7cd2af53a504c1cc2ab398bf7810d
|
main.py
|
main.py
|
import json
import requests
from tabulate import tabulate
BASE_URL = "http://api.football-data.org/alpha/"
soccer_seasons = "soccerseasons/"
epl_current_season = "soccerseasons/398/"
league_table = "leagueTable/"
def print_standings(table):
standings = []
for team in table:
entry = [team['position'], team['teamName'], team['points']]
standings.append(entry)
print tabulate(standings, headers=['Pos', 'Club', 'Points'], tablefmt="rst")
def main():
resp = requests.get(BASE_URL + epl_current_season + league_table)
data = resp.json()
league_standings = data['standing']
print_standings(league_standings)
if __name__ == '__main__':
main()
|
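As a rough illustration of what print_standings produces, here is the same tabulate call on a
couple of hand-written rows (the clubs and points are made up, and the sketch uses the print()
function rather than the Python 2 print statement used above):

from tabulate import tabulate

standings = [[1, 'Example FC', 42], [2, 'Sample United', 40]]
# Renders a reStructuredText-style grid with '=' rules and right-aligned numbers.
print(tabulate(standings, headers=['Pos', 'Club', 'Points'], tablefmt='rst'))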
Add api call for Premier League standings
|
Add api call for Premier League standings
Also add a function to print standings in tabular form using tabulate.
|
Python
|
mit
|
conormag94/pyscores
|
Add api call for Premier League standings
Also add a function to print standings in tabular form using tabulate.
|
import json
import requests
from tabulate import tabulate
BASE_URL = "http://api.football-data.org/alpha/"
soccer_seasons = "soccerseasons/"
epl_current_season = "soccerseasons/398/"
league_table = "leagueTable/"
def print_standings(table):
standings = []
for team in table:
entry = [team['position'], team['teamName'], team['points']]
standings.append(entry)
print tabulate(standings, headers=['Pos', 'Club', 'Points'], tablefmt="rst")
def main():
resp = requests.get(BASE_URL + epl_current_season + league_table)
data = resp.json()
league_standings = data['standing']
print_standings(league_standings)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add api call for Premier League standings
Also add a function to print standings in tabular form using tabulate.<commit_after>
|
import json
import requests
from tabulate import tabulate
BASE_URL = "http://api.football-data.org/alpha/"
soccer_seasons = "soccerseasons/"
epl_current_season = "soccerseasons/398/"
league_table = "leagueTable/"
def print_standings(table):
standings = []
for team in table:
entry = [team['position'], team['teamName'], team['points']]
standings.append(entry)
print tabulate(standings, headers=['Pos', 'Club', 'Points'], tablefmt="rst")
def main():
resp = requests.get(BASE_URL + epl_current_season + league_table)
data = resp.json()
league_standings = data['standing']
print_standings(league_standings)
if __name__ == '__main__':
main()
|
Add api call for Premier League standings
Also add a function to print standings in tabular form using tabulate.import json
import requests
from tabulate import tabulate
BASE_URL = "http://api.football-data.org/alpha/"
soccer_seasons = "soccerseasons/"
epl_current_season = "soccerseasons/398/"
league_table = "leagueTable/"
def print_standings(table):
standings = []
for team in table:
entry = [team['position'], team['teamName'], team['points']]
standings.append(entry)
print tabulate(standings, headers=['Pos', 'Club', 'Points'], tablefmt="rst")
def main():
resp = requests.get(BASE_URL + epl_current_season + league_table)
data = resp.json()
league_standings = data['standing']
print_standings(league_standings)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add api call for Premier League standings
Also add a function to print standings in tabular form using tabulate.<commit_after>import json
import requests
from tabulate import tabulate
BASE_URL = "http://api.football-data.org/alpha/"
soccer_seasons = "soccerseasons/"
epl_current_season = "soccerseasons/398/"
league_table = "leagueTable/"
def print_standings(table):
standings = []
for team in table:
entry = [team['position'], team['teamName'], team['points']]
standings.append(entry)
print tabulate(standings, headers=['Pos', 'Club', 'Points'], tablefmt="rst")
def main():
resp = requests.get(BASE_URL + epl_current_season + league_table)
data = resp.json()
league_standings = data['standing']
print_standings(league_standings)
if __name__ == '__main__':
main()
|
|
29e18ed63177dbe8306a22e3c0583342f4591464
|
python/ample_exit.py
|
python/ample_exit.py
|
'''
Created on Mar 18, 2015
@author: jmht
'''
import logging
import sys
import traceback
# external imports
try: import pyrvapi
except: pyrvapi=None
def exit(msg):
logger = logging.getLogger()
#header="**** AMPLE ERROR ****\n\n"
header="*"*70+"\n"
header+="*"*20 + " "*10 + "AMPLE ERROR" + " "*10 +"*"*19 + "\n"
header+="*"*70+"\n\n"
footer="\n\n" + "*"*70+"\n\n"
# Bit dirty - get the name of the debug log file
debug_log=None
for d in logger.handlers:
n='baseFilename'
if hasattr(d,n) and d.level==logging.DEBUG:
debug_log=getattr(d, n)
if debug_log:
footer+="More information may be found in the debug log file: {0}\n".format(debug_log)
footer += "\nIf you believe that this is an error with AMPLE, please email: ccp4@stfc.ac.uk\n"
footer += "providing as much information as you can about how you ran the program.\n"
if debug_log:
footer += "\nPlease include the debug logfile with your email: {0}\n".format(debug_log)
# String it all together
msg=header + msg + footer
logger.critical(msg)
# Get traceback of where we failed for the log file
logger.debug("AMPLE EXITING AT...")
logger.debug("".join(traceback.format_list(traceback.extract_stack())))
# Make sure the error widget is updated
if pyrvapi: pyrvapi.rvapi_flush()
sys.exit(1)
|
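A hypothetical caller of the routine above, assuming logging has already been configured by
AMPLE (the error text is illustrative; AMPLE normally also attaches a DEBUG-level file handler,
which is what makes the debug-log hint appear in the footer):

import logging

logging.basicConfig(level=logging.DEBUG)
try:
    raise RuntimeError("no ensemble models were generated")
except RuntimeError as error:
    # Prints the banner via logger.critical, logs the traceback, then calls sys.exit(1).
    exit("Model preparation failed: {0}".format(error))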
Exit routine for a controlled exit from ample
|
Exit routine for a controlled exit from ample
|
Python
|
bsd-3-clause
|
linucks/ample,rigdenlab/ample,linucks/ample,rigdenlab/ample
|
Exit routine for a controlled exit from ample
|
'''
Created on Mar 18, 2015
@author: jmht
'''
import logging
import sys
import traceback
# external imports
try: import pyrvapi
except: pyrvapi=None
def exit(msg):
logger = logging.getLogger()
#header="**** AMPLE ERROR ****\n\n"
header="*"*70+"\n"
header+="*"*20 + " "*10 + "AMPLE ERROR" + " "*10 +"*"*19 + "\n"
header+="*"*70+"\n\n"
footer="\n\n" + "*"*70+"\n\n"
# Bit dirty - get the name of the debug log file
debug_log=None
for d in logger.handlers:
n='baseFilename'
if hasattr(d,n) and d.level==logging.DEBUG:
debug_log=getattr(d, n)
if debug_log:
footer+="More information may be found in the debug log file: {0}\n".format(debug_log)
footer += "\nIf you believe that this is an error with AMPLE, please email: ccp4@stfc.ac.uk\n"
footer += "providing as much information as you can about how you ran the program.\n"
if debug_log:
footer += "\nPlease include the debug logfile with your email: {0}\n".format(debug_log)
# String it all together
msg=header + msg + footer
logger.critical(msg)
# Get traceback of where we failed for the log file
logger.debug("AMPLE EXITING AT...")
logger.debug("".join(traceback.format_list(traceback.extract_stack())))
# Make sure the error widget is updated
if pyrvapi: pyrvapi.rvapi_flush()
sys.exit(1)
|
<commit_before><commit_msg>Exit routine for a controlled exit from ample<commit_after>
|
'''
Created on Mar 18, 2015
@author: jmht
'''
import logging
import sys
import traceback
# external imports
try: import pyrvapi
except: pyrvapi=None
def exit(msg):
logger = logging.getLogger()
#header="**** AMPLE ERROR ****\n\n"
header="*"*70+"\n"
header+="*"*20 + " "*10 + "AMPLE ERROR" + " "*10 +"*"*19 + "\n"
header+="*"*70+"\n\n"
footer="\n\n" + "*"*70+"\n\n"
# Bit dirty - get the name of the debug log file
debug_log=None
for d in logger.handlers:
n='baseFilename'
if hasattr(d,n) and d.level==logging.DEBUG:
debug_log=getattr(d, n)
if debug_log:
footer+="More information may be found in the debug log file: {0}\n".format(debug_log)
footer += "\nIf you believe that this is an error with AMPLE, please email: ccp4@stfc.ac.uk\n"
footer += "providing as much information as you can about how you ran the program.\n"
if debug_log:
footer += "\nPlease include the debug logfile with your email: {0}\n".format(debug_log)
# String it all together
msg=header + msg + footer
logger.critical(msg)
# Get traceback of where we failed for the log file
logger.debug("AMPLE EXITING AT...")
logger.debug("".join(traceback.format_list(traceback.extract_stack())))
# Make sure the error widget is updated
if pyrvapi: pyrvapi.rvapi_flush()
sys.exit(1)
|
Exit routine for a controlled exit from ample'''
Created on Mar 18, 2015
@author: jmht
'''
import logging
import sys
import traceback
# external imports
try: import pyrvapi
except: pyrvapi=None
def exit(msg):
logger = logging.getLogger()
#header="**** AMPLE ERROR ****\n\n"
header="*"*70+"\n"
header+="*"*20 + " "*10 + "AMPLE ERROR" + " "*10 +"*"*19 + "\n"
header+="*"*70+"\n\n"
footer="\n\n" + "*"*70+"\n\n"
# Bit dirty - get the name of the debug log file
debug_log=None
for d in logger.handlers:
n='baseFilename'
if hasattr(d,n) and d.level==logging.DEBUG:
debug_log=getattr(d, n)
if debug_log:
footer+="More information may be found in the debug log file: {0}\n".format(debug_log)
footer += "\nIf you believe that this is an error with AMPLE, please email: ccp4@stfc.ac.uk\n"
footer += "providing as much information as you can about how you ran the program.\n"
if debug_log:
footer += "\nPlease include the debug logfile with your email: {0}\n".format(debug_log)
# String it all together
msg=header + msg + footer
logger.critical(msg)
# Get traceback of where we failed for the log file
logger.debug("AMPLE EXITING AT...")
logger.debug("".join(traceback.format_list(traceback.extract_stack())))
# Make sure the error widget is updated
if pyrvapi: pyrvapi.rvapi_flush()
sys.exit(1)
|
<commit_before><commit_msg>Exit routine for a controlled exit from ample<commit_after>'''
Created on Mar 18, 2015
@author: jmht
'''
import logging
import sys
import traceback
# external imports
try: import pyrvapi
except: pyrvapi=None
def exit(msg):
logger = logging.getLogger()
#header="**** AMPLE ERROR ****\n\n"
header="*"*70+"\n"
header+="*"*20 + " "*10 + "AMPLE ERROR" + " "*10 +"*"*19 + "\n"
header+="*"*70+"\n\n"
footer="\n\n" + "*"*70+"\n\n"
# Bit dirty - get the name of the debug log file
debug_log=None
for d in logger.handlers:
n='baseFilename'
if hasattr(d,n) and d.level==logging.DEBUG:
debug_log=getattr(d, n)
if debug_log:
footer+="More information may be found in the debug log file: {0}\n".format(debug_log)
footer += "\nIf you believe that this is an error with AMPLE, please email: ccp4@stfc.ac.uk\n"
footer += "providing as much information as you can about how you ran the program.\n"
if debug_log:
footer += "\nPlease include the debug logfile with your email: {0}\n".format(debug_log)
# String it all together
msg=header + msg + footer
logger.critical(msg)
# Get traceback of where we failed for the log file
logger.debug("AMPLE EXITING AT...")
logger.debug("".join(traceback.format_list(traceback.extract_stack())))
# Make sure the error widget is updated
if pyrvapi: pyrvapi.rvapi_flush()
sys.exit(1)
|
|
fa3450a44621fab4a9a2f2ed1599d08f66860f70
|
integrate_density.py
|
integrate_density.py
|
import argparse
import numpy as np
import h5py
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Integrate probability ' +
'densities to verify that they are ' +
'normalized')
parser.add_argument('data_filenames', metavar='files', nargs='+',
help='List of hdf5 files containing densities')
args = parser.parse_args()
data_files = [h5py.File(data_filename, 'r') for data_filename in
args.data_filenames]
epsilons = [data_file['densities'].attrs['epsilon'] for data_file in
data_files]
Density_meshes = [data_file['densities'][:] for data_file in data_files]
Phi_meshes = [data_file['Phi'][:] for data_file in data_files]
Theta_meshes = [-2*np.arccos(data_file['R'][:]/2) + np.pi for data_file in
data_files]
Total_probs = []
for Density_mesh, Phi_mesh, Theta_mesh in zip(Density_meshes, Phi_meshes,
Theta_meshes):
# Scale Density_mesh so that the integration can be thought of as on a
# rectangle rather than a hemisphere
Scaled_density_mesh = Density_mesh*np.sin(Theta_mesh)
Total_probs.append(np.trapz(np.trapz(Scaled_density_mesh, Phi_mesh),
Theta_mesh[:,0]))
for epsilon, prob in zip(epsilons, Total_probs):
print(epsilon, prob)
|
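A quick sanity check of the same double trapezoidal rule on a known case: a density that is
uniform per unit solid angle over the hemisphere should integrate to 1 (the mesh sizes are
arbitrary and unrelated to the real data files):

import numpy as np

phi = np.linspace(0, 2*np.pi, 201)
theta = np.linspace(0, np.pi/2, 201)
Phi, Theta = np.meshgrid(phi, theta)
Density = np.full_like(Phi, 1.0/(2*np.pi))  # uniform density per steradian
total = np.trapz(np.trapz(Density*np.sin(Theta), Phi), Theta[:, 0])
print(total)  # ~1.0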
Integrate densities to check normalization
|
Integrate densities to check normalization
|
Python
|
mit
|
jarthurgross/bloch_distribution
|
Integrate densities to check normalization
|
import argparse
import numpy as np
import h5py
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Integrate probability ' +
'densities to verify that they are ' +
'normalized')
parser.add_argument('data_filenames', metavar='files', nargs='+',
help='List of hdf5 files containing densities')
args = parser.parse_args()
data_files = [h5py.File(data_filename, 'r') for data_filename in
args.data_filenames]
epsilons = [data_file['densities'].attrs['epsilon'] for data_file in
data_files]
Density_meshes = [data_file['densities'][:] for data_file in data_files]
Phi_meshes = [data_file['Phi'][:] for data_file in data_files]
Theta_meshes = [-2*np.arccos(data_file['R'][:]/2) + np.pi for data_file in
data_files]
Total_probs = []
for Density_mesh, Phi_mesh, Theta_mesh in zip(Density_meshes, Phi_meshes,
Theta_meshes):
# Scale Density_mesh so that the integration can be thought of as on a
# rectangle rather than a hemisphere
Scaled_density_mesh = Density_mesh*np.sin(Theta_mesh)
Total_probs.append(np.trapz(np.trapz(Scaled_density_mesh, Phi_mesh),
Theta_mesh[:,0]))
for epsilon, prob in zip(epsilons, Total_probs):
print(epsilon, prob)
|
<commit_before><commit_msg>Integrate densities to check normalization<commit_after>
|
import argparse
import numpy as np
import h5py
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Integrate probability ' +
'densities to verify that they are ' +
'normalized')
parser.add_argument('data_filenames', metavar='files', nargs='+',
help='List of hdf5 files containing densities')
args = parser.parse_args()
data_files = [h5py.File(data_filename, 'r') for data_filename in
args.data_filenames]
epsilons = [data_file['densities'].attrs['epsilon'] for data_file in
data_files]
Density_meshes = [data_file['densities'][:] for data_file in data_files]
Phi_meshes = [data_file['Phi'][:] for data_file in data_files]
Theta_meshes = [-2*np.arccos(data_file['R'][:]/2) + np.pi for data_file in
data_files]
Total_probs = []
for Density_mesh, Phi_mesh, Theta_mesh in zip(Density_meshes, Phi_meshes,
Theta_meshes):
# Scale Density_mesh so that the integration can be thought of as on a
# rectangle rather than a hemisphere
Scaled_density_mesh = Density_mesh*np.sin(Theta_mesh)
Total_probs.append(np.trapz(np.trapz(Scaled_density_mesh, Phi_mesh),
Theta_mesh[:,0]))
for epsilon, prob in zip(epsilons, Total_probs):
print(epsilon, prob)
|
Integrate densities to check normalizationimport argparse
import numpy as np
import h5py
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Integrate probability ' +
'densities to verify that they are ' +
'normalized')
parser.add_argument('data_filenames', metavar='files', nargs='+',
help='List of hdf5 files containing densities')
args = parser.parse_args()
data_files = [h5py.File(data_filename, 'r') for data_filename in
args.data_filenames]
epsilons = [data_file['densities'].attrs['epsilon'] for data_file in
data_files]
Density_meshes = [data_file['densities'][:] for data_file in data_files]
Phi_meshes = [data_file['Phi'][:] for data_file in data_files]
Theta_meshes = [-2*np.arccos(data_file['R'][:]/2) + np.pi for data_file in
data_files]
Total_probs = []
for Density_mesh, Phi_mesh, Theta_mesh in zip(Density_meshes, Phi_meshes,
Theta_meshes):
# Scale Density_mesh so that the integration can be thought of as on a
# rectangle rather than a hemisphere
Scaled_density_mesh = Density_mesh*np.sin(Theta_mesh)
Total_probs.append(np.trapz(np.trapz(Scaled_density_mesh, Phi_mesh),
Theta_mesh[:,0]))
for epsilon, prob in zip(epsilons, Total_probs):
print(epsilon, prob)
|
<commit_before><commit_msg>Integrate densities to check normalization<commit_after>import argparse
import numpy as np
import h5py
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Integrate probability ' +
'densities to verify that they are ' +
'normalized')
parser.add_argument('data_filenames', metavar='files', nargs='+',
help='List of hdf5 files containing densities')
args = parser.parse_args()
data_files = [h5py.File(data_filename, 'r') for data_filename in
args.data_filenames]
epsilons = [data_file['densities'].attrs['epsilon'] for data_file in
data_files]
Density_meshes = [data_file['densities'][:] for data_file in data_files]
Phi_meshes = [data_file['Phi'][:] for data_file in data_files]
Theta_meshes = [-2*np.arccos(data_file['R'][:]/2) + np.pi for data_file in
data_files]
Total_probs = []
for Density_mesh, Phi_mesh, Theta_mesh in zip(Density_meshes, Phi_meshes,
Theta_meshes):
# Scale Density_mesh so that the integration can be thought of as on a
# rectangle rather than a hemisphere
Scaled_density_mesh = Density_mesh*np.sin(Theta_mesh)
Total_probs.append(np.trapz(np.trapz(Scaled_density_mesh, Phi_mesh),
Theta_mesh[:,0]))
for epsilon, prob in zip(epsilons, Total_probs):
print(epsilon, prob)
|
|
72a573c24d5234003b9eeb9e0cc487d174908a2e
|
typeahead_search/trie.py
|
typeahead_search/trie.py
|
"""A Trie (prefix tree) class for use in typeahead search.
Every node in the TypeaheadSearchTrie is another TypeaheadSearchTrie instance.
"""
from weakref import WeakSet
class TypeaheadSearchTrie(object):
def __init__(self):
# The children of this node. Because ordered traversals are not
# important, these are stored in a dictionary.
self.children = {}
# Data entries associated with the word stored in the path to
# this node. Stored in a WeakSet so that entries disappear
# automatically when data entries are deleted.
self.entries = WeakSet()
def add(self, word, entry):
"""Adds the given data entry to the given Trie word.
The word is created in the Trie if it doesn't already exist.
"""
if word:
self.children.setdefault(
word[0],
TypeaheadSearchTrie()
).add(word[1:], entry)
else:
self.entries.add(entry)
def search(self, word):
"""Return a set of all data entries represented by prefix `word`.
Returns an empty set if this prefix is not in the Trie.
"""
if word:
try:
return self.children[word[0]].search(word[1:])
except KeyError:
return set()
else:
return self.gather_entries()
def gather_entries(self):
"""Gather all data entries stored in this node and its children."""
        entries = set(self.entries)
        for child in self.children.itervalues():
            entries.update(child.gather_entries())
        return entries
|
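A small usage sketch. Because entries live in a WeakSet, plain strings cannot be stored (they
are not weak-referenceable), so a trivial wrapper object stands in for a real data entry, and
strong references are kept for as long as the entries should stay searchable; all names here
are illustrative:

class Entry(object):
    """Minimal weak-referenceable stand-in for a data entry."""
    def __init__(self, label):
        self.label = label

trie = TypeaheadSearchTrie()
jane, janet = Entry("user:jane"), Entry("user:janet")
trie.add("jane", jane)    # builds the j-a-n-e path and stores the entry weakly
trie.add("janet", janet)
matches = trie.search("jan")  # returns the entries for every stored word starting with "jan"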
Add a Trie for storage of data string tokens.
|
[typeahead_search] Add a Trie for storage of data string tokens.
|
Python
|
mit
|
geekofalltrades/quora-coding-challenges
|
[typeahead_search] Add a Trie for storage of data string tokens.
|
"""A Trie (prefix tree) class for use in typeahead search.
Every node in the TypeaheadSearchTrie is another TypeaheadSearchTrie instance.
"""
from weakref import WeakSet
class TypeaheadSearchTrie(object):
def __init__(self):
# The children of this node. Because ordered traversals are not
# important, these are stored in a dictionary.
self.children = {}
# Data entries associated with the word stored in the path to
# this node. Stored in a WeakSet so that entries disappear
# automatically when data entries are deleted.
self.entries = WeakSet()
def add(self, word, entry):
"""Adds the given data entry to the given Trie word.
The word is created in the Trie if it doesn't already exist.
"""
if word:
self.children.setdefault(
word[0],
TypeaheadSearchTrie()
).add(word[1:], entry)
else:
self.entries.add(entry)
def search(self, word):
"""Return a set of all data entries represented by prefix `word`.
Returns an empty set if this prefix is not in the Trie.
"""
if word:
try:
return self.children[word[0]].search(word[1:])
except KeyError:
return set()
else:
return self.gather_entries()
def gather_entries(self):
"""Gather all data entries stored in this node and its children."""
        entries = set(self.entries)
        for child in self.children.itervalues():
            entries.update(child.gather_entries())
        return entries
|
<commit_before><commit_msg>[typeahead_search] Add a Trie for storage of data string tokens.<commit_after>
|
"""A Trie (prefix tree) class for use in typeahead search.
Every node in the TypeaheadSearchTrie is another TypeaheadSearchTrie instance.
"""
from weakref import WeakSet
class TypeaheadSearchTrie(object):
def __init__(self):
# The children of this node. Because ordered traversals are not
# important, these are stored in a dictionary.
self.children = {}
# Data entries associated with the word stored in the path to
# this node. Stored in a WeakSet so that entries disappear
# automatically when data entries are deleted.
self.entries = WeakSet()
def add(self, word, entry):
"""Adds the given data entry to the given Trie word.
The word is created in the Trie if it doesn't already exist.
"""
if word:
self.children.setdefault(
word[0],
TypeaheadSearchTrie()
).add(word[1:], entry)
else:
self.entries.add(entry)
def search(self, word):
"""Return a set of all data entries represented by prefix `word`.
Returns an empty set if this prefix is not in the Trie.
"""
if word:
try:
return self.children[word[0]].search(word[1:])
except KeyError:
return set()
else:
return self.gather_entries()
def gather_entries(self):
"""Gather all data entries stored in this node and its children."""
        entries = set(self.entries)
        for child in self.children.itervalues():
            entries.update(child.gather_entries())
        return entries
|
[typeahead_search] Add a Trie for storage of data string tokens."""A Trie (prefix tree) class for use in typeahead search.
Every node in the TypeaheadSearchTrie is another TypeaheadSearchTrie instance.
"""
from weakref import WeakSet
class TypeaheadSearchTrie(object):
def __init__(self):
# The children of this node. Because ordered traversals are not
# important, these are stored in a dictionary.
self.children = {}
# Data entries associated with the word stored in the path to
# this node. Stored in a WeakSet so that entries disappear
# automatically when data entries are deleted.
self.entries = WeakSet()
def add(self, word, entry):
"""Adds the given data entry to the given Trie word.
The word is created in the Trie if it doesn't already exist.
"""
if word:
self.children.setdefault(
word[0],
TypeaheadSearchTrie()
).add(word[1:], entry)
else:
self.entries.add(entry)
def search(self, word):
"""Return a set of all data entries represented by prefix `word`.
Returns an empty set if this prefix is not in the Trie.
"""
if word:
try:
return self.children[word[0]].search(word[1:])
except KeyError:
return set()
else:
return self.gather_entries()
def gather_entries(self):
"""Gather all data entries stored in this node and its children."""
        entries = set(self.entries)
        for child in self.children.itervalues():
            entries.update(child.gather_entries())
        return entries
|
<commit_before><commit_msg>[typeahead_search] Add a Trie for storage of data string tokens.<commit_after>"""A Trie (prefix tree) class for use in typeahead search.
Every node in the TypeaheadSearchTrie is another TypeaheadSearchTrie instance.
"""
from weakref import WeakSet
class TypeaheadSearchTrie(object):
def __init__(self):
# The children of this node. Because ordered traversals are not
# important, these are stored in a dictionary.
self.children = {}
# Data entries associated with the word stored in the path to
# this node. Stored in a WeakSet so that entries disappear
# automatically when data entries are deleted.
self.entries = WeakSet()
def add(self, word, entry):
"""Adds the given data entry to the given Trie word.
The word is created in the Trie if it doesn't already exist.
"""
if word:
self.children.setdefault(
word[0],
TypeaheadSearchTrie()
).add(word[1:], entry)
else:
self.entries.add(entry)
def search(self, word):
"""Return a set of all data entries represented by prefix `word`.
Returns an empty set if this prefix is not in the Trie.
"""
if word:
try:
return self.children[word[0]].search(word[1:])
except KeyError:
return set()
else:
return self.gather_entries()
def gather_entries(self):
"""Gather all data entries stored in this node and its children."""
        entries = set(self.entries)
        for child in self.children.itervalues():
            entries.update(child.gather_entries())
        return entries
|
|
01c74cfea946eac098a0e144380314cd4676cf2f
|
analysis/04-lowpass.py
|
analysis/04-lowpass.py
|
#!/usr/bin/env python
from __future__ import division
import climate
import lmj.cubes
import pandas as pd
import scipy.signal
logging = climate.get_logger('lowpass')
def lowpass(df, freq=10., order=4):
'''Filter marker data using a butterworth low-pass filter.
This method alters the data in `df` in-place.
Parameters
----------
freq : float, optional
Use a butterworth filter with this cutoff frequency. Defaults to
10Hz.
order : int, optional
Order of the butterworth filter. Defaults to 4.
'''
nyquist = 1 / (2 * pd.Series(df.index).diff().mean())
assert 0 < freq < nyquist
passes = 2 # filtfilt makes two passes over the data.
correct = (2 ** (1 / passes) - 1) ** 0.25
b, a = scipy.signal.butter(order / passes, (freq / correct) / nyquist)
for c in df.columns:
if c.startswith('marker') and c[-1] in 'xyz':
df.loc[:, c] = scipy.signal.filtfilt(b, a, df[c])
@climate.annotate(
root='load data files from this directory tree',
output='save smoothed data files to this directory tree',
pattern=('process only trials matching this pattern', 'option'),
freq=('lowpass filter at N Hz', 'option', None, float),
)
def main(root, output, pattern='*', freq=None):
for t in lmj.cubes.Experiment(root).trials_matching(pattern):
lowpass(t.df, freq)
t.save(t.root.replace(root, output))
if __name__ == '__main__':
climate.call(main)
|
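A self-contained sketch of the same filtering idea on synthetic data, using the lowpass()
helper defined above (the column name and sampling rate are made up; this only exercises the
Butterworth/filtfilt combination, not the Experiment loader):

import numpy as np
import pandas as pd

t = np.arange(0, 5, 0.01)  # 5 seconds sampled at 100 Hz
df = pd.DataFrame(
    {'marker00-x': np.sin(2*np.pi*1.0*t) + 0.2*np.sin(2*np.pi*30.0*t)},
    index=t)
lowpass(df, freq=10.)  # attenuates the 30 Hz component in place, keeps the 1 Hz one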
Split lowpass filtering into another script.
|
Split lowpass filtering into another script.
|
Python
|
mit
|
lmjohns3/cube-experiment,lmjohns3/cube-experiment,lmjohns3/cube-experiment
|
Split lowpass filtering into another script.
|
#!/usr/bin/env python
from __future__ import division
import climate
import lmj.cubes
import pandas as pd
import scipy.signal
logging = climate.get_logger('lowpass')
def lowpass(df, freq=10., order=4):
'''Filter marker data using a butterworth low-pass filter.
This method alters the data in `df` in-place.
Parameters
----------
freq : float, optional
Use a butterworth filter with this cutoff frequency. Defaults to
10Hz.
order : int, optional
Order of the butterworth filter. Defaults to 4.
'''
nyquist = 1 / (2 * pd.Series(df.index).diff().mean())
assert 0 < freq < nyquist
passes = 2 # filtfilt makes two passes over the data.
correct = (2 ** (1 / passes) - 1) ** 0.25
b, a = scipy.signal.butter(order / passes, (freq / correct) / nyquist)
for c in df.columns:
if c.startswith('marker') and c[-1] in 'xyz':
df.loc[:, c] = scipy.signal.filtfilt(b, a, df[c])
@climate.annotate(
root='load data files from this directory tree',
output='save smoothed data files to this directory tree',
pattern=('process only trials matching this pattern', 'option'),
freq=('lowpass filter at N Hz', 'option', None, float),
)
def main(root, output, pattern='*', freq=None):
for t in lmj.cubes.Experiment(root).trials_matching(pattern):
lowpass(t.df, freq)
t.save(t.root.replace(root, output))
if __name__ == '__main__':
climate.call(main)
|
<commit_before><commit_msg>Split lowpass filtering into another script.<commit_after>
|
#!/usr/bin/env python
from __future__ import division
import climate
import lmj.cubes
import pandas as pd
import scipy.signal
logging = climate.get_logger('lowpass')
def lowpass(df, freq=10., order=4):
'''Filter marker data using a butterworth low-pass filter.
This method alters the data in `df` in-place.
Parameters
----------
freq : float, optional
Use a butterworth filter with this cutoff frequency. Defaults to
10Hz.
order : int, optional
Order of the butterworth filter. Defaults to 4.
'''
nyquist = 1 / (2 * pd.Series(df.index).diff().mean())
assert 0 < freq < nyquist
passes = 2 # filtfilt makes two passes over the data.
correct = (2 ** (1 / passes) - 1) ** 0.25
b, a = scipy.signal.butter(order / passes, (freq / correct) / nyquist)
for c in df.columns:
if c.startswith('marker') and c[-1] in 'xyz':
df.loc[:, c] = scipy.signal.filtfilt(b, a, df[c])
@climate.annotate(
root='load data files from this directory tree',
output='save smoothed data files to this directory tree',
pattern=('process only trials matching this pattern', 'option'),
freq=('lowpass filter at N Hz', 'option', None, float),
)
def main(root, output, pattern='*', freq=None):
for t in lmj.cubes.Experiment(root).trials_matching(pattern):
lowpass(t.df, freq)
t.save(t.root.replace(root, output))
if __name__ == '__main__':
climate.call(main)
|
Split lowpass filtering into another script.#!/usr/bin/env python
from __future__ import division
import climate
import lmj.cubes
import pandas as pd
import scipy.signal
logging = climate.get_logger('lowpass')
def lowpass(df, freq=10., order=4):
'''Filter marker data using a butterworth low-pass filter.
This method alters the data in `df` in-place.
Parameters
----------
freq : float, optional
Use a butterworth filter with this cutoff frequency. Defaults to
10Hz.
order : int, optional
Order of the butterworth filter. Defaults to 4.
'''
nyquist = 1 / (2 * pd.Series(df.index).diff().mean())
assert 0 < freq < nyquist
passes = 2 # filtfilt makes two passes over the data.
correct = (2 ** (1 / passes) - 1) ** 0.25
b, a = scipy.signal.butter(order / passes, (freq / correct) / nyquist)
for c in df.columns:
if c.startswith('marker') and c[-1] in 'xyz':
df.loc[:, c] = scipy.signal.filtfilt(b, a, df[c])
@climate.annotate(
root='load data files from this directory tree',
output='save smoothed data files to this directory tree',
pattern=('process only trials matching this pattern', 'option'),
freq=('lowpass filter at N Hz', 'option', None, float),
)
def main(root, output, pattern='*', freq=None):
for t in lmj.cubes.Experiment(root).trials_matching(pattern):
lowpass(t.df, freq)
t.save(t.root.replace(root, output))
if __name__ == '__main__':
climate.call(main)
|
<commit_before><commit_msg>Split lowpass filtering into another script.<commit_after>#!/usr/bin/env python
from __future__ import division
import climate
import lmj.cubes
import pandas as pd
import scipy.signal
logging = climate.get_logger('lowpass')
def lowpass(df, freq=10., order=4):
'''Filter marker data using a butterworth low-pass filter.
This method alters the data in `df` in-place.
Parameters
----------
freq : float, optional
Use a butterworth filter with this cutoff frequency. Defaults to
10Hz.
order : int, optional
Order of the butterworth filter. Defaults to 4.
'''
nyquist = 1 / (2 * pd.Series(df.index).diff().mean())
assert 0 < freq < nyquist
passes = 2 # filtfilt makes two passes over the data.
correct = (2 ** (1 / passes) - 1) ** 0.25
b, a = scipy.signal.butter(order / passes, (freq / correct) / nyquist)
for c in df.columns:
if c.startswith('marker') and c[-1] in 'xyz':
df.loc[:, c] = scipy.signal.filtfilt(b, a, df[c])
@climate.annotate(
root='load data files from this directory tree',
output='save smoothed data files to this directory tree',
pattern=('process only trials matching this pattern', 'option'),
freq=('lowpass filter at N Hz', 'option', None, float),
)
def main(root, output, pattern='*', freq=None):
for t in lmj.cubes.Experiment(root).trials_matching(pattern):
lowpass(t.df, freq)
t.save(t.root.replace(root, output))
if __name__ == '__main__':
climate.call(main)
|
|
6a9b6f0227b37d9c4da424c25d20a2b7e9397a9f
|
alembic/versions/3800f47ba771_publication_date_not_nullable.py
|
alembic/versions/3800f47ba771_publication_date_not_nullable.py
|
"""Make the `publication_date` column required.
Revision ID: 3800f47ba771
Revises: 17c1af634026
Create Date: 2012-12-13 21:14:19.363112
"""
# revision identifiers, used by Alembic.
revision = '3800f47ba771'
down_revision = '17c1af634026'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.alter_column('tip', 'publication_date', nullable=False)
def downgrade():
op.alter_column('tip', 'publication_date', nullable=True)
|
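If any rows still had NULL publication dates, a migration like this one would typically
backfill them before tightening the constraint. The sketch below is hypothetical: the fallback
date and the minimal table definition are assumptions, and the real project populated the
column with a separate script before running this revision:

from datetime import date
from alembic import op
import sqlalchemy as sa

def upgrade():
    tip = sa.table('tip', sa.column('publication_date', sa.Date))
    # Assumed fallback value for any rows that were never dated.
    op.execute(
        tip.update()
           .where(tip.c.publication_date.is_(None))
           .values(publication_date=date(2012, 1, 1))
    )
    op.alter_column('tip', 'publication_date', nullable=False)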
Make `publication_date` column not nullable.
|
Make `publication_date` column not nullable.
BEFORE: It was nullable (an artifact of us having added it after the
table was initially created and populated).
AFTER: It is not nullable (because we just ran a script to populate the
new column).
|
Python
|
isc
|
gthank/pytips,gthank/pytips,gthank/pytips,gthank/pytips
|
Make `publication_date` column not nullable.
BEFORE: It was nullable (an artifact of us having added it after the
table was initially created and populated).
AFTER: It is not nullable (because we just ran a script to populate the
new column).
|
"""Make the `publication_date` column required.
Revision ID: 3800f47ba771
Revises: 17c1af634026
Create Date: 2012-12-13 21:14:19.363112
"""
# revision identifiers, used by Alembic.
revision = '3800f47ba771'
down_revision = '17c1af634026'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.alter_column('tip', 'publication_date', nullable=False)
def downgrade():
op.alter_column('tip', 'publication_date', nullable=True)
|
<commit_before><commit_msg>Make `publication_date` column not nullable.
BEFORE: It was nullable (an artifact of us having added it after the
table was initially created and populated).
AFTER: It is not nullable (because we just ran a script to populate the
new column).<commit_after>
|
"""Make the `publication_date` column required.
Revision ID: 3800f47ba771
Revises: 17c1af634026
Create Date: 2012-12-13 21:14:19.363112
"""
# revision identifiers, used by Alembic.
revision = '3800f47ba771'
down_revision = '17c1af634026'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.alter_column('tip', 'publication_date', nullable=False)
def downgrade():
op.alter_column('tip', 'publication_date', nullable=True)
|
Make `publication_date` column not nullable.
BEFORE: It was nullable (an artifact of us having added it after the
table was initially created and populated).
AFTER: It is not nullable (because we just ran a script to populate the
new column)."""Make the `publication_date` column required.
Revision ID: 3800f47ba771
Revises: 17c1af634026
Create Date: 2012-12-13 21:14:19.363112
"""
# revision identifiers, used by Alembic.
revision = '3800f47ba771'
down_revision = '17c1af634026'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.alter_column('tip', 'publication_date', nullable=False)
def downgrade():
op.alter_column('tip', 'publication_date', nullable=True)
|
<commit_before><commit_msg>Make `publication_date` column not nullable.
BEFORE: It was nullable (an artifact of us having added it after the
table was initially created and populated).
AFTER: It is not nullable (because we just ran a script to populate the
new column).<commit_after>"""Make the `publication_date` column required.
Revision ID: 3800f47ba771
Revises: 17c1af634026
Create Date: 2012-12-13 21:14:19.363112
"""
# revision identifiers, used by Alembic.
revision = '3800f47ba771'
down_revision = '17c1af634026'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.alter_column('tip', 'publication_date', nullable=False)
def downgrade():
op.alter_column('tip', 'publication_date', nullable=True)
|
|
6b1be6883ead01cc226226499644adb7e99542f8
|
Experiments/evaluate_model.py
|
Experiments/evaluate_model.py
|
# import os
import sys
import tensorflow as tf
# sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '../..'))
from Models.low_level_sharing_four_hidden import LowLevelSharingModel
from utils.data_utils.labels import Labels
from utils.data_utils.data_handler import fetch_data
class EvaluateModel(object):
def __init__(self, task_ids):
self.x_test = None
self.y_test = {}
self.input_dimension = 0
self.output_dimensions = {}
self.task_ids = task_ids
self.model = None
self.sess = None
def load_model(self, model_file, model_class):
"""
Loads the model from the given checkpoint file.
:param model_file: The checkpoint file from which the model should be loaded.
:param model_class: The :class:`Model` class or any of its child classes.
"""
self.sess = tf.Session()
self.model = model_class(self.task_ids, self.input_dimension, self.output_dimensions)
self.model.create_model()
saver = tf.train.Saver()
saver.restore(self.sess, model_file)
sys.stderr.write("Model " + model_file + " loaded.\n")
def load_data(self):
"""
Loads the test dataset.
"""
_, _, self.x_test, _, _, self.y_test = fetch_data(self.task_ids)
self.input_dimension = self.x_test.shape[1]
self.train_samples = self.x_test.shape[0]
self.output_dimensions = {task_id: self.y_test[task_id].shape[1] for task_id in self.task_ids}
def evaluate_model(self):
"""
Returns a dictionary of errors indexed by task identifiers where each element denotes the error for that
task on the test set.
:return dictionary of test errors
"""
feed_dict = dict()
feed_dict[self.model.get_layer('input')] = self.x_test
for id_ in self.task_ids:
feed_dict[self.model.get_layer(id_ + '-ground-truth')] = self.y_test[id_]
errors = {}
for task_id in self.task_ids:
errors[task_id] = self.model.get_layer(task_id + '-loss').eval(session=self.sess, feed_dict=feed_dict)
return errors
if __name__ == '__main__':
model_file = sys.argv[1]
model_class = LowLevelSharingModel
task_ids = [Labels.hotness.value, Labels.duration.value, Labels.year.value]
evaluation = EvaluateModel(task_ids)
evaluation.load_data()
evaluation.load_model(model_file, model_class)
errors = evaluation.evaluate_model()
sys.stderr.write(str(errors) + "\n")
|
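One way a caller might summarise the dictionary returned by evaluate_model(); the error values
below are placeholders, not measured results:

import sys

errors = {'hotness': 0.042, 'duration': 0.118, 'year': 0.073}  # illustrative values only
mean_error = sum(errors.values()) / len(errors)
for task_id in sorted(errors):
    sys.stderr.write("{0:<10s} {1:.4f}\n".format(task_id, errors[task_id]))
sys.stderr.write("mean       {0:.4f}\n".format(mean_error))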
Add functionality to load and test a saved model
|
Add functionality to load and test a saved model
|
Python
|
mit
|
harpribot/representation-music,harpribot/representation-music
|
Add functionality to load and test a saved model
|
# import os
import sys
import tensorflow as tf
# sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '../..'))
from Models.low_level_sharing_four_hidden import LowLevelSharingModel
from utils.data_utils.labels import Labels
from utils.data_utils.data_handler import fetch_data
class EvaluateModel(object):
def __init__(self, task_ids):
self.x_test = None
self.y_test = {}
self.input_dimension = 0
self.output_dimensions = {}
self.task_ids = task_ids
self.model = None
self.sess = None
def load_model(self, model_file, model_class):
"""
Loads the model from the given checkpoint file.
:param model_file: The checkpoint file from which the model should be loaded.
:param model_class: The :class:`Model` class or any of its child classes.
"""
self.sess = tf.Session()
self.model = model_class(self.task_ids, self.input_dimension, self.output_dimensions)
self.model.create_model()
saver = tf.train.Saver()
saver.restore(self.sess, model_file)
sys.stderr.write("Model " + model_file + " loaded.\n")
def load_data(self):
"""
Loads the test dataset.
"""
_, _, self.x_test, _, _, self.y_test = fetch_data(self.task_ids)
self.input_dimension = self.x_test.shape[1]
self.train_samples = self.x_test.shape[0]
self.output_dimensions = {task_id: self.y_test[task_id].shape[1] for task_id in self.task_ids}
def evaluate_model(self):
"""
Returns a dictionary of errors indexed by task identifiers where each element denotes the error for that
task on the test set.
:return dictionary of test errors
"""
feed_dict = dict()
feed_dict[self.model.get_layer('input')] = self.x_test
for id_ in self.task_ids:
feed_dict[self.model.get_layer(id_ + '-ground-truth')] = self.y_test[id_]
errors = {}
for task_id in self.task_ids:
errors[task_id] = self.model.get_layer(task_id + '-loss').eval(session=self.sess, feed_dict=feed_dict)
return errors
if __name__ == '__main__':
model_file = sys.argv[1]
model_class = LowLevelSharingModel
task_ids = [Labels.hotness.value, Labels.duration.value, Labels.year.value]
evaluation = EvaluateModel(task_ids)
evaluation.load_data()
evaluation.load_model(model_file, model_class)
errors = evaluation.evaluate_model()
sys.stderr.write(str(errors) + "\n")
|
<commit_before><commit_msg>Add functionality to load and test a saved model<commit_after>
|
# import os
import sys
import tensorflow as tf
# sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '../..'))
from Models.low_level_sharing_four_hidden import LowLevelSharingModel
from utils.data_utils.labels import Labels
from utils.data_utils.data_handler import fetch_data
class EvaluateModel(object):
def __init__(self, task_ids):
self.x_test = None
self.y_test = {}
self.input_dimension = 0
self.output_dimensions = {}
self.task_ids = task_ids
self.model = None
self.sess = None
def load_model(self, model_file, model_class):
"""
Loads the model from the given checkpoint file.
:param model_file: The checkpoint file from which the model should be loaded.
:param model_class: The :class:`Model` class or any of its child classes.
"""
self.sess = tf.Session()
self.model = model_class(self.task_ids, self.input_dimension, self.output_dimensions)
self.model.create_model()
saver = tf.train.Saver()
saver.restore(self.sess, model_file)
sys.stderr.write("Model " + model_file + " loaded.\n")
def load_data(self):
"""
Loads the test dataset.
"""
_, _, self.x_test, _, _, self.y_test = fetch_data(self.task_ids)
self.input_dimension = self.x_test.shape[1]
self.train_samples = self.x_test.shape[0]
self.output_dimensions = {task_id: self.y_test[task_id].shape[1] for task_id in self.task_ids}
def evaluate_model(self):
"""
Returns a dictionary of errors indexed by task identifiers where each element denotes the error for that
task on the test set.
:return dictionary of test errors
"""
feed_dict = dict()
feed_dict[self.model.get_layer('input')] = self.x_test
for id_ in self.task_ids:
feed_dict[self.model.get_layer(id_ + '-ground-truth')] = self.y_test[id_]
errors = {}
for task_id in self.task_ids:
errors[task_id] = self.model.get_layer(task_id + '-loss').eval(session=self.sess, feed_dict=feed_dict)
return errors
if __name__ == '__main__':
model_file = sys.argv[1]
model_class = LowLevelSharingModel
task_ids = [Labels.hotness.value, Labels.duration.value, Labels.year.value]
evaluation = EvaluateModel(task_ids)
evaluation.load_data()
evaluation.load_model(model_file, model_class)
errors = evaluation.evaluate_model()
sys.stderr.write(str(errors) + "\n")
|
Add functionality to load and test a saved model# import os
import sys
import tensorflow as tf
# sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '../..'))
from Models.low_level_sharing_four_hidden import LowLevelSharingModel
from utils.data_utils.labels import Labels
from utils.data_utils.data_handler import fetch_data
class EvaluateModel(object):
def __init__(self, task_ids):
self.x_test = None
self.y_test = {}
self.input_dimension = 0
self.output_dimensions = {}
self.task_ids = task_ids
self.model = None
self.sess = None
def load_model(self, model_file, model_class):
"""
Loads the model from the given checkpoint file.
:param model_file: The checkpoint file from which the model should be loaded.
:param model_class: The :class:`Model` class or any of its child classes.
"""
self.sess = tf.Session()
self.model = model_class(self.task_ids, self.input_dimension, self.output_dimensions)
self.model.create_model()
saver = tf.train.Saver()
saver.restore(self.sess, model_file)
sys.stderr.write("Model " + model_file + " loaded.\n")
def load_data(self):
"""
Loads the test dataset.
"""
_, _, self.x_test, _, _, self.y_test = fetch_data(self.task_ids)
self.input_dimension = self.x_test.shape[1]
self.train_samples = self.x_test.shape[0]
self.output_dimensions = {task_id: self.y_test[task_id].shape[1] for task_id in self.task_ids}
def evaluate_model(self):
"""
Returns a dictionary of errors indexed by task identifiers where each element denotes the error for that
task on the test set.
:return dictionary of test errors
"""
feed_dict = dict()
feed_dict[self.model.get_layer('input')] = self.x_test
for id_ in self.task_ids:
feed_dict[self.model.get_layer(id_ + '-ground-truth')] = self.y_test[id_]
errors = {}
for task_id in self.task_ids:
errors[task_id] = self.model.get_layer(task_id + '-loss').eval(session=self.sess, feed_dict=feed_dict)
return errors
if __name__ == '__main__':
model_file = sys.argv[1]
model_class = LowLevelSharingModel
task_ids = [Labels.hotness.value, Labels.duration.value, Labels.year.value]
evaluation = EvaluateModel(task_ids)
evaluation.load_data()
evaluation.load_model(model_file, model_class)
errors = evaluation.evaluate_model()
sys.stderr.write(str(errors) + "\n")
|
<commit_before><commit_msg>Add functionality to load and test a saved model<commit_after># import os
import sys
import tensorflow as tf
# sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '../..'))
from Models.low_level_sharing_four_hidden import LowLevelSharingModel
from utils.data_utils.labels import Labels
from utils.data_utils.data_handler import fetch_data
class EvaluateModel(object):
def __init__(self, task_ids):
self.x_test = None
self.y_test = {}
self.input_dimension = 0
self.output_dimensions = {}
self.task_ids = task_ids
self.model = None
self.sess = None
def load_model(self, model_file, model_class):
"""
Loads the model from the given checkpoint file.
:param model_file: The checkpoint file from which the model should be loaded.
:param model_class: The :class:`Model` class or any of its child classes.
"""
self.sess = tf.Session()
self.model = model_class(self.task_ids, self.input_dimension, self.output_dimensions)
self.model.create_model()
saver = tf.train.Saver()
saver.restore(self.sess, model_file)
sys.stderr.write("Model " + model_file + " loaded.\n")
def load_data(self):
"""
Loads the test dataset.
"""
_, _, self.x_test, _, _, self.y_test = fetch_data(self.task_ids)
self.input_dimension = self.x_test.shape[1]
self.train_samples = self.x_test.shape[0]
self.output_dimensions = {task_id: self.y_test[task_id].shape[1] for task_id in self.task_ids}
def evaluate_model(self):
"""
Returns a dictionary of errors indexed by task identifiers where each element denotes the error for that
task on the test set.
:return dictionary of test errors
"""
feed_dict = dict()
feed_dict[self.model.get_layer('input')] = self.x_test
for id_ in self.task_ids:
feed_dict[self.model.get_layer(id_ + '-ground-truth')] = self.y_test[id_]
errors = {}
for task_id in self.task_ids:
errors[task_id] = self.model.get_layer(task_id + '-loss').eval(session=self.sess, feed_dict=feed_dict)
return errors
if __name__ == '__main__':
model_file = sys.argv[1]
model_class = LowLevelSharingModel
task_ids = [Labels.hotness.value, Labels.duration.value, Labels.year.value]
evaluation = EvaluateModel(task_ids)
evaluation.load_data()
evaluation.load_model(model_file, model_class)
errors = evaluation.evaluate_model()
sys.stderr.write(str(errors) + "\n")
|
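A note on the evaluation pattern above: the class rebuilds the graph, restores the variables from the checkpoint, and evaluates each per-task loss tensor through a feed dict. A minimal, self-contained sketch of that restore-then-eval flow in TensorFlow 1.x follows; the tensor names, shapes and checkpoint path are illustrative placeholders, not taken from the repository.

import numpy as np
import tensorflow as tf

# Rebuild a tiny graph whose variables match the saved checkpoint.
x = tf.placeholder(tf.float32, [None, 4], name='input')
y = tf.placeholder(tf.float32, [None, 1], name='ground-truth')
pred = tf.layers.dense(x, 1)
loss = tf.losses.mean_squared_error(y, pred)

saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, 'model.ckpt')   # hypothetical checkpoint path
    err = loss.eval(session=sess,
                    feed_dict={x: np.zeros((8, 4), np.float32),
                               y: np.zeros((8, 1), np.float32)})
    print(err)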
|
3501462ebafa15b19ef436231a5a0d9e3b5d430a
|
indra/ontology/virtual_ontology.py
|
indra/ontology/virtual_ontology.py
|
import requests
from .ontology_graph import IndraOntology
class VirtualOntology(IndraOntology):
def __init__(self, url, ontology='bio'):
super().__init__()
self.url = url
self.ontology = ontology
def initialize(self):
self._initialized = True
def _rel(self, ns, id, rel_types, direction):
url = self.url + '/%s_rel' % direction
res = requests.get(url,
json={'ns': ns,
'id': id,
'rel_types': rel_types,
'ontology': self.ontology})
return res.json()
def child_rel(self, ns, id, rel_types):
return self._rel(ns, id, rel_types, 'child')
def parent_rel(self, ns, id, rel_types):
return self._rel(ns, id, rel_types, 'parent')
def get_node_property(self, ns, id, property):
url = self.url + '/get_node_property'
res = requests.get(url,
json={'ns': ns,
'id': id,
'property': property,
'ontology': self.ontology})
return res.json()
|
Add first implementation of virtual ontology
|
Add first implementation of virtual ontology
|
Python
|
bsd-2-clause
|
johnbachman/indra,johnbachman/belpy,sorgerlab/indra,bgyori/indra,sorgerlab/belpy,johnbachman/belpy,johnbachman/belpy,sorgerlab/indra,bgyori/indra,sorgerlab/belpy,bgyori/indra,sorgerlab/belpy,sorgerlab/indra,johnbachman/indra,johnbachman/indra
|
Add first implementation of virtual ontology
|
import requests
from .ontology_graph import IndraOntology
class VirtualOntology(IndraOntology):
def __init__(self, url, ontology='bio'):
super().__init__()
self.url = url
self.ontology = ontology
def initialize(self):
self._initialized = True
def _rel(self, ns, id, rel_types, direction):
url = self.url + '/%s_rel' % direction
res = requests.get(url,
json={'ns': ns,
'id': id,
'rel_types': rel_types,
'ontology': self.ontology})
return res.json()
def child_rel(self, ns, id, rel_types):
return self._rel(ns, id, rel_types, 'child')
def parent_rel(self, ns, id, rel_types):
return self._rel(ns, id, rel_types, 'parent')
def get_node_property(self, ns, id, property):
url = self.url + '/get_node_property'
res = requests.get(url,
json={'ns': ns,
'id': id,
'property': property,
'ontology': self.ontology})
return res.json()
|
<commit_before><commit_msg>Add first implementation of virtual ontology<commit_after>
|
import requests
from .ontology_graph import IndraOntology
class VirtualOntology(IndraOntology):
def __init__(self, url, ontology='bio'):
super().__init__()
self.url = url
self.ontology = ontology
def initialize(self):
self._initialized = True
def _rel(self, ns, id, rel_types, direction):
url = self.url + '/%s_rel' % direction
res = requests.get(url,
json={'ns': ns,
'id': id,
'rel_types': rel_types,
'ontology': self.ontology})
return res.json()
def child_rel(self, ns, id, rel_types):
return self._rel(ns, id, rel_types, 'child')
def parent_rel(self, ns, id, rel_types):
return self._rel(ns, id, rel_types, 'parent')
def get_node_property(self, ns, id, property):
url = self.url + '/get_node_property'
res = requests.get(url,
json={'ns': ns,
'id': id,
'property': property,
'ontology': self.ontology})
return res.json()
|
Add first implementation of virtual ontologyimport requests
from .ontology_graph import IndraOntology
class VirtualOntology(IndraOntology):
def __init__(self, url, ontology='bio'):
super().__init__()
self.url = url
self.ontology = ontology
def initialize(self):
self._initialized = True
def _rel(self, ns, id, rel_types, direction):
url = self.url + '/%s_rel' % direction
res = requests.get(url,
json={'ns': ns,
'id': id,
'rel_types': rel_types,
'ontology': self.ontology})
return res.json()
def child_rel(self, ns, id, rel_types):
return self._rel(ns, id, rel_types, 'child')
def parent_rel(self, ns, id, rel_types):
return self._rel(ns, id, rel_types, 'parent')
def get_node_property(self, ns, id, property):
url = self.url + '/get_node_property'
res = requests.get(url,
json={'ns': ns,
'id': id,
'property': property,
'ontology': self.ontology})
return res.json()
|
<commit_before><commit_msg>Add first implementation of virtual ontology<commit_after>import requests
from .ontology_graph import IndraOntology
class VirtualOntology(IndraOntology):
def __init__(self, url, ontology='bio'):
super().__init__()
self.url = url
self.ontology = ontology
def initialize(self):
self._initialized = True
def _rel(self, ns, id, rel_types, direction):
url = self.url + '/%s_rel' % direction
res = requests.get(url,
json={'ns': ns,
'id': id,
'rel_types': rel_types,
'ontology': self.ontology})
return res.json()
def child_rel(self, ns, id, rel_types):
return self._rel(ns, id, rel_types, 'child')
def parent_rel(self, ns, id, rel_types):
return self._rel(ns, id, rel_types, 'parent')
def get_node_property(self, ns, id, property):
url = self.url + '/get_node_property'
res = requests.get(url,
json={'ns': ns,
'id': id,
'property': property,
'ontology': self.ontology})
return res.json()
|
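A usage sketch for the class above, purely illustrative: the service URL and port, the import path, and the HGNC/FPLX identifiers are placeholders, and a compatible ontology REST service must be running for the calls to return anything.

from indra.ontology.virtual_ontology import VirtualOntology   # assumed import path

ont = VirtualOntology(url='http://localhost:8082', ontology='bio')
ont.initialize()

# Ask the remote service for relatives of a couple of entries.
parents = ont.parent_rel('HGNC', '6840', ['isa', 'partof'])
children = ont.child_rel('FPLX', 'MAPK', ['isa', 'partof'])
print(parents, children)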
|
3283c9ac640112ab7a26ec3f82e051394ca72ecf
|
PRESUBMIT.py
|
PRESUBMIT.py
|
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for catapult.
See https://www.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def GetPreferredTryMasters(project, change):
return {
'tryserver.client.catapult': {
'Catapult Linux Tryserver': set(['defaulttests']),
'Catapult Mac Tryserver': set(['defaulttests']),
'Catapult Windows Tryserver': set(['defaulttests']),
}
}
|
Add catapult presubmit with list of trybots.
|
Add catapult presubmit with list of trybots.
This should make 'git cl try' work correctly
Review URL: https://codereview.chromium.org/1162013002
|
Python
|
bsd-3-clause
|
catapult-project/catapult-csm,dstockwell/catapult,sahiljain/catapult,catapult-project/catapult,zeptonaut/catapult,catapult-project/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult-csm,modulexcite/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult,catapult-project/catapult-csm,dstockwell/catapult,catapult-project/catapult,sahiljain/catapult,SummerLW/Perf-Insight-Report,sahiljain/catapult,benschmaus/catapult,sahiljain/catapult,modulexcite/catapult,benschmaus/catapult,0x90sled/catapult,catapult-project/catapult,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,catapult-project/catapult,catapult-project/catapult,scottmcmaster/catapult,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,SummerLW/Perf-Insight-Report,modulexcite/catapult,danbeam/catapult,0x90sled/catapult,benschmaus/catapult,dstockwell/catapult,benschmaus/catapult,zeptonaut/catapult,zeptonaut/catapult,danbeam/catapult,sahiljain/catapult,benschmaus/catapult,danbeam/catapult,catapult-project/catapult-csm,catapult-project/catapult-csm,danbeam/catapult,benschmaus/catapult,0x90sled/catapult,scottmcmaster/catapult,scottmcmaster/catapult,benschmaus/catapult,dstockwell/catapult,sahiljain/catapult
|
Add catapult presubmit with list of trybots.
This should make 'git cl try' work correctly
Review URL: https://codereview.chromium.org/1162013002
|
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for catapult.
See https://www.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def GetPreferredTryMasters(project, change):
return {
'tryserver.client.catapult': {
'Catapult Linux Tryserver': set(['defaulttests']),
'Catapult Mac Tryserver': set(['defaulttests']),
'Catapult Windows Tryserver': set(['defaulttests']),
}
}
|
<commit_before><commit_msg>Add catapult presubmit with list of trybots.
This should make 'git cl try' work correctly
Review URL: https://codereview.chromium.org/1162013002<commit_after>
|
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for catapult.
See https://www.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def GetPreferredTryMasters(project, change):
return {
'tryserver.client.catapult': {
'Catapult Linux Tryserver': set(['defaulttests']),
'Catapult Mac Tryserver': set(['defaulttests']),
'Catapult Windows Tryserver': set(['defaulttests']),
}
}
|
Add catapult presubmit with list of trybots.
This should make 'git cl try' work correctly
Review URL: https://codereview.chromium.org/1162013002# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for catapult.
See https://www.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def GetPreferredTryMasters(project, change):
return {
'tryserver.client.catapult': {
'Catapult Linux Tryserver': set(['defaulttests']),
'Catapult Mac Tryserver': set(['defaulttests']),
'Catapult Windows Tryserver': set(['defaulttests']),
}
}
|
<commit_before><commit_msg>Add catapult presubmit with list of trybots.
This should make 'git cl try' work correctly
Review URL: https://codereview.chromium.org/1162013002<commit_after># Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for catapult.
See https://www.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def GetPreferredTryMasters(project, change):
return {
'tryserver.client.catapult': {
'Catapult Linux Tryserver': set(['defaulttests']),
'Catapult Mac Tryserver': set(['defaulttests']),
'Catapult Windows Tryserver': set(['defaulttests']),
}
}
|
|
05a5599fd0cf08cf33c8a90673e8c71b4c1d6c36
|
slides/ComputationalGeometry/convex-hull.py
|
slides/ComputationalGeometry/convex-hull.py
|
import math
class Vector:
def __init__(self, x, y):
self.x = x
self.y = y
# add theta, so we can sort by it later
self.theta = math.atan2(y, x)
def add(self, other):
return Vector(self.x + other.x, self.y + other.y)
def negate(self):
return Vector(-self.x, -self.y)
def subtract(self, other):
return self.add(other.negate())
def dot(self, other):
return self.x * other.x + self.y * other.y
def magnitude(self):
return (self.dot(self)) ** 0.5
def cross(self, other):
return self.x * other.y - self.y * other.x
def __repr__(self):
# update format, so we can just print the vectors
return "({0},{1})".format(self.x, self.y)
def parse_point(raw_string):
x,y = map(int, raw_string.split(','))
return Vector(x, y)
def turn_direction(p1, p2, p3):
d1 = p2.subtract(p1)
d2 = p3.subtract(p1)
return d1.cross(d2)
def convex_hull(points):
# first get the point with min y value
# first, sort the points by their angle theta
sorted_points = sorted(points, key=lambda P : P.theta)
N = len(points)
hull = sorted_points
for i in range(0, N + 1):
current_point = sorted_points[i % N]
previous_point = sorted_points[(i + N - 1) % N]
next_point = sorted_points[(i + 1) % N]
print(current_point, turn_direction(previous_point, current_point, next_point))
if turn_direction(previous_point, current_point, next_point) >= 0:
hull.append(current_point)
return hull
point_count = int(input().strip())
points = []
for i in range(point_count):
points.append(parse_point(input()))
hull = convex_hull(points)
# Resort the hull, so that we get the
print(hull)
|
Test implementation of convex hull
|
Test implementation of convex hull
|
Python
|
mit
|
tylerburnham42/ProgrammingTeam,MercerBinaryBears/Slides,tylerburnham42/ProgrammingTeam,MercerBinaryBears/Slides,MercerBinaryBears/Slides,MercerBinaryBears/Slides,MercerBinaryBears/Slides,tylerburnham42/ProgrammingTeam
|
Test implementation of convex hull
|
import math
class Vector:
def __init__(self, x, y):
self.x = x
self.y = y
# add theta, so we can sort by it later
self.theta = math.atan2(y, x)
def add(self, other):
return Vector(self.x + other.x, self.y + other.y)
def negate(self):
return Vector(-self.x, -self.y)
def subtract(self, other):
return self.add(other.negate())
def dot(self, other):
return self.x * other.x + self.y * other.y
def magnitude(self):
return (self.dot(self)) ** 0.5
def cross(self, other):
return self.x * other.y - self.y * other.x
def __repr__(self):
# update format, so we can just print the vectors
return "({0},{1})".format(self.x, self.y)
def parse_point(raw_string):
x,y = map(int, raw_string.split(','))
return Vector(x, y)
def turn_direction(p1, p2, p3):
d1 = p2.subtract(p1)
d2 = p3.subtract(p1)
return d1.cross(d2)
def convex_hull(points):
# first get the point with min y value
# first, sort the points by their angle theta
sorted_points = sorted(points, key=lambda P : P.theta)
N = len(points)
hull = sorted_points
for i in range(0, N + 1):
current_point = sorted_points[i % N]
previous_point = sorted_points[(i + N - 1) % N]
next_point = sorted_points[(i + 1) % N]
print(current_point, turn_direction(previous_point, current_point, next_point))
if turn_direction(previous_point, current_point, next_point) >= 0:
hull.append(current_point)
return hull
point_count = int(input().strip())
points = []
for i in range(point_count):
points.append(parse_point(input()))
hull = convex_hull(points)
# Resort the hull, so that we get the
print(hull)
|
<commit_before><commit_msg>Test implementation of convex hull<commit_after>
|
import math
class Vector:
def __init__(self, x, y):
self.x = x
self.y = y
# add theta, so we can sort by it later
self.theta = math.atan2(y, x)
def add(self, other):
return Vector(self.x + other.x, self.y + other.y)
def negate(self):
return Vector(-self.x, -self.y)
def subtract(self, other):
return self.add(other.negate())
def dot(self, other):
return self.x * other.x + self.y * other.y
def magnitude(self):
return (self.dot(self)) ** 0.5
def cross(self, other):
return self.x * other.y - self.y * other.x
def __repr__(self):
# update format, so we can just print the vectors
return "({0},{1})".format(self.x, self.y)
def parse_point(raw_string):
x,y = map(int, raw_string.split(','))
return Vector(x, y)
def turn_direction(p1, p2, p3):
d1 = p2.subtract(p1)
d2 = p3.subtract(p1)
return d1.cross(d2)
def convex_hull(points):
# first get the point with min y value
# first, sort the points by their angle theta
sorted_points = sorted(points, key=lambda P : P.theta)
N = len(points)
hull = sorted_points
for i in range(0, N + 1):
current_point = sorted_points[i % N]
previous_point = sorted_points[(i + N - 1) % N]
next_point = sorted_points[(i + 1) % N]
print(current_point, turn_direction(previous_point, current_point, next_point))
if turn_direction(previous_point, current_point, next_point) >= 0:
hull.append(current_point)
return hull
point_count = int(input().strip())
points = []
for i in range(point_count):
points.append(parse_point(input()))
hull = convex_hull(points)
# Resort the hull, so that we get the
print(hull)
|
Test implementation of convex hullimport math
class Vector:
def __init__(self, x, y):
self.x = x
self.y = y
# add theta, so we can sort by it later
self.theta = math.atan2(y, x)
def add(self, other):
return Vector(self.x + other.x, self.y + other.y)
def negate(self):
return Vector(-self.x, -self.y)
def subtract(self, other):
return self.add(other.negate())
def dot(self, other):
return self.x * other.x + self.y * other.y
def magnitude(self):
return (self.dot(self)) ** 0.5
def cross(self, other):
return self.x * other.y - self.y * other.x
def __repr__(self):
# update format, so we can just print the vectors
return "({0},{1})".format(self.x, self.y)
def parse_point(raw_string):
x,y = map(int, raw_string.split(','))
return Vector(x, y)
def turn_direction(p1, p2, p3):
d1 = p2.subtract(p1)
d2 = p3.subtract(p1)
return d1.cross(d2)
def convex_hull(points):
# first get the point with min y value
# first, sort the points by their angle theta
sorted_points = sorted(points, key=lambda P : P.theta)
N = len(points)
hull = sorted_points
for i in range(0, N + 1):
current_point = sorted_points[i % N]
previous_point = sorted_points[(i + N - 1) % N]
next_point = sorted_points[(i + 1) % N]
print(current_point, turn_direction(previous_point, current_point, next_point))
if turn_direction(previous_point, current_point, next_point) >= 0:
hull.append(current_point)
return hull
point_count = int(input().strip())
points = []
for i in range(point_count):
points.append(parse_point(input()))
hull = convex_hull(points)
# Resort the hull, so that we get the
print(hull)
|
<commit_before><commit_msg>Test implementation of convex hull<commit_after>import math
class Vector:
def __init__(self, x, y):
self.x = x
self.y = y
# add theta, so we can sort by it later
self.theta = math.atan2(y, x)
def add(self, other):
return Vector(self.x + other.x, self.y + other.y)
def negate(self):
return Vector(-self.x, -self.y)
def subtract(self, other):
return self.add(other.negate())
def dot(self, other):
return self.x * other.x + self.y * other.y
def magnitude(self):
return (self.dot(self)) ** 0.5
def cross(self, other):
return self.x * other.y - self.y * other.x
def __repr__(self):
# update format, so we can just print the vectors
return "({0},{1})".format(self.x, self.y)
def parse_point(raw_string):
x,y = map(int, raw_string.split(','))
return Vector(x, y)
def turn_direction(p1, p2, p3):
d1 = p2.subtract(p1)
d2 = p3.subtract(p1)
return d1.cross(d2)
def convex_hull(points):
# first get the point with min y value
# first, sort the points by their angle theta
sorted_points = sorted(points, key=lambda P : P.theta)
N = len(points)
hull = sorted_points
for i in range(0, N + 1):
current_point = sorted_points[i % N]
previous_point = sorted_points[(i + N - 1) % N]
next_point = sorted_points[(i + 1) % N]
print(current_point, turn_direction(previous_point, current_point, next_point))
if turn_direction(previous_point, current_point, next_point) >= 0:
hull.append(current_point)
return hull
point_count = int(input().strip())
points = []
for i in range(point_count):
points.append(parse_point(input()))
hull = convex_hull(points)
# Resort the hull, so that we get the
print(hull)
|
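Two details stand out in the sketch above: hull = sorted_points aliases the list that is then appended to inside the loop, and the angles are measured about the origin rather than about the lowest point, so the output is not yet a correct hull (consistent with the "test implementation" commit message). For comparison, a compact monotone-chain hull over the same Vector objects could look like the following; this is an alternative algorithm, not the author's code.

def cross3(o, a, b):
    # z-component of (a - o) x (b - o); positive means a counter-clockwise turn
    return (a.x - o.x) * (b.y - o.y) - (a.y - o.y) * (b.x - o.x)

def monotone_chain(points):
    pts = sorted(points, key=lambda p: (p.x, p.y))
    if len(pts) <= 2:
        return pts
    lower, upper = [], []
    for p in pts:
        while len(lower) >= 2 and cross3(lower[-2], lower[-1], p) <= 0:
            lower.pop()
        lower.append(p)
    for p in reversed(pts):
        while len(upper) >= 2 and cross3(upper[-2], upper[-1], p) <= 0:
            upper.pop()
        upper.append(p)
    return lower[:-1] + upper[:-1]   # counter-clockwise hull without duplicated endpoints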
|
e0597427d93f2260dfce35cfdd3e2714037fb0fb
|
src/spatial_discretizations/FourierChebyshevSpatialDiscretization.py
|
src/spatial_discretizations/FourierChebyshevSpatialDiscretization.py
|
import numpy as np
from numpy.fft import fft, ifft, fftshift, fft2, ifft2
from scipy.linalg import toeplitz
class FourierChebyshevSpatialDiscretization:
def __init__(self, config):
self.length_x = config['length_x']
self.length_y = config['length_y']
self.num_points_x = config['num_points_x']
self.num_points_y = config['num_points_y']
# self.__build_grid__()
# self.__build_wavenumbers__()
# self.__build_filter__()
def cheb_dif(self, N, M):
I = np.eye(N)
n1 = np.floor(N/2)
n2 = np.ceil(N/2)
k = np.array([np.arange(0, N)]).T
th = k*np.pi/(N-1)
# Compute Chebyshev points.
vec = np.arange(N-1, 1-N-1, -2)
x = np.sin(np.pi*vec/(2*(N-1)))
T = np.tile(th/2, (1, N)) # Like repmat(th/2, 1, N) for 2nd order tensors.
Tt = T.T
DX = 2*np.sin(Tt+T)*np.sin(Tt-T)
DX = np.vstack([DX[0:n1, :], -np.flipud(np.fliplr(DX[0:n2, :]))])
for i in range(0,N):
DX[i,i] = 1.0
C = toeplitz((-1.0)**k)
C[0,:] = C[0,:]*2.0
C[N-1,:] = C[N-1,:]*2.0
C[:,0] = C[:,0] / 2.0
C[:,N-1] = C[:,N-1] / 2.0
Z = 1.0 / DX
for i in range(0,N):
Z[i,i] = 0.0
D = np.eye(N)
DM = np.zeros([N, N, M])
for ell in range(1,M+1):
D = ell*Z*(C*np.tile(np.array([np.diag(D)]).T,(1,N)) - D)
diag = -np.sum(D,1)
for i in range(0,N):
D[i,i] = diag[i]
DM[:,:,ell-1] = D
return (x,DM)
|
Implement cheb_dif for getting 1D chebyshev grids and differentiation matrices.
|
Implement cheb_dif for getting 1D chebyshev grids and differentiation matrices.
|
Python
|
mit
|
dsteinmo/pysws,dsteinmo/pysws
|
Implement cheb_dif for getting 1D chebyshev grids and differentiation matrices.
|
import numpy as np
from numpy.fft import fft, ifft, fftshift, fft2, ifft2
from scipy.linalg import toeplitz
class FourierChebyshevSpatialDiscretization:
def __init__(self, config):
self.length_x = config['length_x']
self.length_y = config['length_y']
self.num_points_x = config['num_points_x']
self.num_points_y = config['num_points_y']
# self.__build_grid__()
# self.__build_wavenumbers__()
# self.__build_filter__()
def cheb_dif(self, N, M):
I = np.eye(N)
n1 = np.floor(N/2)
n2 = np.ceil(N/2)
k = np.array([np.arange(0, N)]).T
th = k*np.pi/(N-1)
# Compute Chebyshev points.
vec = np.arange(N-1, 1-N-1, -2)
x = np.sin(np.pi*vec/(2*(N-1)))
T = np.tile(th/2, (1, N)) # Like repmat(th/2, 1, N) for 2nd order tensors.
Tt = T.T
DX = 2*np.sin(Tt+T)*np.sin(Tt-T)
DX = np.vstack([DX[0:n1, :], -np.flipud(np.fliplr(DX[0:n2, :]))])
for i in range(0,N):
DX[i,i] = 1.0
C = toeplitz((-1.0)**k)
C[0,:] = C[0,:]*2.0
C[N-1,:] = C[N-1,:]*2.0
C[:,0] = C[:,0] / 2.0
C[:,N-1] = C[:,N-1] / 2.0
Z = 1.0 / DX
for i in range(0,N):
Z[i,i] = 0.0
D = np.eye(N)
DM = np.zeros([N, N, M])
for ell in range(1,M+1):
D = ell*Z*(C*np.tile(np.array([np.diag(D)]).T,(1,N)) - D)
diag = -np.sum(D,1)
for i in range(0,N):
D[i,i] = diag[i]
DM[:,:,ell-1] = D
return (x,DM)
|
<commit_before><commit_msg>Implement cheb_dif for getting 1D chebyshev grids and differentiation matrices.<commit_after>
|
import numpy as np
from numpy.fft import fft, ifft, fftshift, fft2, ifft2
from scipy.linalg import toeplitz
class FourierChebyshevSpatialDiscretization:
def __init__(self, config):
self.length_x = config['length_x']
self.length_y = config['length_y']
self.num_points_x = config['num_points_x']
self.num_points_y = config['num_points_y']
# self.__build_grid__()
# self.__build_wavenumbers__()
# self.__build_filter__()
def cheb_dif(self, N, M):
I = np.eye(N)
n1 = np.floor(N/2)
n2 = np.ceil(N/2)
k = np.array([np.arange(0, N)]).T
th = k*np.pi/(N-1)
# Compute Chebyshev points.
vec = np.arange(N-1, 1-N-1, -2)
x = np.sin(np.pi*vec/(2*(N-1)))
T = np.tile(th/2, (1, N)) # Like repmat(th/2, 1, N) for 2nd order tensors.
Tt = T.T
DX = 2*np.sin(Tt+T)*np.sin(Tt-T)
DX = np.vstack([DX[0:n1, :], -np.flipud(np.fliplr(DX[0:n2, :]))])
for i in range(0,N):
DX[i,i] = 1.0
C = toeplitz((-1.0)**k)
C[0,:] = C[0,:]*2.0
C[N-1,:] = C[N-1,:]*2.0
C[:,0] = C[:,0] / 2.0
C[:,N-1] = C[:,N-1] / 2.0
Z = 1.0 / DX
for i in range(0,N):
Z[i,i] = 0.0
D = np.eye(N)
DM = np.zeros([N, N, M])
for ell in range(1,M+1):
D = ell*Z*(C*np.tile(np.array([np.diag(D)]).T,(1,N)) - D)
diag = -np.sum(D,1)
for i in range(0,N):
D[i,i] = diag[i]
DM[:,:,ell-1] = D
return (x,DM)
|
Implement cheb_dif for getting 1D chebyshev grids and differentiation matrices.import numpy as np
from numpy.fft import fft, ifft, fftshift, fft2, ifft2
from scipy.linalg import toeplitz
class FourierChebyshevSpatialDiscretization:
def __init__(self, config):
self.length_x = config['length_x']
self.length_y = config['length_y']
self.num_points_x = config['num_points_x']
self.num_points_y = config['num_points_y']
# self.__build_grid__()
# self.__build_wavenumbers__()
# self.__build_filter__()
def cheb_dif(self, N, M):
I = np.eye(N)
n1 = np.floor(N/2)
n2 = np.ceil(N/2)
k = np.array([np.arange(0, N)]).T
th = k*np.pi/(N-1)
# Compute Chebyshev points.
vec = np.arange(N-1, 1-N-1, -2)
x = np.sin(np.pi*vec/(2*(N-1)))
T = np.tile(th/2, (1, N)) # Like repmat(th/2, 1, N) for 2nd order tensors.
Tt = T.T
DX = 2*np.sin(Tt+T)*np.sin(Tt-T)
DX = np.vstack([DX[0:n1, :], -np.flipud(np.fliplr(DX[0:n2, :]))])
for i in range(0,N):
DX[i,i] = 1.0
C = toeplitz((-1.0)**k)
C[0,:] = C[0,:]*2.0
C[N-1,:] = C[N-1,:]*2.0
C[:,0] = C[:,0] / 2.0
C[:,N-1] = C[:,N-1] / 2.0
Z = 1.0 / DX
for i in range(0,N):
Z[i,i] = 0.0
D = np.eye(N)
DM = np.zeros([N, N, M])
for ell in range(1,M+1):
D = ell*Z*(C*np.tile(np.array([np.diag(D)]).T,(1,N)) - D)
diag = -np.sum(D,1)
for i in range(0,N):
D[i,i] = diag[i]
DM[:,:,ell-1] = D
return (x,DM)
|
<commit_before><commit_msg>Implement cheb_dif for getting 1D chebyshev grids and differentiation matrices.<commit_after>import numpy as np
from numpy.fft import fft, ifft, fftshift, fft2, ifft2
from scipy.linalg import toeplitz
class FourierChebyshevSpatialDiscretization:
def __init__(self, config):
self.length_x = config['length_x']
self.length_y = config['length_y']
self.num_points_x = config['num_points_x']
self.num_points_y = config['num_points_y']
# self.__build_grid__()
# self.__build_wavenumbers__()
# self.__build_filter__()
def cheb_dif(self, N, M):
I = np.eye(N)
n1 = np.floor(N/2)
n2 = np.ceil(N/2)
k = np.array([np.arange(0, N)]).T
th = k*np.pi/(N-1)
# Compute Chebyshev points.
vec = np.arange(N-1, 1-N-1, -2)
x = np.sin(np.pi*vec/(2*(N-1)))
T = np.tile(th/2, (1, N)) # Like repmat(th/2, 1, N) for 2nd order tensors.
Tt = T.T
DX = 2*np.sin(Tt+T)*np.sin(Tt-T)
DX = np.vstack([DX[0:n1, :], -np.flipud(np.fliplr(DX[0:n2, :]))])
for i in range(0,N):
DX[i,i] = 1.0
C = toeplitz((-1.0)**k)
C[0,:] = C[0,:]*2.0
C[N-1,:] = C[N-1,:]*2.0
C[:,0] = C[:,0] / 2.0
C[:,N-1] = C[:,N-1] / 2.0
Z = 1.0 / DX
for i in range(0,N):
Z[i,i] = 0.0
D = np.eye(N)
DM = np.zeros([N, N, M])
for ell in range(1,M+1):
D = ell*Z*(C*np.tile(np.array([np.diag(D)]).T,(1,N)) - D)
diag = -np.sum(D,1)
for i in range(0,N):
D[i,i] = diag[i]
DM[:,:,ell-1] = D
return (x,DM)
|
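A small usage sketch for cheb_dif above. One caveat when running it on current NumPy/Python 3: n1 and n2 come from np.floor/np.ceil and are floats, so they may need an explicit int(...) cast before being used as slice bounds. With that in mind, differentiating a test function on the returned Chebyshev grid looks roughly like this (the config values are arbitrary):

import numpy as np

config = {'length_x': 1.0, 'length_y': 1.0, 'num_points_x': 16, 'num_points_y': 16}
disc = FourierChebyshevSpatialDiscretization(config)

N = 16
x, DM = disc.cheb_dif(N, 2)        # Chebyshev points plus 1st/2nd derivative matrices
f = np.sin(np.pi * x)
df = DM[:, :, 0].dot(f)            # numerical derivative of f on the grid
print(np.max(np.abs(df - np.pi * np.cos(np.pi * x))))   # should be close to zero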
|
dcd02e0a7b626111bc0fc344df9f6fff2de832ae
|
examples/missingmethod.py
|
examples/missingmethod.py
|
#!/usr/bin/python3
"""Send an invalid request with missing method member."""
from simpleclient import send_data_to_socket
EXAMPLE = {
"params": {
"filter": {
'store': 'catalog',
'schema': 'product',
'id': '704e418e-682d-4ade-99be-710f2208102e'
}
}
}
def main():
"""Send the example to the simple client."""
send_data_to_socket(EXAMPLE)
if __name__ == '__main__':
main()
|
Add a (bad) example of missing method.
|
Add a (bad) example of missing method.
|
Python
|
lgpl-2.1
|
zeth/ainod,zeth/ainod,zeth/ainod
|
Add a (bad) example of missing method.
|
#!/usr/bin/python3
"""Send an invalid request with missing method member."""
from simpleclient import send_data_to_socket
EXAMPLE = {
"params": {
"filter": {
'store': 'catalog',
'schema': 'product',
'id': '704e418e-682d-4ade-99be-710f2208102e'
}
}
}
def main():
"""Send the example to the simple client."""
send_data_to_socket(EXAMPLE)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a (bad) example of missing method.<commit_after>
|
#!/usr/bin/python3
"""Send an invalid request with missing method member."""
from simpleclient import send_data_to_socket
EXAMPLE = {
"params": {
"filter": {
'store': 'catalog',
'schema': 'product',
'id': '704e418e-682d-4ade-99be-710f2208102e'
}
}
}
def main():
"""Send the example to the simple client."""
send_data_to_socket(EXAMPLE)
if __name__ == '__main__':
main()
|
Add a (bad) example of missing method.#!/usr/bin/python3
"""Send an invalid request with missing method member."""
from simpleclient import send_data_to_socket
EXAMPLE = {
"params": {
"filter": {
'store': 'catalog',
'schema': 'product',
'id': '704e418e-682d-4ade-99be-710f2208102e'
}
}
}
def main():
"""Send the example to the simple client."""
send_data_to_socket(EXAMPLE)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a (bad) example of missing method.<commit_after>#!/usr/bin/python3
"""Send an invalid request with missing method member."""
from simpleclient import send_data_to_socket
EXAMPLE = {
"params": {
"filter": {
'store': 'catalog',
'schema': 'product',
'id': '704e418e-682d-4ade-99be-710f2208102e'
}
}
}
def main():
"""Send the example to the simple client."""
send_data_to_socket(EXAMPLE)
if __name__ == '__main__':
main()
|
|
d692508e9c6fba847f3bb179bbfd3684e6ebcef0
|
py/shuffle-an-array.py
|
py/shuffle-an-array.py
|
from random import randint
class Solution(object):
def __init__(self, nums):
"""
:type nums: List[int]
"""
self.nums = nums
def reset(self):
"""
Resets the array to its original configuration and return it.
:rtype: List[int]
"""
return self.nums
def shuffle(self):
"""
Returns a random shuffling of the array.
:rtype: List[int]
"""
out = self.nums[:]
n = len(self.nums)
for i in xrange(n - 1):
r = randint(i, n - 1)
if r != i:
out[r], out[i] = out[i], out[r]
return out
# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.reset()
# param_2 = obj.shuffle()
|
Add py solution for 384. Shuffle an Array
|
Add py solution for 384. Shuffle an Array
384. Shuffle an Array: https://leetcode.com/problems/shuffle-an-array/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 384. Shuffle an Array
384. Shuffle an Array: https://leetcode.com/problems/shuffle-an-array/
|
from random import randint
class Solution(object):
def __init__(self, nums):
"""
:type nums: List[int]
"""
self.nums = nums
def reset(self):
"""
Resets the array to its original configuration and return it.
:rtype: List[int]
"""
return self.nums
def shuffle(self):
"""
Returns a random shuffling of the array.
:rtype: List[int]
"""
out = self.nums[:]
n = len(self.nums)
for i in xrange(n - 1):
r = randint(i, n - 1)
if r != i:
out[r], out[i] = out[i], out[r]
return out
# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.reset()
# param_2 = obj.shuffle()
|
<commit_before><commit_msg>Add py solution for 384. Shuffle an Array
384. Shuffle an Array: https://leetcode.com/problems/shuffle-an-array/<commit_after>
|
from random import randint
class Solution(object):
def __init__(self, nums):
"""
:type nums: List[int]
"""
self.nums = nums
def reset(self):
"""
Resets the array to its original configuration and return it.
:rtype: List[int]
"""
return self.nums
def shuffle(self):
"""
Returns a random shuffling of the array.
:rtype: List[int]
"""
out = self.nums[:]
n = len(self.nums)
for i in xrange(n - 1):
r = randint(i, n - 1)
if r != i:
out[r], out[i] = out[i], out[r]
return out
# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.reset()
# param_2 = obj.shuffle()
|
Add py solution for 384. Shuffle an Array
384. Shuffle an Array: https://leetcode.com/problems/shuffle-an-array/from random import randint
class Solution(object):
def __init__(self, nums):
"""
:type nums: List[int]
"""
self.nums = nums
def reset(self):
"""
Resets the array to its original configuration and return it.
:rtype: List[int]
"""
return self.nums
def shuffle(self):
"""
Returns a random shuffling of the array.
:rtype: List[int]
"""
out = self.nums[:]
n = len(self.nums)
for i in xrange(n - 1):
r = randint(i, n - 1)
if r != i:
out[r], out[i] = out[i], out[r]
return out
# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.reset()
# param_2 = obj.shuffle()
|
<commit_before><commit_msg>Add py solution for 384. Shuffle an Array
384. Shuffle an Array: https://leetcode.com/problems/shuffle-an-array/<commit_after>from random import randint
class Solution(object):
def __init__(self, nums):
"""
:type nums: List[int]
"""
self.nums = nums
def reset(self):
"""
Resets the array to its original configuration and return it.
:rtype: List[int]
"""
return self.nums
def shuffle(self):
"""
Returns a random shuffling of the array.
:rtype: List[int]
"""
out = self.nums[:]
n = len(self.nums)
for i in xrange(n - 1):
r = randint(i, n - 1)
if r != i:
out[r], out[i] = out[i], out[r]
return out
# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.reset()
# param_2 = obj.shuffle()
|
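The loop above is the Fisher-Yates shuffle: position i is swapped with a uniformly chosen index in [i, n-1], which makes every permutation equally likely, while reset simply returns the untouched original list (shuffle works on a copy). A usage sketch, noting that the xrange call targets Python 2:

obj = Solution([1, 2, 3, 4, 5])
print(obj.shuffle())   # e.g. [3, 1, 5, 2, 4], a uniformly random permutation
print(obj.reset())     # [1, 2, 3, 4, 5]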
|
f13045b5f933078225b89405a786c14da34d0af5
|
scripts/clamav.py
|
scripts/clamav.py
|
import pyclamd
from libmproxy.flow import decoded
#http://www.eicar.org/85-0-Download.html
clamd = pyclamd.ClamdUnixSocket()
try:
# test if server is reachable
clamd.ping()
except AttributeError, pyclamd.ConnectionError:
# if failed, test for network socket
clamd = pyclamd.ClamdNetworkSocket()
clamd.ping() #fails instantly if we dont get a proper connection
print "ClamAV running: %s" % clamd.version()
def response(context, flow):
with decoded(flow.response):
clamd_result = clamd.scan_stream(flow.response.content)
if clamd_result:
print "Virus detected: ",clamd_result
flow.response.content = "HoneyProxy has detected a virus and stopped this page from loading: %s" % str(clamd_result["stream"])
flow.response.headers["Content-Length"] = [str(len(flow.response.content))]
flow.response.headers["Content-Type"] = ["text/html"]
del flow.response.headers["Content-Disposition"]
del flow.response.headers["Content-Encoding"]
flow.response.code = 403
flow.response.msg = "Forbidden"
|
Add ClamAV script to analyze HTTPS traffic for viruses
|
Add ClamAV script to analyze HTTPS traffic for viruses
|
Python
|
mit
|
mhils/HoneyProxy,mhils/HoneyProxy,mhils/HoneyProxy,mhils/HoneyProxy
|
Add ClamAV script to analyze HTTPS traffic for viruses
|
import pyclamd
from libmproxy.flow import decoded
#http://www.eicar.org/85-0-Download.html
clamd = pyclamd.ClamdUnixSocket()
try:
# test if server is reachable
clamd.ping()
except AttributeError, pyclamd.ConnectionError:
# if failed, test for network socket
clamd = pyclamd.ClamdNetworkSocket()
clamd.ping() #fails instantly if we dont get a proper connection
print "ClamAV running: %s" % clamd.version()
def response(context, flow):
with decoded(flow.response):
clamd_result = clamd.scan_stream(flow.response.content)
if clamd_result:
print "Virus detected: ",clamd_result
flow.response.content = "HoneyProxy has detected a virus and stopped this page from loading: %s" % str(clamd_result["stream"])
flow.response.headers["Content-Length"] = [str(len(flow.response.content))]
flow.response.headers["Content-Type"] = ["text/html"]
del flow.response.headers["Content-Disposition"]
del flow.response.headers["Content-Encoding"]
flow.response.code = 403
flow.response.msg = "Forbidden"
|
<commit_before><commit_msg>Add ClamAV script to analyze HTTPS traffic for viruses<commit_after>
|
import pyclamd
from libmproxy.flow import decoded
#http://www.eicar.org/85-0-Download.html
clamd = pyclamd.ClamdUnixSocket()
try:
# test if server is reachable
clamd.ping()
except AttributeError, pyclamd.ConnectionError:
# if failed, test for network socket
clamd = pyclamd.ClamdNetworkSocket()
clamd.ping() #fails instantly if we dont get a proper connection
print "ClamAV running: %s" % clamd.version()
def response(context, flow):
with decoded(flow.response):
clamd_result = clamd.scan_stream(flow.response.content)
if clamd_result:
print "Virus detected: ",clamd_result
flow.response.content = "HoneyProxy has detected a virus and stopped this page from loading: %s" % str(clamd_result["stream"])
flow.response.headers["Content-Length"] = [str(len(flow.response.content))]
flow.response.headers["Content-Type"] = ["text/html"]
del flow.response.headers["Content-Disposition"]
del flow.response.headers["Content-Encoding"]
flow.response.code = 403
flow.response.msg = "Forbidden"
|
Add ClamAV script to analyze HTTPS traffic for virusesimport pyclamd
from libmproxy.flow import decoded
#http://www.eicar.org/85-0-Download.html
clamd = pyclamd.ClamdUnixSocket()
try:
# test if server is reachable
clamd.ping()
except AttributeError, pyclamd.ConnectionError:
# if failed, test for network socket
clamd = pyclamd.ClamdNetworkSocket()
clamd.ping() #fails instantly if we dont get a proper connection
print "ClamAV running: %s" % clamd.version()
def response(context, flow):
with decoded(flow.response):
clamd_result = clamd.scan_stream(flow.response.content)
if clamd_result:
print "Virus detected: ",clamd_result
flow.response.content = "HoneyProxy has detected a virus and stopped this page from loading: %s" % str(clamd_result["stream"])
flow.response.headers["Content-Length"] = [str(len(flow.response.content))]
flow.response.headers["Content-Type"] = ["text/html"]
del flow.response.headers["Content-Disposition"]
del flow.response.headers["Content-Encoding"]
flow.response.code = 403
flow.response.msg = "Forbidden"
|
<commit_before><commit_msg>Add ClamAV script to analyze HTTPS traffic for viruses<commit_after>import pyclamd
from libmproxy.flow import decoded
#http://www.eicar.org/85-0-Download.html
clamd = pyclamd.ClamdUnixSocket()
try:
# test if server is reachable
clamd.ping()
except AttributeError, pyclamd.ConnectionError:
# if failed, test for network socket
clamd = pyclamd.ClamdNetworkSocket()
clamd.ping() #fails instantly if we dont get a proper connection
print "ClamAV running: %s" % clamd.version()
def response(context, flow):
with decoded(flow.response):
clamd_result = clamd.scan_stream(flow.response.content)
if clamd_result:
print "Virus detected: ",clamd_result
flow.response.content = "HoneyProxy has detected a virus and stopped this page from loading: %s" % str(clamd_result["stream"])
flow.response.headers["Content-Length"] = [str(len(flow.response.content))]
flow.response.headers["Content-Type"] = ["text/html"]
del flow.response.headers["Content-Disposition"]
del flow.response.headers["Content-Encoding"]
flow.response.code = 403
flow.response.msg = "Forbidden"
|
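One side observation on the connection fallback above (a note about Python 2 syntax, not a change to the commit): except AttributeError, pyclamd.ConnectionError: catches only AttributeError and binds the caught exception to the name pyclamd.ConnectionError. Catching either exception type requires a tuple, roughly:

try:
    clamd.ping()
except (AttributeError, pyclamd.ConnectionError):
    # fall back to the network socket if the Unix socket is unavailable
    clamd = pyclamd.ClamdNetworkSocket()
    clamd.ping()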
|
95ceeb0af4e549e0d211b4e1ba6157d26ad5e44d
|
sync_scheduler.py
|
sync_scheduler.py
|
from tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
queueing_at = datetime.utcnow()
users = db.users.find(
{
"NextSynchronization": {"$lte": datetime.utcnow()}
},
{
"_id": True,
"SynchronizationHostRestriction": True
},
read_preference=ReadPreference.PRIMARY
)
scheduled_ids = set()
for user in users:
producer.publish({"user_id": str(user["_id"]), "queued_at": queueing_at.isoformat()}, routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
scheduled_ids.add(user["_id"])
print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
db.users.update({"_id": {"$in": list(scheduled_ids)}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True)
time.sleep(1)
|
from tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
from tapiriik.settings import MONGO_FULL_WRITE_CONCERN
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
queueing_at = datetime.utcnow()
users = db.users.find(
{
"NextSynchronization": {"$lte": datetime.utcnow()}
},
{
"_id": True,
"SynchronizationHostRestriction": True
},
read_preference=ReadPreference.PRIMARY
)
scheduled_ids = [x["_id"] for x in users]
db.users.update({"_id": {"$in": scheduled_ids}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True, w=MONGO_FULL_WRITE_CONCERN)
for user in users:
producer.publish({"user_id": str(user["_id"]), "queued_at": queueing_at.isoformat()}, routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
time.sleep(1)
|
Fix race between MQ and mongo setting QueuedAt
|
Fix race between MQ and mongo setting QueuedAt
|
Python
|
apache-2.0
|
cgourlay/tapiriik,cheatos101/tapiriik,cmgrote/tapiriik,abhijit86k/tapiriik,dmschreiber/tapiriik,cpfair/tapiriik,abhijit86k/tapiriik,cpfair/tapiriik,mjnbike/tapiriik,abs0/tapiriik,gavioto/tapiriik,mjnbike/tapiriik,cheatos101/tapiriik,brunoflores/tapiriik,marxin/tapiriik,brunoflores/tapiriik,dlenski/tapiriik,abs0/tapiriik,dmschreiber/tapiriik,dmschreiber/tapiriik,abs0/tapiriik,cheatos101/tapiriik,mduggan/tapiriik,mjnbike/tapiriik,abhijit86k/tapiriik,gavioto/tapiriik,marxin/tapiriik,cpfair/tapiriik,cheatos101/tapiriik,marxin/tapiriik,olamy/tapiriik,cmgrote/tapiriik,campbellr/tapiriik,brunoflores/tapiriik,abhijit86k/tapiriik,mduggan/tapiriik,olamy/tapiriik,olamy/tapiriik,olamy/tapiriik,cmgrote/tapiriik,mduggan/tapiriik,mduggan/tapiriik,abs0/tapiriik,cgourlay/tapiriik,niosus/tapiriik,cgourlay/tapiriik,campbellr/tapiriik,mjnbike/tapiriik,brunoflores/tapiriik,dlenski/tapiriik,niosus/tapiriik,gavioto/tapiriik,marxin/tapiriik,cpfair/tapiriik,cgourlay/tapiriik,campbellr/tapiriik,cmgrote/tapiriik,dlenski/tapiriik,campbellr/tapiriik,dmschreiber/tapiriik,niosus/tapiriik,dlenski/tapiriik,gavioto/tapiriik,niosus/tapiriik
|
from tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
queueing_at = datetime.utcnow()
users = db.users.find(
{
"NextSynchronization": {"$lte": datetime.utcnow()}
},
{
"_id": True,
"SynchronizationHostRestriction": True
},
read_preference=ReadPreference.PRIMARY
)
scheduled_ids = set()
for user in users:
producer.publish({"user_id": str(user["_id"]), "queued_at": queueing_at.isoformat()}, routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
scheduled_ids.add(user["_id"])
print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
db.users.update({"_id": {"$in": list(scheduled_ids)}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True)
time.sleep(1)
Fix race between MQ and mongo setting QueuedAt
|
from tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
from tapiriik.settings import MONGO_FULL_WRITE_CONCERN
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
queueing_at = datetime.utcnow()
users = db.users.find(
{
"NextSynchronization": {"$lte": datetime.utcnow()}
},
{
"_id": True,
"SynchronizationHostRestriction": True
},
read_preference=ReadPreference.PRIMARY
)
scheduled_ids = [x["_id"] for x in users]
db.users.update({"_id": {"$in": scheduled_ids}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True, w=MONGO_FULL_WRITE_CONCERN)
for user in users:
producer.publish({"user_id": str(user["_id"]), "queued_at": queueing_at.isoformat()}, routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
time.sleep(1)
|
<commit_before>from tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
queueing_at = datetime.utcnow()
users = db.users.find(
{
"NextSynchronization": {"$lte": datetime.utcnow()}
},
{
"_id": True,
"SynchronizationHostRestriction": True
},
read_preference=ReadPreference.PRIMARY
)
scheduled_ids = set()
for user in users:
producer.publish({"user_id": str(user["_id"]), "queued_at": queueing_at.isoformat()}, routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
scheduled_ids.add(user["_id"])
print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
db.users.update({"_id": {"$in": list(scheduled_ids)}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True)
time.sleep(1)
<commit_msg>Fix race between MQ and mongo setting QueuedAt<commit_after>
|
from tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
from tapiriik.settings import MONGO_FULL_WRITE_CONCERN
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
queueing_at = datetime.utcnow()
users = db.users.find(
{
"NextSynchronization": {"$lte": datetime.utcnow()}
},
{
"_id": True,
"SynchronizationHostRestriction": True
},
read_preference=ReadPreference.PRIMARY
)
scheduled_ids = [x["_id"] for x in users]
db.users.update({"_id": {"$in": scheduled_ids}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True, w=MONGO_FULL_WRITE_CONCERN)
for user in users:
producer.publish({"user_id": str(user["_id"]), "queued_at": queueing_at.isoformat()}, routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
time.sleep(1)
|
from tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
queueing_at = datetime.utcnow()
users = db.users.find(
{
"NextSynchronization": {"$lte": datetime.utcnow()}
},
{
"_id": True,
"SynchronizationHostRestriction": True
},
read_preference=ReadPreference.PRIMARY
)
scheduled_ids = set()
for user in users:
producer.publish({"user_id": str(user["_id"]), "queued_at": queueing_at.isoformat()}, routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
scheduled_ids.add(user["_id"])
print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
db.users.update({"_id": {"$in": list(scheduled_ids)}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True)
time.sleep(1)
Fix race between MQ and mongo setting QueuedAtfrom tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
from tapiriik.settings import MONGO_FULL_WRITE_CONCERN
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
queueing_at = datetime.utcnow()
users = db.users.find(
{
"NextSynchronization": {"$lte": datetime.utcnow()}
},
{
"_id": True,
"SynchronizationHostRestriction": True
},
read_preference=ReadPreference.PRIMARY
)
scheduled_ids = [x["_id"] for x in users]
db.users.update({"_id": {"$in": scheduled_ids}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True, w=MONGO_FULL_WRITE_CONCERN)
for user in users:
producer.publish({"user_id": str(user["_id"]), "queued_at": queueing_at.isoformat()}, routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
time.sleep(1)
|
<commit_before>from tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
queueing_at = datetime.utcnow()
users = db.users.find(
{
"NextSynchronization": {"$lte": datetime.utcnow()}
},
{
"_id": True,
"SynchronizationHostRestriction": True
},
read_preference=ReadPreference.PRIMARY
)
scheduled_ids = set()
for user in users:
producer.publish({"user_id": str(user["_id"]), "queued_at": queueing_at.isoformat()}, routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
scheduled_ids.add(user["_id"])
print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
db.users.update({"_id": {"$in": list(scheduled_ids)}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True)
time.sleep(1)
<commit_msg>Fix race between MQ and mongo setting QueuedAt<commit_after>from tapiriik.database import db
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
from tapiriik.settings import MONGO_FULL_WRITE_CONCERN
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
queueing_at = datetime.utcnow()
users = db.users.find(
{
"NextSynchronization": {"$lte": datetime.utcnow()}
},
{
"_id": True,
"SynchronizationHostRestriction": True
},
read_preference=ReadPreference.PRIMARY
)
scheduled_ids = [x["_id"] for x in users]
db.users.update({"_id": {"$in": scheduled_ids}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True, w=MONGO_FULL_WRITE_CONCERN)
for user in users:
producer.publish({"user_id": str(user["_id"]), "queued_at": queueing_at.isoformat()}, routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
time.sleep(1)
|
c8271b02c3636aa9620cce8b85c823ff0ec35c4a
|
examples/test_skype_site.py
|
examples/test_skype_site.py
|
"""
This is a mobile device test for Chromium-based browsers (such as MS Edge)
Usage: pytest test_skype_site.py --mobile --browser=edge
Default mobile settings for User Agent and Device Metrics if not specified:
User Agent: --agent="Mozilla/5.0 (Linux; Android 9; Pixel 3 XL)"
CSS Width, CSS Height, Pixel-Ratio: --metrics="411,731,3"
"""
from seleniumbase import BaseCase
class SkypeWebsiteTestClass(BaseCase):
def test_skype_website_on_mobile(self):
if not self.mobile_emulator:
print("\n This test is only for mobile devices / emulators!")
print(" (Usage: '--mobile' with a Chromium-based browser.)")
self.skip_test("Please rerun this test using '--mobile!'!")
self.open("https://www.skype.com/en/")
self.assert_text("Install Skype", "div.appInfo")
self.highlight("div.appBannerContent")
self.highlight('[itemprop="url"]')
self.highlight("h1")
self.highlight_click('[title="Download Skype"]')
self.assert_element('[aria-label="Microsoft"]')
self.assert_text("Download Skype", "h1")
self.highlight("div.appBannerContent")
self.highlight("h1")
self.assert_text("Skype for Mobile", "h2")
self.highlight("h2")
self.highlight("#get-skype-0")
self.highlight_click('[title*="Select from list"]')
self.highlight('[data-bi-id*="android"]')
self.highlight('[data-bi-id*="ios"]')
self.highlight('[data-bi-id*="windows10"]')
|
Add a mobile device test of the Skype website
|
Add a mobile device test of the Skype website
|
Python
|
mit
|
seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase
|
Add a mobile device test of the Skype website
|
"""
This is a mobile device test for Chromium-based browsers (such as MS Edge)
Usage: pytest test_skype_site.py --mobile --browser=edge
Default mobile settings for User Agent and Device Metrics if not specified:
User Agent: --agent="Mozilla/5.0 (Linux; Android 9; Pixel 3 XL)"
CSS Width, CSS Height, Pixel-Ratio: --metrics="411,731,3"
"""
from seleniumbase import BaseCase
class SkypeWebsiteTestClass(BaseCase):
def test_skype_website_on_mobile(self):
if not self.mobile_emulator:
print("\n This test is only for mobile devices / emulators!")
print(" (Usage: '--mobile' with a Chromium-based browser.)")
self.skip_test("Please rerun this test using '--mobile!'!")
self.open("https://www.skype.com/en/")
self.assert_text("Install Skype", "div.appInfo")
self.highlight("div.appBannerContent")
self.highlight('[itemprop="url"]')
self.highlight("h1")
self.highlight_click('[title="Download Skype"]')
self.assert_element('[aria-label="Microsoft"]')
self.assert_text("Download Skype", "h1")
self.highlight("div.appBannerContent")
self.highlight("h1")
self.assert_text("Skype for Mobile", "h2")
self.highlight("h2")
self.highlight("#get-skype-0")
self.highlight_click('[title*="Select from list"]')
self.highlight('[data-bi-id*="android"]')
self.highlight('[data-bi-id*="ios"]')
self.highlight('[data-bi-id*="windows10"]')
|
<commit_before><commit_msg>Add a mobile device test of the Skype website<commit_after>
|
"""
This is a mobile device test for Chromium-based browsers (such as MS Edge)
Usage: pytest test_skype_site.py --mobile --browser=edge
Default mobile settings for User Agent and Device Metrics if not specified:
User Agent: --agent="Mozilla/5.0 (Linux; Android 9; Pixel 3 XL)"
CSS Width, CSS Height, Pixel-Ratio: --metrics="411,731,3"
"""
from seleniumbase import BaseCase
class SkypeWebsiteTestClass(BaseCase):
def test_skype_website_on_mobile(self):
if not self.mobile_emulator:
print("\n This test is only for mobile devices / emulators!")
print(" (Usage: '--mobile' with a Chromium-based browser.)")
self.skip_test("Please rerun this test using '--mobile!'!")
self.open("https://www.skype.com/en/")
self.assert_text("Install Skype", "div.appInfo")
self.highlight("div.appBannerContent")
self.highlight('[itemprop="url"]')
self.highlight("h1")
self.highlight_click('[title="Download Skype"]')
self.assert_element('[aria-label="Microsoft"]')
self.assert_text("Download Skype", "h1")
self.highlight("div.appBannerContent")
self.highlight("h1")
self.assert_text("Skype for Mobile", "h2")
self.highlight("h2")
self.highlight("#get-skype-0")
self.highlight_click('[title*="Select from list"]')
self.highlight('[data-bi-id*="android"]')
self.highlight('[data-bi-id*="ios"]')
self.highlight('[data-bi-id*="windows10"]')
|
Add a mobile device test of the Skype website"""
This is a mobile device test for Chromium-based browsers (such as MS Edge)
Usage: pytest test_skype_site.py --mobile --browser=edge
Default mobile settings for User Agent and Device Metrics if not specified:
User Agent: --agent="Mozilla/5.0 (Linux; Android 9; Pixel 3 XL)"
CSS Width, CSS Height, Pixel-Ratio: --metrics="411,731,3"
"""
from seleniumbase import BaseCase
class SkypeWebsiteTestClass(BaseCase):
def test_skype_website_on_mobile(self):
if not self.mobile_emulator:
print("\n This test is only for mobile devices / emulators!")
print(" (Usage: '--mobile' with a Chromium-based browser.)")
self.skip_test("Please rerun this test using '--mobile!'!")
self.open("https://www.skype.com/en/")
self.assert_text("Install Skype", "div.appInfo")
self.highlight("div.appBannerContent")
self.highlight('[itemprop="url"]')
self.highlight("h1")
self.highlight_click('[title="Download Skype"]')
self.assert_element('[aria-label="Microsoft"]')
self.assert_text("Download Skype", "h1")
self.highlight("div.appBannerContent")
self.highlight("h1")
self.assert_text("Skype for Mobile", "h2")
self.highlight("h2")
self.highlight("#get-skype-0")
self.highlight_click('[title*="Select from list"]')
self.highlight('[data-bi-id*="android"]')
self.highlight('[data-bi-id*="ios"]')
self.highlight('[data-bi-id*="windows10"]')
|
<commit_before><commit_msg>Add a mobile device test of the Skype website<commit_after>"""
This is a mobile device test for Chromium-based browsers (such as MS Edge)
Usage: pytest test_skype_site.py --mobile --browser=edge
Default mobile settings for User Agent and Device Metrics if not specified:
User Agent: --agent="Mozilla/5.0 (Linux; Android 9; Pixel 3 XL)"
CSS Width, CSS Height, Pixel-Ratio: --metrics="411,731,3"
"""
from seleniumbase import BaseCase
class SkypeWebsiteTestClass(BaseCase):
def test_skype_website_on_mobile(self):
if not self.mobile_emulator:
print("\n This test is only for mobile devices / emulators!")
print(" (Usage: '--mobile' with a Chromium-based browser.)")
self.skip_test("Please rerun this test using '--mobile'!")
self.open("https://www.skype.com/en/")
self.assert_text("Install Skype", "div.appInfo")
self.highlight("div.appBannerContent")
self.highlight('[itemprop="url"]')
self.highlight("h1")
self.highlight_click('[title="Download Skype"]')
self.assert_element('[aria-label="Microsoft"]')
self.assert_text("Download Skype", "h1")
self.highlight("div.appBannerContent")
self.highlight("h1")
self.assert_text("Skype for Mobile", "h2")
self.highlight("h2")
self.highlight("#get-skype-0")
self.highlight_click('[title*="Select from list"]')
self.highlight('[data-bi-id*="android"]')
self.highlight('[data-bi-id*="ios"]')
self.highlight('[data-bi-id*="windows10"]')
|
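Note on the record above: SeleniumBase derives the Chromium mobile emulation from the --mobile, --agent and --metrics flags quoted in the docstring. As a rough, hedged sketch of what those defaults correspond to in plain Selenium (the "mobileEmulation" experimental option is Chrome's standard emulation mechanism; the values are copied from the docstring, and a locally available chromedriver is assumed):
from selenium import webdriver
options = webdriver.ChromeOptions()
options.add_experimental_option("mobileEmulation", {
    # CSS width, CSS height, pixel ratio -- the --metrics="411,731,3" default above
    "deviceMetrics": {"width": 411, "height": 731, "pixelRatio": 3.0},
    # the --agent default above
    "userAgent": "Mozilla/5.0 (Linux; Android 9; Pixel 3 XL)",
})
driver = webdriver.Chrome(options=options)  # assumes chromedriver is on PATH
driver.get("https://www.skype.com/en/")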
|
8daf4237aa84a6b032e7627afb31b29a44f47ddc
|
ProgressBar.py
|
ProgressBar.py
|
import sys, time
from CmdFormat import CmdFormat
class ProgressBar(CmdFormat):
def __init__(self, count = 0, total = 0, width = 80, bWithheader=True, bWithPercent=True,barColor='white'):
super(ProgressBar, self).__init__()
self.count = count
self.total = total
self.width = width
self.bWithheader = bWithheader
self.bWithPercent = bWithPercent
self.__barColor = barColor
def __Set_bar_color(self):
if type(self.__barColor) != type('a'):
raise TypeError("Wrong argument type of __Set_bar_color(color) in class ProgressBar!")
if self.__barColor=='red':
self.set_cmd_color(4|8)
if self.__barColor=='green':
self.set_cmd_color(2|8)
if self.__barColor=='blue':
self.set_cmd_color(1|10)
if self.__barColor=='yellow':
self.set_cmd_color(6|8)
def Move(self, s):
self.count += 1
sys.stdout.write(' '*(self.width + 20) + '\r')
sys.stdout.flush()
print(s)
progress = self.width * self.count / self.total
if(self.bWithheader):sys.stdout.write('{0:3}/{1:3}:'.format(self.count, self.total))
percent = self.count * 100.0 / self.total
if(self.bWithPercent):
self.__Set_bar_color()
sys.stdout.write('[' + int(progress)*'>' + int(self.width - progress)*'-' + ']' + ' %.2f' % percent + '%' + '\r')
self.reset_color()
else:
self.__Set_bar_color()
sys.stdout.write('[' + int(progress)*'>' + int(self.width - progress)*'-' + ']'+'\r')
self.reset_color()
if progress == self.width:
sys.stdout.write('\n')
sys.stdout.flush()
def Set_cmd_color(self,color):
if type(color) != type('a'):
raise TypeError("Wrong argument type of Set_cmd_color(color) in class ProgressBar!")
if color=='red':
self.set_cmd_color(4|8)
if color=='green':
self.set_cmd_color(2|8)
if color=='blue':
self.set_cmd_color(1|10)
if color=='yellow':
self.set_cmd_color(6|8)
if __name__ == '__main__':
bar = ProgressBar(total = 15,bWithheader=True,bWithPercent=True,barColor='green')
for i in range(15):
bar.Set_cmd_color('red')
bar.Move('sdfds ')
time.sleep(1)
|
Add another .py file for progress bar
|
Add another .py file for progress bar
Add another .py file for progress bar
|
Python
|
mit
|
vicyangworld/AutoOfficer
|
Add another .py file for progress bar
Add another .py file for progress bar
|
import sys, time
from CmdFormat import CmdFormat
class ProgressBar(CmdFormat):
def __init__(self, count = 0, total = 0, width = 80, bWithheader=True, bWithPercent=True,barColor='white'):
super(ProgressBar, self).__init__()
self.count = count
self.total = total
self.width = width
self.bWithheader = bWithheader
self.bWithPercent = bWithPercent
self.__barColor = barColor
def __Set_bar_color(self):
if type(self.__barColor) != type('a'):
raise TypeError("Wrong argument type of __Set_bar_color(color) in class ProgressBar!")
if self.__barColor=='red':
self.set_cmd_color(4|8)
if self.__barColor=='green':
self.set_cmd_color(2|8)
if self.__barColor=='blue':
self.set_cmd_color(1|10)
if self.__barColor=='yellow':
self.set_cmd_color(6|8)
def Move(self, s):
self.count += 1
sys.stdout.write(' '*(self.width + 20) + '\r')
sys.stdout.flush()
print(s)
progress = self.width * self.count / self.total
if(self.bWithheader):sys.stdout.write('{0:3}/{1:3}:'.format(self.count, self.total))
percent = self.count * 100.0 / self.total
if(self.bWithPercent):
self.__Set_bar_color()
sys.stdout.write('[' + int(progress)*'>' + int(self.width - progress)*'-' + ']' + ' %.2f' % percent + '%' + '\r')
self.reset_color()
else:
self.__Set_bar_color()
sys.stdout.write('[' + int(progress)*'>' + int(self.width - progress)*'-' + ']'+'\r')
self.reset_color()
if progress == self.width:
sys.stdout.write('\n')
sys.stdout.flush()
def Set_cmd_color(self,color):
if type(color) != type('a'):
raise TypeError("Wrong argument type of Set_cmd_color(color) in class ProgressBar!")
if color=='red':
self.set_cmd_color(4|8)
if color=='green':
self.set_cmd_color(2|8)
if color=='blue':
self.set_cmd_color(1|10)
if color=='yellow':
self.set_cmd_color(6|8)
if __name__ == '__main__':
bar = ProgressBar(total = 15,bWithheader=True,bWithPercent=True,barColor='green')
for i in range(15):
bar.Set_cmd_color('red')
bar.Move('sdfds ')
time.sleep(1)
|
<commit_before><commit_msg>Add another .py file for progress bar
Add another .py file for progress bar<commit_after>
|
import sys, time
from CmdFormat import CmdFormat
class ProgressBar(CmdFormat):
def __init__(self, count = 0, total = 0, width = 80, bWithheader=True, bWithPercent=True,barColor='white'):
super(ProgressBar, self).__init__()
self.count = count
self.total = total
self.width = width
self.bWithheader = bWithheader
self.bWithPercent = bWithPercent
self.__barColor = barColor
def __Set_bar_color(self):
if type(self.__barColor) != type('a'):
raise TypeError("Wrong argument type of __Set_bar_color(color) in class ProgressBar!")
if self.__barColor=='red':
self.set_cmd_color(4|8)
if self.__barColor=='green':
self.set_cmd_color(2|8)
if self.__barColor=='blue':
self.set_cmd_color(1|10)
if self.__barColor=='yellow':
self.set_cmd_color(6|8)
def Move(self, s):
self.count += 1
sys.stdout.write(' '*(self.width + 20) + '\r')
sys.stdout.flush()
print(s)
progress = self.width * self.count / self.total
if(self.bWithheader):sys.stdout.write('{0:3}/{1:3}:'.format(self.count, self.total))
percent = self.count * 100.0 / self.total
if(self.bWithPercent):
self.__Set_bar_color()
sys.stdout.write('[' + int(progress)*'>' + int(self.width - progress)*'-' + ']' + ' %.2f' % percent + '%' + '\r')
self.reset_color()
else:
self.__Set_bar_color()
sys.stdout.write('[' + int(progress)*'>' + int(self.width - progress)*'-' + ']'+'\r')
self.reset_color()
if progress == self.width:
sys.stdout.write('\n')
sys.stdout.flush()
def Set_cmd_color(self,color):
if type(color) != type('a'):
raise TypeError("Wrong argument type of Set_cmd_color(color) in class ProgressBar!")
if color=='red':
self.set_cmd_color(4|8)
if color=='green':
self.set_cmd_color(2|8)
if color=='blue':
self.set_cmd_color(1|10)
if color=='yellow':
self.set_cmd_color(6|8)
if __name__ == '__main__':
bar = ProgressBar(total = 15,bWithheader=True,bWithPercent=True,barColor='green')
for i in range(15):
bar.Set_cmd_color('red')
bar.Move('sdfds ')
time.sleep(1)
|
Add another .py file for progress bar
Add another .py file for progress barimport sys, time
from CmdFormat import CmdFormat
class ProgressBar(CmdFormat):
def __init__(self, count = 0, total = 0, width = 80, bWithheader=True, bWithPercent=True,barColor='white'):
super(ProgressBar, self).__init__()
self.count = count
self.total = total
self.width = width
self.bWithheader = bWithheader
self.bWithPercent = bWithPercent
self.__barColor = barColor
def __Set_bar_color(self):
if type(self.__barColor) != type('a'):
raise TypeError("Wrong argument type of __Set_bar_color(color) in class ProgressBar!")
if self.__barColor=='red':
self.set_cmd_color(4|8)
if self.__barColor=='green':
self.set_cmd_color(2|8)
if self.__barColor=='blue':
self.set_cmd_color(1|10)
if self.__barColor=='yellow':
self.set_cmd_color(6|8)
def Move(self, s):
self.count += 1
sys.stdout.write(' '*(self.width + 20) + '\r')
sys.stdout.flush()
print(s)
progress = self.width * self.count / self.total
if(self.bWithheader):sys.stdout.write('{0:3}/{1:3}:'.format(self.count, self.total))
percent = self.count * 100.0 / self.total
if(self.bWithPercent):
self.__Set_bar_color()
sys.stdout.write('[' + int(progress)*'>' + int(self.width - progress)*'-' + ']' + ' %.2f' % percent + '%' + '\r')
self.reset_color()
else:
self.__Set_bar_color()
sys.stdout.write('[' + int(progress)*'>' + int(self.width - progress)*'-' + ']'+'\r')
self.reset_color()
if progress == self.width:
sys.stdout.write('\n')
sys.stdout.flush()
def Set_cmd_color(self,color):
if type(color) != type('a'):
raise TypeError("Wrong argument type of Set_cmd_color(color) in class ProgressBar!")
if color=='red':
self.set_cmd_color(4|8)
if color=='green':
self.set_cmd_color(2|8)
if color=='blue':
self.set_cmd_color(1|10)
if color=='yellow':
self.set_cmd_color(6|8)
if __name__ == '__main__':
bar = ProgressBar(total = 15,bWithheader=True,bWithPercent=True,barColor='green')
for i in range(15):
bar.Set_cmd_color('red')
bar.Move('sdfds ')
time.sleep(1)
|
<commit_before><commit_msg>Add another .py file for progress bar
Add another .py file for progress bar<commit_after>import sys, time
from CmdFormat import CmdFormat
class ProgressBar(CmdFormat):
def __init__(self, count = 0, total = 0, width = 80, bWithheader=True, bWithPercent=True,barColor='white'):
super(ProgressBar, self).__init__()
self.count = count
self.total = total
self.width = width
self.bWithheader = bWithheader
self.bWithPercent = bWithPercent
self.__barColor = barColor
def __Set_bar_color(self):
if type(self.__barColor) != type('a'):
raise TypeError("Wrong argument type of __Set_bar_color(color) in class ProgressBar!")
if self.__barColor=='red':
self.set_cmd_color(4|8)
if self.__barColor=='green':
self.set_cmd_color(2|8)
if self.__barColor=='blue':
self.set_cmd_color(1|10)
if self.__barColor=='yellow':
self.set_cmd_color(6|8)
def Move(self, s):
self.count += 1
sys.stdout.write(' '*(self.width + 20) + '\r')
sys.stdout.flush()
print(s)
progress = self.width * self.count / self.total
if(self.bWithheader):sys.stdout.write('{0:3}/{1:3}:'.format(self.count, self.total))
percent = self.count * 100.0 / self.total
if(self.bWithPercent):
self.__Set_bar_color()
sys.stdout.write('[' + int(progress)*'>' + int(self.width - progress)*'-' + ']' + ' %.2f' % percent + '%' + '\r')
self.reset_color()
else:
self.__Set_bar_color()
sys.stdout.write('[' + int(progress)*'>' + int(self.width - progress)*'-' + ']'+'\r')
self.reset_color()
if progress == self.width:
sys.stdout.write('\n')
sys.stdout.flush()
def Set_cmd_color(self,color):
if type(color) != type('a'):
raise TypeError("Wrong argument type of Set_cmd_color(color) in class ProgressBar!")
if color=='red':
self.set_cmd_color(4|8)
if color=='green':
self.set_cmd_color(2|8)
if color=='blue':
self.set_cmd_color(1|10)
if color=='yellow':
self.set_cmd_color(6|8)
if __name__ == '__main__':
bar = ProgressBar(total = 15,bWithheader=True,bWithPercent=True,barColor='green')
for i in range(15):
bar.Set_cmd_color('red')
bar.Move('sdfds ')
time.sleep(1)
|
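The CmdFormat base class (Windows console colour handling) is not included in this record, so the ProgressBar above is not runnable on its own. A minimal, dependency-free sketch of the same carriage-return progress-bar technique, with the percentage computed from count and total:
from __future__ import division  # keep the bar arithmetic in floats on Python 2
import sys
import time
def draw_bar(count, total, width=40):
    # '>' marks the completed portion, '-' the remainder; '\r' redraws the line in place
    done = int(width * count / total)
    percent = 100.0 * count / total
    sys.stdout.write('[' + '>' * done + '-' * (width - done) + '] %.2f%%\r' % percent)
    sys.stdout.flush()
    if count == total:
        sys.stdout.write('\n')
if __name__ == '__main__':
    for i in range(1, 16):
        draw_bar(i, 15)
        time.sleep(0.1)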
|
76c25395590aa9dee64ca138633f01b62ac0d26b
|
providers/io/osf/registrations/migrations/0001_initial.py
|
providers/io/osf/registrations/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-08 16:17
from __future__ import unicode_literals
from django.db import migrations
import share.robot
class Migration(migrations.Migration):
dependencies = [
('share', '0001_initial'),
('djcelery', '0001_initial'),
]
operations = [
migrations.RunPython(
code=share.robot.RobotUserMigration('io.osf.registrations'),
),
migrations.RunPython(
code=share.robot.RobotOauthTokenMigration('io.osf.registrations'),
),
migrations.RunPython(
code=share.robot.RobotScheduleMigration('io.osf.registrations'),
),
]
|
Add new provider migration for osf registrations
|
Add new provider migration for osf registrations
|
Python
|
apache-2.0
|
aaxelb/SHARE,laurenbarker/SHARE,CenterForOpenScience/SHARE,aaxelb/SHARE,zamattiac/SHARE,zamattiac/SHARE,zamattiac/SHARE,laurenbarker/SHARE,aaxelb/SHARE,CenterForOpenScience/SHARE,laurenbarker/SHARE,CenterForOpenScience/SHARE
|
Add new provider migration for osf registrations
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-08 16:17
from __future__ import unicode_literals
from django.db import migrations
import share.robot
class Migration(migrations.Migration):
dependencies = [
('share', '0001_initial'),
('djcelery', '0001_initial'),
]
operations = [
migrations.RunPython(
code=share.robot.RobotUserMigration('io.osf.registrations'),
),
migrations.RunPython(
code=share.robot.RobotOauthTokenMigration('io.osf.registrations'),
),
migrations.RunPython(
code=share.robot.RobotScheduleMigration('io.osf.registrations'),
),
]
|
<commit_before><commit_msg>Add new provider migration for osf registrations<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-08 16:17
from __future__ import unicode_literals
from django.db import migrations
import share.robot
class Migration(migrations.Migration):
dependencies = [
('share', '0001_initial'),
('djcelery', '0001_initial'),
]
operations = [
migrations.RunPython(
code=share.robot.RobotUserMigration('io.osf.registrations'),
),
migrations.RunPython(
code=share.robot.RobotOauthTokenMigration('io.osf.registrations'),
),
migrations.RunPython(
code=share.robot.RobotScheduleMigration('io.osf.registrations'),
),
]
|
Add new provider migration for osf registrations# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-08 16:17
from __future__ import unicode_literals
from django.db import migrations
import share.robot
class Migration(migrations.Migration):
dependencies = [
('share', '0001_initial'),
('djcelery', '0001_initial'),
]
operations = [
migrations.RunPython(
code=share.robot.RobotUserMigration('io.osf.registrations'),
),
migrations.RunPython(
code=share.robot.RobotOauthTokenMigration('io.osf.registrations'),
),
migrations.RunPython(
code=share.robot.RobotScheduleMigration('io.osf.registrations'),
),
]
|
<commit_before><commit_msg>Add new provider migration for osf registrations<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-08 16:17
from __future__ import unicode_literals
from django.db import migrations
import share.robot
class Migration(migrations.Migration):
dependencies = [
('share', '0001_initial'),
('djcelery', '0001_initial'),
]
operations = [
migrations.RunPython(
code=share.robot.RobotUserMigration('io.osf.registrations'),
),
migrations.RunPython(
code=share.robot.RobotOauthTokenMigration('io.osf.registrations'),
),
migrations.RunPython(
code=share.robot.RobotScheduleMigration('io.osf.registrations'),
),
]
|
|
7c755e7839f7c602a6c93b1aa2f5011e89d15c85
|
nodeconductor/iaas/management/commands/addmissingpricelistflavors.py
|
nodeconductor/iaas/management/commands/addmissingpricelistflavors.py
|
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
from nodeconductor.cost_tracking.models import DefaultPriceListItem
from nodeconductor.iaas.models import Flavor, Instance
class Command(BaseCommand):
def handle(self, *args, **options):
instance_content_type = ContentType.objects.get_for_model(Instance)
self.stdout.write('Checking flavors existence in DefaultPriceListItem table ...')
for flavor in Flavor.objects.all():
lookup_kwargs = {'item_type': 'flavor', 'key': flavor.name, 'resource_content_type': instance_content_type}
if not DefaultPriceListItem.objects.filter(**lookup_kwargs).exists():
item = DefaultPriceListItem(**lookup_kwargs)
item.name = 'Flavor type: {}'.format(flavor.name)
item.save()
self.stdout.write('DefaultPriceListItem was created for flavor {}'.format(flavor.name))
self.stdout.write('... Done')
|
Create command for generating prices for flavors
|
Create command for generating prices for flavors
- itacloud-5319
|
Python
|
mit
|
opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor
|
Create command for generating prices for flavors
- itacloud-5319
|
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
from nodeconductor.cost_tracking.models import DefaultPriceListItem
from nodeconductor.iaas.models import Flavor, Instance
class Command(BaseCommand):
def handle(self, *args, **options):
instance_content_type = ContentType.objects.get_for_model(Instance)
self.stdout.write('Checking flavors existence in DefaultPriceListItem table ...')
for flavor in Flavor.objects.all():
lookup_kwargs = {'item_type': 'flavor', 'key': flavor.name, 'resource_content_type': instance_content_type}
if not DefaultPriceListItem.objects.filter(**lookup_kwargs).exists():
item = DefaultPriceListItem(**lookup_kwargs)
item.name = 'Flavor type: {}'.format(flavor.name)
item.save()
self.stdout.write('DefaultPriceListItem was created for flavor {}'.format(flavor.name))
self.stdout.write('... Done')
|
<commit_before><commit_msg>Create command for generating prices for flavors
- itacloud-5319<commit_after>
|
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
from nodeconductor.cost_tracking.models import DefaultPriceListItem
from nodeconductor.iaas.models import Flavor, Instance
class Command(BaseCommand):
def handle(self, *args, **options):
instance_content_type = ContentType.objects.get_for_model(Instance)
self.stdout.write('Checking flavors existence in DefaultPriceListItem table ...')
for flavor in Flavor.objects.all():
lookup_kwargs = {'item_type': 'flavor', 'key': flavor.name, 'resource_content_type': instance_content_type}
if not DefaultPriceListItem.objects.filter(**lookup_kwargs).exists():
item = DefaultPriceListItem(**lookup_kwargs)
item.name = 'Flavor type: {}'.format(flavor.name)
item.save()
self.stdout.write('DefaultPriceListItem was created for flavor {}'.format(flavor.name))
self.stdout.write('... Done')
|
Create command for generating prices for flavors
- itacloud-5319from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
from nodeconductor.cost_tracking.models import DefaultPriceListItem
from nodeconductor.iaas.models import Flavor, Instance
class Command(BaseCommand):
def handle(self, *args, **options):
instance_content_type = ContentType.objects.get_for_model(Instance)
self.stdout.write('Checking flavors existence in DefaultPriceListItem table ...')
for flavor in Flavor.objects.all():
lookup_kwargs = {'item_type': 'flavor', 'key': flavor.name, 'resource_content_type': instance_content_type}
if not DefaultPriceListItem.objects.filter(**lookup_kwargs).exists():
item = DefaultPriceListItem(**lookup_kwargs)
item.name = 'Flavor type: {}'.format(flavor.name)
item.save()
self.stdout.write('DefaultPriceListItem was created for flavor {}'.format(flavor.name))
self.stdout.write('... Done')
|
<commit_before><commit_msg>Create command for generating prices for flavors
- itacloud-5319<commit_after>from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
from nodeconductor.cost_tracking.models import DefaultPriceListItem
from nodeconductor.iaas.models import Flavor, Instance
class Command(BaseCommand):
def handle(self, *args, **options):
instance_content_type = ContentType.objects.get_for_model(Instance)
self.stdout.write('Checking flavors existance in DefaultPriceListItem table ...')
for flavor in Flavor.objects.all():
lookup_kwargs = {'item_type': 'flavor', 'key': flavor.name, 'resource_content_type': instance_content_type}
if not DefaultPriceListItem.objects.filter(**lookup_kwargs).exists():
item = DefaultPriceListItem(**lookup_kwargs)
item.name = 'Flavor type: {}'.format(flavor.name)
item.save()
self.stdout.write('DefaultPriceListItem was created for flavor {}'.format(flavor.name))
self.stdout.write('... Done')
|
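A side note on the command above: the filter(...).exists() check followed by a separate create-and-save can be collapsed with Django's get_or_create, which also removes the small race window between the check and the insert. A hedged equivalent of the loop body:
item, created = DefaultPriceListItem.objects.get_or_create(
    item_type='flavor',
    key=flavor.name,
    resource_content_type=instance_content_type,
    defaults={'name': 'Flavor type: {}'.format(flavor.name)},
)
if created:
    self.stdout.write('DefaultPriceListItem was created for flavor {}'.format(flavor.name))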
|
2616d8f3ef51a8551ac14a9e83b0298b8165093a
|
Superbuild/Projects/apple/fixup_plugin2.py
|
Superbuild/Projects/apple/fixup_plugin2.py
|
#!/usr/bin/env python
import subprocess
import os
plugin = 'libVelodyneHDLPlugin.dylib'
paraviewBuildDir = '/source/paraview/build'
nameprefix = '@executable_path/../Libraries/'
prefix = '@executable_path/../Libraries/'
# The official ParaView OSX binaries are built with hdf5, not vtkhdf5.
# Also, they are built with Python 2.6, not 2.7
namechanges = {
'libvtkhdf5_hl-pv3.98.1.dylib' : 'libhdf5.1.8.9.dylib',
'libvtkhdf5-pv3.98.1.dylib' : 'libhdf5_hl.1.8.9.dylib',
'libvtkWrappingPython27-pv3.98.1.dylib' : 'libvtkWrappingPython26-pv3.98.1.dylib'
}
changePythonFramework = False
def fixupPlugin():
output = subprocess.check_output(['otool', '-L', plugin])
lines = output.split('\n')
libs = []
qtlibs = []
for l in lines:
l = l.strip().split(' ')[0]
if l.startswith(paraviewBuildDir):
libs.append(l)
if l.startswith('Qt'):
qtlibs.append(l)
for qtlib in qtlibs:
command = 'install_name_tool -change %s @executable_path/../Frameworks/%s %s' % (qtlib, qtlib, plugin)
subprocess.call(command.split())
if changePythonFramework:
command = 'install_name_tool -change /System/Library/Frameworks/Python.framework/Versions/2.7/Python /System/Library/Frameworks/Python.framework/Versions/2.6/Python %s' % (plugin)
subprocess.call(command.split())
for lib in libs:
name = os.path.basename(lib)
if name in namechanges:
name = namechanges[name]
command = 'install_name_tool -change %s %s%s %s' % (lib, prefix, name, plugin)
subprocess.call(command.split())
pvlib = '/Applications/paraview.app/Contents/Libraries/' + name
if not os.path.exists(pvlib):
print 'notfound:', pvlib
command = 'install_name_tool -id %s%s %s' % (nameprefix, os.path.basename(plugin), plugin)
subprocess.call(command.split())
if __name__ == '__main__':
fixupPlugin()
|
Add work-in-progress script to fixup a standalone plugin library.
|
Add work-in-progress script to fixup a standalone plugin library.
|
Python
|
apache-2.0
|
frizaro/Veloview,frizaro/Veloview,Kitware/VeloView,Kitware/VeloView,Kitware/VeloView,Kitware/VeloView,frizaro/Veloview,frizaro/Veloview,Kitware/VeloView
|
Add work-in-progress script to fixup a standalone plugin library.
|
#!/usr/bin/env python
import subprocess
import os
plugin = 'libVelodyneHDLPlugin.dylib'
paraviewBuildDir = '/source/paraview/build'
nameprefix = '@executable_path/../Libraries/'
prefix = '@executable_path/../Libraries/'
# The official ParaView OSX binaries are built with hdf5, not vtkhdf5.
# Also, they are built with Python 2.6, not 2.7
namechanges = {
'libvtkhdf5_hl-pv3.98.1.dylib' : 'libhdf5.1.8.9.dylib',
'libvtkhdf5-pv3.98.1.dylib' : 'libhdf5_hl.1.8.9.dylib',
'libvtkWrappingPython27-pv3.98.1.dylib' : 'libvtkWrappingPython26-pv3.98.1.dylib'
}
changePythonFramework = False
def fixupPlugin():
output = subprocess.check_output(['otool', '-L', plugin])
lines = output.split('\n')
libs = []
qtlibs = []
for l in lines:
l = l.strip().split(' ')[0]
if l.startswith(paraviewBuildDir):
libs.append(l)
if l.startswith('Qt'):
qtlibs.append(l)
for qtlib in qtlibs:
command = 'install_name_tool -change %s @executable_path/../Frameworks/%s %s' % (qtlib, qtlib, plugin)
subprocess.call(command.split())
if changePythonFramework:
command = 'install_name_tool -change /System/Library/Frameworks/Python.framework/Versions/2.7/Python /System/Library/Frameworks/Python.framework/Versions/2.6/Python %s' % (plugin)
subprocess.call(command.split())
for lib in libs:
name = os.path.basename(lib)
if name in namechanges:
name = namechanges[name]
command = 'install_name_tool -change %s %s%s %s' % (lib, prefix, name, plugin)
subprocess.call(command.split())
pvlib = '/Applications/paraview.app/Contents/Libraries/' + name
if not os.path.exists(pvlib):
print 'notfound:', pvlib
command = 'install_name_tool -id %s%s %s' % (nameprefix, os.path.basename(plugin), plugin)
subprocess.call(command.split())
if __name__ == '__main__':
fixupPlugin()
|
<commit_before><commit_msg>Add work-in-progress script to fixup a standalone plugin library.<commit_after>
|
#!/usr/bin/env python
import subprocess
import os
plugin = 'libVelodyneHDLPlugin.dylib'
paraviewBuildDir = '/source/paraview/build'
nameprefix = '@executable_path/../Libraries/'
prefix = '@executable_path/../Libraries/'
# The official ParaView OSX binaries are built with hdf5, not vtkhdf5.
# Also, they are built with Python 2.6, not 2.7
namechanges = {
'libvtkhdf5_hl-pv3.98.1.dylib' : 'libhdf5.1.8.9.dylib',
'libvtkhdf5-pv3.98.1.dylib' : 'libhdf5_hl.1.8.9.dylib',
'libvtkWrappingPython27-pv3.98.1.dylib' : 'libvtkWrappingPython26-pv3.98.1.dylib'
}
changePythonFramework = False
def fixupPlugin():
output = subprocess.check_output(['otool', '-L', plugin])
lines = output.split('\n')
libs = []
qtlibs = []
for l in lines:
l = l.strip().split(' ')[0]
if l.startswith(paraviewBuildDir):
libs.append(l)
if l.startswith('Qt'):
qtlibs.append(l)
for qtlib in qtlibs:
command = 'install_name_tool -change %s @executable_path/../Frameworks/%s %s' % (qtlib, qtlib, plugin)
subprocess.call(command.split())
if changePythonFramework:
command = 'install_name_tool -change /System/Library/Frameworks/Python.framework/Versions/2.7/Python /System/Library/Frameworks/Python.framework/Versions/2.6/Python %s' % (plugin)
subprocess.call(command.split())
for lib in libs:
name = os.path.basename(lib)
if name in namechanges:
name = namechanges[name]
command = 'install_name_tool -change %s %s%s %s' % (lib, prefix, name, plugin)
subprocess.call(command.split())
pvlib = '/Applications/paraview.app/Contents/Libraries/' + name
if not os.path.exists(pvlib):
print 'notfound:', pvlib
command = 'install_name_tool -id %s%s %s' % (nameprefix, os.path.basename(plugin), plugin)
subprocess.call(command.split())
if __name__ == '__main__':
fixupPlugin()
|
Add work-in-progress script to fixup a standalone plugin library.#!/usr/bin/env python
import subprocess
import os
plugin = 'libVelodyneHDLPlugin.dylib'
paraviewBuildDir = '/source/paraview/build'
nameprefix = '@executable_path/../Libraries/'
prefix = '@executable_path/../Libraries/'
# The official ParaView OSX binaries are built with hdf5, not vtkhdf5.
# Also, they are built with Python 2.6, not 2.7
namechanges = {
'libvtkhdf5_hl-pv3.98.1.dylib' : 'libhdf5.1.8.9.dylib',
'libvtkhdf5-pv3.98.1.dylib' : 'libhdf5_hl.1.8.9.dylib',
'libvtkWrappingPython27-pv3.98.1.dylib' : 'libvtkWrappingPython26-pv3.98.1.dylib'
}
changePythonFramework = False
def fixupPlugin():
output = subprocess.check_output(['otool', '-L', plugin])
lines = output.split('\n')
libs = []
qtlibs = []
for l in lines:
l = l.strip().split(' ')[0]
if l.startswith(paraviewBuildDir):
libs.append(l)
if l.startswith('Qt'):
qtlibs.append(l)
for qtlib in qtlibs:
command = 'install_name_tool -change %s @executable_path/../Frameworks/%s %s' % (qtlib, qtlib, plugin)
subprocess.call(command.split())
if changePythonFramework:
command = 'install_name_tool -change /System/Library/Frameworks/Python.framework/Versions/2.7/Python /System/Library/Frameworks/Python.framework/Versions/2.6/Python %s' % (plugin)
subprocess.call(command.split())
for lib in libs:
name = os.path.basename(lib)
if name in namechanges:
name = namechanges[name]
command = 'install_name_tool -change %s %s%s %s' % (lib, prefix, name, plugin)
subprocess.call(command.split())
pvlib = '/Applications/paraview.app/Contents/Libraries/' + name
if not os.path.exists(pvlib):
print 'notfound:', pvlib
command = 'install_name_tool -id %s%s %s' % (nameprefix, os.path.basename(plugin), plugin)
subprocess.call(command.split())
if __name__ == '__main__':
fixupPlugin()
|
<commit_before><commit_msg>Add work-in-progress script to fixup a standalone plugin library.<commit_after>#!/usr/bin/env python
import subprocess
import os
plugin = 'libVelodyneHDLPlugin.dylib'
paraviewBuildDir = '/source/paraview/build'
nameprefix = '@executable_path/../Libraries/'
prefix = '@executable_path/../Libraries/'
# The official ParaView OSX binaries are built with hdf5, not vtkhdf5.
# Also, they are built with Python 2.6, not 2.7
namechanges = {
'libvtkhdf5_hl-pv3.98.1.dylib' : 'libhdf5.1.8.9.dylib',
'libvtkhdf5-pv3.98.1.dylib' : 'libhdf5_hl.1.8.9.dylib',
'libvtkWrappingPython27-pv3.98.1.dylib' : 'libvtkWrappingPython26-pv3.98.1.dylib'
}
changePythonFramework = False
def fixupPlugin():
output = subprocess.check_output(['otool', '-L', plugin])
lines = output.split('\n')
libs = []
qtlibs = []
for l in lines:
l = l.strip().split(' ')[0]
if l.startswith(paraviewBuildDir):
libs.append(l)
if l.startswith('Qt'):
qtlibs.append(l)
for qtlib in qtlibs:
command = 'install_name_tool -change %s @executable_path/../Frameworks/%s %s' % (qtlib, qtlib, plugin)
subprocess.call(command.split())
if changePythonFramework:
command = 'install_name_tool -change /System/Library/Frameworks/Python.framework/Versions/2.7/Python /System/Library/Frameworks/Python.framework/Versions/2.6/Python %s' % (plugin)
subprocess.call(command.split())
for lib in libs:
name = os.path.basename(lib)
if name in namechanges:
name = namechanges[name]
command = 'install_name_tool -change %s %s%s %s' % (lib, prefix, name, plugin)
subprocess.call(command.split())
pvlib = '/Applications/paraview.app/Contents/Libraries/' + name
if not os.path.exists(pvlib):
print 'notfound:', pvlib
command = 'install_name_tool -id %s%s %s' % (nameprefix, os.path.basename(plugin), plugin)
subprocess.call(command.split())
if __name__ == '__main__':
fixupPlugin()
|
|
735135c5570edd38324fe3e94aa2f4c2f3043627
|
cla_backend/apps/legalaid/migrations/0023_migrate_contact_for_research_via_field.py
|
cla_backend/apps/legalaid/migrations/0023_migrate_contact_for_research_via_field.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import Q
def migrate_contact_for_research_via_field_data(apps, schema_editor):
ContactResearchMethod = apps.get_model("legalaid", "ContactResearchMethod")
research_methods = {method.method: method.id for method in ContactResearchMethod.objects.all()}
PersonalDetails = apps.get_model("legalaid", "PersonalDetails")
models = PersonalDetails.objects.exclude(Q(contact_for_research_via="") | Q(contact_for_research_via=None))
for model in models:
if not model.contact_for_research_methods.exists():
model.contact_for_research_methods = [research_methods.get(model.contact_for_research_via)]
model.save()
def rollback_migrate_contact_for_research_via_field_data(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [("legalaid", "0022_default_contact_for_research_methods")]
operations = [
migrations.RunPython(
migrate_contact_for_research_via_field_data, rollback_migrate_contact_for_research_via_field_data
)
]
|
Migrate data from contact_for_research_via and into contact_for_research_methods many to many field
|
Migrate data from contact_for_research_via and into contact_for_research_methods many to many field
|
Python
|
mit
|
ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend
|
Migrate data from contact_for_research_via and into contact_for_research_methods many to many field
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import Q
def migrate_contact_for_research_via_field_data(apps, schema_editor):
ContactResearchMethod = apps.get_model("legalaid", "ContactResearchMethod")
research_methods = {method.method: method.id for method in ContactResearchMethod.objects.all()}
PersonalDetails = apps.get_model("legalaid", "PersonalDetails")
models = PersonalDetails.objects.exclude(Q(contact_for_research_via="") | Q(contact_for_research_via=None))
for model in models:
if not model.contact_for_research_methods.exists():
model.contact_for_research_methods = [research_methods.get(model.contact_for_research_via)]
model.save()
def rollback_migrate_contact_for_research_via_field_data(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [("legalaid", "0022_default_contact_for_research_methods")]
operations = [
migrations.RunPython(
migrate_contact_for_research_via_field_data, rollback_migrate_contact_for_research_via_field_data
)
]
|
<commit_before><commit_msg>Migrate data from contact_for_research_via and into contact_for_research_methods many to many field<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import Q
def migrate_contact_for_research_via_field_data(apps, schema_editor):
ContactResearchMethod = apps.get_model("legalaid", "ContactResearchMethod")
research_methods = {method.method: method.id for method in ContactResearchMethod.objects.all()}
PersonalDetails = apps.get_model("legalaid", "PersonalDetails")
models = PersonalDetails.objects.exclude(Q(contact_for_research_via="") | Q(contact_for_research_via=None))
for model in models:
if not model.contact_for_research_methods.exists():
model.contact_for_research_methods = [research_methods.get(model.contact_for_research_via)]
model.save()
def rollback_migrate_contact_for_research_via_field_data(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [("legalaid", "0022_default_contact_for_research_methods")]
operations = [
migrations.RunPython(
migrate_contact_for_research_via_field_data, rollback_migrate_contact_for_research_via_field_data
)
]
|
Migrate data from contact_for_research_via and into contact_for_research_methods many to many field# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import Q
def migrate_contact_for_research_via_field_data(apps, schema_editor):
ContactResearchMethod = apps.get_model("legalaid", "ContactResearchMethod")
research_methods = {method.method: method.id for method in ContactResearchMethod.objects.all()}
PersonalDetails = apps.get_model("legalaid", "PersonalDetails")
models = PersonalDetails.objects.exclude(Q(contact_for_research_via="") | Q(contact_for_research_via=None))
for model in models:
if not model.contact_for_research_methods.exists():
model.contact_for_research_methods = [research_methods.get(model.contact_for_research_via)]
model.save()
def rollback_migrate_contact_for_research_via_field_data(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [("legalaid", "0022_default_contact_for_research_methods")]
operations = [
migrations.RunPython(
migrate_contact_for_research_via_field_data, rollback_migrate_contact_for_research_via_field_data
)
]
|
<commit_before><commit_msg>Migrate data from contact_for_research_via and into contact_for_research_methods many to many field<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import Q
def migrate_contact_for_research_via_field_data(apps, schema_editor):
ContactResearchMethod = apps.get_model("legalaid", "ContactResearchMethod")
research_methods = {method.method: method.id for method in ContactResearchMethod.objects.all()}
PersonalDetails = apps.get_model("legalaid", "PersonalDetails")
models = PersonalDetails.objects.exclude(Q(contact_for_research_via="") | Q(contact_for_research_via=None))
for model in models:
if not model.contact_for_research_methods.exists():
model.contact_for_research_methods = [research_methods.get(model.contact_for_research_via)]
model.save()
def rollback_migrate_contact_for_research_via_field_data(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [("legalaid", "0022_default_contact_for_research_methods")]
operations = [
migrations.RunPython(
migrate_contact_for_research_via_field_data, rollback_migrate_contact_for_research_via_field_data
)
]
|
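One caveat about the data migration above: direct assignment to a many-to-many field (model.contact_for_research_methods = [...]) only works on Django versions before 2.0; from Django 2.0 onward it raises TypeError and .set() must be used. A hedged equivalent of the loop body for newer Django versions:
method_id = research_methods.get(model.contact_for_research_via)
if method_id and not model.contact_for_research_methods.exists():
    model.contact_for_research_methods.set([method_id])
    model.save()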
|
3912416390ebe5df3c883b280cc6acac5169c1f7
|
tests/test_elements_have_owner.py
|
tests/test_elements_have_owner.py
|
"""
For all relevant model elements, check if there is at least one "owner"
("owner" is a derived union).
This is needed to display all elements in the tree view.
"""
import itertools
import pytest
import gaphor.SysML.diagramitems
import gaphor.UML.diagramitems
from gaphor import UML
from gaphor.core.modeling import Element
from gaphor.core.modeling.properties import derived
from gaphor.diagram.support import get_model_element
def all_subset_properties(prop):
for sub in prop.subsets:
if isinstance(sub, derived):
yield from all_subset_properties(sub)
else:
yield sub
def all_presented_elements(module):
return (
get_model_element(getattr(module, name))
for name in dir(module)
if not name.startswith("_") and get_model_element(getattr(module, name))
)
def all_presented_uml_and_sysml_elements():
return itertools.chain(
all_presented_elements(gaphor.UML.diagramitems),
all_presented_elements(gaphor.SysML.diagramitems),
[
UML.ExecutionOccurrenceSpecification,
UML.ExtensionEnd,
UML.InstanceSpecification,
UML.MessageOccurrenceSpecification,
],
)
def concrete_owner_property(class_):
return (
p for p in class_.umlproperties() if p in all_subset_properties(Element.owner)
)
def test_all_presented_uml_and_sysml_elements():
elements = all_presented_uml_and_sysml_elements()
assert all(issubclass(c, Element) for c in elements)
@pytest.mark.parametrize("class_", all_presented_uml_and_sysml_elements())
def test_element_has_concrete_ownable_property(class_):
owners = list(concrete_owner_property(class_))
print(f"{class_}: {list(map(str, owners))}")
assert any(owners)
|
Add test to check if elements have at least one owner
|
Add test to check if elements have at least one owner
|
Python
|
lgpl-2.1
|
amolenaar/gaphor,amolenaar/gaphor
|
Add test to check if elements have at least one owner
|
"""
For all relevant model elements, check if there is at least one "owner"
("owner" is a derived union).
This is needed to display all elements in the tree view.
"""
import itertools
import pytest
import gaphor.SysML.diagramitems
import gaphor.UML.diagramitems
from gaphor import UML
from gaphor.core.modeling import Element
from gaphor.core.modeling.properties import derived
from gaphor.diagram.support import get_model_element
def all_subset_properties(prop):
for sub in prop.subsets:
if isinstance(sub, derived):
yield from all_subset_properties(sub)
else:
yield sub
def all_presented_elements(module):
return (
get_model_element(getattr(module, name))
for name in dir(module)
if not name.startswith("_") and get_model_element(getattr(module, name))
)
def all_presented_uml_and_sysml_elements():
return itertools.chain(
all_presented_elements(gaphor.UML.diagramitems),
all_presented_elements(gaphor.SysML.diagramitems),
[
UML.ExecutionOccurrenceSpecification,
UML.ExtensionEnd,
UML.InstanceSpecification,
UML.MessageOccurrenceSpecification,
],
)
def concrete_owner_property(class_):
return (
p for p in class_.umlproperties() if p in all_subset_properties(Element.owner)
)
def test_all_presented_uml_and_sysml_elements():
elements = all_presented_uml_and_sysml_elements()
assert all(issubclass(c, Element) for c in elements)
@pytest.mark.parametrize("class_", all_presented_uml_and_sysml_elements())
def test_element_has_concrete_ownable_property(class_):
owners = list(concrete_owner_property(class_))
print(f"{class_}: {list(map(str, owners))}")
assert any(owners)
|
<commit_before><commit_msg>Add test to check if elements have at least one owner<commit_after>
|
"""
For all relevant model elements, check if there is at least one "owner"
("owner" is a derived union).
This is needed to display all elements in the tree view.
"""
import itertools
import pytest
import gaphor.SysML.diagramitems
import gaphor.UML.diagramitems
from gaphor import UML
from gaphor.core.modeling import Element
from gaphor.core.modeling.properties import derived
from gaphor.diagram.support import get_model_element
def all_subset_properties(prop):
for sub in prop.subsets:
if isinstance(sub, derived):
yield from all_subset_properties(sub)
else:
yield sub
def all_presented_elements(module):
return (
get_model_element(getattr(module, name))
for name in dir(module)
if not name.startswith("_") and get_model_element(getattr(module, name))
)
def all_presented_uml_and_sysml_elements():
return itertools.chain(
all_presented_elements(gaphor.UML.diagramitems),
all_presented_elements(gaphor.SysML.diagramitems),
[
UML.ExecutionOccurrenceSpecification,
UML.ExtensionEnd,
UML.InstanceSpecification,
UML.MessageOccurrenceSpecification,
],
)
def concrete_owner_property(class_):
return (
p for p in class_.umlproperties() if p in all_subset_properties(Element.owner)
)
def test_all_presented_uml_and_sysml_elements():
elements = all_presented_uml_and_sysml_elements()
assert all(issubclass(c, Element) for c in elements)
@pytest.mark.parametrize("class_", all_presented_uml_and_sysml_elements())
def test_element_has_concrete_ownable_property(class_):
owners = list(concrete_owner_property(class_))
print(f"{class_}: {list(map(str, owners))}")
assert any(owners)
|
Add test to check if elements have at least one owner"""
For all relevant model elements, check if there is at least one "owner"
("owner" is a derived union).
This is needed to display all elements in the tree view.
"""
import itertools
import pytest
import gaphor.SysML.diagramitems
import gaphor.UML.diagramitems
from gaphor import UML
from gaphor.core.modeling import Element
from gaphor.core.modeling.properties import derived
from gaphor.diagram.support import get_model_element
def all_subset_properties(prop):
for sub in prop.subsets:
if isinstance(sub, derived):
yield from all_subset_properties(sub)
else:
yield sub
def all_presented_elements(module):
return (
get_model_element(getattr(module, name))
for name in dir(module)
if not name.startswith("_") and get_model_element(getattr(module, name))
)
def all_presented_uml_and_sysml_elements():
return itertools.chain(
all_presented_elements(gaphor.UML.diagramitems),
all_presented_elements(gaphor.SysML.diagramitems),
[
UML.ExecutionOccurrenceSpecification,
UML.ExtensionEnd,
UML.InstanceSpecification,
UML.MessageOccurrenceSpecification,
],
)
def concrete_owner_property(class_):
return (
p for p in class_.umlproperties() if p in all_subset_properties(Element.owner)
)
def test_all_presented_uml_and_sysml_elements():
elements = all_presented_uml_and_sysml_elements()
assert all(issubclass(c, Element) for c in elements)
@pytest.mark.parametrize("class_", all_presented_uml_and_sysml_elements())
def test_element_has_concrete_ownable_property(class_):
owners = list(concrete_owner_property(class_))
print(f"{class_}: {list(map(str, owners))}")
assert any(owners)
|
<commit_before><commit_msg>Add test to check if elements have at least one owner<commit_after>"""
For all relevant model elements, check if there is at least one "owner"
("owner" is a derived union).
This is needed to display all elements in the tree view.
"""
import itertools
import pytest
import gaphor.SysML.diagramitems
import gaphor.UML.diagramitems
from gaphor import UML
from gaphor.core.modeling import Element
from gaphor.core.modeling.properties import derived
from gaphor.diagram.support import get_model_element
def all_subset_properties(prop):
for sub in prop.subsets:
if isinstance(sub, derived):
yield from all_subset_properties(sub)
else:
yield sub
def all_presented_elements(module):
return (
get_model_element(getattr(module, name))
for name in dir(module)
if not name.startswith("_") and get_model_element(getattr(module, name))
)
def all_presented_uml_and_sysml_elements():
return itertools.chain(
all_presented_elements(gaphor.UML.diagramitems),
all_presented_elements(gaphor.SysML.diagramitems),
[
UML.ExecutionOccurrenceSpecification,
UML.ExtensionEnd,
UML.InstanceSpecification,
UML.MessageOccurrenceSpecification,
],
)
def concrete_owner_property(class_):
return (
p for p in class_.umlproperties() if p in all_subset_properties(Element.owner)
)
def test_all_presented_uml_and_sysml_elements():
elements = all_presented_uml_and_sysml_elements()
assert all(issubclass(c, Element) for c in elements)
@pytest.mark.parametrize("class_", all_presented_uml_and_sysml_elements())
def test_element_has_concrete_ownable_property(class_):
owners = list(concrete_owner_property(class_))
print(f"{class_}: {list(map(str, owners))}")
assert any(owners)
|
|
4a48b8dd804f9a287d35b697d851a660eec80a75
|
tests/richenum/test_simple_enums.py
|
tests/richenum/test_simple_enums.py
|
import unittest
from richenum import EnumConstructionException, enum
Breakfast = enum(
COFFEE=0,
OATMEAL=1,
FRUIT=2)
class SimpleEnumTestSuite(unittest.TestCase):
def test_members_are_accessible_through_attributes(self):
self.assertEqual(Breakfast.COFFEE, 0)
def test_lookup_by_name(self):
self.assertEqual(Breakfast.get_id_by_label('COFFEE'), 0)
def test_lookup_by_value(self):
self.assertEqual(Breakfast.get_label_by_id(0), 'COFFEE')
def test_can_cast_to_list_of_choices(self):
self.assertEqual(
Breakfast.choices,
[(0, 'COFFEE'), (1, 'OATMEAL'), (2, 'FRUIT')])
def test_choices_are_ordered_by_value(self):
Shuffled = enum(FRUIT=2, COFFEE=0, OATMEAL=1)
self.assertEqual(Shuffled.choices, Breakfast.choices)
def test_values_can_be_any_hashable_type(self):
try:
Confused = enum(INT=0, TUPLE=(1, 2), STR='yup')
self.assertEqual(Confused.get_id_by_label('TUPLE'), (1, 2))
except:
self.fail('Simple enums should accept values of any hashable type.')
with self.assertRaisesRegexp(EnumConstructionException, 'hashable'):
Confused = enum(LIST=[1, 2])
|
Add tests for simple enums
|
Add tests for simple enums
|
Python
|
mit
|
adepue/richenum,hearsaycorp/richenum
|
Add tests for simple enums
|
import unittest
from richenum import EnumConstructionException, enum
Breakfast = enum(
COFFEE=0,
OATMEAL=1,
FRUIT=2)
class SimpleEnumTestSuite(unittest.TestCase):
def test_members_are_accessible_through_attributes(self):
self.assertEqual(Breakfast.COFFEE, 0)
def test_lookup_by_name(self):
self.assertEqual(Breakfast.get_id_by_label('COFFEE'), 0)
def test_lookup_by_value(self):
self.assertEqual(Breakfast.get_label_by_id(0), 'COFFEE')
def test_can_cast_to_list_of_choices(self):
self.assertEqual(
Breakfast.choices,
[(0, 'COFFEE'), (1, 'OATMEAL'), (2, 'FRUIT')])
def test_choices_are_ordered_by_value(self):
Shuffled = enum(FRUIT=2, COFFEE=0, OATMEAL=1)
self.assertEqual(Shuffled.choices, Breakfast.choices)
def test_values_can_be_any_hashable_type(self):
try:
Confused = enum(INT=0, TUPLE=(1, 2), STR='yup')
self.assertEqual(Confused.get_id_by_label('TUPLE'), (1, 2))
except:
self.fail('Simple enums should accept values of any hashable type.')
with self.assertRaisesRegexp(EnumConstructionException, 'hashable'):
Confused = enum(LIST=[1, 2])
|
<commit_before><commit_msg>Add tests for simple enums<commit_after>
|
import unittest
from richenum import EnumConstructionException, enum
Breakfast = enum(
COFFEE=0,
OATMEAL=1,
FRUIT=2)
class SimpleEnumTestSuite(unittest.TestCase):
def test_members_are_accessible_through_attributes(self):
self.assertEqual(Breakfast.COFFEE, 0)
def test_lookup_by_name(self):
self.assertEqual(Breakfast.get_id_by_label('COFFEE'), 0)
def test_lookup_by_value(self):
self.assertEqual(Breakfast.get_label_by_id(0), 'COFFEE')
def test_can_cast_to_list_of_choices(self):
self.assertEqual(
Breakfast.choices,
[(0, 'COFFEE'), (1, 'OATMEAL'), (2, 'FRUIT')])
def test_choices_are_ordered_by_value(self):
Shuffled = enum(FRUIT=2, COFFEE=0, OATMEAL=1)
self.assertEqual(Shuffled.choices, Breakfast.choices)
def test_values_can_be_any_hashable_type(self):
try:
Confused = enum(INT=0, TUPLE=(1, 2), STR='yup')
self.assertEqual(Confused.get_id_by_label('TUPLE'), (1, 2))
except:
self.fail('Simple enums should accept values of any hashable type.')
with self.assertRaisesRegexp(EnumConstructionException, 'hashable'):
Confused = enum(LIST=[1, 2])
|
Add tests for simple enumsimport unittest
from richenum import EnumConstructionException, enum
Breakfast = enum(
COFFEE=0,
OATMEAL=1,
FRUIT=2)
class SimpleEnumTestSuite(unittest.TestCase):
def test_members_are_accessible_through_attributes(self):
self.assertEqual(Breakfast.COFFEE, 0)
def test_lookup_by_name(self):
self.assertEqual(Breakfast.get_id_by_label('COFFEE'), 0)
def test_lookup_by_value(self):
self.assertEqual(Breakfast.get_label_by_id(0), 'COFFEE')
def test_can_cast_to_list_of_choices(self):
self.assertEqual(
Breakfast.choices,
[(0, 'COFFEE'), (1, 'OATMEAL'), (2, 'FRUIT')])
def test_choices_are_ordered_by_value(self):
Shuffled = enum(FRUIT=2, COFFEE=0, OATMEAL=1)
self.assertEqual(Shuffled.choices, Breakfast.choices)
def test_values_can_be_any_hashable_type(self):
try:
Confused = enum(INT=0, TUPLE=(1, 2), STR='yup')
self.assertEqual(Confused.get_id_by_label('TUPLE'), (1, 2))
except:
self.fail('Simple enums should accept values of any hashable type.')
with self.assertRaisesRegexp(EnumConstructionException, 'hashable'):
Confused = enum(LIST=[1, 2])
|
<commit_before><commit_msg>Add tests for simple enums<commit_after>import unittest
from richenum import EnumConstructionException, enum
Breakfast = enum(
COFFEE=0,
OATMEAL=1,
FRUIT=2)
class SimpleEnumTestSuite(unittest.TestCase):
def test_members_are_accessible_through_attributes(self):
self.assertEqual(Breakfast.COFFEE, 0)
def test_lookup_by_name(self):
self.assertEqual(Breakfast.get_id_by_label('COFFEE'), 0)
def test_lookup_by_value(self):
self.assertEqual(Breakfast.get_label_by_id(0), 'COFFEE')
def test_can_cast_to_list_of_choices(self):
self.assertEqual(
Breakfast.choices,
[(0, 'COFFEE'), (1, 'OATMEAL'), (2, 'FRUIT')])
def test_choices_are_ordered_by_value(self):
Shuffled = enum(FRUIT=2, COFFEE=0, OATMEAL=1)
self.assertEqual(Shuffled.choices, Breakfast.choices)
def test_values_can_be_any_hashable_type(self):
try:
Confused = enum(INT=0, TUPLE=(1, 2), STR='yup')
self.assertEqual(Confused.get_id_by_label('TUPLE'), (1, 2))
except:
self.fail('Simple enums should accept values of any hashable type.')
with self.assertRaisesRegexp(EnumConstructionException, 'hashable'):
Confused = enum(LIST=[1, 2])
|
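A small portability note on the last assertion above: assertRaisesRegexp is the Python 2 spelling and survives on Python 3 only as a deprecated alias of assertRaisesRegex. The same check in the Python 3 spelling:
with self.assertRaisesRegex(EnumConstructionException, 'hashable'):
    Confused = enum(LIST=[1, 2])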
|
13fdc81cb32842dc5e0f05d2aa84c997cd59daa3
|
IPython/core/tests/test_logger.py
|
IPython/core/tests/test_logger.py
|
"""Test IPython.core.logger"""
import nose.tools as nt
_ip = get_ipython()
def test_logstart_inaccessible_file():
try:
_ip.logger.logstart(logfname="/") # Opening that filename will fail.
except IOError:
pass
else:
nt.assert_true(False) # The try block should never pass.
try:
_ip.run_cell("a=1") # Check it doesn't try to log this
finally:
_ip.logger.log_active = False # If this fails, don't let later tests fail
|
Add test that, if we failed to open the log file, we don't try to write to it.
|
Add test that, if we failed to open the log file, we don't try to write to it.
|
Python
|
bsd-3-clause
|
ipython/ipython,ipython/ipython
|
Add test that, if we failed to open the log file, we don't try to write to it.
|
"""Test IPython.core.logger"""
import nose.tools as nt
_ip = get_ipython()
def test_logstart_inaccessible_file():
try:
_ip.logger.logstart(logfname="/") # Opening that filename will fail.
except IOError:
pass
else:
nt.assert_true(False) # The try block should never pass.
try:
_ip.run_cell("a=1") # Check it doesn't try to log this
finally:
_ip.logger.log_active = False # If this fails, don't let later tests fail
|
<commit_before><commit_msg>Add test that, if we failed to open the log file, we don't try to write to it.<commit_after>
|
"""Test IPython.core.logger"""
import nose.tools as nt
_ip = get_ipython()
def test_logstart_inaccessible_file():
try:
_ip.logger.logstart(logfname="/") # Opening that filename will fail.
except IOError:
pass
else:
nt.assert_true(False) # The try block should never pass.
try:
_ip.run_cell("a=1") # Check it doesn't try to log this
finally:
_ip.logger.log_active = False # If this fails, don't let later tests fail
|
Add test that, if we failed to open the log file, we don't try to write to it."""Test IPython.core.logger"""
import nose.tools as nt
_ip = get_ipython()
def test_logstart_inaccessible_file():
try:
_ip.logger.logstart(logfname="/") # Opening that filename will fail.
except IOError:
pass
else:
nt.assert_true(False) # The try block should never pass.
try:
_ip.run_cell("a=1") # Check it doesn't try to log this
finally:
_ip.logger.log_active = False # If this fails, don't let later tests fail
|
<commit_before><commit_msg>Add test that, if we failed to open the log file, we don't try to write to it.<commit_after>"""Test IPython.core.logger"""
import nose.tools as nt
_ip = get_ipython()
def test_logstart_inaccessible_file():
try:
_ip.logger.logstart(logfname="/") # Opening that filename will fail.
except IOError:
pass
else:
nt.assert_true(False) # The try block should never pass.
try:
_ip.run_cell("a=1") # Check it doesn't try to log this
finally:
_ip.logger.log_active = False # If this fails, don't let later tests fail
|
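The try/except/else pattern with nt.assert_true(False) above can also be expressed with the context-manager form of assert_raises (nose.tools re-exports unittest's assertRaises, which supports this usage on Python 2.7+); a hedged equivalent of the first block:
with nt.assert_raises(IOError):
    _ip.logger.logstart(logfname="/")  # opening "/" as a log file should fail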
|
815ef4b4b0dce640077e1f8ecd2fbe95598bf539
|
src/ggrc/migrations/versions/20160608132526_170e453da661_add_comments_owners_info.py
|
src/ggrc/migrations/versions/20160608132526_170e453da661_add_comments_owners_info.py
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: peter@reciprocitylabs.com
# Maintained By: peter@reciprocitylabs.com
"""
Add comments' owners information.
Create Date: 2016-06-08 13:25:26.635435
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = "170e453da661"
down_revision = "7a9b715ec504"
def upgrade():
"""Create owner information for the existing comments.
A comment's owner is assumed to be the user who last edited it, and this
information is added to the object_owners table for all existing comments.
If a record already exists, do nothing (this could happen e.g. on a DB
downgrade and a subsequent upgrade).
"""
# NOTE: we set the status column's value to "Draft" to be consistent with
# what the application does when a new comment is created
command = """
INSERT IGNORE INTO object_owners (
person_id, ownable_id, ownable_type, modified_by_id,
created_at, updated_at, status
)
SELECT
modified_by_id, id, "Comment", modified_by_id, created_at, updated_at,
"Draft"
FROM comments;
"""
op.execute(command)
def downgrade():
"""Do not delete any comments' owner information to preserve data."""
|
Create existing comments' owners records
|
Create existing comments' owners records
|
Python
|
apache-2.0
|
kr41/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,edofic/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,josthkko/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core
|
Create existing comments' owners records
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: peter@reciprocitylabs.com
# Maintained By: peter@reciprocitylabs.com
"""
Add comments' owners information.
Create Date: 2016-06-08 13:25:26.635435
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = "170e453da661"
down_revision = "7a9b715ec504"
def upgrade():
"""Create owner information for the existing comments.
A comment's owner is assumed to be the user who last edited it, and this
information is added to the object_owners table for all existing comments.
If a record already exists, do nothing (this could happen e.g. on a DB
downgrade and a subsequent another upgrade).
"""
# NOTE: we set the status column's value to "Draft" to be consistent with
# what the application does when a new comment is created
command = """
INSERT IGNORE INTO object_owners (
person_id, ownable_id, ownable_type, modified_by_id,
created_at, updated_at, status
)
SELECT
modified_by_id, id, "Comment", modified_by_id, created_at, updated_at,
"Draft"
FROM comments;
"""
op.execute(command)
def downgrade():
"""Do not delete any comments' owner information to preserve data."""
|
<commit_before><commit_msg>Create existing comments' owners records<commit_after>
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: peter@reciprocitylabs.com
# Maintained By: peter@reciprocitylabs.com
"""
Add comments' owners information.
Create Date: 2016-06-08 13:25:26.635435
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = "170e453da661"
down_revision = "7a9b715ec504"
def upgrade():
"""Create owner information for the existing comments.
A comment's owner is assumed to be the user who last edited it, and this
information is added to the object_owners table for all existing comments.
If a record already exists, do nothing (this could happen e.g. on a DB
downgrade and a subsequent another upgrade).
"""
# NOTE: we set the status column's value to "Draft" to be consistent with
# what the application does when a new comment is created
command = """
INSERT IGNORE INTO object_owners (
person_id, ownable_id, ownable_type, modified_by_id,
created_at, updated_at, status
)
SELECT
modified_by_id, id, "Comment", modified_by_id, created_at, updated_at,
"Draft"
FROM comments;
"""
op.execute(command)
def downgrade():
"""Do not delete any comments' owner information to preserve data."""
|
Create existing comments' owners records# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: peter@reciprocitylabs.com
# Maintained By: peter@reciprocitylabs.com
"""
Add comments' owners information.
Create Date: 2016-06-08 13:25:26.635435
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = "170e453da661"
down_revision = "7a9b715ec504"
def upgrade():
"""Create owner information for the existing comments.
A comment's owner is assumed to be the user who last edited it, and this
information is added to the object_owners table for all existing comments.
If a record already exists, do nothing (this could happen e.g. on a DB
downgrade and a subsequent another upgrade).
"""
# NOTE: we set the status column's value to "Draft" to be consistent with
# what the application does when a new comment is created
command = """
INSERT IGNORE INTO object_owners (
person_id, ownable_id, ownable_type, modified_by_id,
created_at, updated_at, status
)
SELECT
modified_by_id, id, "Comment", modified_by_id, created_at, updated_at,
"Draft"
FROM comments;
"""
op.execute(command)
def downgrade():
"""Do not delete any comments' owner information to preserve data."""
|
<commit_before><commit_msg>Create existing comments' owners records<commit_after># Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: peter@reciprocitylabs.com
# Maintained By: peter@reciprocitylabs.com
"""
Add comments' owners information.
Create Date: 2016-06-08 13:25:26.635435
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = "170e453da661"
down_revision = "7a9b715ec504"
def upgrade():
"""Create owner information for the existing comments.
A comment's owner is assumed to be the user who last edited it, and this
information is added to the object_owners table for all existing comments.
If a record already exists, do nothing (this could happen e.g. on a DB
downgrade and a subsequent another upgrade).
"""
# NOTE: we set the status column's value to "Draft" to be consistent with
# what the application does when a new comment is created
command = """
INSERT IGNORE INTO object_owners (
person_id, ownable_id, ownable_type, modified_by_id,
created_at, updated_at, status
)
SELECT
modified_by_id, id, "Comment", modified_by_id, created_at, updated_at,
"Draft"
FROM comments;
"""
op.execute(command)
def downgrade():
"""Do not delete any comments' owner information to preserve data."""
|
|
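The raw INSERT IGNORE above can also be expressed with SQLAlchemy Core; the sketch below mirrors the same column mapping and is only illustrative, not part of the actual revision:

import sqlalchemy as sa
from sqlalchemy.sql import table, column

object_owners = table('object_owners',
    column('person_id'), column('ownable_id'), column('ownable_type'),
    column('modified_by_id'), column('created_at'), column('updated_at'),
    column('status'))
comments = table('comments',
    column('id'), column('modified_by_id'),
    column('created_at'), column('updated_at'))

select_owners = sa.select([
    comments.c.modified_by_id, comments.c.id, sa.literal('Comment'),
    comments.c.modified_by_id, comments.c.created_at, comments.c.updated_at,
    sa.literal('Draft')])

# MySQL's INSERT IGNORE maps to prefix_with('IGNORE') on the insert construct.
stmt = object_owners.insert().prefix_with('IGNORE').from_select(
    ['person_id', 'ownable_id', 'ownable_type', 'modified_by_id',
     'created_at', 'updated_at', 'status'],
    select_owners)
# op.execute(stmt) would then run it inside the migration.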
c9690cabe3c4d1d02307e3594a2cac505f4a166d
|
photutils/utils/_moments.py
|
photutils/utils/_moments.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from ..centroids import centroid_com
__all__ = ['_moments_central', '_moments']
def _moments_central(data, center=None, order=1):
"""
Calculate the central image moments up to the specified order.
Parameters
----------
data : 2D array-like
The input 2D array.
center : tuple of two floats or `None`, optional
        The ``(x, y)`` center position. If `None` it will be calculated as
the "center of mass" of the input ``data``.
order : int, optional
The maximum order of the moments to calculate.
Returns
-------
moments : 2D `~numpy.ndarray`
The central image moments.
"""
data = np.asarray(data)
if data.ndim != 2:
raise ValueError('data must be a 2D array.')
if center is None:
center = centroid_com(data)
indices = np.ogrid[[slice(0, i) for i in data.shape]]
ypowers = (indices[0] - center[1]) ** np.arange(order + 1)
xpowers = np.transpose(indices[1] - center[0]) ** np.arange(order + 1)
return np.dot(np.transpose(xpowers), np.dot(data, ypowers))
def _moments(data, order=1):
"""
Calculate the raw image moments up to the specified order.
Parameters
----------
data : 2D array-like
The input 2D array.
order : int, optional
The maximum order of the moments to calculate.
Returns
-------
moments : 2D `~numpy.ndarray`
The raw image moments.
"""
return _moments_central(data, center=(0, 0), order=order)
|
Add new image moments functions
|
Add new image moments functions
|
Python
|
bsd-3-clause
|
astropy/photutils,larrybradley/photutils
|
Add new image moments functions
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from ..centroids import centroid_com
__all__ = ['_moments_central', '_moments']
def _moments_central(data, center=None, order=1):
"""
Calculate the central image moments up to the specified order.
Parameters
----------
data : 2D array-like
The input 2D array.
center : tuple of two floats or `None`, optional
        The ``(x, y)`` center position. If `None` it will be calculated as
the "center of mass" of the input ``data``.
order : int, optional
The maximum order of the moments to calculate.
Returns
-------
moments : 2D `~numpy.ndarray`
The central image moments.
"""
data = np.asarray(data)
if data.ndim != 2:
raise ValueError('data must be a 2D array.')
if center is None:
center = centroid_com(data)
indices = np.ogrid[[slice(0, i) for i in data.shape]]
ypowers = (indices[0] - center[1]) ** np.arange(order + 1)
xpowers = np.transpose(indices[1] - center[0]) ** np.arange(order + 1)
return np.dot(np.transpose(xpowers), np.dot(data, ypowers))
def _moments(data, order=1):
"""
Calculate the raw image moments up to the specified order.
Parameters
----------
data : 2D array-like
The input 2D array.
order : int, optional
The maximum order of the moments to calculate.
Returns
-------
moments : 2D `~numpy.ndarray`
The raw image moments.
"""
return _moments_central(data, center=(0, 0), order=order)
|
<commit_before><commit_msg>Add new image moments functions<commit_after>
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from ..centroids import centroid_com
__all__ = ['_moments_central', '_moments']
def _moments_central(data, center=None, order=1):
"""
Calculate the central image moments up to the specified order.
Parameters
----------
data : 2D array-like
The input 2D array.
center : tuple of two floats or `None`, optional
        The ``(x, y)`` center position. If `None` it will be calculated as
the "center of mass" of the input ``data``.
order : int, optional
The maximum order of the moments to calculate.
Returns
-------
moments : 2D `~numpy.ndarray`
The central image moments.
"""
data = np.asarray(data)
if data.ndim != 2:
raise ValueError('data must be a 2D array.')
if center is None:
center = centroid_com(data)
indices = np.ogrid[[slice(0, i) for i in data.shape]]
ypowers = (indices[0] - center[1]) ** np.arange(order + 1)
xpowers = np.transpose(indices[1] - center[0]) ** np.arange(order + 1)
return np.dot(np.transpose(xpowers), np.dot(data, ypowers))
def _moments(data, order=1):
"""
Calculate the raw image moments up to the specified order.
Parameters
----------
data : 2D array-like
The input 2D array.
order : int, optional
The maximum order of the moments to calculate.
Returns
-------
moments : 2D `~numpy.ndarray`
The raw image moments.
"""
return _moments_central(data, center=(0, 0), order=order)
|
Add new image moments functions# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from ..centroids import centroid_com
__all__ = ['_moments_central', '_moments']
def _moments_central(data, center=None, order=1):
"""
Calculate the central image moments up to the specified order.
Parameters
----------
data : 2D array-like
The input 2D array.
center : tuple of two floats or `None`, optional
        The ``(x, y)`` center position. If `None` it will be calculated as
the "center of mass" of the input ``data``.
order : int, optional
The maximum order of the moments to calculate.
Returns
-------
moments : 2D `~numpy.ndarray`
The central image moments.
"""
data = np.asarray(data)
if data.ndim != 2:
raise ValueError('data must be a 2D array.')
if center is None:
center = centroid_com(data)
indices = np.ogrid[[slice(0, i) for i in data.shape]]
ypowers = (indices[0] - center[1]) ** np.arange(order + 1)
xpowers = np.transpose(indices[1] - center[0]) ** np.arange(order + 1)
return np.dot(np.transpose(xpowers), np.dot(data, ypowers))
def _moments(data, order=1):
"""
Calculate the raw image moments up to the specified order.
Parameters
----------
data : 2D array-like
The input 2D array.
order : int, optional
The maximum order of the moments to calculate.
Returns
-------
moments : 2D `~numpy.ndarray`
The raw image moments.
"""
return _moments_central(data, center=(0, 0), order=order)
|
<commit_before><commit_msg>Add new image moments functions<commit_after># Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from ..centroids import centroid_com
__all__ = ['_moments_central', '_moments']
def _moments_central(data, center=None, order=1):
"""
Calculate the central image moments up to the specified order.
Parameters
----------
data : 2D array-like
The input 2D array.
center : tuple of two floats or `None`, optional
        The ``(x, y)`` center position. If `None` it will be calculated as
the "center of mass" of the input ``data``.
order : int, optional
The maximum order of the moments to calculate.
Returns
-------
moments : 2D `~numpy.ndarray`
The central image moments.
"""
data = np.asarray(data)
if data.ndim != 2:
raise ValueError('data must be a 2D array.')
if center is None:
center = centroid_com(data)
indices = np.ogrid[[slice(0, i) for i in data.shape]]
ypowers = (indices[0] - center[1]) ** np.arange(order + 1)
xpowers = np.transpose(indices[1] - center[0]) ** np.arange(order + 1)
return np.dot(np.transpose(xpowers), np.dot(data, ypowers))
def _moments(data, order=1):
"""
Calculate the raw image moments up to the specified order.
Parameters
----------
data : 2D array-like
The input 2D array.
order : int, optional
The maximum order of the moments to calculate.
Returns
-------
moments : 2D `~numpy.ndarray`
The raw image moments.
"""
return _moments_central(data, center=(0, 0), order=order)
|
|
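For intuition about what central moments like these represent, plain NumPy on a tiny symmetric array shows that the zeroth-order central moment equals the total flux and the first-order central moments about the centroid vanish; the array and values below are purely illustrative:

import numpy as np

data = np.array([[0., 1., 0.],
                 [1., 4., 1.],
                 [0., 1., 0.]])

total = data.sum()              # zeroth-order moment M00 = 8.0
yy, xx = np.mgrid[0:3, 0:3]
xc = (xx * data).sum() / total  # centroid x = 1.0
yc = (yy * data).sum() / total  # centroid y = 1.0

# First-order central moments about the centroid are ~0.
print(((xx - xc) * data).sum(), ((yy - yc) * data).sum())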
6a3c960640741036c3f444547cada1e1b7a24100
|
tests/test_api.py
|
tests/test_api.py
|
import os
import sys
import json
import responses
import unittest
CWD = os.path.dirname(os.path.abspath(__file__))
MS_WD = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Allow import of api.py
if os.path.join(MS_WD, 'utils') not in sys.path:
sys.path.insert(0, os.path.join(MS_WD, 'utils'))
# Use multiscanner in ../
sys.path.insert(0, os.path.dirname(CWD))
import multiscanner
import api
HTTP_OK = 200
HTTP_CREATED = 201
class TestURLCase(unittest.TestCase):
def setUp(self):
self.app = api.app.test_client()
def test_index(self):
expected_response = {'Message': 'True'}
resp = self.app.get('/')
self.assertEqual(resp.status_code, HTTP_OK)
self.assertEqual(json.loads(resp.data), expected_response)
|
Add first unit test for api
|
Add first unit test for api
|
Python
|
mpl-2.0
|
mitre/multiscanner,mitre/multiscanner,MITRECND/multiscanner,jmlong1027/multiscanner,jmlong1027/multiscanner,awest1339/multiscanner,awest1339/multiscanner,awest1339/multiscanner,awest1339/multiscanner,jmlong1027/multiscanner,MITRECND/multiscanner,jmlong1027/multiscanner,mitre/multiscanner
|
Add first unit test for api
|
import os
import sys
import json
import responses
import unittest
CWD = os.path.dirname(os.path.abspath(__file__))
MS_WD = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Allow import of api.py
if os.path.join(MS_WD, 'utils') not in sys.path:
sys.path.insert(0, os.path.join(MS_WD, 'utils'))
# Use multiscanner in ../
sys.path.insert(0, os.path.dirname(CWD))
import multiscanner
import api
HTTP_OK = 200
HTTP_CREATED = 201
class TestURLCase(unittest.TestCase):
def setUp(self):
self.app = api.app.test_client()
def test_index(self):
expected_response = {'Message': 'True'}
resp = self.app.get('/')
self.assertEqual(resp.status_code, HTTP_OK)
self.assertEqual(json.loads(resp.data), expected_response)
|
<commit_before><commit_msg>Add first unit test for api<commit_after>
|
import os
import sys
import json
import responses
import unittest
CWD = os.path.dirname(os.path.abspath(__file__))
MS_WD = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Allow import of api.py
if os.path.join(MS_WD, 'utils') not in sys.path:
sys.path.insert(0, os.path.join(MS_WD, 'utils'))
# Use multiscanner in ../
sys.path.insert(0, os.path.dirname(CWD))
import multiscanner
import api
HTTP_OK = 200
HTTP_CREATED = 201
class TestURLCase(unittest.TestCase):
def setUp(self):
self.app = api.app.test_client()
def test_index(self):
expected_response = {'Message': 'True'}
resp = self.app.get('/')
self.assertEqual(resp.status_code, HTTP_OK)
self.assertEqual(json.loads(resp.data), expected_response)
|
Add first unit test for apiimport os
import sys
import json
import responses
import unittest
CWD = os.path.dirname(os.path.abspath(__file__))
MS_WD = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Allow import of api.py
if os.path.join(MS_WD, 'utils') not in sys.path:
sys.path.insert(0, os.path.join(MS_WD, 'utils'))
# Use multiscanner in ../
sys.path.insert(0, os.path.dirname(CWD))
import multiscanner
import api
HTTP_OK = 200
HTTP_CREATED = 201
class TestURLCase(unittest.TestCase):
def setUp(self):
self.app = api.app.test_client()
def test_index(self):
expected_response = {'Message': 'True'}
resp = self.app.get('/')
self.assertEqual(resp.status_code, HTTP_OK)
self.assertEqual(json.loads(resp.data), expected_response)
|
<commit_before><commit_msg>Add first unit test for api<commit_after>import os
import sys
import json
import responses
import unittest
CWD = os.path.dirname(os.path.abspath(__file__))
MS_WD = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Allow import of api.py
if os.path.join(MS_WD, 'utils') not in sys.path:
sys.path.insert(0, os.path.join(MS_WD, 'utils'))
# Use multiscanner in ../
sys.path.insert(0, os.path.dirname(CWD))
import multiscanner
import api
HTTP_OK = 200
HTTP_CREATED = 201
class TestURLCase(unittest.TestCase):
def setUp(self):
self.app = api.app.test_client()
def test_index(self):
expected_response = {'Message': 'True'}
resp = self.app.get('/')
self.assertEqual(resp.status_code, HTTP_OK)
self.assertEqual(json.loads(resp.data), expected_response)
|
|
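The response shape that test_index asserts on could be produced by an endpoint as small as the Flask sketch below; the real multiscanner api module does much more, so treat this only as an illustration of the expected behaviour:

from flask import Flask, jsonify

app = Flask(__name__)

@app.route('/')
def index():
    # jsonify returns HTTP 200 with a JSON body by default.
    return jsonify({'Message': 'True'})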
763680e57b28a9746050206cd63450bf11c3e512
|
src/ggrc_basic_permissions/migrations/versions/20131010001257_10adeac7b693_fix_programeditor_pe.py
|
src/ggrc_basic_permissions/migrations/versions/20131010001257_10adeac7b693_fix_programeditor_pe.py
|
"""Fix ProgramEditor permissions
Revision ID: 10adeac7b693
Revises: 8f33d9bd2043
Create Date: 2013-10-10 00:12:57.391754
"""
# revision identifiers, used by Alembic.
revision = '10adeac7b693'
down_revision = '8f33d9bd2043'
import json
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
def set_permissions(program_editor_objects):
program_editor_delete_objects = list(program_editor_objects)
program_editor_delete_objects.remove('Program')
current_datetime = datetime.now()
op.execute(roles_table.update()\
.values(
permissions_json = json.dumps({
'create': program_editor_objects,
'read': program_editor_objects,
'update': program_editor_objects,
'delete': program_editor_delete_objects,
}),
updated_at = current_datetime)\
.where(roles_table.c.name == 'ProgramEditor'))
def upgrade():
set_permissions([
'Cycle',
'ObjectDocument',
'ObjectObjective',
'ObjectPerson',
'ObjectSection',
'Program',
'ProgramControl',
'ProgramDirective',
'Relationship',
])
def downgrade():
pass
|
Fix ProgramEditor permissions to not include Program delete
|
Fix ProgramEditor permissions to not include Program delete
|
Python
|
apache-2.0
|
VinnieJohns/ggrc-core,prasannav7/ggrc-core,prasannav7/ggrc-core,AleksNeStu/ggrc-core,vladan-m/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,prasannav7/ggrc-core,hyperNURb/ggrc-core,selahssea/ggrc-core,hasanalom/ggrc-core,jmakov/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,uskudnik/ggrc-core,NejcZupec/ggrc-core,uskudnik/ggrc-core,uskudnik/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,jmakov/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,hasanalom/ggrc-core,jmakov/ggrc-core,hyperNURb/ggrc-core,AleksNeStu/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,uskudnik/ggrc-core,plamut/ggrc-core,vladan-m/ggrc-core,josthkko/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,vladan-m/ggrc-core,selahssea/ggrc-core,uskudnik/ggrc-core,j0gurt/ggrc-core,hasanalom/ggrc-core,plamut/ggrc-core,jmakov/ggrc-core,vladan-m/ggrc-core,hyperNURb/ggrc-core,hyperNURb/ggrc-core,andrei-karalionak/ggrc-core,hasanalom/ggrc-core,VinnieJohns/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core,hasanalom/ggrc-core,jmakov/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,edofic/ggrc-core,vladan-m/ggrc-core,hyperNURb/ggrc-core,prasannav7/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core
|
Fix ProgramEditor permissions to not include Program delete
|
"""Fix ProgramEditor permissions
Revision ID: 10adeac7b693
Revises: 8f33d9bd2043
Create Date: 2013-10-10 00:12:57.391754
"""
# revision identifiers, used by Alembic.
revision = '10adeac7b693'
down_revision = '8f33d9bd2043'
import json
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
def set_permissions(program_editor_objects):
program_editor_delete_objects = list(program_editor_objects)
program_editor_delete_objects.remove('Program')
current_datetime = datetime.now()
op.execute(roles_table.update()\
.values(
permissions_json = json.dumps({
'create': program_editor_objects,
'read': program_editor_objects,
'update': program_editor_objects,
'delete': program_editor_delete_objects,
}),
updated_at = current_datetime)\
.where(roles_table.c.name == 'ProgramEditor'))
def upgrade():
set_permissions([
'Cycle',
'ObjectDocument',
'ObjectObjective',
'ObjectPerson',
'ObjectSection',
'Program',
'ProgramControl',
'ProgramDirective',
'Relationship',
])
def downgrade():
pass
|
<commit_before><commit_msg>Fix ProgramEditor permissions to not include Program delete<commit_after>
|
"""Fix ProgramEditor permissions
Revision ID: 10adeac7b693
Revises: 8f33d9bd2043
Create Date: 2013-10-10 00:12:57.391754
"""
# revision identifiers, used by Alembic.
revision = '10adeac7b693'
down_revision = '8f33d9bd2043'
import json
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
def set_permissions(program_editor_objects):
program_editor_delete_objects = list(program_editor_objects)
program_editor_delete_objects.remove('Program')
current_datetime = datetime.now()
op.execute(roles_table.update()\
.values(
permissions_json = json.dumps({
'create': program_editor_objects,
'read': program_editor_objects,
'update': program_editor_objects,
'delete': program_editor_delete_objects,
}),
updated_at = current_datetime)\
.where(roles_table.c.name == 'ProgramEditor'))
def upgrade():
set_permissions([
'Cycle',
'ObjectDocument',
'ObjectObjective',
'ObjectPerson',
'ObjectSection',
'Program',
'ProgramControl',
'ProgramDirective',
'Relationship',
])
def downgrade():
pass
|
Fix ProgramEditor permissions to not include Program delete
"""Fix ProgramEditor permissions
Revision ID: 10adeac7b693
Revises: 8f33d9bd2043
Create Date: 2013-10-10 00:12:57.391754
"""
# revision identifiers, used by Alembic.
revision = '10adeac7b693'
down_revision = '8f33d9bd2043'
import json
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
def set_permissions(program_editor_objects):
program_editor_delete_objects = list(program_editor_objects)
program_editor_delete_objects.remove('Program')
current_datetime = datetime.now()
op.execute(roles_table.update()\
.values(
permissions_json = json.dumps({
'create': program_editor_objects,
'read': program_editor_objects,
'update': program_editor_objects,
'delete': program_editor_delete_objects,
}),
updated_at = current_datetime)\
.where(roles_table.c.name == 'ProgramEditor'))
def upgrade():
set_permissions([
'Cycle',
'ObjectDocument',
'ObjectObjective',
'ObjectPerson',
'ObjectSection',
'Program',
'ProgramControl',
'ProgramDirective',
'Relationship',
])
def downgrade():
pass
|
<commit_before><commit_msg>Fix ProgramEditor permissions to not include Program delete<commit_after>
"""Fix ProgramEditor permissions
Revision ID: 10adeac7b693
Revises: 8f33d9bd2043
Create Date: 2013-10-10 00:12:57.391754
"""
# revision identifiers, used by Alembic.
revision = '10adeac7b693'
down_revision = '8f33d9bd2043'
import json
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
def set_permissions(program_editor_objects):
program_editor_delete_objects = list(program_editor_objects)
program_editor_delete_objects.remove('Program')
current_datetime = datetime.now()
op.execute(roles_table.update()\
.values(
permissions_json = json.dumps({
'create': program_editor_objects,
'read': program_editor_objects,
'update': program_editor_objects,
'delete': program_editor_delete_objects,
}),
updated_at = current_datetime)\
.where(roles_table.c.name == 'ProgramEditor'))
def upgrade():
set_permissions([
'Cycle',
'ObjectDocument',
'ObjectObjective',
'ObjectPerson',
'ObjectSection',
'Program',
'ProgramControl',
'ProgramDirective',
'Relationship',
])
def downgrade():
pass
|
|
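The effect of set_permissions is easiest to see by replaying its logic outside Alembic and printing the JSON it would store for the ProgramEditor role:

import json

objects = ['Cycle', 'ObjectDocument', 'ObjectObjective', 'ObjectPerson',
           'ObjectSection', 'Program', 'ProgramControl', 'ProgramDirective',
           'Relationship']
delete_objects = list(objects)
delete_objects.remove('Program')  # every right on Program except delete

print(json.dumps({'create': objects, 'read': objects,
                  'update': objects, 'delete': delete_objects}, indent=2))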
57fe1a44c2285f39cc1454bbd6cfb3ce621348c3
|
aligot/tests/test_user.py
|
aligot/tests/test_user.py
|
# coding: utf-8
from django.core.urlresolvers import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from ..models import User
class TestUser(TestCase):
def setUp(self):
self.client = APIClient()
def test_create_without_params(self):
self.assertEquals(status.HTTP_400_BAD_REQUEST, self.client.post(reverse('user-create')).status_code)
self.assertEquals(0, User.objects.count())
def test_create(self):
"""
Create user & wait for 201 response.
"""
data = {
'username': 'test',
'password': 'test',
'email': 'test@mail.com'
}
response = self.client.post(reverse('user-create'), data)
self.assertEqual(status.HTTP_201_CREATED, response.status_code, response.content)
self.assertEqual(1, User.objects.count())
# Check the first
user = User.objects.all()[0]
        self.assertEqual(user.username, data['username'], 'Username in DB doesn\'t match')
|
Add a test to validate the user creation
|
Add a test to validate the user creation
|
Python
|
mit
|
aligot-project/aligot,aligot-project/aligot,aligot-project/aligot,skitoo/aligot
|
Add a test to validate the user creation
|
# coding: utf-8
from django.core.urlresolvers import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from ..models import User
class TestUser(TestCase):
def setUp(self):
self.client = APIClient()
def test_create_without_params(self):
self.assertEquals(status.HTTP_400_BAD_REQUEST, self.client.post(reverse('user-create')).status_code)
self.assertEquals(0, User.objects.count())
def test_create(self):
"""
Create user & wait for 201 response.
"""
data = {
'username': 'test',
'password': 'test',
'email': 'test@mail.com'
}
response = self.client.post(reverse('user-create'), data)
self.assertEqual(status.HTTP_201_CREATED, response.status_code, response.content)
self.assertEqual(1, User.objects.count())
# Check the first
user = User.objects.all()[0]
        self.assertEqual(user.username, data['username'], 'Username in DB doesn\'t match')
|
<commit_before><commit_msg>Add a test to validate the user creation<commit_after>
|
# coding: utf-8
from django.core.urlresolvers import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from ..models import User
class TestUser(TestCase):
def setUp(self):
self.client = APIClient()
def test_create_without_params(self):
self.assertEquals(status.HTTP_400_BAD_REQUEST, self.client.post(reverse('user-create')).status_code)
self.assertEquals(0, User.objects.count())
def test_create(self):
"""
Create user & wait for 201 response.
"""
data = {
'username': 'test',
'password': 'test',
'email': 'test@mail.com'
}
response = self.client.post(reverse('user-create'), data)
self.assertEqual(status.HTTP_201_CREATED, response.status_code, response.content)
self.assertEqual(1, User.objects.count())
# Check the first
user = User.objects.all()[0]
        self.assertEqual(user.username, data['username'], 'Username in DB doesn\'t match')
|
Add a test to validate the user creation# coding: utf-8
from django.core.urlresolvers import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from ..models import User
class TestUser(TestCase):
def setUp(self):
self.client = APIClient()
def test_create_without_params(self):
self.assertEquals(status.HTTP_400_BAD_REQUEST, self.client.post(reverse('user-create')).status_code)
self.assertEquals(0, User.objects.count())
def test_create(self):
"""
Create user & wait for 201 response.
"""
data = {
'username': 'test',
'password': 'test',
'email': 'test@mail.com'
}
response = self.client.post(reverse('user-create'), data)
self.assertEqual(status.HTTP_201_CREATED, response.status_code, response.content)
self.assertEqual(1, User.objects.count())
# Check the first
user = User.objects.all()[0]
        self.assertEqual(user.username, data['username'], 'Username in DB doesn\'t match')
|
<commit_before><commit_msg>Add a test to validate the user creation<commit_after># coding: utf-8
from django.core.urlresolvers import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from ..models import User
class TestUser(TestCase):
def setUp(self):
self.client = APIClient()
def test_create_without_params(self):
self.assertEquals(status.HTTP_400_BAD_REQUEST, self.client.post(reverse('user-create')).status_code)
self.assertEquals(0, User.objects.count())
def test_create(self):
"""
Create user & wait for 201 response.
"""
data = {
'username': 'test',
'password': 'test',
'email': 'test@mail.com'
}
response = self.client.post(reverse('user-create'), data)
self.assertEqual(status.HTTP_201_CREATED, response.status_code, response.content)
self.assertEqual(1, User.objects.count())
# Check the first
user = User.objects.all()[0]
        self.assertEqual(user.username, data['username'], 'Username in DB doesn\'t match')
|
|
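A Django REST Framework view behind the 'user-create' route could look like the sketch below; the class names are illustrative assumptions, not taken from the aligot codebase:

from rest_framework import generics, serializers

from ..models import User


class UserSerializer(serializers.ModelSerializer):
    class Meta:
        model = User
        fields = ('username', 'password', 'email')


class UserCreateView(generics.CreateAPIView):
    # POST with username/password/email creates a User and returns 201.
    # A real implementation would also hash the password in create().
    serializer_class = UserSerializer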
4afd2553625db404cdfedfcf336079b3d9d723e3
|
st2auth/tests/unit/test_validation_utils.py
|
st2auth/tests/unit/test_validation_utils.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from oslo_config import cfg
from st2auth.validation import validate_auth_backend_is_correctly_configured
from st2tests import config as tests_config
__all__ = [
'ValidationUtilsTestCase'
]
class ValidationUtilsTestCase(unittest2.TestCase):
def setUp(self):
super(ValidationUtilsTestCase, self).setUp()
tests_config.parse_args()
def test_validate_auth_backend_is_correctly_configured_success(self):
result = validate_auth_backend_is_correctly_configured()
self.assertTrue(result)
def test_validate_auth_backend_is_correctly_configured_invalid_backend(self):
cfg.CONF.set_override(group='auth', name='mode', override='invalid')
expected_msg = ('Invalid auth mode "invalid" specified in the config. '
'Valid modes are: proxy, standalone')
self.assertRaisesRegexp(ValueError, expected_msg,
validate_auth_backend_is_correctly_configured)
def test_validate_auth_backend_is_correctly_configured_backend_doesnt_expose_groups(self):
        # Flat file backend doesn't expose user group membership information, i.e. it
        # doesn't provide "has group info" capability
cfg.CONF.set_override(group='auth', name='backend', override='flat_file')
cfg.CONF.set_override(group='auth', name='backend_kwargs',
override='{"file_path": "dummy"}')
cfg.CONF.set_override(group='rbac', name='enable', override=True)
cfg.CONF.set_override(group='rbac', name='sync_remote_groups', override=True)
expected_msg = ('Configured auth backend doesn\'t expose user group information. Disable '
'remote group synchronization or')
self.assertRaisesRegexp(ValueError, expected_msg,
validate_auth_backend_is_correctly_configured)
|
Add test for auth service pre-run time validation checks.
|
Add test for auth service pre-run time validation checks.
|
Python
|
apache-2.0
|
StackStorm/st2,Plexxi/st2,StackStorm/st2,StackStorm/st2,Plexxi/st2,Plexxi/st2,nzlosh/st2,nzlosh/st2,StackStorm/st2,Plexxi/st2,tonybaloney/st2,tonybaloney/st2,nzlosh/st2,nzlosh/st2,tonybaloney/st2
|
Add test for auth service pre-run time validation checks.
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from oslo_config import cfg
from st2auth.validation import validate_auth_backend_is_correctly_configured
from st2tests import config as tests_config
__all__ = [
'ValidationUtilsTestCase'
]
class ValidationUtilsTestCase(unittest2.TestCase):
def setUp(self):
super(ValidationUtilsTestCase, self).setUp()
tests_config.parse_args()
def test_validate_auth_backend_is_correctly_configured_success(self):
result = validate_auth_backend_is_correctly_configured()
self.assertTrue(result)
def test_validate_auth_backend_is_correctly_configured_invalid_backend(self):
cfg.CONF.set_override(group='auth', name='mode', override='invalid')
expected_msg = ('Invalid auth mode "invalid" specified in the config. '
'Valid modes are: proxy, standalone')
self.assertRaisesRegexp(ValueError, expected_msg,
validate_auth_backend_is_correctly_configured)
def test_validate_auth_backend_is_correctly_configured_backend_doesnt_expose_groups(self):
        # Flat file backend doesn't expose user group membership information, i.e. it
        # doesn't provide "has group info" capability
cfg.CONF.set_override(group='auth', name='backend', override='flat_file')
cfg.CONF.set_override(group='auth', name='backend_kwargs',
override='{"file_path": "dummy"}')
cfg.CONF.set_override(group='rbac', name='enable', override=True)
cfg.CONF.set_override(group='rbac', name='sync_remote_groups', override=True)
expected_msg = ('Configured auth backend doesn\'t expose user group information. Disable '
'remote group synchronization or')
self.assertRaisesRegexp(ValueError, expected_msg,
validate_auth_backend_is_correctly_configured)
|
<commit_before><commit_msg>Add test for auth service pre-run time validation checks.<commit_after>
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from oslo_config import cfg
from st2auth.validation import validate_auth_backend_is_correctly_configured
from st2tests import config as tests_config
__all__ = [
'ValidationUtilsTestCase'
]
class ValidationUtilsTestCase(unittest2.TestCase):
def setUp(self):
super(ValidationUtilsTestCase, self).setUp()
tests_config.parse_args()
def test_validate_auth_backend_is_correctly_configured_success(self):
result = validate_auth_backend_is_correctly_configured()
self.assertTrue(result)
def test_validate_auth_backend_is_correctly_configured_invalid_backend(self):
cfg.CONF.set_override(group='auth', name='mode', override='invalid')
expected_msg = ('Invalid auth mode "invalid" specified in the config. '
'Valid modes are: proxy, standalone')
self.assertRaisesRegexp(ValueError, expected_msg,
validate_auth_backend_is_correctly_configured)
def test_validate_auth_backend_is_correctly_configured_backend_doesnt_expose_groups(self):
        # Flat file backend doesn't expose user group membership information, i.e. it
        # doesn't provide "has group info" capability
cfg.CONF.set_override(group='auth', name='backend', override='flat_file')
cfg.CONF.set_override(group='auth', name='backend_kwargs',
override='{"file_path": "dummy"}')
cfg.CONF.set_override(group='rbac', name='enable', override=True)
cfg.CONF.set_override(group='rbac', name='sync_remote_groups', override=True)
expected_msg = ('Configured auth backend doesn\'t expose user group information. Disable '
'remote group synchronization or')
self.assertRaisesRegexp(ValueError, expected_msg,
validate_auth_backend_is_correctly_configured)
|
Add test for auth service pre-run time validation checks.# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from oslo_config import cfg
from st2auth.validation import validate_auth_backend_is_correctly_configured
from st2tests import config as tests_config
__all__ = [
'ValidationUtilsTestCase'
]
class ValidationUtilsTestCase(unittest2.TestCase):
def setUp(self):
super(ValidationUtilsTestCase, self).setUp()
tests_config.parse_args()
def test_validate_auth_backend_is_correctly_configured_success(self):
result = validate_auth_backend_is_correctly_configured()
self.assertTrue(result)
def test_validate_auth_backend_is_correctly_configured_invalid_backend(self):
cfg.CONF.set_override(group='auth', name='mode', override='invalid')
expected_msg = ('Invalid auth mode "invalid" specified in the config. '
'Valid modes are: proxy, standalone')
self.assertRaisesRegexp(ValueError, expected_msg,
validate_auth_backend_is_correctly_configured)
def test_validate_auth_backend_is_correctly_configured_backend_doesnt_expose_groups(self):
        # Flat file backend doesn't expose user group membership information, i.e. it
        # doesn't provide "has group info" capability
cfg.CONF.set_override(group='auth', name='backend', override='flat_file')
cfg.CONF.set_override(group='auth', name='backend_kwargs',
override='{"file_path": "dummy"}')
cfg.CONF.set_override(group='rbac', name='enable', override=True)
cfg.CONF.set_override(group='rbac', name='sync_remote_groups', override=True)
expected_msg = ('Configured auth backend doesn\'t expose user group information. Disable '
'remote group synchronization or')
self.assertRaisesRegexp(ValueError, expected_msg,
validate_auth_backend_is_correctly_configured)
|
<commit_before><commit_msg>Add test for auth service pre-run time validation checks.<commit_after># Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from oslo_config import cfg
from st2auth.validation import validate_auth_backend_is_correctly_configured
from st2tests import config as tests_config
__all__ = [
'ValidationUtilsTestCase'
]
class ValidationUtilsTestCase(unittest2.TestCase):
def setUp(self):
super(ValidationUtilsTestCase, self).setUp()
tests_config.parse_args()
def test_validate_auth_backend_is_correctly_configured_success(self):
result = validate_auth_backend_is_correctly_configured()
self.assertTrue(result)
def test_validate_auth_backend_is_correctly_configured_invalid_backend(self):
cfg.CONF.set_override(group='auth', name='mode', override='invalid')
expected_msg = ('Invalid auth mode "invalid" specified in the config. '
'Valid modes are: proxy, standalone')
self.assertRaisesRegexp(ValueError, expected_msg,
validate_auth_backend_is_correctly_configured)
def test_validate_auth_backend_is_correctly_configured_backend_doesnt_expose_groups(self):
        # Flat file backend doesn't expose user group membership information, i.e. it
        # doesn't provide "has group info" capability
cfg.CONF.set_override(group='auth', name='backend', override='flat_file')
cfg.CONF.set_override(group='auth', name='backend_kwargs',
override='{"file_path": "dummy"}')
cfg.CONF.set_override(group='rbac', name='enable', override=True)
cfg.CONF.set_override(group='rbac', name='sync_remote_groups', override=True)
expected_msg = ('Configured auth backend doesn\'t expose user group information. Disable '
'remote group synchronization or')
self.assertRaisesRegexp(ValueError, expected_msg,
validate_auth_backend_is_correctly_configured)
|
|
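The two failure modes covered by these tests boil down to a check of roughly the following shape; this is a simplified stand-in, not the real validate_auth_backend_is_correctly_configured:

VALID_MODES = ('proxy', 'standalone')


def validate_auth_config(mode, backend_has_group_info, sync_remote_groups):
    if mode not in VALID_MODES:
        raise ValueError('Invalid auth mode "%s" specified in the config. '
                         'Valid modes are: %s' % (mode, ', '.join(VALID_MODES)))
    if sync_remote_groups and not backend_has_group_info:
        raise ValueError("Configured auth backend doesn't expose user group "
                         'information. Disable remote group synchronization or '
                         'use a backend which provides it.')
    return True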
df9c8b2c2e616937afdbf09fc4a76ac7b821c8a5
|
bugimporters/tests/test_spider.py
|
bugimporters/tests/test_spider.py
|
import os
import bugimporters.main
from mock import Mock
HERE = os.path.dirname(os.path.abspath(__file__))
# Create a global variable that can be referenced both from inside tests
# and from module level functions.
bug_data_transit = {
'get_fresh_urls': None,
'update': None,
'delete_by_url': None,
}
trac_data_transit = {
'get_bug_times': lambda url: (None, None),
'get_timeline_url': Mock(),
'update_timeline': Mock()
}
importer_data_transits = {'bug': bug_data_transit, 'trac': trac_data_transit}
class TestBaseSpider(object):
def setup_class(cls):
cls.spider = bugimporters.main.BugImportSpider()
# This is sample input data that has an invalid special
# bug parser name.
cls.spider.input_data = [
{'as_appears_in_distribution': u'',
'documentation_type': u'',
'existing_bug_urls': [],
'bug_project_name_format': u'FEL',
'base_url': u'https://fedorahosted.org/fedora-electronic-lab/report/1',
'custom_parser': u'fedora-electronic-lab',
'documentation_text': u'',
'bitesized_text': u'',
'bitesized_type': u'',
'queries': [u'https://fedorahosted.org/fedora-electronic-lab'],
'get_older_bug_data': None,
'tracker_name': u'fedora-electronic-lab',
'bugimporter': u'trac'},
]
def test_get_bugimporters(self):
# We should get no bugimporters out.
# In the past, what happened was a crash.
assert([] == list(self.spider.get_bugimporters()))
|
Add test (which we fail at the moment)
|
Add test (which we fail at the moment)
|
Python
|
agpl-3.0
|
openhatch/oh-bugimporters,openhatch/oh-bugimporters,openhatch/oh-bugimporters
|
Add test (which we fail at the moment)
|
import os
import bugimporters.main
from mock import Mock
HERE = os.path.dirname(os.path.abspath(__file__))
# Create a global variable that can be referenced both from inside tests
# and from module level functions.
bug_data_transit = {
'get_fresh_urls': None,
'update': None,
'delete_by_url': None,
}
trac_data_transit = {
'get_bug_times': lambda url: (None, None),
'get_timeline_url': Mock(),
'update_timeline': Mock()
}
importer_data_transits = {'bug': bug_data_transit, 'trac': trac_data_transit}
class TestBaseSpider(object):
def setup_class(cls):
cls.spider = bugimporters.main.BugImportSpider()
# This is sample input data that has an invalid special
# bug parser name.
cls.spider.input_data = [
{'as_appears_in_distribution': u'',
'documentation_type': u'',
'existing_bug_urls': [],
'bug_project_name_format': u'FEL',
'base_url': u'https://fedorahosted.org/fedora-electronic-lab/report/1',
'custom_parser': u'fedora-electronic-lab',
'documentation_text': u'',
'bitesized_text': u'',
'bitesized_type': u'',
'queries': [u'https://fedorahosted.org/fedora-electronic-lab'],
'get_older_bug_data': None,
'tracker_name': u'fedora-electronic-lab',
'bugimporter': u'trac'},
]
def test_get_bugimporters(self):
# We should get no bugimporters out.
# In the past, what happened was a crash.
assert([] == list(self.spider.get_bugimporters()))
|
<commit_before><commit_msg>Add test (which we fail at the moment)<commit_after>
|
import os
import bugimporters.main
from mock import Mock
HERE = os.path.dirname(os.path.abspath(__file__))
# Create a global variable that can be referenced both from inside tests
# and from module level functions.
bug_data_transit = {
'get_fresh_urls': None,
'update': None,
'delete_by_url': None,
}
trac_data_transit = {
'get_bug_times': lambda url: (None, None),
'get_timeline_url': Mock(),
'update_timeline': Mock()
}
importer_data_transits = {'bug': bug_data_transit, 'trac': trac_data_transit}
class TestBaseSpider(object):
def setup_class(cls):
cls.spider = bugimporters.main.BugImportSpider()
# This is sample input data that has an invalid special
# bug parser name.
cls.spider.input_data = [
{'as_appears_in_distribution': u'',
'documentation_type': u'',
'existing_bug_urls': [],
'bug_project_name_format': u'FEL',
'base_url': u'https://fedorahosted.org/fedora-electronic-lab/report/1',
'custom_parser': u'fedora-electronic-lab',
'documentation_text': u'',
'bitesized_text': u'',
'bitesized_type': u'',
'queries': [u'https://fedorahosted.org/fedora-electronic-lab'],
'get_older_bug_data': None,
'tracker_name': u'fedora-electronic-lab',
'bugimporter': u'trac'},
]
def test_get_bugimporters(self):
# We should get no bugimporters out.
# In the past, what happened was a crash.
assert([] == list(self.spider.get_bugimporters()))
|
Add test (which we fail at the moment)import os
import bugimporters.main
from mock import Mock
HERE = os.path.dirname(os.path.abspath(__file__))
# Create a global variable that can be referenced both from inside tests
# and from module level functions.
bug_data_transit = {
'get_fresh_urls': None,
'update': None,
'delete_by_url': None,
}
trac_data_transit = {
'get_bug_times': lambda url: (None, None),
'get_timeline_url': Mock(),
'update_timeline': Mock()
}
importer_data_transits = {'bug': bug_data_transit, 'trac': trac_data_transit}
class TestBaseSpider(object):
def setup_class(cls):
cls.spider = bugimporters.main.BugImportSpider()
# This is sample input data that has an invalid special
# bug parser name.
cls.spider.input_data = [
{'as_appears_in_distribution': u'',
'documentation_type': u'',
'existing_bug_urls': [],
'bug_project_name_format': u'FEL',
'base_url': u'https://fedorahosted.org/fedora-electronic-lab/report/1',
'custom_parser': u'fedora-electronic-lab',
'documentation_text': u'',
'bitesized_text': u'',
'bitesized_type': u'',
'queries': [u'https://fedorahosted.org/fedora-electronic-lab'],
'get_older_bug_data': None,
'tracker_name': u'fedora-electronic-lab',
'bugimporter': u'trac'},
]
def test_get_bugimporters(self):
# We should get no bugimporters out.
# In the past, what happened was a crash.
assert([] == list(self.spider.get_bugimporters()))
|
<commit_before><commit_msg>Add test (which we fail at the moment)<commit_after>import os
import bugimporters.main
from mock import Mock
HERE = os.path.dirname(os.path.abspath(__file__))
# Create a global variable that can be referenced both from inside tests
# and from module level functions.
bug_data_transit = {
'get_fresh_urls': None,
'update': None,
'delete_by_url': None,
}
trac_data_transit = {
'get_bug_times': lambda url: (None, None),
'get_timeline_url': Mock(),
'update_timeline': Mock()
}
importer_data_transits = {'bug': bug_data_transit, 'trac': trac_data_transit}
class TestBaseSpider(object):
def setup_class(cls):
cls.spider = bugimporters.main.BugImportSpider()
# This is sample input data that has an invalid special
# bug parser name.
cls.spider.input_data = [
{'as_appears_in_distribution': u'',
'documentation_type': u'',
'existing_bug_urls': [],
'bug_project_name_format': u'FEL',
'base_url': u'https://fedorahosted.org/fedora-electronic-lab/report/1',
'custom_parser': u'fedora-electronic-lab',
'documentation_text': u'',
'bitesized_text': u'',
'bitesized_type': u'',
'queries': [u'https://fedorahosted.org/fedora-electronic-lab'],
'get_older_bug_data': None,
'tracker_name': u'fedora-electronic-lab',
'bugimporter': u'trac'},
]
def test_get_bugimporters(self):
# We should get no bugimporters out.
# In the past, what happened was a crash.
assert([] == list(self.spider.get_bugimporters()))
|
|
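The behaviour the test asks for — an unrecognised custom parser yields no importers instead of a crash — amounts to a defensive lookup along these lines; the function and registry names are hypothetical:

def resolve_importers(trackers, registry):
    # Skip trackers whose importer/parser name cannot be resolved.
    for tracker in trackers:
        importer_cls = registry.get(tracker.get('custom_parser') or
                                    tracker.get('bugimporter'))
        if importer_cls is None:
            continue
        yield importer_cls(tracker)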
fde083c87f0e2582fbf57415e957b93d116ad67a
|
app/soc/modules/gci/views/base.py
|
app/soc/modules/gci/views/base.py
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the boiler plate required to construct GCI views.
"""
__authors__ = [
'"Selwyn Jacob" <selwynjacob90@gmail.com>',
]
from soc.views.base import RequestHandler
from soc.modules.gci.views import base_templates
from soc.modules.gci.views.helper import access_checker
from soc.modules.gci.views.helper.request_data import RequestData
from soc.modules.gci.views.helper.request_data import RedirectHelper
class RequestHandler(RequestHandler):
"""Customization required by GCI to handle HTTP requests.
"""
def render(self, template_path, context):
"""Renders the page using the specified context.
See soc.views.base.RequestHandler.
The context object is extended with the following values:
header: a rendered header.Header template for the current self.data
mainmenu: a rendered site_menu.MainMenu template for the current self.data
footer: a rendered site_menu.Footer template for the current self.data
"""
context['header'] = base_templates.Header(self.data)
context['mainmenu'] = base_templates.MainMenu(self.data)
context['footer'] = base_templates.Footer(self.data)
super(RequestHandler, self).render(template_path, context)
def init(self, request, args, kwargs):
self.data = RequestData()
self.redirect = RedirectHelper(self.data, self.response)
self.data.populate(self.redirect, request, args, kwargs)
if self.data.is_developer:
self.mutator = access_checker.DeveloperMutator(self.data)
self.check = access_checker.DeveloperAccessChecker(self.data)
else:
self.mutator = access_checker.Mutator(self.data)
self.check = access_checker.AccessChecker(self.data)
def error(self, status, message=None):
self.response.set_status(status)
template_path = "v2/modules/gci/error.html"
context = {
'page_name': self.response.content,
'message': message,
}
self.response.content = ''
self.render(template_path, context)
|
Create RequestHandler related to GCI.
|
Create RequestHandler related to GCI.
--HG--
extra : rebase_source : 2b29958cb5939f0ea5b52fac58c7d18d7716299b
|
Python
|
apache-2.0
|
rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son
|
Create RequestHandler related to GCI.
--HG--
extra : rebase_source : 2b29958cb5939f0ea5b52fac58c7d18d7716299b
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the boiler plate required to construct GCI views.
"""
__authors__ = [
'"Selwyn Jacob" <selwynjacob90@gmail.com>',
]
from soc.views.base import RequestHandler
from soc.modules.gci.views import base_templates
from soc.modules.gci.views.helper import access_checker
from soc.modules.gci.views.helper.request_data import RequestData
from soc.modules.gci.views.helper.request_data import RedirectHelper
class RequestHandler(RequestHandler):
"""Customization required by GCI to handle HTTP requests.
"""
def render(self, template_path, context):
"""Renders the page using the specified context.
See soc.views.base.RequestHandler.
The context object is extended with the following values:
header: a rendered header.Header template for the current self.data
mainmenu: a rendered site_menu.MainMenu template for the current self.data
footer: a rendered site_menu.Footer template for the current self.data
"""
context['header'] = base_templates.Header(self.data)
context['mainmenu'] = base_templates.MainMenu(self.data)
context['footer'] = base_templates.Footer(self.data)
super(RequestHandler, self).render(template_path, context)
def init(self, request, args, kwargs):
self.data = RequestData()
self.redirect = RedirectHelper(self.data, self.response)
self.data.populate(self.redirect, request, args, kwargs)
if self.data.is_developer:
self.mutator = access_checker.DeveloperMutator(self.data)
self.check = access_checker.DeveloperAccessChecker(self.data)
else:
self.mutator = access_checker.Mutator(self.data)
self.check = access_checker.AccessChecker(self.data)
def error(self, status, message=None):
self.response.set_status(status)
template_path = "v2/modules/gci/error.html"
context = {
'page_name': self.response.content,
'message': message,
}
self.response.content = ''
self.render(template_path, context)
|
<commit_before><commit_msg>Create RequestHandler related to GCI.
--HG--
extra : rebase_source : 2b29958cb5939f0ea5b52fac58c7d18d7716299b<commit_after>
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the boiler plate required to construct GCI views.
"""
__authors__ = [
'"Selwyn Jacob" <selwynjacob90@gmail.com>',
]
from soc.views.base import RequestHandler
from soc.modules.gci.views import base_templates
from soc.modules.gci.views.helper import access_checker
from soc.modules.gci.views.helper.request_data import RequestData
from soc.modules.gci.views.helper.request_data import RedirectHelper
class RequestHandler(RequestHandler):
"""Customization required by GCI to handle HTTP requests.
"""
def render(self, template_path, context):
"""Renders the page using the specified context.
See soc.views.base.RequestHandler.
The context object is extended with the following values:
header: a rendered header.Header template for the current self.data
mainmenu: a rendered site_menu.MainMenu template for the current self.data
footer: a rendered site_menu.Footer template for the current self.data
"""
context['header'] = base_templates.Header(self.data)
context['mainmenu'] = base_templates.MainMenu(self.data)
context['footer'] = base_templates.Footer(self.data)
super(RequestHandler, self).render(template_path, context)
def init(self, request, args, kwargs):
self.data = RequestData()
self.redirect = RedirectHelper(self.data, self.response)
self.data.populate(self.redirect, request, args, kwargs)
if self.data.is_developer:
self.mutator = access_checker.DeveloperMutator(self.data)
self.check = access_checker.DeveloperAccessChecker(self.data)
else:
self.mutator = access_checker.Mutator(self.data)
self.check = access_checker.AccessChecker(self.data)
def error(self, status, message=None):
self.response.set_status(status)
template_path = "v2/modules/gci/error.html"
context = {
'page_name': self.response.content,
'message': message,
}
self.response.content = ''
self.render(template_path, context)
|
Create RequestHandler related to GCI.
--HG--
extra : rebase_source : 2b29958cb5939f0ea5b52fac58c7d18d7716299b#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the boiler plate required to construct GCI views.
"""
__authors__ = [
'"Selwyn Jacob" <selwynjacob90@gmail.com>',
]
from soc.views.base import RequestHandler
from soc.modules.gci.views import base_templates
from soc.modules.gci.views.helper import access_checker
from soc.modules.gci.views.helper.request_data import RequestData
from soc.modules.gci.views.helper.request_data import RedirectHelper
class RequestHandler(RequestHandler):
"""Customization required by GCI to handle HTTP requests.
"""
def render(self, template_path, context):
"""Renders the page using the specified context.
See soc.views.base.RequestHandler.
The context object is extended with the following values:
header: a rendered header.Header template for the current self.data
mainmenu: a rendered site_menu.MainMenu template for the current self.data
footer: a rendered site_menu.Footer template for the current self.data
"""
context['header'] = base_templates.Header(self.data)
context['mainmenu'] = base_templates.MainMenu(self.data)
context['footer'] = base_templates.Footer(self.data)
super(RequestHandler, self).render(template_path, context)
def init(self, request, args, kwargs):
self.data = RequestData()
self.redirect = RedirectHelper(self.data, self.response)
self.data.populate(self.redirect, request, args, kwargs)
if self.data.is_developer:
self.mutator = access_checker.DeveloperMutator(self.data)
self.check = access_checker.DeveloperAccessChecker(self.data)
else:
self.mutator = access_checker.Mutator(self.data)
self.check = access_checker.AccessChecker(self.data)
def error(self, status, message=None):
self.response.set_status(status)
template_path = "v2/modules/gci/error.html"
context = {
'page_name': self.response.content,
'message': message,
}
self.response.content = ''
self.render(template_path, context)
|
<commit_before><commit_msg>Create RequestHandler related to GCI.
--HG--
extra : rebase_source : 2b29958cb5939f0ea5b52fac58c7d18d7716299b<commit_after>#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the boiler plate required to construct GCI views.
"""
__authors__ = [
'"Selwyn Jacob" <selwynjacob90@gmail.com>',
]
from soc.views.base import RequestHandler
from soc.modules.gci.views import base_templates
from soc.modules.gci.views.helper import access_checker
from soc.modules.gci.views.helper.request_data import RequestData
from soc.modules.gci.views.helper.request_data import RedirectHelper
class RequestHandler(RequestHandler):
"""Customization required by GCI to handle HTTP requests.
"""
def render(self, template_path, context):
"""Renders the page using the specified context.
See soc.views.base.RequestHandler.
The context object is extended with the following values:
header: a rendered header.Header template for the current self.data
mainmenu: a rendered site_menu.MainMenu template for the current self.data
footer: a rendered site_menu.Footer template for the current self.data
"""
context['header'] = base_templates.Header(self.data)
context['mainmenu'] = base_templates.MainMenu(self.data)
context['footer'] = base_templates.Footer(self.data)
super(RequestHandler, self).render(template_path, context)
def init(self, request, args, kwargs):
self.data = RequestData()
self.redirect = RedirectHelper(self.data, self.response)
self.data.populate(self.redirect, request, args, kwargs)
if self.data.is_developer:
self.mutator = access_checker.DeveloperMutator(self.data)
self.check = access_checker.DeveloperAccessChecker(self.data)
else:
self.mutator = access_checker.Mutator(self.data)
self.check = access_checker.AccessChecker(self.data)
def error(self, status, message=None):
self.response.set_status(status)
template_path = "v2/modules/gci/error.html"
context = {
'page_name': self.response.content,
'message': message,
}
self.response.content = ''
self.render(template_path, context)
|
|
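As a rough sketch of how the GCI handler above is intended to be used, a hypothetical page handler could subclass it and call render() with a template path and context; the base render() then injects the 'header', 'mainmenu' and 'footer' entries for self.data before delegating to the parent class. The class name, HTTP method, template path and context key below are invented for illustration, the sketch assumes it lives in the same module as the handler above, and it assumes the base class dispatches GET requests to a get() method, as is conventional for such handlers.

class HomepageHandler(RequestHandler):
    """Hypothetical GCI page built on the handler defined above."""

    def get(self):
        # Only page-specific values need to be supplied here; render()
        # adds the header, main menu and footer templates automatically.
        context = {'page_name': 'GCI Home'}
        self.render('v2/modules/gci/homepage.html', context)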
0106355df43bc35a75aafc6b9070f78131e89bef
|
tests/search_backend_postgres.py
|
tests/search_backend_postgres.py
|
from wolis.test_case import WolisTestCase
class SearchBackendPostgresTest(WolisTestCase):
def test_set_search_backend(self):
self.login('morpheus', 'morpheus')
self.acp_login('morpheus', 'morpheus')
self.change_acp_knob(
link_text='Search settings',
check_page_text='Here you can define what search backend will be used',
name='search_type',
value='phpbb_search_fulltext_postgres',
)
if __name__ == '__main__':
import unittest
unittest.main()
|
Test for switching to postgres search backend
|
Test for switching to postgres search backend
|
Python
|
bsd-2-clause
|
p/wolis-phpbb,p/wolis-phpbb
|
Test for switching to postgres search backend
|
from wolis.test_case import WolisTestCase
class SearchBackendPostgresTest(WolisTestCase):
def test_set_search_backend(self):
self.login('morpheus', 'morpheus')
self.acp_login('morpheus', 'morpheus')
self.change_acp_knob(
link_text='Search settings',
check_page_text='Here you can define what search backend will be used',
name='search_type',
value='phpbb_search_fulltext_postgres',
)
if __name__ == '__main__':
import unittest
unittest.main()
|
<commit_before><commit_msg>Test for switching to postgres search backend<commit_after>
|
from wolis.test_case import WolisTestCase
class SearchBackendPostgresTest(WolisTestCase):
def test_set_search_backend(self):
self.login('morpheus', 'morpheus')
self.acp_login('morpheus', 'morpheus')
self.change_acp_knob(
link_text='Search settings',
check_page_text='Here you can define what search backend will be used',
name='search_type',
value='phpbb_search_fulltext_postgres',
)
if __name__ == '__main__':
import unittest
unittest.main()
|
Test for switching to postgres search backendfrom wolis.test_case import WolisTestCase
class SearchBackendPostgresTest(WolisTestCase):
def test_set_search_backend(self):
self.login('morpheus', 'morpheus')
self.acp_login('morpheus', 'morpheus')
self.change_acp_knob(
link_text='Search settings',
check_page_text='Here you can define what search backend will be used',
name='search_type',
value='phpbb_search_fulltext_postgres',
)
if __name__ == '__main__':
import unittest
unittest.main()
|
<commit_before><commit_msg>Test for switching to postgres search backend<commit_after>from wolis.test_case import WolisTestCase
class SearchBackendPostgresTest(WolisTestCase):
def test_set_search_backend(self):
self.login('morpheus', 'morpheus')
self.acp_login('morpheus', 'morpheus')
self.change_acp_knob(
link_text='Search settings',
check_page_text='Here you can define what search backend will be used',
name='search_type',
value='phpbb_search_fulltext_postgres',
)
if __name__ == '__main__':
import unittest
unittest.main()
|
|
1f12da3d049527f838ab21c042b8f18e1977af49
|
migrations/versions/0283_platform_admin_not_live.py
|
migrations/versions/0283_platform_admin_not_live.py
|
"""empty message
Revision ID: 0283_platform_admin_not_live
Revises: 0282_add_count_as_live
Create Date: 2016-10-25 17:37:27.660723
"""
# revision identifiers, used by Alembic.
revision = '0283_platform_admin_not_live'
down_revision = '0282_add_count_as_live'
from alembic import op
import sqlalchemy as sa
STATEMENT = """
UPDATE
services
SET
count_as_live = {count_as_live}
FROM
users
WHERE
services.created_by_id = users.id and
users.platform_admin is true
;
"""
def upgrade():
op.execute(STATEMENT.format(count_as_live='false'))
def downgrade():
op.execute(STATEMENT.format(count_as_live='true'))
|
Migrate existing platform admin services to not be counted
|
Migrate existing platform admin services to not be counted
If a service has been created by someone on our team, it's probably a
test service, which shouldn't be included in the count of live services.
This commit adds a migration to do this for existing services.
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Migrate existing platform admin services to not be counted
If a service has been created by someone on our team, it's probably a
test service, which shouldn't be included in the count of live services.
This commit adds a migration to do this for existing services.
|
"""empty message
Revision ID: 0283_platform_admin_not_live
Revises: 0282_add_count_as_live
Create Date: 2016-10-25 17:37:27.660723
"""
# revision identifiers, used by Alembic.
revision = '0283_platform_admin_not_live'
down_revision = '0282_add_count_as_live'
from alembic import op
import sqlalchemy as sa
STATEMENT = """
UPDATE
services
SET
count_as_live = {count_as_live}
FROM
users
WHERE
services.created_by_id = users.id and
users.platform_admin is true
;
"""
def upgrade():
op.execute(STATEMENT.format(count_as_live='false'))
def downgrade():
op.execute(STATEMENT.format(count_as_live='true'))
|
<commit_before><commit_msg>Migrate existing platform admin services to not be counted
If a service has been created by someone on our team, it's probably a
test service, which shouldn't be included in the count of live services.
This commit adds a migration to do this for existing services.<commit_after>
|
"""empty message
Revision ID: 0283_platform_admin_not_live
Revises: 0282_add_count_as_live
Create Date: 2016-10-25 17:37:27.660723
"""
# revision identifiers, used by Alembic.
revision = '0283_platform_admin_not_live'
down_revision = '0282_add_count_as_live'
from alembic import op
import sqlalchemy as sa
STATEMENT = """
UPDATE
services
SET
count_as_live = {count_as_live}
FROM
users
WHERE
services.created_by_id = users.id and
users.platform_admin is true
;
"""
def upgrade():
op.execute(STATEMENT.format(count_as_live='false'))
def downgrade():
op.execute(STATEMENT.format(count_as_live='true'))
|
Migrate existing platform admin services to not be counted
If a service has been created by someone on our team, it's probably a
test service, which shouldn't be included in the count of live services.
This commit adds a migration to do this for existing services."""empty message
Revision ID: 0283_platform_admin_not_live
Revises: 0282_add_count_as_live
Create Date: 2016-10-25 17:37:27.660723
"""
# revision identifiers, used by Alembic.
revision = '0283_platform_admin_not_live'
down_revision = '0282_add_count_as_live'
from alembic import op
import sqlalchemy as sa
STATEMENT = """
UPDATE
services
SET
count_as_live = {count_as_live}
FROM
users
WHERE
services.created_by_id = users.id and
users.platform_admin is true
;
"""
def upgrade():
op.execute(STATEMENT.format(count_as_live='false'))
def downgrade():
op.execute(STATEMENT.format(count_as_live='true'))
|
<commit_before><commit_msg>Migrate existing platform admin services to not be counted
If a service has been created by someone on our team, it's probably a
test service, which shouldn't be included in the count of live services.
This commit adds a migration to do this for existing services.<commit_after>"""empty message
Revision ID: 0283_platform_admin_not_live
Revises: 0282_add_count_as_live
Create Date: 2016-10-25 17:37:27.660723
"""
# revision identifiers, used by Alembic.
revision = '0283_platform_admin_not_live'
down_revision = '0282_add_count_as_live'
from alembic import op
import sqlalchemy as sa
STATEMENT = """
UPDATE
services
SET
count_as_live = {count_as_live}
FROM
users
WHERE
services.created_by_id = users.id and
users.platform_admin is true
;
"""
def upgrade():
op.execute(STATEMENT.format(count_as_live='false'))
def downgrade():
op.execute(STATEMENT.format(count_as_live='true'))
|
|
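For reference, the STATEMENT template above renders to plain SQL before op.execute() runs it. A minimal sketch of the upgrade-path rendering, shown only to make the string substitution explicit; it assumes the same STATEMENT constant as in the migration, and the whitespace in the printed SQL is approximate.

# Illustrative only: render the statement the upgrade() step executes.
print(STATEMENT.format(count_as_live='false'))
# UPDATE
#     services
# SET
#     count_as_live = false
# FROM
#     users
# WHERE
#     services.created_by_id = users.id and
#     users.platform_admin is true
# ;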
6857624e9d6633038f0565a520de856ee40def09
|
test/many_envs_test.py
|
test/many_envs_test.py
|
# Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from .. import ConfigRoot
from ..envs import EnvFactory
ef = EnvFactory()
envs = []
groups = []
for ii in range(0, 16):
local_envs = []
for jj in range(0, 128):
local_envs.append(ef.Env('e' + str(ii) + '_' + str(jj)))
groups.append(ef.EnvGroup('g' + str(ii), *local_envs))
envs.extend(local_envs)
def test_many_envs():
with ConfigRoot(envs[0], envs) as conf:
conf.setattr('a', default=None, e0_0=0)
conf.setattr('b', default=None, e1_7=1)
conf.setattr('c', default=None, e2_15=2)
conf.setattr('d', default=None, e3_23=3)
conf.setattr('e', default=None, e4_31=4)
conf.setattr('f', default=None, e5_39=5)
conf.setattr('g', default=None, e6_47=6)
conf.setattr('h', default=None, e7_55=7)
conf.setattr('i', default=None, e0_0=10, e15_127=8)
assert conf.a == 0
assert conf.b == None
assert conf.i == 10
def test_many_groups():
# This is slow!
with ConfigRoot(envs[0], envs) as conf:
conf.setattr('a', default=None, g0=0)
conf.setattr('b', default=None, g1=1)
conf.setattr('i', default=None, e0_0=10, g15=8)
assert conf.a == 0
assert conf.b == None
assert conf.i == 10
|
Test with many envs and large groups
|
Test with many envs and large groups
|
Python
|
bsd-3-clause
|
lhupfeldt/multiconf
|
Test with many envs and large groups
|
# Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from .. import ConfigRoot
from ..envs import EnvFactory
ef = EnvFactory()
envs = []
groups = []
for ii in range(0, 16):
local_envs = []
for jj in range(0, 128):
local_envs.append(ef.Env('e' + str(ii) + '_' + str(jj)))
groups.append(ef.EnvGroup('g' + str(ii), *local_envs))
envs.extend(local_envs)
def test_many_envs():
with ConfigRoot(envs[0], envs) as conf:
conf.setattr('a', default=None, e0_0=0)
conf.setattr('b', default=None, e1_7=1)
conf.setattr('c', default=None, e2_15=2)
conf.setattr('d', default=None, e3_23=3)
conf.setattr('e', default=None, e4_31=4)
conf.setattr('f', default=None, e5_39=5)
conf.setattr('g', default=None, e6_47=6)
conf.setattr('h', default=None, e7_55=7)
conf.setattr('i', default=None, e0_0=10, e15_127=8)
assert conf.a == 0
assert conf.b == None
assert conf.i == 10
def test_many_groups():
# This is slow!
with ConfigRoot(envs[0], envs) as conf:
conf.setattr('a', default=None, g0=0)
conf.setattr('b', default=None, g1=1)
conf.setattr('i', default=None, e0_0=10, g15=8)
assert conf.a == 0
assert conf.b == None
assert conf.i == 10
|
<commit_before><commit_msg>Test with many envs and large groups<commit_after>
|
# Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from .. import ConfigRoot
from ..envs import EnvFactory
ef = EnvFactory()
envs = []
groups = []
for ii in range(0, 16):
local_envs = []
for jj in range(0, 128):
local_envs.append(ef.Env('e' + str(ii) + '_' + str(jj)))
groups.append(ef.EnvGroup('g' + str(ii), *local_envs))
envs.extend(local_envs)
def test_many_envs():
with ConfigRoot(envs[0], envs) as conf:
conf.setattr('a', default=None, e0_0=0)
conf.setattr('b', default=None, e1_7=1)
conf.setattr('c', default=None, e2_15=2)
conf.setattr('d', default=None, e3_23=3)
conf.setattr('e', default=None, e4_31=4)
conf.setattr('f', default=None, e5_39=5)
conf.setattr('g', default=None, e6_47=6)
conf.setattr('h', default=None, e7_55=7)
conf.setattr('i', default=None, e0_0=10, e15_127=8)
assert conf.a == 0
assert conf.b == None
assert conf.i == 10
def test_many_groups():
# This is slow!
with ConfigRoot(envs[0], envs) as conf:
conf.setattr('a', default=None, g0=0)
conf.setattr('b', default=None, g1=1)
conf.setattr('i', default=None, e0_0=10, g15=8)
assert conf.a == 0
assert conf.b == None
assert conf.i == 10
|
Test with many envs and large groups# Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from .. import ConfigRoot
from ..envs import EnvFactory
ef = EnvFactory()
envs = []
groups = []
for ii in range(0, 16):
local_envs = []
for jj in range(0, 128):
local_envs.append(ef.Env('e' + str(ii) + '_' + str(jj)))
groups.append(ef.EnvGroup('g' + str(ii), *local_envs))
envs.extend(local_envs)
def test_many_envs():
with ConfigRoot(envs[0], envs) as conf:
conf.setattr('a', default=None, e0_0=0)
conf.setattr('b', default=None, e1_7=1)
conf.setattr('c', default=None, e2_15=2)
conf.setattr('d', default=None, e3_23=3)
conf.setattr('e', default=None, e4_31=4)
conf.setattr('f', default=None, e5_39=5)
conf.setattr('g', default=None, e6_47=6)
conf.setattr('h', default=None, e7_55=7)
conf.setattr('i', default=None, e0_0=10, e15_127=8)
assert conf.a == 0
assert conf.b == None
assert conf.i == 10
def test_many_groups():
# This is slow!
with ConfigRoot(envs[0], envs) as conf:
conf.setattr('a', default=None, g0=0)
conf.setattr('b', default=None, g1=1)
conf.setattr('i', default=None, e0_0=10, g15=8)
assert conf.a == 0
assert conf.b == None
assert conf.i == 10
|
<commit_before><commit_msg>Test with many envs and large groups<commit_after># Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from .. import ConfigRoot
from ..envs import EnvFactory
ef = EnvFactory()
envs = []
groups = []
for ii in range(0, 16):
local_envs = []
for jj in range(0, 128):
local_envs.append(ef.Env('e' + str(ii) + '_' + str(jj)))
groups.append(ef.EnvGroup('g' + str(ii), *local_envs))
envs.extend(local_envs)
def test_many_envs():
with ConfigRoot(envs[0], envs) as conf:
conf.setattr('a', default=None, e0_0=0)
conf.setattr('b', default=None, e1_7=1)
conf.setattr('c', default=None, e2_15=2)
conf.setattr('d', default=None, e3_23=3)
conf.setattr('e', default=None, e4_31=4)
conf.setattr('f', default=None, e5_39=5)
conf.setattr('g', default=None, e6_47=6)
conf.setattr('h', default=None, e7_55=7)
conf.setattr('i', default=None, e0_0=10, e15_127=8)
assert conf.a == 0
assert conf.b == None
assert conf.i == 10
def test_many_groups():
# This is slow!
with ConfigRoot(envs[0], envs) as conf:
conf.setattr('a', default=None, g0=0)
conf.setattr('b', default=None, g1=1)
conf.setattr('i', default=None, e0_0=10, g15=8)
assert conf.a == 0
assert conf.b == None
assert conf.i == 10
|
|
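At a smaller scale, the pattern exercised by the tests above looks roughly like this; the env names, group name and attribute values are arbitrary, and the top-level import path for ConfigRoot and EnvFactory is assumed from the relative imports used in the test module.

# Minimal sketch of the multiconf API used by the tests above.
from multiconf import ConfigRoot           # assumed package layout
from multiconf.envs import EnvFactory

ef = EnvFactory()
dev = ef.Env('dev')
prod = ef.Env('prod')
g_all = ef.EnvGroup('g_all', dev, prod)    # group name usable as a setattr kwarg

with ConfigRoot(dev, [dev, prod]) as conf:
    conf.setattr('port', default=8080, prod=80)            # per-env override
    conf.setattr('monitored', default=False, g_all=True)   # per-group override

assert conf.port == 8080        # resolved for the selected env ('dev')
assert conf.monitored == True   # dev is a member of g_all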
a4b242ebd107f9321cc5b87aee2cf608940007f4
|
product/migrations/0005_auto_20161015_1536.py
|
product/migrations/0005_auto_20161015_1536.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-15 15:36
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('product', '0004_auto_20161015_1534'),
]
operations = [
migrations.AlterModelOptions(
name='productcategory',
options={'permissions': (('manage_productcategories', 'Manage product categories'),)},
),
]
|
Make permission name more consistent.
|
Make permission name more consistent.
|
Python
|
mit
|
borderitsolutions/amadaa,borderitsolutions/amadaa,borderitsolutions/amadaa
|
Make permission name more consistent.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-15 15:36
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('product', '0004_auto_20161015_1534'),
]
operations = [
migrations.AlterModelOptions(
name='productcategory',
options={'permissions': (('manage_productcategories', 'Manage product categories'),)},
),
]
|
<commit_before><commit_msg>Make permission name more consistent.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-15 15:36
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('product', '0004_auto_20161015_1534'),
]
operations = [
migrations.AlterModelOptions(
name='productcategory',
options={'permissions': (('manage_productcategories', 'Manage product categories'),)},
),
]
|
Make permission name more consistent.# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-15 15:36
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('product', '0004_auto_20161015_1534'),
]
operations = [
migrations.AlterModelOptions(
name='productcategory',
options={'permissions': (('manage_productcategories', 'Manage product categories'),)},
),
]
|
<commit_before><commit_msg>Make permission name more consistent.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-15 15:36
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('product', '0004_auto_20161015_1534'),
]
operations = [
migrations.AlterModelOptions(
name='productcategory',
options={'permissions': (('manage_productcategories', 'Manage product categories'),)},
),
]
|
|
25c2502fce4556b5b72e96116745c83d1689677f
|
artists/tests/test_serializers.py
|
artists/tests/test_serializers.py
|
from unittest import TestCase
from ..models import Artist, Hyperlink
from ..serializers import ArtistSerializer, HyperlinkSerializer
class HyperlinkSerializerTest(TestCase):
"""Tests for Hyperlink serializer."""
def test_valid_fields(self):
id_ = 4
name = 'jamendo'
display_name = "Jamendo"
url = "http://www.jamendo.com/artist/1333"
link = Hyperlink(id=id_, name=name, url=url)
serializer = HyperlinkSerializer(link)
self.assertEqual(serializer.data, {
'id': id_,
'name': name,
'display_name': display_name,
'url': url,
})
class ArtistSerializerTest(TestCase):
"""Tests for Artist serializer."""
def test_no_links(self):
id_ = 2
name = "Brad Sucks"
artist = Artist(id=id_, name=name)
serializer = ArtistSerializer(artist)
self.assertEqual(serializer.data, {
'id': id_,
'name': name,
'links': [],
})
|
Add tests for artist serializers
|
Add tests for artist serializers
|
Python
|
bsd-3-clause
|
FreeMusicNinja/api.freemusic.ninja
|
Add tests for artist serializers
|
from unittest import TestCase
from ..models import Artist, Hyperlink
from ..serializers import ArtistSerializer, HyperlinkSerializer
class HyperlinkSerializerTest(TestCase):
"""Tests for Hyperlink serializer."""
def test_valid_fields(self):
id_ = 4
name = 'jamendo'
display_name = "Jamendo"
url = "http://www.jamendo.com/artist/1333"
link = Hyperlink(id=id_, name=name, url=url)
serializer = HyperlinkSerializer(link)
self.assertEqual(serializer.data, {
'id': id_,
'name': name,
'display_name': display_name,
'url': url,
})
class ArtistSerializerTest(TestCase):
"""Tests for Artist serializer."""
def test_no_links(self):
id_ = 2
name = "Brad Sucks"
artist = Artist(id=id_, name=name)
serializer = ArtistSerializer(artist)
self.assertEqual(serializer.data, {
'id': id_,
'name': name,
'links': [],
})
|
<commit_before><commit_msg>Add tests for artist serializers<commit_after>
|
from unittest import TestCase
from ..models import Artist, Hyperlink
from ..serializers import ArtistSerializer, HyperlinkSerializer
class HyperlinkSerializerTest(TestCase):
"""Tests for Hyperlink serializer."""
def test_valid_fields(self):
id_ = 4
name = 'jamendo'
display_name = "Jamendo"
url = "http://www.jamendo.com/artist/1333"
link = Hyperlink(id=id_, name=name, url=url)
serializer = HyperlinkSerializer(link)
self.assertEqual(serializer.data, {
'id': id_,
'name': name,
'display_name': display_name,
'url': url,
})
class ArtistSerializerTest(TestCase):
"""Tests for Artist serializer."""
def test_no_links(self):
id_ = 2
name = "Brad Sucks"
artist = Artist(id=id_, name=name)
serializer = ArtistSerializer(artist)
self.assertEqual(serializer.data, {
'id': id_,
'name': name,
'links': [],
})
|
Add tests for artist serializersfrom unittest import TestCase
from ..models import Artist, Hyperlink
from ..serializers import ArtistSerializer, HyperlinkSerializer
class HyperlinkSerializerTest(TestCase):
"""Tests for Hyperlink serializer."""
def test_valid_fields(self):
id_ = 4
name = 'jamendo'
display_name = "Jamendo"
url = "http://www.jamendo.com/artist/1333"
link = Hyperlink(id=id_, name=name, url=url)
serializer = HyperlinkSerializer(link)
self.assertEqual(serializer.data, {
'id': id_,
'name': name,
'display_name': display_name,
'url': url,
})
class ArtistSerializerTest(TestCase):
"""Tests for Artist serializer."""
def test_no_links(self):
id_ = 2
name = "Brad Sucks"
artist = Artist(id=id_, name=name)
serializer = ArtistSerializer(artist)
self.assertEqual(serializer.data, {
'id': id_,
'name': name,
'links': [],
})
|
<commit_before><commit_msg>Add tests for artist serializers<commit_after>from unittest import TestCase
from ..models import Artist, Hyperlink
from ..serializers import ArtistSerializer, HyperlinkSerializer
class HyperlinkSerializerTest(TestCase):
"""Tests for Hyperlink serializer."""
def test_valid_fields(self):
id_ = 4
name = 'jamendo'
display_name = "Jamendo"
url = "http://www.jamendo.com/artist/1333"
link = Hyperlink(id=id_, name=name, url=url)
serializer = HyperlinkSerializer(link)
self.assertEqual(serializer.data, {
'id': id_,
'name': name,
'display_name': display_name,
'url': url,
})
class ArtistSerializerTest(TestCase):
"""Tests for Artist serializer."""
def test_no_links(self):
id_ = 2
name = "Brad Sucks"
artist = Artist(id=id_, name=name)
serializer = ArtistSerializer(artist)
self.assertEqual(serializer.data, {
'id': id_,
'name': name,
'links': [],
})
|
|
42af700af58588fccaa84f5348a5c854d095d1a9
|
code/ex2.2-simple_requests.py
|
code/ex2.2-simple_requests.py
|
from urllib.request import urlopen
import time
URLS = [
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
]
def request_greetings():
responses = []
for url in URLS:
resp = urlopen(url)
responses.append(resp.read().decode('utf-8'))
texts = '\n'.join(responses)
return texts
if __name__ == "__main__":
t1 = time.time()
greetings = request_greetings()
print(time.time() - t1, "seconds passed")
print(greetings)
|
Add ex2.2: multiple simple requests
|
Add ex2.2: multiple simple requests
|
Python
|
mit
|
MA3STR0/PythonAsyncWorkshop
|
Add ex2.2: multiple simple requests
|
from urllib.request import urlopen
import time
URLS = [
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
]
def request_greetings():
responses = []
for url in URLS:
resp = urlopen(url)
responses.append(resp.read().decode('utf-8'))
texts = '\n'.join(responses)
return texts
if __name__ == "__main__":
t1 = time.time()
greetings = request_greetings()
print(time.time() - t1, "seconds passed")
print(greetings)
|
<commit_before><commit_msg>Add ex2.2: multiple simple requests<commit_after>
|
from urllib.request import urlopen
import time
URLS = [
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
]
def request_greetings():
responses = []
for url in URLS:
resp = urlopen(url)
responses.append(resp.read().decode('utf-8'))
texts = '\n'.join(responses)
return texts
if __name__ == "__main__":
t1 = time.time()
greetings = request_greetings()
print(time.time() - t1, "seconds passed")
print(greetings)
|
Add ex2.2: multiple simple requestsfrom urllib.request import urlopen
import time
URLS = [
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
]
def request_greetings():
responses = []
for url in URLS:
resp = urlopen(url)
responses.append(resp.read().decode('utf-8'))
texts = '\n'.join(responses)
return texts
if __name__ == "__main__":
t1 = time.time()
greetings = request_greetings()
print(time.time() - t1, "seconds passed")
print(greetings)
|
<commit_before><commit_msg>Add ex2.2: multiple simple requests<commit_after>from urllib.request import urlopen
import time
URLS = [
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
]
def request_greetings():
responses = []
for url in URLS:
resp = urlopen(url)
responses.append(resp.read().decode('utf-8'))
texts = '\n'.join(responses)
return texts
if __name__ == "__main__":
t1 = time.time()
greetings = request_greetings()
print(time.time() - t1, "seconds passed")
print(greetings)
|
|
36af8c98005bfb6d51344b80a59cb6e48c8b55fb
|
salt/output/overstatestage.py
|
salt/output/overstatestage.py
|
'''
Display clean output of an overstate stage
'''
#[{'group2': {'match': ['fedora17-2', 'fedora17-3'],
# 'require': ['group1'],
# 'sls': ['nginx', 'edit']}
# }
# ]
# Import Salt libs
import salt.utils
def output(data):
'''
Format the data for printing stage information from the overstate system
'''
colors = salt.utils.get_colors(__opts__.get('color'))
ostr = ''
for comp in data:
for name, stage in comp.items():
ostr += '{0}{1}:{2}\n'.format(colors['LIGHT_BLUE'], name, colors['ENDC'])
for key in sorted(stage):
ostr += ' {0}{1}:{2}{3}\n'.format(
colors['LIGHT_BLUE'],
key,
stage[key],
colors['ENDC'])
return ostr
|
Add outputter to display overstate stages
|
Add outputter to display overstate stages
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add outputter to display overstate stages
|
'''
Display clean output of an overstate stage
'''
#[{'group2': {'match': ['fedora17-2', 'fedora17-3'],
# 'require': ['group1'],
# 'sls': ['nginx', 'edit']}
# }
# ]
# Import Salt libs
import salt.utils
def output(data):
'''
Format the data for printing stage information from the overstate system
'''
colors = salt.utils.get_colors(__opts__.get('color'))
ostr = ''
for comp in data:
for name, stage in comp.items():
ostr += '{0}{1}:{2}\n'.format(colors['LIGHT_BLUE'], name, colors['ENDC'])
for key in sorted(stage):
ostr += ' {0}{1}:{2}{3}\n'.format(
colors['LIGHT_BLUE'],
key,
stage[key],
colors['ENDC'])
return ostr
|
<commit_before><commit_msg>Add outputter to display overstate stages<commit_after>
|
'''
Display clean output of an overstate stage
'''
#[{'group2': {'match': ['fedora17-2', 'fedora17-3'],
# 'require': ['group1'],
# 'sls': ['nginx', 'edit']}
# }
# ]
# Import Salt libs
import salt.utils
def output(data):
'''
Format the data for printing stage information from the overstate system
'''
colors = salt.utils.get_colors(__opts__.get('color'))
ostr = ''
for comp in data:
for name, stage in comp.items():
ostr += '{0}{1}:{2}\n'.format(colors['LIGHT_BLUE'], name, colors['ENDC'])
for key in sorted(stage):
ostr += ' {0}{1}:{2}{3}\n'.format(
colors['LIGHT_BLUE'],
key,
stage[key],
colors['ENDC'])
return ostr
|
Add outputter to display overstate stages'''
Display clean output of an overstate stage
'''
#[{'group2': {'match': ['fedora17-2', 'fedora17-3'],
# 'require': ['group1'],
# 'sls': ['nginx', 'edit']}
# }
# ]
# Import Salt libs
import salt.utils
def output(data):
'''
Format the data for printing stage information from the overstate system
'''
colors = salt.utils.get_colors(__opts__.get('color'))
ostr = ''
for comp in data:
for name, stage in comp.items():
ostr += '{0}{1}:{2}\n'.format(colors['LIGHT_BLUE'], name, colors['ENDC'])
for key in sorted(stage):
ostr += ' {0}{1}:{2}{3}\n'.format(
colors['LIGHT_BLUE'],
key,
stage[key],
colors['ENDC'])
return ostr
|
<commit_before><commit_msg>Add outputter to display overstate stages<commit_after>'''
Display clean output of an overstate stage
'''
#[{'group2': {'match': ['fedora17-2', 'fedora17-3'],
# 'require': ['group1'],
# 'sls': ['nginx', 'edit']}
# }
# ]
# Import Salt libs
import salt.utils
def output(data):
'''
Format the data for printing stage information from the overstate system
'''
colors = salt.utils.get_colors(__opts__.get('color'))
ostr = ''
for comp in data:
for name, stage in comp.items():
ostr += '{0}{1}:{2}\n'.format(colors['LIGHT_BLUE'], name, colors['ENDC'])
for key in sorted(stage):
ostr += ' {0}{1}:{2}{3}\n'.format(
colors['LIGHT_BLUE'],
key,
stage[key],
colors['ENDC'])
return ostr
|
|
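To show what the formatter above produces for the sample structure in its header comment, here is a rough standalone sketch; it mimics the same loop but drops salt's __opts__/color handling, so it is an approximation rather than the real outputter.

# Illustrative only: approximate the stage formatting without salt colors.
sample = [{'group2': {'match': ['fedora17-2', 'fedora17-3'],
                      'require': ['group1'],
                      'sls': ['nginx', 'edit']}}]

def mock_output(data):
    ostr = ''
    for comp in data:
        for name, stage in comp.items():
            ostr += '{0}:\n'.format(name)
            for key in sorted(stage):
                ostr += '    {0}:{1}\n'.format(key, stage[key])
    return ostr

print(mock_output(sample))
# group2:
#     match:['fedora17-2', 'fedora17-3']
#     require:['group1']
#     sls:['nginx', 'edit']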
626662f0f3ef2ce7de63c424da89263443243e97
|
scrapy/contrib/spiderstate.py
|
scrapy/contrib/spiderstate.py
|
import os, cPickle as pickle
from scrapy import signals
class SpiderState(object):
"""Store and load spider state during a scraping job"""
def __init__(self, jobdir=None):
self.jobdir = jobdir
@classmethod
def from_crawler(cls, crawler):
obj = cls(crawler.settings.get('JOBDIR'))
crawler.signals.connect(obj.spider_closed, signal=signals.spider_closed)
crawler.signals.connect(obj.spider_opened, signal=signals.spider_opened)
return obj
def spider_closed(self, spider):
if self.jobdir:
with open(self.statefn, 'wb') as f:
pickle.dump(spider.state, f, protocol=2)
def spider_opened(self, spider):
if self.jobdir and os.path.exists(self.statefn):
with open(self.statefn) as f:
spider.state = pickle.load(f)
else:
spider.state = {}
@property
def statefn(self):
return os.path.join(self.jobdir, 'spider.state')
|
import os, cPickle as pickle
from scrapy import signals
class SpiderState(object):
"""Store and load spider state during a scraping job"""
def __init__(self, jobdir=None):
self.jobdir = jobdir
@classmethod
def from_crawler(cls, crawler):
obj = cls(crawler.settings.get('JOBDIR'))
crawler.signals.connect(obj.spider_closed, signal=signals.spider_closed)
crawler.signals.connect(obj.spider_opened, signal=signals.spider_opened)
return obj
def spider_closed(self, spider):
if self.jobdir:
with open(self.statefn, 'wb') as f:
pickle.dump(spider.state, f, protocol=2)
def spider_opened(self, spider):
if self.jobdir and os.path.exists(self.statefn):
with open(self.statefn, 'rb') as f:
spider.state = pickle.load(f)
else:
spider.state = {}
@property
def statefn(self):
return os.path.join(self.jobdir, 'spider.state')
|
Fix SpiderState bug in Windows platforms
|
Fix SpiderState bug in Windows platforms
The spider state file was not opened in binary mode.
|
Python
|
bsd-3-clause
|
yarikoptic/scrapy,godfreyy/scrapy,coderabhishek/scrapy,coderabhishek/scrapy,yidongliu/scrapy,yusofm/scrapy,hectoruelo/scrapy,csalazar/scrapy,Timeship/scrapy,wangjun/scrapy,Zephor5/scrapy,profjrr/scrapy,Geeglee/scrapy,olafdietsche/scrapy,Geeglee/scrapy,beni55/scrapy,kimimj/scrapy,mlyundin/scrapy,amboxer21/scrapy,ramiro/scrapy,lacrazyboy/scrapy,agusc/scrapy,Allianzcortex/scrapy,yarikoptic/scrapy,yidongliu/scrapy,legendtkl/scrapy,famorted/scrapy,arush0311/scrapy,olorz/scrapy,WilliamKinaan/scrapy,agusc/scrapy,fafaman/scrapy,IvanGavran/scrapy,moraesnicol/scrapy,elacuesta/scrapy,1yvT0s/scrapy,csalazar/scrapy,TarasRudnyk/scrapy,KublaikhanGeek/scrapy,finfish/scrapy,stenskjaer/scrapy,godfreyy/scrapy,shaform/scrapy,CENDARI/scrapy,dgillis/scrapy,hwsyy/scrapy,pablohoffman/scrapy,Partoo/scrapy,rahul-c1/scrapy,Zephor5/scrapy,dacjames/scrapy,CENDARI/scrapy,johnardavies/scrapy,curita/scrapy,tntC4stl3/scrapy,gbirke/scrapy,foromer4/scrapy,profjrr/scrapy,eLRuLL/scrapy,umrashrf/scrapy,redapple/scrapy,moraesnicol/scrapy,carlosp420/scrapy,dacjames/scrapy,Slater-Victoroff/scrapy,arush0311/scrapy,songfj/scrapy,cyberplant/scrapy,heamon7/scrapy,KublaikhanGeek/scrapy,tntC4stl3/scrapy,sigma-random/scrapy,tliber/scrapy,hwsyy/scrapy,wzyuliyang/scrapy,barraponto/scrapy,WilliamKinaan/scrapy,jiezhu2007/scrapy,agusc/scrapy,emschorsch/scrapy,chekunkov/scrapy,gnemoug/scrapy,nguyenhongson03/scrapy,xiao26/scrapy,dangra/scrapy,zackslash/scrapy,kalessin/scrapy,arush0311/scrapy,darkrho/scrapy-scrapy,rolando-contrib/scrapy,moraesnicol/scrapy,nowopen/scrapy,snowdream1314/scrapy,fpy171/scrapy,pawelmhm/scrapy,elijah513/scrapy,hansenDise/scrapy,nguyenhongson03/scrapy,tagatac/scrapy,huoxudong125/scrapy,agreen/scrapy,Slater-Victoroff/scrapy,Adai0808/scrapy-1,eliasdorneles/scrapy,dangra/scrapy,ENjOyAbLE1991/scrapy,Djlavoy/scrapy,zorojean/scrapy,fqul/scrapy,fontenele/scrapy,aivarsk/scrapy,avtoritet/scrapy,joshlk/scrapy,yusofm/scrapy,Slater-Victoroff/scrapy,joshlk/scrapy,mgedmin/scrapy,WilliamKinaan/scrapy,zjuwangg/scrapy,legendtkl/scrapy,zhangtao11/scrapy,wenyu1001/scrapy,ssh-odoo/scrapy,finfish/scrapy,wenyu1001/scrapy,pablohoffman/scrapy,dhenyjarasandy/scrapy,rdowinton/scrapy,ENjOyAbLE1991/scrapy,dhenyjarasandy/scrapy,dracony/scrapy,pablohoffman/scrapy,Allianzcortex/scrapy,z-fork/scrapy,Djlavoy/scrapy,cyrixhero/scrapy,devGregA/scrapy,bmess/scrapy,smaty1/scrapy,xiao26/scrapy,beni55/scrapy,haiiiiiyun/scrapy,scrapy/scrapy,nowopen/scrapy,cursesun/scrapy,Timeship/scrapy,codebhendi/scrapy,pfctdayelise/scrapy,wangjun/scrapy,jiezhu2007/scrapy,finfish/scrapy,zjuwangg/scrapy,elijah513/scrapy,elijah513/scrapy,kazitanvirahsan/scrapy,taito/scrapy,rahulsharma1991/scrapy,devGregA/scrapy,rklabs/scrapy,nikgr95/scrapy,pfctdayelise/scrapy,pombredanne/scrapy,nfunato/scrapy,Parlin-Galanodel/scrapy,ssh-odoo/scrapy,fpy171/scrapy,kimimj/scrapy,snowdream1314/scrapy,songfj/scrapy,legendtkl/scrapy,scorphus/scrapy,GregoryVigoTorres/scrapy,dracony/scrapy,tntC4stl3/scrapy,nfunato/scrapy,cleydson/scrapy,kashyap32/scrapy,scrapy/scrapy,yarikoptic/scrapy,stenskjaer/scrapy,Adai0808/scrapy-1,ramiro/scrapy,zhangtao11/scrapy,olorz/scrapy,Zephor5/scrapy,starrify/scrapy,godfreyy/scrapy,w495/scrapy,cleydson/scrapy,rootAvish/scrapy,umrashrf/scrapy,Allianzcortex/scrapy,dgillis/scrapy,nikgr95/scrapy,nfunato/scrapy,jdemaeyer/scrapy,raphaelfruneaux/scrapy,livepy/scrapy,cyrixhero/scrapy,sardok/scrapy,foromer4/scrapy,Ryezhang/scrapy,jc0n/scrapy,Parlin-Galanodel/scrapy,beni55/scrapy,w495/scrapy,KublaikhanGeek/scrapy,Cnfc19932/scrapy,tagatac/scrapy,tliber/scrapy,raphaelfruneaux/scrapy,Di
genis/scrapy,jeffreyjinfeng/scrapy,1yvT0s/scrapy,shaform/scrapy,hbwzhsh/scrapy,ENjOyAbLE1991/scrapy,agreen/scrapy,wujuguang/scrapy,URXtech/scrapy,redapple/scrapy,rolando/scrapy,crasker/scrapy,Adai0808/scrapy-1,wzyuliyang/scrapy,liyy7/scrapy,foromer4/scrapy,nikgr95/scrapy,famorted/scrapy,URXtech/scrapy,liyy7/scrapy,wangjun/scrapy,rahulsharma1991/scrapy,Lucifer-Kim/scrapy,livepy/scrapy,hbwzhsh/scrapy,rootAvish/scrapy,hectoruelo/scrapy,irwinlove/scrapy,1yvT0s/scrapy,codebhendi/scrapy,Bourneer/scrapy,Timeship/scrapy,elacuesta/scrapy,starrify/scrapy,z-fork/scrapy,lacrazyboy/scrapy,bmess/scrapy,scorphus/scrapy,CodeJuan/scrapy,farhan0581/scrapy,YeelerG/scrapy,nguyenhongson03/scrapy,Preetwinder/scrapy,huoxudong125/scrapy,haiiiiiyun/scrapy,webmakin/scrapy,ArturGaspar/scrapy,CodeJuan/scrapy,pombredanne/scrapy,cursesun/scrapy,chekunkov/scrapy,cursesun/scrapy,farhan0581/scrapy,curita/scrapy,TarasRudnyk/scrapy,Preetwinder/scrapy,agreen/scrapy,zackslash/scrapy,barraponto/scrapy,emschorsch/scrapy,wenyu1001/scrapy,rahul-c1/scrapy,redapple/scrapy,fqul/scrapy,jc0n/scrapy,dracony/scrapy,eliasdorneles/scrapy,Djlavoy/scrapy,dhenyjarasandy/scrapy,scrapy/scrapy,bmess/scrapy,fontenele/scrapy,rdowinton/scrapy,URXtech/scrapy,CENDARI/scrapy,kmike/scrapy,Chenmxs/scrapy,OpenWhere/scrapy,smaty1/scrapy,farhan0581/scrapy,taito/scrapy,barraponto/scrapy,jiezhu2007/scrapy,Digenis/scrapy,heamon7/scrapy,kmike/scrapy,jorik041/scrapy,hansenDise/scrapy,github-account-because-they-want-it/scrapy,amboxer21/scrapy,AaronTao1990/scrapy,ssteo/scrapy,johnardavies/scrapy,tagatac/scrapy,zorojean/scrapy,olafdietsche/scrapy,ramiro/scrapy,rolando/scrapy,OpenWhere/scrapy,hyrole/scrapy,yidongliu/scrapy,curita/scrapy,rolando-contrib/scrapy,avtoritet/scrapy,nowopen/scrapy,z-fork/scrapy,w495/scrapy,amboxer21/scrapy,Digenis/scrapy,scorphus/scrapy,rolando/scrapy,pranjalpatil/scrapy,darkrho/scrapy-scrapy,hwsyy/scrapy,rklabs/scrapy,fafaman/scrapy,aivarsk/scrapy,olorz/scrapy,carlosp420/scrapy,GregoryVigoTorres/scrapy,eLRuLL/scrapy,AaronTao1990/scrapy,jorik041/scrapy,ssteo/scrapy,eliasdorneles/scrapy,emschorsch/scrapy,irwinlove/scrapy,huoxudong125/scrapy,Partoo/scrapy,IvanGavran/scrapy,ArturGaspar/scrapy,crasker/scrapy,rahul-c1/scrapy,carlosp420/scrapy,famorted/scrapy,chekunkov/scrapy,rolando-contrib/scrapy,jorik041/scrapy,gnemoug/scrapy,Chenmxs/scrapy,Cnfc19932/scrapy,ssh-odoo/scrapy,kimimj/scrapy,webmakin/scrapy,cyberplant/scrapy,elacuesta/scrapy,darkrho/scrapy-scrapy,avtoritet/scrapy,jdemaeyer/scrapy,joshlk/scrapy,rahulsharma1991/scrapy,devGregA/scrapy,heamon7/scrapy,kalessin/scrapy,OpenWhere/scrapy,smaty1/scrapy,zjuwangg/scrapy,sardok/scrapy,gbirke/scrapy,dangra/scrapy,irwinlove/scrapy,jdemaeyer/scrapy,YeelerG/scrapy,nett55/scrapy,Lucifer-Kim/scrapy,jamesblunt/scrapy,jc0n/scrapy,cyrixhero/scrapy,cyberplant/scrapy,jeffreyjinfeng/scrapy,stenskjaer/scrapy,hansenDise/scrapy,fafaman/scrapy,tliber/scrapy,Parlin-Galanodel/scrapy,nett55/scrapy,github-account-because-they-want-it/scrapy,hyrole/scrapy,hectoruelo/scrapy,zorojean/scrapy,xiao26/scrapy,kazitanvirahsan/scrapy,Bourneer/scrapy,Lucifer-Kim/scrapy,csalazar/scrapy,JacobStevenR/scrapy,rklabs/scrapy,pawelmhm/scrapy,ashishnerkar1/scrapy,Bourneer/scrapy,sigma-random/scrapy,webmakin/scrapy,dacjames/scrapy,umrashrf/scrapy,hbwzhsh/scrapy,jeffreyjinfeng/scrapy,IvanGavran/scrapy,nett55/scrapy,kmike/scrapy,CodeJuan/scrapy,ylcolala/scrapy,raphaelfruneaux/scrapy,pranjalpatil/scrapy,aivarsk/scrapy,hyrole/scrapy,Geeglee/scrapy,crasker/scrapy,Ryezhang/scrapy,songfj/scrapy,YeelerG/scrapy,Partoo/scrapy,ssteo/scrapy,ro
otAvish/scrapy,mlyundin/scrapy,wujuguang/scrapy,kazitanvirahsan/scrapy,mgedmin/scrapy,livepy/scrapy,kashyap32/scrapy,fontenele/scrapy,github-account-because-they-want-it/scrapy,profjrr/scrapy,fqul/scrapy,starrify/scrapy,snowdream1314/scrapy,TarasRudnyk/scrapy,dgillis/scrapy,zhangtao11/scrapy,coderabhishek/scrapy,pombredanne/scrapy,mlyundin/scrapy,johnardavies/scrapy,liyy7/scrapy,ArturGaspar/scrapy,yusofm/scrapy,wzyuliyang/scrapy,ylcolala/scrapy,shaform/scrapy,codebhendi/scrapy,cleydson/scrapy,pawelmhm/scrapy,rdowinton/scrapy,AaronTao1990/scrapy,lacrazyboy/scrapy,ashishnerkar1/scrapy,taito/scrapy,Cnfc19932/scrapy,Ryezhang/scrapy,kalessin/scrapy,ylcolala/scrapy,haiiiiiyun/scrapy,kashyap32/scrapy,jamesblunt/scrapy,olafdietsche/scrapy,pranjalpatil/scrapy,wujuguang/scrapy,fpy171/scrapy,GregoryVigoTorres/scrapy,mgedmin/scrapy,eLRuLL/scrapy,zackslash/scrapy,JacobStevenR/scrapy,JacobStevenR/scrapy,Chenmxs/scrapy,Preetwinder/scrapy,pfctdayelise/scrapy
|
import os, cPickle as pickle
from scrapy import signals
class SpiderState(object):
"""Store and load spider state during a scraping job"""
def __init__(self, jobdir=None):
self.jobdir = jobdir
@classmethod
def from_crawler(cls, crawler):
obj = cls(crawler.settings.get('JOBDIR'))
crawler.signals.connect(obj.spider_closed, signal=signals.spider_closed)
crawler.signals.connect(obj.spider_opened, signal=signals.spider_opened)
return obj
def spider_closed(self, spider):
if self.jobdir:
with open(self.statefn, 'wb') as f:
pickle.dump(spider.state, f, protocol=2)
def spider_opened(self, spider):
if self.jobdir and os.path.exists(self.statefn):
with open(self.statefn) as f:
spider.state = pickle.load(f)
else:
spider.state = {}
@property
def statefn(self):
return os.path.join(self.jobdir, 'spider.state')
Fix SpiderState bug in Windows platforms
The spider state file was not opened in binary mode.
|
import os, cPickle as pickle
from scrapy import signals
class SpiderState(object):
"""Store and load spider state during a scraping job"""
def __init__(self, jobdir=None):
self.jobdir = jobdir
@classmethod
def from_crawler(cls, crawler):
obj = cls(crawler.settings.get('JOBDIR'))
crawler.signals.connect(obj.spider_closed, signal=signals.spider_closed)
crawler.signals.connect(obj.spider_opened, signal=signals.spider_opened)
return obj
def spider_closed(self, spider):
if self.jobdir:
with open(self.statefn, 'wb') as f:
pickle.dump(spider.state, f, protocol=2)
def spider_opened(self, spider):
if self.jobdir and os.path.exists(self.statefn):
with open(self.statefn, 'rb') as f:
spider.state = pickle.load(f)
else:
spider.state = {}
@property
def statefn(self):
return os.path.join(self.jobdir, 'spider.state')
|
<commit_before>import os, cPickle as pickle
from scrapy import signals
class SpiderState(object):
"""Store and load spider state during a scraping job"""
def __init__(self, jobdir=None):
self.jobdir = jobdir
@classmethod
def from_crawler(cls, crawler):
obj = cls(crawler.settings.get('JOBDIR'))
crawler.signals.connect(obj.spider_closed, signal=signals.spider_closed)
crawler.signals.connect(obj.spider_opened, signal=signals.spider_opened)
return obj
def spider_closed(self, spider):
if self.jobdir:
with open(self.statefn, 'wb') as f:
pickle.dump(spider.state, f, protocol=2)
def spider_opened(self, spider):
if self.jobdir and os.path.exists(self.statefn):
with open(self.statefn) as f:
spider.state = pickle.load(f)
else:
spider.state = {}
@property
def statefn(self):
return os.path.join(self.jobdir, 'spider.state')
<commit_msg>Fix SpiderState bug in Windows platforms
The spider state file was not opened in binary mode.<commit_after>
|
import os, cPickle as pickle
from scrapy import signals
class SpiderState(object):
"""Store and load spider state during a scraping job"""
def __init__(self, jobdir=None):
self.jobdir = jobdir
@classmethod
def from_crawler(cls, crawler):
obj = cls(crawler.settings.get('JOBDIR'))
crawler.signals.connect(obj.spider_closed, signal=signals.spider_closed)
crawler.signals.connect(obj.spider_opened, signal=signals.spider_opened)
return obj
def spider_closed(self, spider):
if self.jobdir:
with open(self.statefn, 'wb') as f:
pickle.dump(spider.state, f, protocol=2)
def spider_opened(self, spider):
if self.jobdir and os.path.exists(self.statefn):
with open(self.statefn, 'rb') as f:
spider.state = pickle.load(f)
else:
spider.state = {}
@property
def statefn(self):
return os.path.join(self.jobdir, 'spider.state')
|
import os, cPickle as pickle
from scrapy import signals
class SpiderState(object):
"""Store and load spider state during a scraping job"""
def __init__(self, jobdir=None):
self.jobdir = jobdir
@classmethod
def from_crawler(cls, crawler):
obj = cls(crawler.settings.get('JOBDIR'))
crawler.signals.connect(obj.spider_closed, signal=signals.spider_closed)
crawler.signals.connect(obj.spider_opened, signal=signals.spider_opened)
return obj
def spider_closed(self, spider):
if self.jobdir:
with open(self.statefn, 'wb') as f:
pickle.dump(spider.state, f, protocol=2)
def spider_opened(self, spider):
if self.jobdir and os.path.exists(self.statefn):
with open(self.statefn) as f:
spider.state = pickle.load(f)
else:
spider.state = {}
@property
def statefn(self):
return os.path.join(self.jobdir, 'spider.state')
Fix SpiderState bug in Windows platforms
The spider state file was not opened in binary mode.import os, cPickle as pickle
from scrapy import signals
class SpiderState(object):
"""Store and load spider state during a scraping job"""
def __init__(self, jobdir=None):
self.jobdir = jobdir
@classmethod
def from_crawler(cls, crawler):
obj = cls(crawler.settings.get('JOBDIR'))
crawler.signals.connect(obj.spider_closed, signal=signals.spider_closed)
crawler.signals.connect(obj.spider_opened, signal=signals.spider_opened)
return obj
def spider_closed(self, spider):
if self.jobdir:
with open(self.statefn, 'wb') as f:
pickle.dump(spider.state, f, protocol=2)
def spider_opened(self, spider):
if self.jobdir and os.path.exists(self.statefn):
with open(self.statefn, 'rb') as f:
spider.state = pickle.load(f)
else:
spider.state = {}
@property
def statefn(self):
return os.path.join(self.jobdir, 'spider.state')
|
<commit_before>import os, cPickle as pickle
from scrapy import signals
class SpiderState(object):
"""Store and load spider state during a scraping job"""
def __init__(self, jobdir=None):
self.jobdir = jobdir
@classmethod
def from_crawler(cls, crawler):
obj = cls(crawler.settings.get('JOBDIR'))
crawler.signals.connect(obj.spider_closed, signal=signals.spider_closed)
crawler.signals.connect(obj.spider_opened, signal=signals.spider_opened)
return obj
def spider_closed(self, spider):
if self.jobdir:
with open(self.statefn, 'wb') as f:
pickle.dump(spider.state, f, protocol=2)
def spider_opened(self, spider):
if self.jobdir and os.path.exists(self.statefn):
with open(self.statefn) as f:
spider.state = pickle.load(f)
else:
spider.state = {}
@property
def statefn(self):
return os.path.join(self.jobdir, 'spider.state')
<commit_msg>Fix SpiderState bug in Windows platforms
The spider state file was not opened in binary mode.<commit_after>import os, cPickle as pickle
from scrapy import signals
class SpiderState(object):
"""Store and load spider state during a scraping job"""
def __init__(self, jobdir=None):
self.jobdir = jobdir
@classmethod
def from_crawler(cls, crawler):
obj = cls(crawler.settings.get('JOBDIR'))
crawler.signals.connect(obj.spider_closed, signal=signals.spider_closed)
crawler.signals.connect(obj.spider_opened, signal=signals.spider_opened)
return obj
def spider_closed(self, spider):
if self.jobdir:
with open(self.statefn, 'wb') as f:
pickle.dump(spider.state, f, protocol=2)
def spider_opened(self, spider):
if self.jobdir and os.path.exists(self.statefn):
with open(self.statefn, 'rb') as f:
spider.state = pickle.load(f)
else:
spider.state = {}
@property
def statefn(self):
return os.path.join(self.jobdir, 'spider.state')
|
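A minimal standalone illustration of why the fix above matters: pickle protocol 2 produces a binary stream, so the state file has to be reopened in 'rb' mode, otherwise text-mode newline translation on Windows can corrupt the data. The temporary path and sample state dict are made up for the demonstration.

import os, tempfile, cPickle as pickle

state = {'last_request_seen': 42}  # hypothetical spider state
statefn = os.path.join(tempfile.mkdtemp(), 'spider.state')

with open(statefn, 'wb') as f:
    pickle.dump(state, f, protocol=2)

# Reading back must also use binary mode; a plain open(statefn) would use
# text mode on Windows and can mangle the protocol-2 byte stream.
with open(statefn, 'rb') as f:
    assert pickle.load(f) == state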
5fc6d9fc05bc4cae5588489c576744a518155461
|
trig.py
|
trig.py
|
import function
import math
class Sin(function.Function):
def _evaluate(self, x):
return math.sin(x)
class Cos(function.Function):
def _evaluate(self, x):
return math.cos(x)
class Tan(function.Function):
def _evaluate(self, x):
sin = Sin()
cos = Cos()
if cos(x) == 0:
raise ZeroDivisionError()
return sin(x) / cos(x)
def main():
sin = Sin()
cos = Cos()
tan = Tan()
assert(sin(0) == 0)
assert(cos(0) == 1)
assert(tan(0) == 0)
assert((tan + cos + sin)(0) == 1)
if __name__ == "__main__":
main()
|
Add Sin, Cos and Tan function classes
|
Add Sin, Cos and Tan function classes
|
Python
|
mit
|
jackromo/mathLibPy
|
Add Sin, Cos and Tan function classes
|
import function
import math
class Sin(function.Function):
def _evaluate(self, x):
return math.sin(x)
class Cos(function.Function):
def _evaluate(self, x):
return math.cos(x)
class Tan(function.Function):
def _evaluate(self, x):
sin = Sin()
cos = Cos()
if cos(x) == 0:
raise ZeroDivisionError()
return sin(x) / cos(x)
def main():
sin = Sin()
cos = Cos()
tan = Tan()
assert(sin(0) == 0)
assert(cos(0) == 1)
assert(tan(0) == 0)
assert((tan + cos + sin)(0) == 1)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add Sin, Cos and Tan function classes<commit_after>
|
import function
import math
class Sin(function.Function):
def _evaluate(self, x):
return math.sin(x)
class Cos(function.Function):
def _evaluate(self, x):
return math.cos(x)
class Tan(function.Function):
def _evaluate(self, x):
sin = Sin()
cos = Cos()
if cos(x) == 0:
raise ZeroDivisionError()
return sin(x) / cos(x)
def main():
sin = Sin()
cos = Cos()
tan = Tan()
assert(sin(0) == 0)
assert(cos(0) == 1)
assert(tan(0) == 0)
assert((tan + cos + sin)(0) == 1)
if __name__ == "__main__":
main()
|
Add Sin, Cos and Tan function classesimport function
import math
class Sin(function.Function):
def _evaluate(self, x):
return math.sin(x)
class Cos(function.Function):
def _evaluate(self, x):
return math.cos(x)
class Tan(function.Function):
def _evaluate(self, x):
sin = Sin()
cos = Cos()
if cos(x) == 0:
raise ZeroDivisionError()
return sin(x) / cos(x)
def main():
sin = Sin()
cos = Cos()
tan = Tan()
assert(sin(0) == 0)
assert(cos(0) == 1)
assert(tan(0) == 0)
assert((tan + cos + sin)(0) == 1)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add Sin, Cos and Tan function classes<commit_after>import function
import math
class Sin(function.Function):
def _evaluate(self, x):
return math.sin(x)
class Cos(function.Function):
def _evaluate(self, x):
return math.cos(x)
class Tan(function.Function):
def _evaluate(self, x):
sin = Sin()
cos = Cos()
if cos(x) == 0:
raise ZeroDivisionError()
return sin(x) / cos(x)
def main():
sin = Sin()
cos = Cos()
tan = Tan()
assert(sin(0) == 0)
assert(cos(0) == 1)
assert(tan(0) == 0)
assert((tan + cos + sin)(0) == 1)
if __name__ == "__main__":
main()
|
|
1d55ad8fb8309918e7d41b4f443e16ebefbb1895
|
populate_elasticsearch.py
|
populate_elasticsearch.py
|
#!/usr/bin/env python
import os
import sys
import json
import requests
import multiprocessing
import mwparserfromhell as mwp
ES_URL = 'http://localhost:9200'
SECTIONS_TO_REMOVE = set([
'references', 'see also', 'external links', 'footnotes'
])
def put_document(path):
id = os.path.basename(path)
doc = json.load(file(path))
wdoc = mwp.parse(doc['wikitext'])
for section in wdoc.get_sections(include_headings = True):
try:
title = section.get(0).title.strip().lower()
if title in SECTIONS_TO_REMOVE:
wdoc.remove(section)
except (IndexError, AttributeError):
# No heading or empty section?
pass
doc['wikitext'] = wdoc.strip_code()
response = requests.put(
ES_URL + '/' + sys.argv[2] + '/' + id, json.dumps(doc))
print response.content
pool = multiprocessing.Pool()
pool.map(put_document, [
os.path.join(sys.argv[1], id)
for id in os.listdir(sys.argv[1])])
|
Add script for populating elasticsearch.
|
Add script for populating elasticsearch.
|
Python
|
mit
|
eggpi/similarity,eggpi/similarity,eggpi/similarity
|
Add script for populating elasticsearch.
|
#!/usr/bin/env python
import os
import sys
import json
import requests
import multiprocessing
import mwparserfromhell as mwp
ES_URL = 'http://localhost:9200'
SECTIONS_TO_REMOVE = set([
'references', 'see also', 'external links', 'footnotes'
])
def put_document(path):
id = os.path.basename(path)
doc = json.load(file(path))
wdoc = mwp.parse(doc['wikitext'])
for section in wdoc.get_sections(include_headings = True):
try:
title = section.get(0).title.strip().lower()
if title in SECTIONS_TO_REMOVE:
wdoc.remove(section)
except (IndexError, AttributeError):
# No heading or empty section?
pass
doc['wikitext'] = wdoc.strip_code()
response = requests.put(
ES_URL + '/' + sys.argv[2] + '/' + id, json.dumps(doc))
print response.content
pool = multiprocessing.Pool()
pool.map(put_document, [
os.path.join(sys.argv[1], id)
for id in os.listdir(sys.argv[1])])
|
<commit_before><commit_msg>Add script for populating elasticsearch.<commit_after>
|
#!/usr/bin/env python
import os
import sys
import json
import requests
import multiprocessing
import mwparserfromhell as mwp
ES_URL = 'http://localhost:9200'
SECTIONS_TO_REMOVE = set([
'references', 'see also', 'external links', 'footnotes'
])
def put_document(path):
id = os.path.basename(path)
doc = json.load(file(path))
wdoc = mwp.parse(doc['wikitext'])
for section in wdoc.get_sections(include_headings = True):
try:
title = section.get(0).title.strip().lower()
if title in SECTIONS_TO_REMOVE:
wdoc.remove(section)
except (IndexError, AttributeError):
# No heading or empty section?
pass
doc['wikitext'] = wdoc.strip_code()
response = requests.put(
ES_URL + '/' + sys.argv[2] + '/' + id, json.dumps(doc))
print response.content
pool = multiprocessing.Pool()
pool.map(put_document, [
os.path.join(sys.argv[1], id)
for id in os.listdir(sys.argv[1])])
|
Add script for populating elasticsearch.#!/usr/bin/env python
import os
import sys
import json
import requests
import multiprocessing
import mwparserfromhell as mwp
ES_URL = 'http://localhost:9200'
SECTIONS_TO_REMOVE = set([
'references', 'see also', 'external links', 'footnotes'
])
def put_document(path):
id = os.path.basename(path)
doc = json.load(file(path))
wdoc = mwp.parse(doc['wikitext'])
for section in wdoc.get_sections(include_headings = True):
try:
title = section.get(0).title.strip().lower()
if title in SECTIONS_TO_REMOVE:
wdoc.remove(section)
except (IndexError, AttributeError):
# No heading or empty section?
pass
doc['wikitext'] = wdoc.strip_code()
response = requests.put(
ES_URL + '/' + sys.argv[2] + '/' + id, json.dumps(doc))
print response.content
pool = multiprocessing.Pool()
pool.map(put_document, [
os.path.join(sys.argv[1], id)
for id in os.listdir(sys.argv[1])])
|
<commit_before><commit_msg>Add script for populating elasticsearch.<commit_after>#!/usr/bin/env python
import os
import sys
import json
import requests
import multiprocessing
import mwparserfromhell as mwp
ES_URL = 'http://localhost:9200'
SECTIONS_TO_REMOVE = set([
'references', 'see also', 'external links', 'footnotes'
])
def put_document(path):
id = os.path.basename(path)
doc = json.load(file(path))
wdoc = mwp.parse(doc['wikitext'])
for section in wdoc.get_sections(include_headings = True):
try:
title = section.get(0).title.strip().lower()
if title in SECTIONS_TO_REMOVE:
wdoc.remove(section)
except (IndexError, AttributeError):
# No heading or empty section?
pass
doc['wikitext'] = wdoc.strip_code()
response = requests.put(
ES_URL + '/' + sys.argv[2] + '/' + id, json.dumps(doc))
print response.content
pool = multiprocessing.Pool()
pool.map(put_document, [
os.path.join(sys.argv[1], id)
for id in os.listdir(sys.argv[1])])
|
|
ff2b86d90ecbc2da25ddc05b0430555861104cac
|
examples/hybrid_manager_example.py
|
examples/hybrid_manager_example.py
|
# This example shows how to configure Jupyter/IPython to use the more complex
# HybridContentsManager.
# A HybridContentsManager implements the contents API by delegating requests to
# other contents managers. Each sub-manager is associated with a root
# directory, and all requests for data within that directory are routed to the
# sub-manager.
# A HybridContentsManager needs two pieces of information at configuration time:
# 1. ``manager_classes``, a map from root directory to the type of contents
# manager to use for that root directory.
# 2. ``manager_kwargs``, a map from root directory to a dict of keywords to
# pass to the associated sub-manager.
from pgcontents.pgmanager import PostgresContentsManager
from pgcontents.hybridmanager import HybridContentsManager
# Using Jupyter (IPython >= 4.0).
# from notebook.services.contents.filemanager import FileContentsManager
# Using Legacy IPython.
from IPython.html.services.contents.filemanager import FileContentsManager
c = get_config()
c.NotebookApp.contents_manager_class = HybridContentsManager
c.HybridContentsManager.manager_classes = {
# Associate the root directory with a PostgresContentsManager.
# This manager will receive all requests that don't fall under any of the
# other managers.
'': PostgresContentsManager,
# Associate /directory with a FileContentsManager.
'directory': FileContentsManager,
# Associate /other_directory with another FileContentsManager.
'other_directory': FileContentsManager,
}
c.HybridContentsManager.manager_kwargs = {
# Args for root PostgresContentsManager.
'': {
'db_url': 'postgresql://ssanderson@/pgcontents_testing',
'user_id': 'my_awesome_username',
'max_file_size_bytes': 1000000, # Optional
},
# Args for the FileContentsManager mapped to /directory
'directory': {
'root_dir': '/home/ssanderson/some_local_directory',
},
# Args for the FileContentsManager mapped to /other_directory
'other_directory': {
'root_dir': '/home/ssanderson/some_other_local_directory',
}
}
|
Add an example for HybridContentsManager.
|
DOC: Add an example for HybridContentsManager.
|
Python
|
apache-2.0
|
quantopian/pgcontents
|
DOC: Add an example for HybridContentsManager.
|
# This example shows how to configure Jupyter/IPython to use the more complex
# HybridContentsManager.
# A HybridContentsManager implements the contents API by delegating requests to
# other contents managers. Each sub-manager is associated with a root
# directory, and all requests for data within that directory are routed to the
# sub-manager.
# A HybridContentsManager needs two pieces of information at configuration time:
# 1. ``manager_classes``, a map from root directory to the type of contents
# manager to use for that root directory.
# 2. ``manager_kwargs``, a map from root directory to a dict of keywords to
# pass to the associated sub-manager.
from pgcontents.pgmanager import PostgresContentsManager
from pgcontents.hybridmanager import HybridContentsManager
# Using Jupyter (IPython >= 4.0).
# from notebook.services.contents.filemanager import FileContentsManager
# Using Legacy IPython.
from IPython.html.services.contents.filemanager import FileContentsManager
c = get_config()
c.NotebookApp.contents_manager_class = HybridContentsManager
c.HybridContentsManager.manager_classes = {
# Associate the root directory with a PostgresContentsManager.
# This manager will receive all requests that don't fall under any of the
# other managers.
'': PostgresContentsManager,
# Associate /directory with a FileContentsManager.
'directory': FileContentsManager,
# Associate /other_directory with another FileContentsManager.
'other_directory': FileContentsManager,
}
c.HybridContentsManager.manager_kwargs = {
# Args for root PostgresContentsManager.
'': {
'db_url': 'postgresql://ssanderson@/pgcontents_testing',
'user_id': 'my_awesome_username',
'max_file_size_bytes': 1000000, # Optional
},
# Args for the FileContentsManager mapped to /directory
'directory': {
'root_dir': '/home/ssanderson/some_local_directory',
},
# Args for the FileContentsManager mapped to /other_directory
'other_directory': {
'root_dir': '/home/ssanderson/some_other_local_directory',
}
}
|
<commit_before><commit_msg>DOC: Add an example for HybridContentsManager.<commit_after>
|
# This example shows how to configure Jupyter/IPython to use the more complex
# HybridContentsManager.
# A HybridContentsManager implements the contents API by delegating requests to
# other contents managers. Each sub-manager is associated with a root
# directory, and all requests for data within that directory are routed to the
# sub-manager.
# A HybridContentsManager needs two pieces of information at configuration time:
# 1. ``manager_classes``, a map from root directory to the type of contents
# manager to use for that root directory.
# 2. ``manager_kwargs``, a map from root directory to a dict of keywords to
# pass to the associated sub-manager.
from pgcontents.pgmanager import PostgresContentsManager
from pgcontents.hybridmanager import HybridContentsManager
# Using Jupyter (IPython >= 4.0).
# from notebook.services.contents.filemanager import FileContentsManager
# Using Legacy IPython.
from IPython.html.services.contents.filemanager import FileContentsManager
c = get_config()
c.NotebookApp.contents_manager_class = HybridContentsManager
c.HybridContentsManager.manager_classes = {
# Associate the root directory with a PostgresContentsManager.
# This manager will receive all requests that don't fall under any of the
# other managers.
'': PostgresContentsManager,
# Associate /directory with a FileContentsManager.
'directory': FileContentsManager,
# Associate /other_directory with another FileContentsManager.
'other_directory': FileContentsManager,
}
c.HybridContentsManager.manager_kwargs = {
# Args for root PostgresContentsManager.
'': {
'db_url': 'postgresql://ssanderson@/pgcontents_testing',
'user_id': 'my_awesome_username',
'max_file_size_bytes': 1000000, # Optional
},
# Args for the FileContentsManager mapped to /directory
'directory': {
'root_dir': '/home/ssanderson/some_local_directory',
},
# Args for the FileContentsManager mapped to /other_directory
'other_directory': {
'root_dir': '/home/ssanderson/some_other_local_directory',
}
}
|
DOC: Add an example for HybridContentsManager.# This example shows how to configure Jupyter/IPython to use the more complex
# HybridContentsManager.
# A HybridContentsManager implements the contents API by delegating requests to
# other contents managers. Each sub-manager is associated with a root
# directory, and all requests for data within that directory are routed to the
# sub-manager.
# A HybridContentsManager needs two pieces of information at configuration time:
# 1. ``manager_classes``, a map from root directory to the type of contents
# manager to use for that root directory.
# 2. ``manager_kwargs``, a map from root directory to a dict of keywords to
# pass to the associated sub-manager.
from pgcontents.pgmanager import PostgresContentsManager
from pgcontents.hybridmanager import HybridContentsManager
# Using Jupyter (IPython >= 4.0).
# from notebook.services.contents.filemanager import FileContentsManager
# Using Legacy IPython.
from IPython.html.services.contents.filemanager import FileContentsManager
c = get_config()
c.NotebookApp.contents_manager_class = HybridContentsManager
c.HybridContentsManager.manager_classes = {
# Associate the root directory with a PostgresContentsManager.
# This manager will receive all requests that don't fall under any of the
# other managers.
'': PostgresContentsManager,
# Associate /directory with a FileContentsManager.
'directory': FileContentsManager,
# Associate /other_directory with another FileContentsManager.
'other_directory': FileContentsManager,
}
c.HybridContentsManager.manager_kwargs = {
# Args for root PostgresContentsManager.
'': {
'db_url': 'postgresql://ssanderson@/pgcontents_testing',
'user_id': 'my_awesome_username',
'max_file_size_bytes': 1000000, # Optional
},
# Args for the FileContentsManager mapped to /directory
'directory': {
'root_dir': '/home/ssanderson/some_local_directory',
},
# Args for the FileContentsManager mapped to /other_directory
'other_directory': {
'root_dir': '/home/ssanderson/some_other_local_directory',
}
}
|
<commit_before><commit_msg>DOC: Add an example for HybridContentsManager.<commit_after># This example shows how to configure Jupyter/IPython to use the more complex
# HybridContentsManager.
# A HybridContentsManager implements the contents API by delegating requests to
# other contents managers. Each sub-manager is associated with a root
# directory, and all requests for data within that directory are routed to the
# sub-manager.
# A HybridContentsManager needs two pieces of information at configuration time:
# 1. ``manager_classes``, a map from root directory to the type of contents
# manager to use for that root directory.
# 2. ``manager_kwargs``, a map from root directory to a dict of keywords to
# pass to the associated sub-manager.
from pgcontents.pgmanager import PostgresContentsManager
from pgcontents.hybridmanager import HybridContentsManager
# Using Jupyter (IPython >= 4.0).
# from notebook.services.contents.filemanager import FileContentsManager
# Using Legacy IPython.
from IPython.html.services.contents.filemanager import FileContentsManager
c = get_config()
c.NotebookApp.contents_manager_class = HybridContentsManager
c.HybridContentsManager.manager_classes = {
# Associate the root directory with a PostgresContentsManager.
# This manager will receive all requests that don't fall under any of the
# other managers.
'': PostgresContentsManager,
# Associate /directory with a FileContentsManager.
'directory': FileContentsManager,
# Associate /other_directory with another FileContentsManager.
'other_directory': FileContentsManager,
}
c.HybridContentsManager.manager_kwargs = {
# Args for root PostgresContentsManager.
'': {
'db_url': 'postgresql://ssanderson@/pgcontents_testing',
'user_id': 'my_awesome_username',
'max_file_size_bytes': 1000000, # Optional
},
# Args for the FileContentsManager mapped to /directory
'directory': {
'root_dir': '/home/ssanderson/some_local_directory',
},
# Args for the FileContentsManager mapped to /other_directory
'other_directory': {
'root_dir': '/home/ssanderson/some_other_local_directory',
}
}
|
|
63eaadad7a5169ec6219d33f9b39ce27859684c2
|
notebooks/test_notebooks.py
|
notebooks/test_notebooks.py
|
# -*- coding: utf-8 -*-
'''
Checks notebook execution result.
Equal to this command + error management:
jupyter nbconvert --to notebook --execute --ExecutePreprocessor.timeout=60 --output executed_notebook.ipynb demo.ipynb
For jupyter configuration information, run: jupyter --path
'''
# Dependencies: nbformat, nbconvert, jupyter-client, ipykernel
import io
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors import CellExecutionError
notebook_filename = 'demo.ipynb'
run_path = '.'
notebook_filename_out = 'executed_notebook.ipynb'
with io.open(notebook_filename) as f:
nb = nbformat.read(f, as_version=4)
ep = ExecutePreprocessor(timeout=600, kernel_name='python')
try:
out = ep.preprocess(nb, {'metadata': {'path': run_path}})
except CellExecutionError:
out = None
msg = 'Error executing the notebook "%s".\n\n' % notebook_filename
msg += 'See notebook "%s" for the traceback.' % notebook_filename_out
print(msg)
raise
finally:
with io.open(notebook_filename_out, mode='wt') as f: # io.open avoids UnicodeEncodeError
nbformat.write(nb, f)
|
Add script to automate notebooks testing
|
Add script to automate notebooks testing
|
Python
|
agpl-3.0
|
openfisca/openfisca-tunisia,openfisca/openfisca-tunisia
|
Add script to automate notebooks testing
|
# -*- coding: utf-8 -*-
'''
Checks notebook execution result.
Equal to this command + error management:
jupyter nbconvert --to notebook --execute --ExecutePreprocessor.timeout=60 --output executed_notebook.ipynb demo.ipynb
For jupyter configuration information, run: jupyter --path
'''
# Dependencies: nbformat, nbconvert, jupyter-client, ipykernel
import io
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors import CellExecutionError
notebook_filename = 'demo.ipynb'
run_path = '.'
notebook_filename_out = 'executed_notebook.ipynb'
with io.open(notebook_filename) as f:
nb = nbformat.read(f, as_version=4)
ep = ExecutePreprocessor(timeout=600, kernel_name='python')
try:
out = ep.preprocess(nb, {'metadata': {'path': run_path}})
except CellExecutionError:
out = None
msg = 'Error executing the notebook "%s".\n\n' % notebook_filename
msg += 'See notebook "%s" for the traceback.' % notebook_filename_out
print(msg)
raise
finally:
with io.open(notebook_filename_out, mode='wt') as f: # io.open avoids UnicodeEncodeError
nbformat.write(nb, f)
|
<commit_before><commit_msg>Add script to automate notebooks testing<commit_after>
|
# -*- coding: utf-8 -*-
'''
Checks notebook execution result.
Equal to this command + error management:
jupyter nbconvert --to notebook --execute --ExecutePreprocessor.timeout=60 --output executed_notebook.ipynb demo.ipynb
For jupyter configuration information, run: jupyter --path
'''
# Dependencies: nbformat, nbconvert, jupyter-client, ipykernel
import io
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors import CellExecutionError
notebook_filename = 'demo.ipynb'
run_path = '.'
notebook_filename_out = 'executed_notebook.ipynb'
with io.open(notebook_filename) as f:
nb = nbformat.read(f, as_version=4)
ep = ExecutePreprocessor(timeout=600, kernel_name='python')
try:
out = ep.preprocess(nb, {'metadata': {'path': run_path}})
except CellExecutionError:
out = None
msg = 'Error executing the notebook "%s".\n\n' % notebook_filename
msg += 'See notebook "%s" for the traceback.' % notebook_filename_out
print(msg)
raise
finally:
with io.open(notebook_filename_out, mode='wt') as f: # io.open avoids UnicodeEncodeError
nbformat.write(nb, f)
|
Add script to automate notebooks testing# -*- coding: utf-8 -*-
'''
Checks notebook execution result.
Equal to this command + error management:
jupyter nbconvert --to notebook --execute --ExecutePreprocessor.timeout=60 --output executed_notebook.ipynb demo.ipynb
For jupyter configuration information, run: jupyter --path
'''
# Dependencies: nbformat, nbconvert, jupyter-client, ipykernel
import io
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors import CellExecutionError
notebook_filename = 'demo.ipynb'
run_path = '.'
notebook_filename_out = 'executed_notebook.ipynb'
with io.open(notebook_filename) as f:
nb = nbformat.read(f, as_version=4)
ep = ExecutePreprocessor(timeout=600, kernel_name='python')
try:
out = ep.preprocess(nb, {'metadata': {'path': run_path}})
except CellExecutionError:
out = None
msg = 'Error executing the notebook "%s".\n\n' % notebook_filename
msg += 'See notebook "%s" for the traceback.' % notebook_filename_out
print(msg)
raise
finally:
with io.open(notebook_filename_out, mode='wt') as f: # io.open avoids UnicodeEncodeError
nbformat.write(nb, f)
|
<commit_before><commit_msg>Add script to automate notebooks testing<commit_after># -*- coding: utf-8 -*-
'''
Checks notebook execution result.
Equal to this command + error management:
jupyter nbconvert --to notebook --execute --ExecutePreprocessor.timeout=60 --output executed_notebook.ipynb demo.ipynb
For jupyter configuration information, run: jupyter --path
'''
# Dependencies: nbformat, nbconvert, jupyter-client, ipykernel
import io
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors import CellExecutionError
notebook_filename = 'demo.ipynb'
run_path = '.'
notebook_filename_out = 'executed_notebook.ipynb'
with io.open(notebook_filename) as f:
nb = nbformat.read(f, as_version=4)
ep = ExecutePreprocessor(timeout=600, kernel_name='python')
try:
out = ep.preprocess(nb, {'metadata': {'path': run_path}})
except CellExecutionError:
out = None
msg = 'Error executing the notebook "%s".\n\n' % notebook_filename
msg += 'See notebook "%s" for the traceback.' % notebook_filename_out
print(msg)
raise
finally:
with io.open(notebook_filename_out, mode='wt') as f: # io.open avoids UnicodeEncodeError
nbformat.write(nb, f)
|
|
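The ExecutePreprocessor pattern above can also be driven from pytest so that each notebook in a list becomes its own test case. This is a sketch under the assumption that pytest is available; the NOTEBOOKS list and kernel name are placeholders, not part of the original script:

# Hypothetical pytest wrapper around the same ExecutePreprocessor workflow.
# NOTEBOOKS and kernel_name are placeholder assumptions.
import io

import nbformat
import pytest
from nbconvert.preprocessors import ExecutePreprocessor

NOTEBOOKS = ['demo.ipynb']

@pytest.mark.parametrize('notebook_filename', NOTEBOOKS)
def test_notebook_executes(notebook_filename):
    with io.open(notebook_filename) as f:
        nb = nbformat.read(f, as_version=4)
    ep = ExecutePreprocessor(timeout=600, kernel_name='python')
    # A CellExecutionError propagates and fails the test with the cell traceback.
    ep.preprocess(nb, {'metadata': {'path': '.'}})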
f4bb4d17214f4e359455cf7b5fb7ab973508049b
|
bin/diffMatcher.py
|
bin/diffMatcher.py
|
#!/usr/bin/python
# coding=utf-8
import subprocess
class DiffMatcher(object):
def __init__(self, listA, listB):
self.listA = listA
self.listB = listB
def create_diff(self, listA, listB,case_sensitive):
new_list = []
#compare the two files
try:
if (case_sensitive):
#ignore case sensitiveness
inp = subprocess.check_output(['diff', '-iy', listA.name, listB.name])
else:
inp = subprocess.check_output(['diff', '-y', listA.name, listB.name])
# diff exits with 1 if outputs mismatch... grml
except subprocess.CalledProcessError, e:
inp = e.output
inp = inp.decode("utf-8").split("\n")
#create list of difference
for entry in inp:
g = entry.replace("\t"," ")
g = g.split()
new_list.append(g)
del new_list[-1]
return new_list
|
Add missing module for merge script
|
Add missing module for merge script
|
Python
|
mit
|
comphist/cora,comphist/cora,comphist/cora,comphist/cora,comphist/cora
|
Add missing module for merge script
|
#!/usr/bin/python
# coding=utf-8
import subprocess
class DiffMatcher(object):
def __init__(self, listA, listB):
self.listA = listA
self.listB = listB
def create_diff(self, listA, listB,case_sensitive):
new_list = []
#compare the two files
try:
if (case_sensitive):
#ignore case sensitiveness
inp = subprocess.check_output(['diff', '-iy', listA.name, listB.name])
else:
inp = subprocess.check_output(['diff', '-y', listA.name, listB.name])
# diff exits with 1 if outputs mismatch... grml
except subprocess.CalledProcessError, e:
inp = e.output
inp = inp.decode("utf-8").split("\n")
#create list of difference
for entry in inp:
g = entry.replace("\t"," ")
g = g.split()
new_list.append(g)
del new_list[-1]
return new_list
|
<commit_before><commit_msg>Add missing module for merge script<commit_after>
|
#!/usr/bin/python
# coding=utf-8
import subprocess
class DiffMatcher(object):
def __init__(self, listA, listB):
self.listA = listA
self.listB = listB
def create_diff(self, listA, listB,case_sensitive):
new_list = []
#compare the two files
try:
if (case_sensitive):
#ignore case sensitiveness
inp = subprocess.check_output(['diff', '-iy', listA.name, listB.name])
else:
inp = subprocess.check_output(['diff', '-y', listA.name, listB.name])
# diff exits with 1 if outputs mismatch... grml
except subprocess.CalledProcessError, e:
inp = e.output
inp = inp.decode("utf-8").split("\n")
#create list of difference
for entry in inp:
g = entry.replace("\t"," ")
g = g.split()
new_list.append(g)
del new_list[-1]
return new_list
|
Add missing module for merge script#!/usr/bin/python
# coding=utf-8
import subprocess
class DiffMatcher(object):
def __init__(self, listA, listB):
self.listA = listA
self.listB = listB
def create_diff(self, listA, listB,case_sensitive):
new_list = []
#compare the two files
try:
if (case_sensitive):
#ignore case sensitiveness
inp = subprocess.check_output(['diff', '-iy', listA.name, listB.name])
else:
inp = subprocess.check_output(['diff', '-y', listA.name, listB.name])
# diff exits with 1 if outputs mismatch... grml
except subprocess.CalledProcessError, e:
inp = e.output
inp = inp.decode("utf-8").split("\n")
#create list of difference
for entry in inp:
g = entry.replace("\t"," ")
g = g.split()
new_list.append(g)
del new_list[-1]
return new_list
|
<commit_before><commit_msg>Add missing module for merge script<commit_after>#!/usr/bin/python
# coding=utf-8
import subprocess
class DiffMatcher(object):
def __init__(self, listA, listB):
self.listA = listA
self.listB = listB
def create_diff(self, listA, listB,case_sensitive):
new_list = []
#compare the two files
try:
if (case_sensitive):
#ignore case sensitiveness
inp = subprocess.check_output(['diff', '-iy', listA.name, listB.name])
else:
inp = subprocess.check_output(['diff', '-y', listA.name, listB.name])
# diff exits with 1 if outputs mismatch... grml
except subprocess.CalledProcessError, e:
inp = e.output
inp = inp.decode("utf-8").split("\n")
#create list of difference
for entry in inp:
g = entry.replace("\t"," ")
g = g.split()
new_list.append(g)
del new_list[-1]
return new_list
|
|
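Shelling out to GNU diff ties DiffMatcher to a Unix environment with diff installed. As a rough alternative (an assumption, not what the original module does), the standard-library difflib can produce a similar line-by-line comparison without a subprocess; note that the marker format differs from `diff -y` output:

# Hypothetical stdlib-only variant of create_diff using difflib instead of
# invoking `diff`; the paths and the case-handling flag are placeholders.
import difflib

def create_diff(path_a, path_b, ignore_case=False):
    with open(path_a) as fa, open(path_b) as fb:
        lines_a = fa.readlines()
        lines_b = fb.readlines()
    if ignore_case:
        lines_a = [line.lower() for line in lines_a]
        lines_b = [line.lower() for line in lines_b]
    # ndiff prefixes each line with '  ', '- ', '+ ' or '? '.
    return [entry.rstrip('\n').split() for entry in difflib.ndiff(lines_a, lines_b)]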
ba8be59db72c958e2ff20b9ae7fe81c400b40f9c
|
bluebottle/time_based/migrations/0008_auto_20201023_1443.py
|
bluebottle/time_based/migrations/0008_auto_20201023_1443.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2020-10-23 12:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('time_based', '0007_auto_20201023_1433'),
]
operations = [
migrations.AlterField(
model_name='ongoingactivity',
name='start',
field=models.DateField(blank=True, null=True, verbose_name='Start of activity'),
),
migrations.AlterField(
model_name='withadeadlineactivity',
name='start',
field=models.DateField(blank=True, null=True, verbose_name='Start of activity'),
),
]
|
Make start of ongoing and deadline activities just a date
|
Make start of ongoing and deadline activities just a date
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
Make start of ongoing and deadline activities just a date
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2020-10-23 12:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('time_based', '0007_auto_20201023_1433'),
]
operations = [
migrations.AlterField(
model_name='ongoingactivity',
name='start',
field=models.DateField(blank=True, null=True, verbose_name='Start of activity'),
),
migrations.AlterField(
model_name='withadeadlineactivity',
name='start',
field=models.DateField(blank=True, null=True, verbose_name='Start of activity'),
),
]
|
<commit_before><commit_msg>Make start of ongoing and deadline activities just a date<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2020-10-23 12:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('time_based', '0007_auto_20201023_1433'),
]
operations = [
migrations.AlterField(
model_name='ongoingactivity',
name='start',
field=models.DateField(blank=True, null=True, verbose_name='Start of activity'),
),
migrations.AlterField(
model_name='withadeadlineactivity',
name='start',
field=models.DateField(blank=True, null=True, verbose_name='Start of activity'),
),
]
|
Make start of ongoing and deadline activities just a date# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2020-10-23 12:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('time_based', '0007_auto_20201023_1433'),
]
operations = [
migrations.AlterField(
model_name='ongoingactivity',
name='start',
field=models.DateField(blank=True, null=True, verbose_name='Start of activity'),
),
migrations.AlterField(
model_name='withadeadlineactivity',
name='start',
field=models.DateField(blank=True, null=True, verbose_name='Start of activity'),
),
]
|
<commit_before><commit_msg>Make start of ongoing and deadline activities just a date<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2020-10-23 12:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('time_based', '0007_auto_20201023_1433'),
]
operations = [
migrations.AlterField(
model_name='ongoingactivity',
name='start',
field=models.DateField(blank=True, null=True, verbose_name='Start of activity'),
),
migrations.AlterField(
model_name='withadeadlineactivity',
name='start',
field=models.DateField(blank=True, null=True, verbose_name='Start of activity'),
),
]
|
|
38090ac06a48a4205cbc2318e3ad9296d5b08ea5
|
temba/msgs/migrations/0069_populate_broadcast_base_lang.py
|
temba/msgs/migrations/0069_populate_broadcast_base_lang.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import defaultdict
from django.db import migrations
from temba.utils import chunk_list
def do_populate(Broadcast, FlowStep):
BroadcastSteps = FlowStep.broadcasts.through
broadcast_ids = list(Broadcast.objects.values_list('id', flat=True).order_by('org_id'))
num_processed = 0
if broadcast_ids:
print("Starting population of Broadcast.base_language for %d total broadcasts..." % len(broadcast_ids))
for id_batch in chunk_list(broadcast_ids, 1000):
broadcast_steps = BroadcastSteps.objects.filter(broadcast_id__in=id_batch).distinct('broadcast_id')
broadcast_steps = broadcast_steps.prefetch_related('flowstep__run__flow')
# dict of language codes to lists of broadcast ids
broadcasts_by_lang = defaultdict(list)
for broadcast_step in broadcast_steps:
flow = broadcast_step.flowstep.run.flow
if flow.base_language:
broadcasts_by_lang[flow.base_language].append(broadcast_step.broadcast_id)
# update each set of broadcasts associated with a particular flow
num_updated = 0
for lang, bcast_ids in broadcasts_by_lang.items():
Broadcast.objects.filter(id__in=bcast_ids).update(base_language=lang)
num_updated += len(bcast_ids)
num_processed += len(id_batch)
print(" > Processed %d of %d broadcasts (updated %d with %d different languages)"
% (num_processed, len(broadcast_ids), num_updated, len(broadcasts_by_lang)))
if broadcast_ids:
print("Finished population of Broadcast.base_language for %d total broadcasts" % len(broadcast_ids))
def apply_as_migration(apps, schema_editor):
Broadcast = apps.get_model('msgs', 'Broadcast')
FlowStep = apps.get_model('flows', 'FlowStep')
do_populate(Broadcast, FlowStep)
def apply_offline():
from temba.flows.models import FlowStep
from temba.msgs.models import Broadcast
do_populate(Broadcast, FlowStep)
class Migration(migrations.Migration):
dependencies = [
('msgs', '0068_broadcast_base_language'),
]
operations = [
migrations.RunPython(apply_as_migration)
]
|
Add migration to populate Broadcast.base_language
|
Add migration to populate Broadcast.base_language
|
Python
|
agpl-3.0
|
pulilab/rapidpro,tsotetsi/textily-web,tsotetsi/textily-web,pulilab/rapidpro,tsotetsi/textily-web,tsotetsi/textily-web,pulilab/rapidpro,tsotetsi/textily-web,pulilab/rapidpro,pulilab/rapidpro
|
Add migration to populate Broadcast.base_language
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import defaultdict
from django.db import migrations
from temba.utils import chunk_list
def do_populate(Broadcast, FlowStep):
BroadcastSteps = FlowStep.broadcasts.through
broadcast_ids = list(Broadcast.objects.values_list('id', flat=True).order_by('org_id'))
num_processed = 0
if broadcast_ids:
print("Starting population of Broadcast.base_language for %d total broadcasts..." % len(broadcast_ids))
for id_batch in chunk_list(broadcast_ids, 1000):
broadcast_steps = BroadcastSteps.objects.filter(broadcast_id__in=id_batch).distinct('broadcast_id')
broadcast_steps = broadcast_steps.prefetch_related('flowstep__run__flow')
# dict of language codes to lists of broadcast ids
broadcasts_by_lang = defaultdict(list)
for broadcast_step in broadcast_steps:
flow = broadcast_step.flowstep.run.flow
if flow.base_language:
broadcasts_by_lang[flow.base_language].append(broadcast_step.broadcast_id)
# update each set of broadcasts associated with a particular flow
num_updated = 0
for lang, bcast_ids in broadcasts_by_lang.items():
Broadcast.objects.filter(id__in=bcast_ids).update(base_language=lang)
num_updated += len(bcast_ids)
num_processed += len(id_batch)
print(" > Processed %d of %d broadcasts (updated %d with %d different languages)"
% (num_processed, len(broadcast_ids), num_updated, len(broadcasts_by_lang)))
if broadcast_ids:
print("Finished population of Broadcast.base_language for %d total broadcasts" % len(broadcast_ids))
def apply_as_migration(apps, schema_editor):
Broadcast = apps.get_model('msgs', 'Broadcast')
FlowStep = apps.get_model('flows', 'FlowStep')
do_populate(Broadcast, FlowStep)
def apply_offline():
from temba.flows.models import FlowStep
from temba.msgs.models import Broadcast
do_populate(Broadcast, FlowStep)
class Migration(migrations.Migration):
dependencies = [
('msgs', '0068_broadcast_base_language'),
]
operations = [
migrations.RunPython(apply_as_migration)
]
|
<commit_before><commit_msg>Add migration to populate Broadcast.base_language<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import defaultdict
from django.db import migrations
from temba.utils import chunk_list
def do_populate(Broadcast, FlowStep):
BroadcastSteps = FlowStep.broadcasts.through
broadcast_ids = list(Broadcast.objects.values_list('id', flat=True).order_by('org_id'))
num_processed = 0
if broadcast_ids:
print("Starting population of Broadcast.base_language for %d total broadcasts..." % len(broadcast_ids))
for id_batch in chunk_list(broadcast_ids, 1000):
broadcast_steps = BroadcastSteps.objects.filter(broadcast_id__in=id_batch).distinct('broadcast_id')
broadcast_steps = broadcast_steps.prefetch_related('flowstep__run__flow')
# dict of language codes to lists of broadcast ids
broadcasts_by_lang = defaultdict(list)
for broadcast_step in broadcast_steps:
flow = broadcast_step.flowstep.run.flow
if flow.base_language:
broadcasts_by_lang[flow.base_language].append(broadcast_step.broadcast_id)
# update each set of broadcasts associated with a particular flow
num_updated = 0
for lang, bcast_ids in broadcasts_by_lang.items():
Broadcast.objects.filter(id__in=bcast_ids).update(base_language=lang)
num_updated += len(bcast_ids)
num_processed += len(id_batch)
print(" > Processed %d of %d broadcasts (updated %d with %d different languages)"
% (num_processed, len(broadcast_ids), num_updated, len(broadcasts_by_lang)))
if broadcast_ids:
print("Finished population of Broadcast.base_language for %d total broadcasts" % len(broadcast_ids))
def apply_as_migration(apps, schema_editor):
Broadcast = apps.get_model('msgs', 'Broadcast')
FlowStep = apps.get_model('flows', 'FlowStep')
do_populate(Broadcast, FlowStep)
def apply_offline():
from temba.flows.models import FlowStep
from temba.msgs.models import Broadcast
do_populate(Broadcast, FlowStep)
class Migration(migrations.Migration):
dependencies = [
('msgs', '0068_broadcast_base_language'),
]
operations = [
migrations.RunPython(apply_as_migration)
]
|
Add migration to populate Broadcast.base_language# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import defaultdict
from django.db import migrations
from temba.utils import chunk_list
def do_populate(Broadcast, FlowStep):
BroadcastSteps = FlowStep.broadcasts.through
broadcast_ids = list(Broadcast.objects.values_list('id', flat=True).order_by('org_id'))
num_processed = 0
if broadcast_ids:
print("Starting population of Broadcast.base_language for %d total broadcasts..." % len(broadcast_ids))
for id_batch in chunk_list(broadcast_ids, 1000):
broadcast_steps = BroadcastSteps.objects.filter(broadcast_id__in=id_batch).distinct('broadcast_id')
broadcast_steps = broadcast_steps.prefetch_related('flowstep__run__flow')
# dict of language codes to lists of broadcast ids
broadcasts_by_lang = defaultdict(list)
for broadcast_step in broadcast_steps:
flow = broadcast_step.flowstep.run.flow
if flow.base_language:
broadcasts_by_lang[flow.base_language].append(broadcast_step.broadcast_id)
# update each set of broadcasts associated with a particular flow
num_updated = 0
for lang, bcast_ids in broadcasts_by_lang.items():
Broadcast.objects.filter(id__in=bcast_ids).update(base_language=lang)
num_updated += len(bcast_ids)
num_processed += len(id_batch)
print(" > Processed %d of %d broadcasts (updated %d with %d different languages)"
% (num_processed, len(broadcast_ids), num_updated, len(broadcasts_by_lang)))
if broadcast_ids:
print("Finished population of Broadcast.base_language for %d total broadcasts" % len(broadcast_ids))
def apply_as_migration(apps, schema_editor):
Broadcast = apps.get_model('msgs', 'Broadcast')
FlowStep = apps.get_model('flows', 'FlowStep')
do_populate(Broadcast, FlowStep)
def apply_offline():
from temba.flows.models import FlowStep
from temba.msgs.models import Broadcast
do_populate(Broadcast, FlowStep)
class Migration(migrations.Migration):
dependencies = [
('msgs', '0068_broadcast_base_language'),
]
operations = [
migrations.RunPython(apply_as_migration)
]
|
<commit_before><commit_msg>Add migration to populate Broadcast.base_language<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import defaultdict
from django.db import migrations
from temba.utils import chunk_list
def do_populate(Broadcast, FlowStep):
BroadcastSteps = FlowStep.broadcasts.through
broadcast_ids = list(Broadcast.objects.values_list('id', flat=True).order_by('org_id'))
num_processed = 0
if broadcast_ids:
print("Starting population of Broadcast.base_language for %d total broadcasts..." % len(broadcast_ids))
for id_batch in chunk_list(broadcast_ids, 1000):
broadcast_steps = BroadcastSteps.objects.filter(broadcast_id__in=id_batch).distinct('broadcast_id')
broadcast_steps = broadcast_steps.prefetch_related('flowstep__run__flow')
# dict of language codes to lists of broadcast ids
broadcasts_by_lang = defaultdict(list)
for broadcast_step in broadcast_steps:
flow = broadcast_step.flowstep.run.flow
if flow.base_language:
broadcasts_by_lang[flow.base_language].append(broadcast_step.broadcast_id)
# update each set of broadcasts associated with a particular flow
num_updated = 0
for lang, bcast_ids in broadcasts_by_lang.items():
Broadcast.objects.filter(id__in=bcast_ids).update(base_language=lang)
num_updated += len(bcast_ids)
num_processed += len(id_batch)
print(" > Processed %d of %d broadcasts (updated %d with %d different languages)"
% (num_processed, len(broadcast_ids), num_updated, len(broadcasts_by_lang)))
if broadcast_ids:
print("Finished population of Broadcast.base_language for %d total broadcasts" % len(broadcast_ids))
def apply_as_migration(apps, schema_editor):
Broadcast = apps.get_model('msgs', 'Broadcast')
FlowStep = apps.get_model('flows', 'FlowStep')
do_populate(Broadcast, FlowStep)
def apply_offline():
from temba.flows.models import FlowStep
from temba.msgs.models import Broadcast
do_populate(Broadcast, FlowStep)
class Migration(migrations.Migration):
dependencies = [
('msgs', '0068_broadcast_base_language'),
]
operations = [
migrations.RunPython(apply_as_migration)
]
|
|
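The migration leans on `temba.utils.chunk_list`, which is not reproduced in this record. A typical implementation of such a helper (an assumption, possibly different from the project's actual code) simply yields fixed-size slices of a list:

# Hypothetical stand-in for a chunk_list helper: yield successive batches of
# `size` items so the migration can update broadcasts 1000 ids at a time.
def chunk_list(items, size):
    for start in range(0, len(items), size):
        yield items[start:start + size]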
d2a80a76fdf28625ad36b2fd71af56938b9b9506
|
src/trackknown.py
|
src/trackknown.py
|
#!/usr/bin/env python
'''
@author jstober
Simple class to track knowledge of states and actions. Based on
L. Li, M. L. Littman, and C. R. Mansley, "Online exploration in least-squares policy iteration" AAMAS, 2009.
'''
import numpy as np
import pdb
class TrackKnown:
"""
Track knowledge of states and actions.
TODO: Generalize by adding epsilon and kd tree or approximation methods.
"""
def __init__(self, nstates, nactions, mcount):
self.nstates = nstates
self.nactions = nactions
self.mcount = mcount
self.counts = np.zeros(nstates, nactions)
def init(self, samples):
for (s,a,r,ns,na) in samples:
self.counts[s,a] += 1
def known_pair(self,s,a):
if self.counts[s,a] > self.mcount:
return True
else:
return False
def known_state(self,s):
if np.greater(self.counts[s,:],self.mcount).all():
return True
else:
return False
def unknown(self,s):
# indices of actions with low counts.
return np.where(self.counts[s,:] < self.mcount)[0]
|
Add needed track known class.
|
Add needed track known class.
|
Python
|
bsd-2-clause
|
stober/lspi
|
Add needed track known class.
|
#!/usr/bin/env python
'''
@author jstober
Simple class to track knowledge of states and actions. Based on
L. Li, M. L. Littman, and C. R. Mansley, "Online exploration in least-squares policy iteration" AAMAS, 2009.
'''
import numpy as np
import pdb
class TrackKnown:
"""
Track knowledge of states and actions.
TODO: Generalize by adding epsilon and kd tree or approximation methods.
"""
def __init__(self, nstates, nactions, mcount):
self.nstates = nstates
self.nactions = nactions
self.mcount = mcount
self.counts = np.zeros(nstates, nactions)
def init(self, samples):
for (s,a,r,ns,na) in samples:
self.counts[s,a] += 1
def known_pair(self,s,a):
if self.counts[s,a] > self.mcount:
return True
else:
return False
def known_state(self,s):
if np.greater(self.counts[s,:],self.mcount).all():
return True
else:
return False
def unknown(self,s):
# indices of actions with low counts.
return np.where(self.counts[s,:] < self.mcount)[0]
|
<commit_before><commit_msg>Add needed track known class.<commit_after>
|
#!/usr/bin/env python
'''
@author jstober
Simple class to track knowledge of states and actions. Based on
L. Li, M. L. Littman, and C. R. Mansley, "Online exploration in least-squares policy iteration" AAMAS, 2009.
'''
import numpy as np
import pdb
class TrackKnown:
"""
Track knowledge of states and actions.
TODO: Generalize by adding epsilon and kd tree or approximation methods.
"""
def __init__(self, nstates, nactions, mcount):
self.nstates = nstates
self.nactions = nactions
self.mcount = mcount
self.counts = np.zeros(nstates, nactions)
def init(self, samples):
for (s,a,r,ns,na) in samples:
self.counts[s,a] += 1
def known_pair(self,s,a):
if self.counts[s,a] > self.mcount:
return True
else:
return False
def known_state(self,s):
if np.greater(self.counts[s,:],self.mcount).all():
return True
else:
return False
def unknown(self,s):
# indices of actions with low counts.
return np.where(self.counts[s,:] < self.mcount)[0]
|
Add needed track known class.#!/usr/bin/env python
'''
@author jstober
Simple class to track knowledge of states and actions. Based on
L. Li, M. L. Littman, and C. R. Mansley, "Online exploration in least-squares policy iteration" AAMAS, 2009.
'''
import numpy as np
import pdb
class TrackKnown:
"""
Track knowledge of states and actions.
TODO: Generalize by adding epsilon and kd tree or approximation methods.
"""
def __init__(self, nstates, nactions, mcount):
self.nstates = nstates
self.nactions = nactions
self.mcount = mcount
self.counts = np.zeros(nstates, nactions)
def init(self, samples):
for (s,a,r,ns,na) in samples:
self.counts[s,a] += 1
def known_pair(self,s,a):
if self.counts[s,a] > self.mcount:
return True
else:
return False
def known_state(self,s):
if np.greater(self.counts[s,:],self.mcount).all():
return True
else:
return False
def unknown(self,s):
# indices of actions with low counts.
return np.where(self.counts[s,:] < self.mcount)[0]
|
<commit_before><commit_msg>Add needed track known class.<commit_after>#!/usr/bin/env python
'''
@author jstober
Simple class to track knowledge of states and actions. Based on
L. Li, M. L. Littman, and C. R. Mansley, "Online exploration in least-squares policy iteration" AAMAS, 2009.
'''
import numpy as np
import pdb
class TrackKnown:
"""
Track knowledge of states and actions.
TODO: Generalize by adding epsilon and kd tree or approximation methods.
"""
def __init__(self, nstates, nactions, mcount):
self.nstates = nstates
self.nactions = nactions
self.mcount = mcount
self.counts = np.zeros(nstates, nactions)
def init(self, samples):
for (s,a,r,ns,na) in samples:
self.counts[s,a] += 1
def known_pair(self,s,a):
if self.counts[s,a] > self.mcount:
return True
else:
return False
def known_state(self,s):
if np.greater(self.counts[s,:],self.mcount).all():
return True
else:
return False
def unknown(self,s):
# indices of actions with low counts.
return np.where(self.counts[s,:] < self.mcount)[0]
|
|
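One detail to keep in mind when reusing this class: `np.zeros` takes the shape as a single tuple, so `np.zeros(nstates, nactions)` treats `nactions` as a dtype and raises a TypeError. A minimal corrected sketch of the constructor (the rest of the class unchanged) would be:

# Sketch of the constructor with the shape passed as one tuple; without this,
# numpy interprets the second positional argument as a dtype.
import numpy as np

class TrackKnown:
    def __init__(self, nstates, nactions, mcount):
        self.nstates = nstates
        self.nactions = nactions
        self.mcount = mcount
        self.counts = np.zeros((nstates, nactions))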
5dfa4397a282ddbafb57d990bc7d630fb6f927de
|
build.py
|
build.py
|
"""Update conda packages on binstars with latest versions"""
import os
import six
import subprocess
import time
ATTEMPTS = 3
RETRY_INTERVAL = 0.1
def execute(command, **kwargs):
"""Helper method to shell out and execute a command through subprocess.
:param attempts: How many times to retry running the command.
:param binary: On Python 3, return stdout and stderr as bytes if
binary is True, as Unicode otherwise.
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
:class:`CalledProcessError` unless
program exits with one of these code.
:param command: The command passed to the subprocess.Popen.
:param cwd: Set the current working directory
:param env_variables: Environment variables and their values that
will be set for the process.
:param retry_interval: Interval between execute attempts, in seconds
:param shell: whether or not there should be a shell used to
execute this command.
:raises: :class:`subprocess.CalledProcessError`
"""
# pylint: disable=too-many-locals
attempts = kwargs.pop("attempts", ATTEMPTS)
binary = kwargs.pop('binary', False)
check_exit_code = kwargs.pop('check_exit_code', [0])
cwd = kwargs.pop('cwd', None)
env_variables = kwargs.pop("env_variables", None)
retry_interval = kwargs.pop("retry_interval", RETRY_INTERVAL)
shell = kwargs.pop("shell", False)
command = [str(argument) for argument in command]
ignore_exit_code = False
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
while attempts > 0:
attempts = attempts - 1
try:
process = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=shell,
cwd=cwd, env=env_variables)
result = process.communicate()
return_code = process.returncode
if six.PY3 and not binary and result is not None:
# pylint: disable=no-member
                # Decode from the locale using the surrogate escape error
# handler (decoding cannot fail)
(stdout, stderr) = result
stdout = os.fsdecode(stdout)
stderr = os.fsdecode(stderr)
else:
stdout, stderr = result
if not ignore_exit_code and return_code not in check_exit_code:
raise subprocess.CalledProcessError(returncode=return_code,
cmd=command,
output=(stdout, stderr))
else:
return (stdout, stderr)
except subprocess.CalledProcessError:
if attempts:
time.sleep(retry_interval)
else:
raise
raise RuntimeError("The maximum number of attempts has been exceeded.")
|
Add helper method for execute a commands
|
Add helper method for execute a commands
|
Python
|
mit
|
alexandrucoman/bcbio-dev-conda,alexandrucoman/bcbio-dev-conda
|
Add helper method for execute a commands
|
"""Update conda packages on binstars with latest versions"""
import os
import six
import subprocess
import time
ATTEMPTS = 3
RETRY_INTERVAL = 0.1
def execute(command, **kwargs):
"""Helper method to shell out and execute a command through subprocess.
:param attempts: How many times to retry running the command.
:param binary: On Python 3, return stdout and stderr as bytes if
binary is True, as Unicode otherwise.
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
:class:`CalledProcessError` unless
program exits with one of these code.
:param command: The command passed to the subprocess.Popen.
:param cwd: Set the current working directory
:param env_variables: Environment variables and their values that
will be set for the process.
:param retry_interval: Interval between execute attempts, in seconds
:param shell: whether or not there should be a shell used to
execute this command.
:raises: :class:`subprocess.CalledProcessError`
"""
# pylint: disable=too-many-locals
attempts = kwargs.pop("attempts", ATTEMPTS)
binary = kwargs.pop('binary', False)
check_exit_code = kwargs.pop('check_exit_code', [0])
cwd = kwargs.pop('cwd', None)
env_variables = kwargs.pop("env_variables", None)
retry_interval = kwargs.pop("retry_interval", RETRY_INTERVAL)
shell = kwargs.pop("shell", False)
command = [str(argument) for argument in command]
ignore_exit_code = False
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
while attempts > 0:
attempts = attempts - 1
try:
process = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=shell,
cwd=cwd, env=env_variables)
result = process.communicate()
return_code = process.returncode
if six.PY3 and not binary and result is not None:
# pylint: disable=no-member
                # Decode from the locale using the surrogate escape error
# handler (decoding cannot fail)
(stdout, stderr) = result
stdout = os.fsdecode(stdout)
stderr = os.fsdecode(stderr)
else:
stdout, stderr = result
if not ignore_exit_code and return_code not in check_exit_code:
raise subprocess.CalledProcessError(returncode=return_code,
cmd=command,
output=(stdout, stderr))
else:
return (stdout, stderr)
except subprocess.CalledProcessError:
if attempts:
time.sleep(retry_interval)
else:
raise
raise RuntimeError("The maximum number of attempts has been exceeded.")
|
<commit_before><commit_msg>Add helper method for execute a commands<commit_after>
|
"""Update conda packages on binstars with latest versions"""
import os
import six
import subprocess
import time
ATTEMPTS = 3
RETRY_INTERVAL = 0.1
def execute(command, **kwargs):
"""Helper method to shell out and execute a command through subprocess.
:param attempts: How many times to retry running the command.
:param binary: On Python 3, return stdout and stderr as bytes if
binary is True, as Unicode otherwise.
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
:class:`CalledProcessError` unless
program exits with one of these code.
:param command: The command passed to the subprocess.Popen.
:param cwd: Set the current working directory
:param env_variables: Environment variables and their values that
will be set for the process.
:param retry_interval: Interval between execute attempts, in seconds
:param shell: whether or not there should be a shell used to
execute this command.
:raises: :class:`subprocess.CalledProcessError`
"""
# pylint: disable=too-many-locals
attempts = kwargs.pop("attempts", ATTEMPTS)
binary = kwargs.pop('binary', False)
check_exit_code = kwargs.pop('check_exit_code', [0])
cwd = kwargs.pop('cwd', None)
env_variables = kwargs.pop("env_variables", None)
retry_interval = kwargs.pop("retry_interval", RETRY_INTERVAL)
shell = kwargs.pop("shell", False)
command = [str(argument) for argument in command]
ignore_exit_code = False
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
while attempts > 0:
attempts = attempts - 1
try:
process = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=shell,
cwd=cwd, env=env_variables)
result = process.communicate()
return_code = process.returncode
if six.PY3 and not binary and result is not None:
# pylint: disable=no-member
                # Decode from the locale using the surrogate escape error
# handler (decoding cannot fail)
(stdout, stderr) = result
stdout = os.fsdecode(stdout)
stderr = os.fsdecode(stderr)
else:
stdout, stderr = result
if not ignore_exit_code and return_code not in check_exit_code:
raise subprocess.CalledProcessError(returncode=return_code,
cmd=command,
output=(stdout, stderr))
else:
return (stdout, stderr)
except subprocess.CalledProcessError:
if attempts:
time.sleep(retry_interval)
else:
raise
raise RuntimeError("The maximum number of attempts has been exceeded.")
|
Add helper method for execute a commands"""Update conda packages on binstars with latest versions"""
import os
import six
import subprocess
import time
ATTEMPTS = 3
RETRY_INTERVAL = 0.1
def execute(command, **kwargs):
"""Helper method to shell out and execute a command through subprocess.
:param attempts: How many times to retry running the command.
:param binary: On Python 3, return stdout and stderr as bytes if
binary is True, as Unicode otherwise.
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
:class:`CalledProcessError` unless
program exits with one of these code.
:param command: The command passed to the subprocess.Popen.
:param cwd: Set the current working directory
:param env_variables: Environment variables and their values that
will be set for the process.
:param retry_interval: Interval between execute attempts, in seconds
:param shell: whether or not there should be a shell used to
execute this command.
:raises: :class:`subprocess.CalledProcessError`
"""
# pylint: disable=too-many-locals
attempts = kwargs.pop("attempts", ATTEMPTS)
binary = kwargs.pop('binary', False)
check_exit_code = kwargs.pop('check_exit_code', [0])
cwd = kwargs.pop('cwd', None)
env_variables = kwargs.pop("env_variables", None)
retry_interval = kwargs.pop("retry_interval", RETRY_INTERVAL)
shell = kwargs.pop("shell", False)
command = [str(argument) for argument in command]
ignore_exit_code = False
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
while attempts > 0:
attempts = attempts - 1
try:
process = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=shell,
cwd=cwd, env=env_variables)
result = process.communicate()
return_code = process.returncode
if six.PY3 and not binary and result is not None:
# pylint: disable=no-member
                # Decode from the locale using the surrogate escape error
# handler (decoding cannot fail)
(stdout, stderr) = result
stdout = os.fsdecode(stdout)
stderr = os.fsdecode(stderr)
else:
stdout, stderr = result
if not ignore_exit_code and return_code not in check_exit_code:
raise subprocess.CalledProcessError(returncode=return_code,
cmd=command,
output=(stdout, stderr))
else:
return (stdout, stderr)
except subprocess.CalledProcessError:
if attempts:
time.sleep(retry_interval)
else:
raise
raise RuntimeError("The maximum number of attempts has been exceeded.")
|
<commit_before><commit_msg>Add helper method for execute a commands<commit_after>"""Update conda packages on binstars with latest versions"""
import os
import six
import subprocess
import time
ATTEMPTS = 3
RETRY_INTERVAL = 0.1
def execute(command, **kwargs):
"""Helper method to shell out and execute a command through subprocess.
:param attempts: How many times to retry running the command.
:param binary: On Python 3, return stdout and stderr as bytes if
binary is True, as Unicode otherwise.
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
:class:`CalledProcessError` unless
program exits with one of these code.
:param command: The command passed to the subprocess.Popen.
:param cwd: Set the current working directory
:param env_variables: Environment variables and their values that
will be set for the process.
:param retry_interval: Interval between execute attempts, in seconds
:param shell: whether or not there should be a shell used to
execute this command.
:raises: :class:`subprocess.CalledProcessError`
"""
# pylint: disable=too-many-locals
attempts = kwargs.pop("attempts", ATTEMPTS)
binary = kwargs.pop('binary', False)
check_exit_code = kwargs.pop('check_exit_code', [0])
cwd = kwargs.pop('cwd', None)
env_variables = kwargs.pop("env_variables", None)
retry_interval = kwargs.pop("retry_interval", RETRY_INTERVAL)
shell = kwargs.pop("shell", False)
command = [str(argument) for argument in command]
ignore_exit_code = False
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
while attempts > 0:
attempts = attempts - 1
try:
process = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=shell,
cwd=cwd, env=env_variables)
result = process.communicate()
return_code = process.returncode
if six.PY3 and not binary and result is not None:
# pylint: disable=no-member
                # Decode from the locale using the surrogate escape error
# handler (decoding cannot fail)
(stdout, stderr) = result
stdout = os.fsdecode(stdout)
stderr = os.fsdecode(stderr)
else:
stdout, stderr = result
if not ignore_exit_code and return_code not in check_exit_code:
raise subprocess.CalledProcessError(returncode=return_code,
cmd=command,
output=(stdout, stderr))
else:
return (stdout, stderr)
except subprocess.CalledProcessError:
if attempts:
time.sleep(retry_interval)
else:
raise
raise RuntimeError("The maximum number of attempts has been exceeded.")
|
|
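For reference, an assumed pair of call sites for the execute() helper above; the commands and keyword values are placeholders rather than anything taken from the repository:

# Hypothetical usage of execute(): retry a command twice with a one second
# pause, then run another command while ignoring its exit status entirely.
from build import execute  # assumes the helper above lives in build.py, as this record suggests

stdout, stderr = execute(['git', 'status'], attempts=2, retry_interval=1.0)
print(stdout)

out, err = execute(['ls', '/nonexistent'], check_exit_code=False)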
bd371ecbd2ac163e44f104a775390b2ca2b88d35
|
migrations/versions/75704b2e975e_add_index_on_departement_for_numero.py
|
migrations/versions/75704b2e975e_add_index_on_departement_for_numero.py
|
"""Add index on Departement for numero
Revision ID: 75704b2e975e
Revises: 34c2049aaee2
Create Date: 2019-10-22 17:27:10.925104
"""
# revision identifiers, used by Alembic.
revision = '75704b2e975e'
down_revision = '34c2049aaee2'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_index('departement_numero_index', 'departement', ['numero'], unique=False)
def downgrade():
op.drop_index('departement_numero_index', table_name='departement')
|
Add migration for index on departement
|
Add migration for index on departement
|
Python
|
agpl-3.0
|
openmaraude/APITaxi,openmaraude/APITaxi
|
Add migration for index on departement
|
"""Add index on Departement for numero
Revision ID: 75704b2e975e
Revises: 34c2049aaee2
Create Date: 2019-10-22 17:27:10.925104
"""
# revision identifiers, used by Alembic.
revision = '75704b2e975e'
down_revision = '34c2049aaee2'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_index('departement_numero_index', 'departement', ['numero'], unique=False)
def downgrade():
op.drop_index('departement_numero_index', table_name='departement')
|
<commit_before><commit_msg>Add migration for index on departement<commit_after>
|
"""Add index on Departement for numero
Revision ID: 75704b2e975e
Revises: 34c2049aaee2
Create Date: 2019-10-22 17:27:10.925104
"""
# revision identifiers, used by Alembic.
revision = '75704b2e975e'
down_revision = '34c2049aaee2'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_index('departement_numero_index', 'departement', ['numero'], unique=False)
def downgrade():
op.drop_index('departement_numero_index', table_name='departement')
|
Add migration for index on departement"""Add index on Departement for numero
Revision ID: 75704b2e975e
Revises: 34c2049aaee2
Create Date: 2019-10-22 17:27:10.925104
"""
# revision identifiers, used by Alembic.
revision = '75704b2e975e'
down_revision = '34c2049aaee2'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_index('departement_numero_index', 'departement', ['numero'], unique=False)
def downgrade():
op.drop_index('departement_numero_index', table_name='departement')
|
<commit_before><commit_msg>Add migration for index on departement<commit_after>"""Add index on Departement for numero
Revision ID: 75704b2e975e
Revises: 34c2049aaee2
Create Date: 2019-10-22 17:27:10.925104
"""
# revision identifiers, used by Alembic.
revision = '75704b2e975e'
down_revision = '34c2049aaee2'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_index('departement_numero_index', 'departement', ['numero'], unique=False)
def downgrade():
op.drop_index('departement_numero_index', table_name='departement')
|
|
e7e37e9b1fd56d18711299065d6f421c1cb28bac
|
moksha/tests/test_feed.py
|
moksha/tests/test_feed.py
|
from tw.api import Widget
from moksha.feed import Feed
class TestFeed(object):
def test_feed_subclassing(self):
class MyFeed(Feed):
url = 'http://lewk.org/rss'
feed = MyFeed()
assert feed.url == 'http://lewk.org/rss'
assert feed.num_entries() > 0
for entry in feed.iterentries():
pass
for entry in feed.entries():
pass
def test_widget_children(self):
class MyWidget(Widget):
myfeedurl = 'http://lewk.org/rss'
children = [Feed('myfeed', url=myfeedurl)]
template = "mako:${c.myfeed()}"
widget = MyWidget()
assert widget.c.myfeed
def test_feed_generator(self):
feed = Feed(url='http://lewk.org/rss')
iter = feed.iterentries()
data = iter.next()
assert iter.next()
|
Add some Feed test cases
|
Add some Feed test cases
|
Python
|
apache-2.0
|
pombredanne/moksha,lmacken/moksha,pombredanne/moksha,mokshaproject/moksha,lmacken/moksha,mokshaproject/moksha,ralphbean/moksha,pombredanne/moksha,pombredanne/moksha,ralphbean/moksha,mokshaproject/moksha,lmacken/moksha,mokshaproject/moksha,ralphbean/moksha
|
Add some Feed test cases
|
from tw.api import Widget
from moksha.feed import Feed
class TestFeed(object):
def test_feed_subclassing(self):
class MyFeed(Feed):
url = 'http://lewk.org/rss'
feed = MyFeed()
assert feed.url == 'http://lewk.org/rss'
assert feed.num_entries() > 0
for entry in feed.iterentries():
pass
for entry in feed.entries():
pass
def test_widget_children(self):
class MyWidget(Widget):
myfeedurl = 'http://lewk.org/rss'
children = [Feed('myfeed', url=myfeedurl)]
template = "mako:${c.myfeed()}"
widget = MyWidget()
assert widget.c.myfeed
def test_feed_generator(self):
feed = Feed(url='http://lewk.org/rss')
iter = feed.iterentries()
data = iter.next()
assert iter.next()
|
<commit_before><commit_msg>Add some Feed test cases<commit_after>
|
from tw.api import Widget
from moksha.feed import Feed
class TestFeed(object):
def test_feed_subclassing(self):
class MyFeed(Feed):
url = 'http://lewk.org/rss'
feed = MyFeed()
assert feed.url == 'http://lewk.org/rss'
assert feed.num_entries() > 0
for entry in feed.iterentries():
pass
for entry in feed.entries():
pass
def test_widget_children(self):
class MyWidget(Widget):
myfeedurl = 'http://lewk.org/rss'
children = [Feed('myfeed', url=myfeedurl)]
template = "mako:${c.myfeed()}"
widget = MyWidget()
assert widget.c.myfeed
def test_feed_generator(self):
feed = Feed(url='http://lewk.org/rss')
iter = feed.iterentries()
data = iter.next()
assert iter.next()
|
Add some Feed test casesfrom tw.api import Widget
from moksha.feed import Feed
class TestFeed(object):
def test_feed_subclassing(self):
class MyFeed(Feed):
url = 'http://lewk.org/rss'
feed = MyFeed()
assert feed.url == 'http://lewk.org/rss'
assert feed.num_entries() > 0
for entry in feed.iterentries():
pass
for entry in feed.entries():
pass
def test_widget_children(self):
class MyWidget(Widget):
myfeedurl = 'http://lewk.org/rss'
children = [Feed('myfeed', url=myfeedurl)]
template = "mako:${c.myfeed()}"
widget = MyWidget()
assert widget.c.myfeed
def test_feed_generator(self):
feed = Feed(url='http://lewk.org/rss')
iter = feed.iterentries()
data = iter.next()
assert iter.next()
|
<commit_before><commit_msg>Add some Feed test cases<commit_after>from tw.api import Widget
from moksha.feed import Feed
class TestFeed(object):
def test_feed_subclassing(self):
class MyFeed(Feed):
url = 'http://lewk.org/rss'
feed = MyFeed()
assert feed.url == 'http://lewk.org/rss'
assert feed.num_entries() > 0
for entry in feed.iterentries():
pass
for entry in feed.entries():
pass
def test_widget_children(self):
class MyWidget(Widget):
myfeedurl = 'http://lewk.org/rss'
children = [Feed('myfeed', url=myfeedurl)]
template = "mako:${c.myfeed()}"
widget = MyWidget()
assert widget.c.myfeed
def test_feed_generator(self):
feed = Feed(url='http://lewk.org/rss')
iter = feed.iterentries()
data = iter.next()
assert iter.next()
|
|
226cf36e4b4d069a920785b492804b78eebc34a5
|
corehq/apps/locations/management/commands/migrate_admin_status.py
|
corehq/apps/locations/management/commands/migrate_admin_status.py
|
# One-off migration from 2016-04-04
from optparse import make_option
from time import sleep
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import LocationType, SQLLocation
from corehq.apps.es import DomainES
from corehq.util.log import with_progress_bar
def get_affected_location_types():
commtrack_domains = (DomainES()
.commtrack_domains()
.values_list('name', flat=True))
return (LocationType.objects
.exclude(domain__in=commtrack_domains)
.filter(administrative=False))
def show_info():
location_types = get_affected_location_types()
num_locations = SQLLocation.objects.filter(location_type__in=location_types).count()
print ("There are {domains} domains, {loc_types} loc types, and "
"{locations} locations affected").format(
domains=location_types.distinct('domain').count(),
loc_types=location_types.count(),
locations=num_locations,
)
def run_migration():
for location_type in with_progress_bar(get_affected_location_types()):
if not location_type.administrative:
location_type.administrative = True
location_type.save()
sleep(1)
class Command(BaseCommand):
help = ('There are a bunch of LocationTypes on non-commtrack domains which'
'incorrectly are marked as administrative=False')
option_list = BaseCommand.option_list + (
make_option('--run', action='store_true', default=False),
)
def handle(self, *args, **options):
if options.get('run', False):
run_migration()
else:
show_info()
print "pass `--run` to run the migration"
|
Make non-commtrack location types administrative
|
Make non-commtrack location types administrative
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq
|
Make non-commtrack location types administrative
|
# One-off migration from 2016-04-04
from optparse import make_option
from time import sleep
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import LocationType, SQLLocation
from corehq.apps.es import DomainES
from corehq.util.log import with_progress_bar
def get_affected_location_types():
commtrack_domains = (DomainES()
.commtrack_domains()
.values_list('name', flat=True))
return (LocationType.objects
.exclude(domain__in=commtrack_domains)
.filter(administrative=False))
def show_info():
location_types = get_affected_location_types()
num_locations = SQLLocation.objects.filter(location_type__in=location_types).count()
print ("There are {domains} domains, {loc_types} loc types, and "
"{locations} locations affected").format(
domains=location_types.distinct('domain').count(),
loc_types=location_types.count(),
locations=num_locations,
)
def run_migration():
for location_type in with_progress_bar(get_affected_location_types()):
if not location_type.administrative:
location_type.administrative = True
location_type.save()
sleep(1)
class Command(BaseCommand):
help = ('There are a bunch of LocationTypes on non-commtrack domains which'
'incorrectly are marked as administrative=False')
option_list = BaseCommand.option_list + (
make_option('--run', action='store_true', default=False),
)
def handle(self, *args, **options):
if options.get('run', False):
run_migration()
else:
show_info()
print "pass `--run` to run the migration"
|
<commit_before><commit_msg>Make non-commtrack location types administrative<commit_after>
|
# One-off migration from 2016-04-04
from optparse import make_option
from time import sleep
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import LocationType, SQLLocation
from corehq.apps.es import DomainES
from corehq.util.log import with_progress_bar
def get_affected_location_types():
commtrack_domains = (DomainES()
.commtrack_domains()
.values_list('name', flat=True))
return (LocationType.objects
.exclude(domain__in=commtrack_domains)
.filter(administrative=False))
def show_info():
location_types = get_affected_location_types()
num_locations = SQLLocation.objects.filter(location_type__in=location_types).count()
print ("There are {domains} domains, {loc_types} loc types, and "
"{locations} locations affected").format(
domains=location_types.distinct('domain').count(),
loc_types=location_types.count(),
locations=num_locations,
)
def run_migration():
for location_type in with_progress_bar(get_affected_location_types()):
if not location_type.administrative:
location_type.administrative = True
location_type.save()
sleep(1)
class Command(BaseCommand):
help = ('There are a bunch of LocationTypes on non-commtrack domains which'
'incorrectly are marked as administrative=False')
option_list = BaseCommand.option_list + (
make_option('--run', action='store_true', default=False),
)
def handle(self, *args, **options):
if options.get('run', False):
run_migration()
else:
show_info()
print "pass `--run` to run the migration"
|
Make non-commtrack location types administrative# One-off migration from 2016-04-04
from optparse import make_option
from time import sleep
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import LocationType, SQLLocation
from corehq.apps.es import DomainES
from corehq.util.log import with_progress_bar
def get_affected_location_types():
commtrack_domains = (DomainES()
.commtrack_domains()
.values_list('name', flat=True))
return (LocationType.objects
.exclude(domain__in=commtrack_domains)
.filter(administrative=False))
def show_info():
location_types = get_affected_location_types()
num_locations = SQLLocation.objects.filter(location_type__in=location_types).count()
print ("There are {domains} domains, {loc_types} loc types, and "
"{locations} locations affected").format(
domains=location_types.distinct('domain').count(),
loc_types=location_types.count(),
locations=num_locations,
)
def run_migration():
for location_type in with_progress_bar(get_affected_location_types()):
if not location_type.administrative:
location_type.administrative = True
location_type.save()
sleep(1)
class Command(BaseCommand):
help = ('There are a bunch of LocationTypes on non-commtrack domains which'
'incorrectly are marked as administrative=False')
option_list = BaseCommand.option_list + (
make_option('--run', action='store_true', default=False),
)
def handle(self, *args, **options):
if options.get('run', False):
run_migration()
else:
show_info()
print "pass `--run` to run the migration"
|
<commit_before><commit_msg>Make non-commtrack location types administrative<commit_after># One-off migration from 2016-04-04
from optparse import make_option
from time import sleep
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import LocationType, SQLLocation
from corehq.apps.es import DomainES
from corehq.util.log import with_progress_bar
def get_affected_location_types():
commtrack_domains = (DomainES()
.commtrack_domains()
.values_list('name', flat=True))
return (LocationType.objects
.exclude(domain__in=commtrack_domains)
.filter(administrative=False))
def show_info():
location_types = get_affected_location_types()
num_locations = SQLLocation.objects.filter(location_type__in=location_types).count()
print ("There are {domains} domains, {loc_types} loc types, and "
"{locations} locations affected").format(
domains=location_types.distinct('domain').count(),
loc_types=location_types.count(),
locations=num_locations,
)
def run_migration():
for location_type in with_progress_bar(get_affected_location_types()):
if not location_type.administrative:
location_type.administrative = True
location_type.save()
sleep(1)
class Command(BaseCommand):
help = ('There are a bunch of LocationTypes on non-commtrack domains which'
'incorrectly are marked as administrative=False')
option_list = BaseCommand.option_list + (
make_option('--run', action='store_true', default=False),
)
def handle(self, *args, **options):
if options.get('run', False):
run_migration()
else:
show_info()
print "pass `--run` to run the migration"
|
|
2ae6f4183b2096287f8155d7db7e2ed0444618c4
|
day_one_entry_splitter.py
|
day_one_entry_splitter.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
day_one_entry_splitter.py
Created by Rod Docking on 2017-01-01.
All rights reserved.
"""
import sys
def main():
"""Split entries from Day One export into separate files"""
# Entry headers look like:
# "Date: February 14, 2005 at 9:00 AM"
# Need to:
# Loop through all the lines in the input file
# When we hit a new date, open a new file with appropriate name
with open(sys.argv[1]) as in_handle:
for line in in_handle:
if "Date:" in line:
print line
if __name__ == '__main__':
main()
|
Add first version of Day One entry splitter
|
Add first version of Day One entry splitter
|
Python
|
mit
|
rdocking/bits_and_bobs
|
Add first version of Day One entry splitter
|
#!/usr/bin/env python
# encoding: utf-8
"""
day_one_entry_splitter.py
Created by Rod Docking on 2017-01-01.
All rights reserved.
"""
import sys
def main():
"""Split entries from Day One export into separate files"""
# Entry headers look like:
# "Date: February 14, 2005 at 9:00 AM"
# Need to:
# Loop through all the lines in the input file
# When we hit a new date, open a new file with appropriate name
with open(sys.argv[1]) as in_handle:
for line in in_handle:
if "Date:" in line:
print line
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add first version of Day One entry splitter<commit_after>
|
#!/usr/bin/env python
# encoding: utf-8
"""
day_one_entry_splitter.py
Created by Rod Docking on 2017-01-01.
All rights reserved.
"""
import sys
def main():
"""Split entries from Day One export into separate files"""
# Entry headers look like:
# "Date: February 14, 2005 at 9:00 AM"
# Need to:
# Loop through all the lines in the input file
# When we hit a new date, open a new file with appropriate name
with open(sys.argv[1]) as in_handle:
for line in in_handle:
if "Date:" in line:
print line
if __name__ == '__main__':
main()
|
Add first version of Day One entry splitter#!/usr/bin/env python
# encoding: utf-8
"""
day_one_entry_splitter.py
Created by Rod Docking on 2017-01-01.
All rights reserved.
"""
import sys
def main():
"""Split entries from Day One export into separate files"""
# Entry headers look like:
# "Date: February 14, 2005 at 9:00 AM"
# Need to:
# Loop through all the lines in the input file
# When we hit a new date, open a new file with appropriate name
with open(sys.argv[1]) as in_handle:
for line in in_handle:
if "Date:" in line:
print line
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add first version of Day One entry splitter<commit_after>#!/usr/bin/env python
# encoding: utf-8
"""
day_one_entry_splitter.py
Created by Rod Docking on 2017-01-01.
All rights reserved.
"""
import sys
def main():
"""Split entries from Day One export into separate files"""
# Entry headers look like:
# "Date: February 14, 2005 at 9:00 AM"
# Need to:
# Loop through all the lines in the input file
# When we hit a new date, open a new file with appropriate name
with open(sys.argv[1]) as in_handle:
for line in in_handle:
if "Date:" in line:
print line
if __name__ == '__main__':
main()
|
|
1b3d7078a4ca91ef07f90d1645f26761d1f7abac
|
examples/scatter.py
|
examples/scatter.py
|
"""
Example of how `ax.scatter` can be used to plot linear data on a stereonet
varying color and/or size by other variables.
This also serves as a general example of how to convert orientation data into
the coordinate system that the stereonet plot uses so that generic matplotlib
plotting methods may be used.
"""
import numpy as np
import matplotlib.pyplot as plt
import mplstereonet
np.random.seed(1)
strikes = np.arange(0, 360, 15)
dips = 45 * np.ones(strikes.size)
magnitude = np.random.random(strikes.size)
# Convert our strikes and dips to stereonet coordinates
lons, lats = mplstereonet.pole(strikes, dips)
# Now we'll plot our data and color by magnitude
fig, ax = mplstereonet.subplots()
sm = ax.scatter(lons, lats, c=magnitude, s=50, cmap='gist_earth')
ax.grid()
plt.show()
|
Add example of using lower-level plotting methods directly
|
Add example of using lower-level plotting methods directly
|
Python
|
mit
|
joferkington/mplstereonet
|
Add example of using lower-level plotting methods directly
|
"""
Example of how `ax.scatter` can be used to plot linear data on a stereonet
varying color and/or size by other variables.
This also serves as a general example of how to convert orientation data into
the coordinate system that the stereonet plot uses so that generic matplotlib
plotting methods may be used.
"""
import numpy as np
import matplotlib.pyplot as plt
import mplstereonet
np.random.seed(1)
strikes = np.arange(0, 360, 15)
dips = 45 * np.ones(strikes.size)
magnitude = np.random.random(strikes.size)
# Convert our strikes and dips to stereonet coordinates
lons, lats = mplstereonet.pole(strikes, dips)
# Now we'll plot our data and color by magnitude
fig, ax = mplstereonet.subplots()
sm = ax.scatter(lons, lats, c=magnitude, s=50, cmap='gist_earth')
ax.grid()
plt.show()
|
<commit_before><commit_msg>Add example of using lower-level plotting methods directly<commit_after>
|
"""
Example of how `ax.scatter` can be used to plot linear data on a stereonet
varying color and/or size by other variables.
This also serves as a general example of how to convert orientation data into
the coordinate system that the stereonet plot uses so that generic matplotlib
plotting methods may be used.
"""
import numpy as np
import matplotlib.pyplot as plt
import mplstereonet
np.random.seed(1)
strikes = np.arange(0, 360, 15)
dips = 45 * np.ones(strikes.size)
magnitude = np.random.random(strikes.size)
# Convert our strikes and dips to stereonet coordinates
lons, lats = mplstereonet.pole(strikes, dips)
# Now we'll plot our data and color by magnitude
fig, ax = mplstereonet.subplots()
sm = ax.scatter(lons, lats, c=magnitude, s=50, cmap='gist_earth')
ax.grid()
plt.show()
|
Add example of using lower-level plotting methods directly"""
Example of how `ax.scatter` can be used to plot linear data on a stereonet
varying color and/or size by other variables.
This also serves as a general example of how to convert orientation data into
the coordinate system that the stereonet plot uses so that generic matplotlib
plotting methods may be used.
"""
import numpy as np
import matplotlib.pyplot as plt
import mplstereonet
np.random.seed(1)
strikes = np.arange(0, 360, 15)
dips = 45 * np.ones(strikes.size)
magnitude = np.random.random(strikes.size)
# Convert our strikes and dips to stereonet coordinates
lons, lats = mplstereonet.pole(strikes, dips)
# Now we'll plot our data and color by magnitude
fig, ax = mplstereonet.subplots()
sm = ax.scatter(lons, lats, c=magnitude, s=50, cmap='gist_earth')
ax.grid()
plt.show()
|
<commit_before><commit_msg>Add example of using lower-level plotting methods directly<commit_after>"""
Example of how `ax.scatter` can be used to plot linear data on a stereonet
varying color and/or size by other variables.
This also serves as a general example of how to convert orientation data into
the coordinate system that the stereonet plot uses so that generic matplotlib
plotting methods may be used.
"""
import numpy as np
import matplotlib.pyplot as plt
import mplstereonet
np.random.seed(1)
strikes = np.arange(0, 360, 15)
dips = 45 * np.ones(strikes.size)
magnitude = np.random.random(strikes.size)
# Convert our strikes and dips to stereonet coordinates
lons, lats = mplstereonet.pole(strikes, dips)
# Now we'll plot our data and color by magnitude
fig, ax = mplstereonet.subplots()
sm = ax.scatter(lons, lats, c=magnitude, s=50, cmap='gist_earth')
ax.grid()
plt.show()
|
|
5828823d505aae1425fd2353f898c5b18722e6e5
|
src/robotide/ui/progress.py
|
src/robotide/ui/progress.py
|
# Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
from robotide import context
class LoadProgressObserver(object):
def __init__(self, frame):
self._progressbar = wx.ProgressDialog('RIDE', 'Loading the test data',
maximum=100, parent=frame,
style=wx.PD_ELAPSED_TIME)
def notify(self):
self._progressbar.Pulse()
def finish(self):
self._progressbar.Destroy()
context.LOG.report_parsing_errors()
def error(self, msg):
self.finish()
context.LOG.error(msg)
|
# Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
import time
from robotide import context
class ProgressObserver(object):
def __init__(self, frame, title, message):
self._progressbar = wx.ProgressDialog(title, message,
maximum=100, parent=frame,
style=wx.PD_ELAPSED_TIME)
def notify(self):
self._progressbar.Pulse()
def finish(self):
self._progressbar.Destroy()
context.LOG.report_parsing_errors()
def error(self, msg):
self.finish()
context.LOG.error(msg)
class LoadProgressObserver(ProgressObserver):
def __init__(self, frame):
ProgressObserver.__init__(self, frame, 'RIDE', 'Loading the test data')
class RenameProgressObserver(ProgressObserver):
def __init__(self, frame):
ProgressObserver.__init__(self, frame, 'RIDE', 'Renaming')
self._notification_occured = 0
def notify(self):
if time.time() - self._notification_occured > 0.1:
self._progressbar.Pulse()
self._notification_occured = time.time()
|
Introduce base class and ProgressObserver for renaming occurences.
|
progess: Introduce base class and ProgressObserver for renaming occurences.
|
Python
|
apache-2.0
|
caio2k/RIDE,fingeronthebutton/RIDE,fingeronthebutton/RIDE,robotframework/RIDE,robotframework/RIDE,robotframework/RIDE,HelioGuilherme66/RIDE,caio2k/RIDE,caio2k/RIDE,fingeronthebutton/RIDE,robotframework/RIDE,HelioGuilherme66/RIDE,HelioGuilherme66/RIDE,HelioGuilherme66/RIDE
|
# Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
from robotide import context
class LoadProgressObserver(object):
def __init__(self, frame):
self._progressbar = wx.ProgressDialog('RIDE', 'Loading the test data',
maximum=100, parent=frame,
style=wx.PD_ELAPSED_TIME)
def notify(self):
self._progressbar.Pulse()
def finish(self):
self._progressbar.Destroy()
context.LOG.report_parsing_errors()
def error(self, msg):
self.finish()
context.LOG.error(msg)
progess: Introduce base class and ProgressObserver for renaming occurences.
|
# Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
import time
from robotide import context
class ProgressObserver(object):
def __init__(self, frame, title, message):
self._progressbar = wx.ProgressDialog(title, message,
maximum=100, parent=frame,
style=wx.PD_ELAPSED_TIME)
def notify(self):
self._progressbar.Pulse()
def finish(self):
self._progressbar.Destroy()
context.LOG.report_parsing_errors()
def error(self, msg):
self.finish()
context.LOG.error(msg)
class LoadProgressObserver(ProgressObserver):
def __init__(self, frame):
ProgressObserver.__init__(self, frame, 'RIDE', 'Loading the test data')
class RenameProgressObserver(ProgressObserver):
def __init__(self, frame):
ProgressObserver.__init__(self, frame, 'RIDE', 'Renaming')
self._notification_occured = 0
def notify(self):
if time.time() - self._notification_occured > 0.1:
self._progressbar.Pulse()
self._notification_occured = time.time()
|
<commit_before># Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
from robotide import context
class LoadProgressObserver(object):
def __init__(self, frame):
self._progressbar = wx.ProgressDialog('RIDE', 'Loading the test data',
maximum=100, parent=frame,
style=wx.PD_ELAPSED_TIME)
def notify(self):
self._progressbar.Pulse()
def finish(self):
self._progressbar.Destroy()
context.LOG.report_parsing_errors()
def error(self, msg):
self.finish()
context.LOG.error(msg)
<commit_msg>progess: Introduce base class and ProgressObserver for renaming occurences.<commit_after>
|
# Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
import time
from robotide import context
class ProgressObserver(object):
def __init__(self, frame, title, message):
self._progressbar = wx.ProgressDialog(title, message,
maximum=100, parent=frame,
style=wx.PD_ELAPSED_TIME)
def notify(self):
self._progressbar.Pulse()
def finish(self):
self._progressbar.Destroy()
context.LOG.report_parsing_errors()
def error(self, msg):
self.finish()
context.LOG.error(msg)
class LoadProgressObserver(ProgressObserver):
def __init__(self, frame):
ProgressObserver.__init__(self, frame, 'RIDE', 'Loading the test data')
class RenameProgressObserver(ProgressObserver):
def __init__(self, frame):
ProgressObserver.__init__(self, frame, 'RIDE', 'Renaming')
self._notification_occured = 0
def notify(self):
if time.time() - self._notification_occured > 0.1:
self._progressbar.Pulse()
self._notification_occured = time.time()
|
# Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
from robotide import context
class LoadProgressObserver(object):
def __init__(self, frame):
self._progressbar = wx.ProgressDialog('RIDE', 'Loading the test data',
maximum=100, parent=frame,
style=wx.PD_ELAPSED_TIME)
def notify(self):
self._progressbar.Pulse()
def finish(self):
self._progressbar.Destroy()
context.LOG.report_parsing_errors()
def error(self, msg):
self.finish()
context.LOG.error(msg)
progess: Introduce base class and ProgressObserver for renaming occurences.# Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
import time
from robotide import context
class ProgressObserver(object):
def __init__(self, frame, title, message):
self._progressbar = wx.ProgressDialog(title, message,
maximum=100, parent=frame,
style=wx.PD_ELAPSED_TIME)
def notify(self):
self._progressbar.Pulse()
def finish(self):
self._progressbar.Destroy()
context.LOG.report_parsing_errors()
def error(self, msg):
self.finish()
context.LOG.error(msg)
class LoadProgressObserver(ProgressObserver):
def __init__(self, frame):
ProgressObserver.__init__(self, frame, 'RIDE', 'Loading the test data')
class RenameProgressObserver(ProgressObserver):
def __init__(self, frame):
ProgressObserver.__init__(self, frame, 'RIDE', 'Renaming')
self._notification_occured = 0
def notify(self):
if time.time() - self._notification_occured > 0.1:
self._progressbar.Pulse()
self._notification_occured = time.time()
|
<commit_before># Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
from robotide import context
class LoadProgressObserver(object):
def __init__(self, frame):
self._progressbar = wx.ProgressDialog('RIDE', 'Loading the test data',
maximum=100, parent=frame,
style=wx.PD_ELAPSED_TIME)
def notify(self):
self._progressbar.Pulse()
def finish(self):
self._progressbar.Destroy()
context.LOG.report_parsing_errors()
def error(self, msg):
self.finish()
context.LOG.error(msg)
<commit_msg>progess: Introduce base class and ProgressObserver for renaming occurences.<commit_after># Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
import time
from robotide import context
class ProgressObserver(object):
def __init__(self, frame, title, message):
self._progressbar = wx.ProgressDialog(title, message,
maximum=100, parent=frame,
style=wx.PD_ELAPSED_TIME)
def notify(self):
self._progressbar.Pulse()
def finish(self):
self._progressbar.Destroy()
context.LOG.report_parsing_errors()
def error(self, msg):
self.finish()
context.LOG.error(msg)
class LoadProgressObserver(ProgressObserver):
def __init__(self, frame):
ProgressObserver.__init__(self, frame, 'RIDE', 'Loading the test data')
class RenameProgressObserver(ProgressObserver):
def __init__(self, frame):
ProgressObserver.__init__(self, frame, 'RIDE', 'Renaming')
self._notification_occured = 0
def notify(self):
if time.time() - self._notification_occured > 0.1:
self._progressbar.Pulse()
self._notification_occured = time.time()
|
2059aa7776a8e0c947b68e9401d74bdd146a59cd
|
ch03_04.py
|
ch03_04.py
|
(day, month, year) = input().split()
day = int(day); month = int(month); year = int(year)
if month < 3:
month += 12
year -= 1
c = year / 100
k = year % 100
week_day = int( day + (26 * (month + 1) / 10) + k + ( k / 4 ) + ( c / 4 ) + ( 5 * c ) ) % 7
week_day_name = ''
# 1. Follow from flowchart
if 0 == week_day:
week_day_name = 'SAT'
elif 1 == week_day:
week_day_name = 'SUN'
elif 2 == week_day:
week_day_name = 'MON'
elif 3 == week_day:
week_day_name = 'TUE'
elif 4 == week_day:
week_day_name = 'WED'
elif 5 == week_day:
week_day_name = 'THU'
elif 6 == week_day:
week_day_name = 'FRI'
print(week_day_name)
# 2. SHORTER VERSION
# week_day_list = ['SAT', 'SUN', 'MON', 'TUE', 'WED', 'THU', 'FRI']
# print(week_day_list[week_day])
|
Test passed for week day
|
Test passed for week day
|
Python
|
mit
|
sitdh/com-prog
|
Test passed for week day
|
(day, month, year) = input().split()
day = int(day); month = int(month); year = int(year)
if month < 3:
month += 12
year -= 1
c = year / 100
k = year % 100
week_day = int( day + (26 * (month + 1) / 10) + k + ( k / 4 ) + ( c / 4 ) + ( 5 * c ) ) % 7
week_day_name = ''
# 1. Follow from flowchart
if 0 == week_day:
week_day_name = 'SAT'
elif 1 == week_day:
week_day_name = 'SUN'
elif 2 == week_day:
week_day_name = 'MON'
elif 3 == week_day:
week_day_name = 'TUE'
elif 4 == week_day:
week_day_name = 'WED'
elif 5 == week_day:
week_day_name = 'THU'
elif 6 == week_day:
week_day_name = 'FRI'
print(week_day_name)
# 2. SHORTER VERSION
# week_day_list = ['SAT', 'SUN', 'MON', 'TUE', 'WED', 'THU', 'FRI']
# print(week_day_list[week_day])
|
<commit_before><commit_msg>Test passed for week day<commit_after>
|
(day, month, year) = input().split()
day = int(day); month = int(month); year = int(year)
if month < 3:
month += 12
year -= 1
c = year / 100
k = year % 100
week_day = int( day + (26 * (month + 1) / 10) + k + ( k / 4 ) + ( c / 4 ) + ( 5 * c ) ) % 7
week_day_name = ''
# 1. Follow from flowchart
if 0 == week_day:
week_day_name = 'SAT'
elif 1 == week_day:
week_day_name = 'SUN'
elif 2 == week_day:
week_day_name = 'MON'
elif 3 == week_day:
week_day_name = 'TUE'
elif 4 == week_day:
week_day_name = 'WED'
elif 5 == week_day:
week_day_name = 'THU'
elif 6 == week_day:
week_day_name = 'FRI'
print(week_day_name)
# 2. SHORTER VERSION
# week_day_list = ['SAT', 'SUN', 'MON', 'TUE', 'WED', 'THU', 'FRI']
# print(week_day_list[week_day])
|
Test passed for week day(day, month, year) = input().split()
day = int(day); month = int(month); year = int(year)
if month < 3:
month += 12
year -= 1
c = year / 100
k = year % 100
week_day = int( day + (26 * (month + 1) / 10) + k + ( k / 4 ) + ( c / 4 ) + ( 5 * c ) ) % 7
week_day_name = ''
# 1. Follow from flowchart
if 0 == week_day:
week_day_name = 'SAT'
elif 1 == week_day:
week_day_name = 'SUN'
elif 2 == week_day:
week_day_name = 'MON'
elif 3 == week_day:
week_day_name = 'TUE'
elif 4 == week_day:
week_day_name = 'WED'
elif 5 == week_day:
week_day_name = 'THU'
elif 6 == week_day:
week_day_name = 'FRI'
print(week_day_name)
# 2. SHORTER VERSION
# week_day_list = ['SAT', 'SUN', 'MON', 'TUE', 'WED', 'THU', 'FRI']
# print(week_day_list[week_day])
|
<commit_before><commit_msg>Test passed for week day<commit_after>(day, month, year) = input().split()
day = int(day); month = int(month); year = int(year)
if month < 3:
month += 12
year -= 1
c = year / 100
k = year % 100
week_day = int( day + (26 * (month + 1) / 10) + k + ( k / 4 ) + ( c / 4 ) + ( 5 * c ) ) % 7
week_day_name = ''
# 1. Follow from flowchart
if 0 == week_day:
week_day_name = 'SAT'
elif 1 == week_day:
week_day_name = 'SUN'
elif 2 == week_day:
week_day_name = 'MON'
elif 3 == week_day:
week_day_name = 'TUE'
elif 4 == week_day:
week_day_name = 'WED'
elif 5 == week_day:
week_day_name = 'THU'
elif 6 == week_day:
week_day_name = 'FRI'
print(week_day_name)
# 2. SHORTER VERSION
# week_day_list = ['SAT', 'SUN', 'MON', 'TUE', 'WED', 'THU', 'FRI']
# print(week_day_list[week_day])
|
|
9f443a5af6537867712f12419d93a5b8c824858a
|
flexget/plugins/output/notify_osd.py
|
flexget/plugins/output/notify_osd.py
|
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget.plugin import register_plugin, priority, DependencyError
from flexget.utils.template import RenderError, render_from_task
log = logging.getLogger('notify_osd')
class OutputNotifyOsd(object):
def validator(self):
from flexget import validator
config = validator.factory()
config.accept('boolean')
advanced = config.accept('dict')
advanced.accept('text', key='title_template')
advanced.accept('text', key='item_template')
return config
def prepare_config(self, config):
if isinstance(config, bool):
config = {}
config.setdefault('title_template', '{{task.name}}')
config.setdefault('item_template', '{{title}}')
return config
def on_task_start(self, task, config):
try:
from gi.repository import Notify
except ImportError as e:
log.debug('Error importing Notify: %s' % e)
raise DependencyError('notify_osd', 'gi.repository',
'Notify module required. ImportError: %s' % e)
@priority(0)
def on_task_output(self, task, config):
"""
Configuration::
notify_osd:
title_template: Notification title, supports jinja templating, default {{task.name}}
item_template: Notification body, supports jinja templating, default {{title}}
"""
from gi.repository import Notify
if not Notify.init("Flexget"):
log.error('Unable to init libnotify.')
return
if not task.accepted:
return
config = self.prepare_config(config)
body_items = []
for entry in task.accepted:
try:
body_items.append(entry.render(config['item_template']))
except RenderError as e:
log.error('Error setting body message: %s' % e)
log.verbose("Send Notify-OSD notification about: %s", " - ".join(body_items))
title = config['title_template']
try:
title = render_from_task(title, task)
log.debug('Setting bubble title to :%s', title)
except RenderError as e:
log.error('Error setting title Notify-osd message: %s' % e)
n = Notify.Notification.new(title, '\n'.join(body_items), None)
n.show()
register_plugin(OutputNotifyOsd, 'notify_osd', api_ver=2)
|
Add Notify-osd option for linux based systems
|
Add Notify-osd option for linux based systems
|
Python
|
mit
|
jacobmetrick/Flexget,ratoaq2/Flexget,thalamus/Flexget,X-dark/Flexget,patsissons/Flexget,asm0dey/Flexget,LynxyssCZ/Flexget,jawilson/Flexget,tobinjt/Flexget,poulpito/Flexget,sean797/Flexget,thalamus/Flexget,OmgOhnoes/Flexget,camon/Flexget,gazpachoking/Flexget,tsnoam/Flexget,v17al/Flexget,vfrc2/Flexget,drwyrm/Flexget,tsnoam/Flexget,vfrc2/Flexget,Pretagonist/Flexget,ibrahimkarahan/Flexget,ianstalk/Flexget,Danfocus/Flexget,JorisDeRieck/Flexget,qvazzler/Flexget,camon/Flexget,JorisDeRieck/Flexget,qk4l/Flexget,Flexget/Flexget,patsissons/Flexget,dsemi/Flexget,Danfocus/Flexget,ianstalk/Flexget,v17al/Flexget,ratoaq2/Flexget,Flexget/Flexget,drwyrm/Flexget,qk4l/Flexget,asm0dey/Flexget,ZefQ/Flexget,oxc/Flexget,LynxyssCZ/Flexget,tvcsantos/Flexget,poulpito/Flexget,ZefQ/Flexget,ratoaq2/Flexget,jacobmetrick/Flexget,ibrahimkarahan/Flexget,dsemi/Flexget,oxc/Flexget,v17al/Flexget,sean797/Flexget,malkavi/Flexget,malkavi/Flexget,offbyone/Flexget,antivirtel/Flexget,jawilson/Flexget,jawilson/Flexget,tarzasai/Flexget,JorisDeRieck/Flexget,crawln45/Flexget,malkavi/Flexget,tarzasai/Flexget,Danfocus/Flexget,cvium/Flexget,LynxyssCZ/Flexget,Pretagonist/Flexget,malkavi/Flexget,tobinjt/Flexget,JorisDeRieck/Flexget,tvcsantos/Flexget,drwyrm/Flexget,voriux/Flexget,oxc/Flexget,xfouloux/Flexget,X-dark/Flexget,spencerjanssen/Flexget,crawln45/Flexget,tsnoam/Flexget,vfrc2/Flexget,lildadou/Flexget,Pretagonist/Flexget,voriux/Flexget,ibrahimkarahan/Flexget,dsemi/Flexget,ZefQ/Flexget,X-dark/Flexget,jawilson/Flexget,xfouloux/Flexget,Flexget/Flexget,antivirtel/Flexget,spencerjanssen/Flexget,Danfocus/Flexget,asm0dey/Flexget,grrr2/Flexget,tarzasai/Flexget,jacobmetrick/Flexget,crawln45/Flexget,OmgOhnoes/Flexget,Flexget/Flexget,offbyone/Flexget,grrr2/Flexget,LynxyssCZ/Flexget,spencerjanssen/Flexget,lildadou/Flexget,tobinjt/Flexget,qvazzler/Flexget,cvium/Flexget,patsissons/Flexget,sean797/Flexget,qk4l/Flexget,cvium/Flexget,antivirtel/Flexget,thalamus/Flexget,ianstalk/Flexget,offbyone/Flexget,xfouloux/Flexget,lildadou/Flexget,crawln45/Flexget,OmgOhnoes/Flexget,tobinjt/Flexget,gazpachoking/Flexget,qvazzler/Flexget,poulpito/Flexget,grrr2/Flexget
|
Add Notify-osd option for linux based systems
|
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget.plugin import register_plugin, priority, DependencyError
from flexget.utils.template import RenderError, render_from_task
log = logging.getLogger('notify_osd')
class OutputNotifyOsd(object):
def validator(self):
from flexget import validator
config = validator.factory()
config.accept('boolean')
advanced = config.accept('dict')
advanced.accept('text', key='title_template')
advanced.accept('text', key='item_template')
return config
def prepare_config(self, config):
if isinstance(config, bool):
config = {}
config.setdefault('title_template', '{{task.name}}')
config.setdefault('item_template', '{{title}}')
return config
def on_task_start(self, task, config):
try:
from gi.repository import Notify
except ImportError as e:
log.debug('Error importing Notify: %s' % e)
raise DependencyError('notify_osd', 'gi.repository',
'Notify module required. ImportError: %s' % e)
@priority(0)
def on_task_output(self, task, config):
"""
Configuration::
notify_osd:
title_template: Notification title, supports jinja templating, default {{task.name}}
item_template: Notification body, supports jinja templating, default {{title}}
"""
from gi.repository import Notify
if not Notify.init("Flexget"):
log.error('Unable to init libnotify.')
return
if not task.accepted:
return
config = self.prepare_config(config)
body_items = []
for entry in task.accepted:
try:
body_items.append(entry.render(config['item_template']))
except RenderError as e:
log.error('Error setting body message: %s' % e)
log.verbose("Send Notify-OSD notification about: %s", " - ".join(body_items))
title = config['title_template']
try:
title = render_from_task(title, task)
log.debug('Setting bubble title to :%s', title)
except RenderError as e:
log.error('Error setting title Notify-osd message: %s' % e)
n = Notify.Notification.new(title, '\n'.join(body_items), None)
n.show()
register_plugin(OutputNotifyOsd, 'notify_osd', api_ver=2)
|
<commit_before><commit_msg>Add Notify-osd option for linux based systems<commit_after>
|
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget.plugin import register_plugin, priority, DependencyError
from flexget.utils.template import RenderError, render_from_task
log = logging.getLogger('notify_osd')
class OutputNotifyOsd(object):
def validator(self):
from flexget import validator
config = validator.factory()
config.accept('boolean')
advanced = config.accept('dict')
advanced.accept('text', key='title_template')
advanced.accept('text', key='item_template')
return config
def prepare_config(self, config):
if isinstance(config, bool):
config = {}
config.setdefault('title_template', '{{task.name}}')
config.setdefault('item_template', '{{title}}')
return config
def on_task_start(self, task, config):
try:
from gi.repository import Notify
except ImportError as e:
log.debug('Error importing Notify: %s' % e)
raise DependencyError('notify_osd', 'gi.repository',
'Notify module required. ImportError: %s' % e)
@priority(0)
def on_task_output(self, task, config):
"""
Configuration::
notify_osd:
title_template: Notification title, supports jinja templating, default {{task.name}}
item_template: Notification body, supports jinja templating, default {{title}}
"""
from gi.repository import Notify
if not Notify.init("Flexget"):
log.error('Unable to init libnotify.')
return
if not task.accepted:
return
config = self.prepare_config(config)
body_items = []
for entry in task.accepted:
try:
body_items.append(entry.render(config['item_template']))
except RenderError as e:
log.error('Error setting body message: %s' % e)
log.verbose("Send Notify-OSD notification about: %s", " - ".join(body_items))
title = config['title_template']
try:
title = render_from_task(title, task)
log.debug('Setting bubble title to :%s', title)
except RenderError as e:
log.error('Error setting title Notify-osd message: %s' % e)
n = Notify.Notification.new(title, '\n'.join(body_items), None)
n.show()
register_plugin(OutputNotifyOsd, 'notify_osd', api_ver=2)
|
Add Notify-osd option for linux based systemsfrom __future__ import unicode_literals, division, absolute_import
import logging
from flexget.plugin import register_plugin, priority, DependencyError
from flexget.utils.template import RenderError, render_from_task
log = logging.getLogger('notify_osd')
class OutputNotifyOsd(object):
def validator(self):
from flexget import validator
config = validator.factory()
config.accept('boolean')
advanced = config.accept('dict')
advanced.accept('text', key='title_template')
advanced.accept('text', key='item_template')
return config
def prepare_config(self, config):
if isinstance(config, bool):
config = {}
config.setdefault('title_template', '{{task.name}}')
config.setdefault('item_template', '{{title}}')
return config
def on_task_start(self, task, config):
try:
from gi.repository import Notify
except ImportError as e:
log.debug('Error importing Notify: %s' % e)
raise DependencyError('notify_osd', 'gi.repository',
'Notify module required. ImportError: %s' % e)
@priority(0)
def on_task_output(self, task, config):
"""
Configuration::
notify_osd:
title_template: Notification title, supports jinja templating, default {{task.name}}
item_template: Notification body, supports jinja templating, default {{title}}
"""
from gi.repository import Notify
if not Notify.init("Flexget"):
log.error('Unable to init libnotify.')
return
if not task.accepted:
return
config = self.prepare_config(config)
body_items = []
for entry in task.accepted:
try:
body_items.append(entry.render(config['item_template']))
except RenderError as e:
log.error('Error setting body message: %s' % e)
log.verbose("Send Notify-OSD notification about: %s", " - ".join(body_items))
title = config['title_template']
try:
title = render_from_task(title, task)
log.debug('Setting bubble title to :%s', title)
except RenderError as e:
log.error('Error setting title Notify-osd message: %s' % e)
n = Notify.Notification.new(title, '\n'.join(body_items), None)
n.show()
register_plugin(OutputNotifyOsd, 'notify_osd', api_ver=2)
|
<commit_before><commit_msg>Add Notify-osd option for linux based systems<commit_after>from __future__ import unicode_literals, division, absolute_import
import logging
from flexget.plugin import register_plugin, priority, DependencyError
from flexget.utils.template import RenderError, render_from_task
log = logging.getLogger('notify_osd')
class OutputNotifyOsd(object):
def validator(self):
from flexget import validator
config = validator.factory()
config.accept('boolean')
advanced = config.accept('dict')
advanced.accept('text', key='title_template')
advanced.accept('text', key='item_template')
return config
def prepare_config(self, config):
if isinstance(config, bool):
config = {}
config.setdefault('title_template', '{{task.name}}')
config.setdefault('item_template', '{{title}}')
return config
def on_task_start(self, task, config):
try:
from gi.repository import Notify
except ImportError as e:
log.debug('Error importing Notify: %s' % e)
raise DependencyError('notify_osd', 'gi.repository',
'Notify module required. ImportError: %s' % e)
@priority(0)
def on_task_output(self, task, config):
"""
Configuration::
notify_osd:
title_template: Notification title, supports jinja templating, default {{task.name}}
item_template: Notification body, supports jinja templating, default {{title}}
"""
from gi.repository import Notify
if not Notify.init("Flexget"):
log.error('Unable to init libnotify.')
return
if not task.accepted:
return
config = self.prepare_config(config)
body_items = []
for entry in task.accepted:
try:
body_items.append(entry.render(config['item_template']))
except RenderError as e:
log.error('Error setting body message: %s' % e)
log.verbose("Send Notify-OSD notification about: %s", " - ".join(body_items))
title = config['title_template']
try:
title = render_from_task(title, task)
log.debug('Setting bubble title to :%s', title)
except RenderError as e:
log.error('Error setting title Notify-osd message: %s' % e)
n = Notify.Notification.new(title, '\n'.join(body_items), None)
n.show()
register_plugin(OutputNotifyOsd, 'notify_osd', api_ver=2)
|
|
93b2d737407389a1c4dbc67836a949663eeba948
|
chrome/PRESUBMIT.py
|
chrome/PRESUBMIT.py
|
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that the chrome/ code is cpplint clean."""
INCLUDE_CPP_FILES_ONLY = (
r'.*\.cc$', r'.*\.h$'
)
EXCLUDE = (
# Objective C confuses everything.
r'.*cocoa.*',
r'.*_mac\.(cc|h)$',
r'.*_mac_.*',
# All the messages files do weird multiple include trickery
r'.*_messages_internal\.h$',
r'render_messages.h$',
# Autogenerated window resources files are off limits
r'.*resource.h$',
# GTK macros in C-ish header code cause false positives
r'gtk_.*\.h$',
# Header trickery
r'.*-inl\.h$',
# Templates
r'sigslotrepeater\.h$',
# GCC attribute trickery
r'sel_main\.cc$',
# Mozilla code
r'mork_reader\.h$',
r'mork_reader\.cc$',
r'nss_decryptor_linux\.cc$',
# Has safe printf usage that cpplint complains about
r'safe_browsing_util\.cc$',
# Too much math on one line?
r'bloom_filter\.cc$',
# Bogus ifdef tricks
r'renderer_webkitclient_impl\.cc$',
r'temp_scaffolding_stubs\.h$',
# Lines > 100 chars
r'gcapi\.cc$',
)
def CheckChangeOnUpload(input_api, output_api):
results = []
black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDE
sources = lambda x: input_api.FilterSourceFile(
x, white_list=INCLUDE_CPP_FILES_ONLY, black_list=black_list)
results.extend(input_api.canned_checks.CheckChangeLintsClean(
input_api, output_api, sources))
return results
|
Call the new presubmit checks from chrome/ code, with a blacklist.
|
Call the new presubmit checks from chrome/ code, with a blacklist.
Review URL: http://codereview.chromium.org/400014
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@32190 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
junmin-zhu/chromium-rivertrail,hgl888/chromium-crosswalk-efl,anirudhSK/chromium,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,bright-sparks/chromium-spacewalk,nacl-webkit/chrome_deps,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,dednal/chromium.src,dushu1203/chromium.src,jaruba/chromium.src,Jonekee/chromium.src,ltilve/chromium,zcbenz/cefode-chromium,ChromiumWebApps/chromium,Fireblend/chromium-crosswalk,ChromiumWebApps/chromium,M4sse/chromium.src,markYoungH/chromium.src,anirudhSK/chromium,krieger-od/nwjs_chromium.src,mogoweb/chromium-crosswalk,ChromiumWebApps/chromium,markYoungH/chromium.src,anirudhSK/chromium,Pluto-tv/chromium-crosswalk,mogoweb/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,ChromiumWebApps/chromium,rogerwang/chromium,crosswalk-project/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,anirudhSK/chromium,nacl-webkit/chrome_deps,markYoungH/chromium.src,timopulkkinen/BubbleFish,anirudhSK/chromium,dednal/chromium.src,anirudhSK/chromium,mogoweb/chromium-crosswalk,hgl888/chromium-crosswalk,rogerwang/chromium,mogoweb/chromium-crosswalk,krieger-od/nwjs_chromium.src,patrickm/chromium.src,keishi/chromium,dednal/chromium.src,nacl-webkit/chrome_deps,timopulkkinen/BubbleFish,crosswalk-project/chromium-crosswalk-efl,keishi/chromium,Chilledheart/chromium,bright-sparks/chromium-spacewalk,hujiajie/pa-chromium,dednal/chromium.src,chuan9/chromium-crosswalk,hujiajie/pa-chromium,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk-efl,hujiajie/pa-chromium,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,nacl-webkit/chrome_deps,zcbenz/cefode-chromium,Jonekee/chromium.src,Jonekee/chromium.src,ChromiumWebApps/chromium,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,pozdnyakov/chromium-crosswalk,Jonekee/chromium.src,junmin-zhu/chromium-rivertrail,rogerwang/chromium,robclark/chromium,axinging/chromium-crosswalk,Chilledheart/chromium,timopulkkinen/BubbleFish,junmin-zhu/chromium-rivertrail,patrickm/chromium.src,jaruba/chromium.src,robclark/chromium,axinging/chromium-crosswalk,junmin-zhu/chromium-rivertrail,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,mogoweb/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,hujiajie/pa-chromium,rogerwang/chromium,markYoungH/chromium.src,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,dednal/chromium.src,timopulkkinen/BubbleFish,Fireblend/chromium-crosswalk,robclark/chromium,hgl888/chromium-crosswalk,dushu1203/chromium.src,Just-D/chromium-1,patrickm/chromium.src,hujiajie/pa-chromium,crosswalk-project/chromium-crosswalk-efl,pozdnyakov/chromium-crosswalk,timopulkkinen/BubbleFish,fujunwei/chromium-crosswalk,ltilve/chromium,Pluto-tv/chromium-crosswalk,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,ondra-novak/chromium.src,junmin-zhu/chromium-rivertrail,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,chuan9/chromium-crosswalk,jaruba/chromium.src,crosswalk-project/chromium-crosswalk-efl,Just-D/chromium-1,nacl-webkit/chrome_deps,Just-D/chromium-1,pozdnyakov/chromium-crosswalk,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,dednal/chromium.src,krieger-od/nwjs_chromium.src,zcbenz/cefode-chromium,zcbenz/cefode-chromium,markYoungH/chromium.src,M4sse/chromium.src,TheTypoMaster/chromium
-crosswalk,dednal/chromium.src,ltilve/chromium,Chilledheart/chromium,nacl-webkit/chrome_deps,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,jaruba/chromium.src,junmin-zhu/chromium-rivertrail,hgl888/chromium-crosswalk,littlstar/chromium.src,keishi/chromium,anirudhSK/chromium,nacl-webkit/chrome_deps,axinging/chromium-crosswalk,mogoweb/chromium-crosswalk,pozdnyakov/chromium-crosswalk,M4sse/chromium.src,keishi/chromium,dednal/chromium.src,patrickm/chromium.src,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,mogoweb/chromium-crosswalk,jaruba/chromium.src,dushu1203/chromium.src,robclark/chromium,mogoweb/chromium-crosswalk,Jonekee/chromium.src,littlstar/chromium.src,fujunwei/chromium-crosswalk,zcbenz/cefode-chromium,axinging/chromium-crosswalk,ltilve/chromium,mogoweb/chromium-crosswalk,Pluto-tv/chromium-crosswalk,axinging/chromium-crosswalk,dushu1203/chromium.src,pozdnyakov/chromium-crosswalk,keishi/chromium,ltilve/chromium,robclark/chromium,M4sse/chromium.src,Chilledheart/chromium,jaruba/chromium.src,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,timopulkkinen/BubbleFish,ChromiumWebApps/chromium,keishi/chromium,jaruba/chromium.src,fujunwei/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,timopulkkinen/BubbleFish,junmin-zhu/chromium-rivertrail,timopulkkinen/BubbleFish,axinging/chromium-crosswalk,littlstar/chromium.src,chuan9/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,rogerwang/chromium,hgl888/chromium-crosswalk,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,M4sse/chromium.src,patrickm/chromium.src,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,patrickm/chromium.src,M4sse/chromium.src,zcbenz/cefode-chromium,Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,pozdnyakov/chromium-crosswalk,axinging/chromium-crosswalk,nacl-webkit/chrome_deps,rogerwang/chromium,M4sse/chromium.src,ondra-novak/chromium.src,dednal/chromium.src,bright-sparks/chromium-spacewalk,M4sse/chromium.src,ondra-novak/chromium.src,anirudhSK/chromium,Jonekee/chromium.src,markYoungH/chromium.src,hujiajie/pa-chromium,Chilledheart/chromium,robclark/chromium,ltilve/chromium,timopulkkinen/BubbleFish,junmin-zhu/chromium-rivertrail,patrickm/chromium.src,TheTypoMaster/chromium-crosswalk,littlstar/chromium.src,rogerwang/chromium,zcbenz/cefode-chromium,M4sse/chromium.src,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,rogerwang/chromium,axinging/chromium-crosswalk,jaruba/chromium.src,ondra-novak/chromium.src,Chilledheart/chromium,zcbenz/cefode-chromium,hujiajie/pa-chromium,markYoungH/chromium.src,hujiajie/pa-chromium,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,Fireblend/chromium-crosswalk,Jonekee/chromium.src,junmin-zhu/chromium-rivertrail,Jonekee/chromium.src,Just-D/chromium-1,jaruba/chromium.src,Just-D/chromium-1,chuan9/chromium-crosswalk,robclark/chromium,keishi/chromium,hujiajie/pa-chromium,anirudhSK/chromium,Chilledheart/chromium,zcbenz/cefode-chromium,anirudhSK/chromium,hgl888/chromium-crosswalk-efl,patrickm/chromium.src,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,keishi/chromium,anirudhSK/chromium,TheTypoMaster/chromium-crosswalk,anirudhSK/chromium,robclark/chromium,dushu1203/chromium.src,nacl-webkit/chrome_deps,ondra-novak/chromium.src,robclark/chromium,krieger-od/nwjs_chromium.src,littlstar/chromium.src,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,TheTypoMaster/chromium-cros
swalk,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,dushu1203/chromium.src,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,pozdnyakov/chromium-crosswalk,krieger-od/nwjs_chromium.src,jaruba/chromium.src,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,Just-D/chromium-1,ChromiumWebApps/chromium,hgl888/chromium-crosswalk-efl,timopulkkinen/BubbleFish,rogerwang/chromium,Just-D/chromium-1,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,robclark/chromium,Chilledheart/chromium,keishi/chromium,chuan9/chromium-crosswalk,Chilledheart/chromium,nacl-webkit/chrome_deps,mohamed--abdel-maksoud/chromium.src,pozdnyakov/chromium-crosswalk,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,bright-sparks/chromium-spacewalk,zcbenz/cefode-chromium,rogerwang/chromium,ChromiumWebApps/chromium,pozdnyakov/chromium-crosswalk,hujiajie/pa-chromium,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,dednal/chromium.src,Fireblend/chromium-crosswalk,mogoweb/chromium-crosswalk,junmin-zhu/chromium-rivertrail,pozdnyakov/chromium-crosswalk,nacl-webkit/chrome_deps,timopulkkinen/BubbleFish,ondra-novak/chromium.src,pozdnyakov/chromium-crosswalk,Jonekee/chromium.src,axinging/chromium-crosswalk,keishi/chromium,zcbenz/cefode-chromium,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,M4sse/chromium.src,ChromiumWebApps/chromium,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk,ltilve/chromium,krieger-od/nwjs_chromium.src,TheTypoMaster/chromium-crosswalk,junmin-zhu/chromium-rivertrail,bright-sparks/chromium-spacewalk,hujiajie/pa-chromium,patrickm/chromium.src,hgl888/chromium-crosswalk,ltilve/chromium,krieger-od/nwjs_chromium.src,PeterWangIntel/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,ondra-novak/chromium.src,bright-sparks/chromium-spacewalk,PeterWangIntel/chromium-crosswalk,keishi/chromium,dushu1203/chromium.src,Fireblend/chromium-crosswalk,ondra-novak/chromium.src,fujunwei/chromium-crosswalk
|
Call the new presubmit checks from chrome/ code, with a blacklist.
Review URL: http://codereview.chromium.org/400014
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@32190 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that the chrome/ code is cpplint clean."""
INCLUDE_CPP_FILES_ONLY = (
r'.*\.cc$', r'.*\.h$'
)
EXCLUDE = (
# Objective C confuses everything.
r'.*cocoa.*',
r'.*_mac\.(cc|h)$',
r'.*_mac_.*',
# All the messages files do weird multiple include trickery
r'.*_messages_internal\.h$',
r'render_messages.h$',
# Autogenerated window resources files are off limits
r'.*resource.h$',
# GTK macros in C-ish header code cause false positives
r'gtk_.*\.h$',
# Header trickery
r'.*-inl\.h$',
# Templates
r'sigslotrepeater\.h$',
# GCC attribute trickery
r'sel_main\.cc$',
# Mozilla code
r'mork_reader\.h$',
r'mork_reader\.cc$',
r'nss_decryptor_linux\.cc$',
# Has safe printf usage that cpplint complains about
r'safe_browsing_util\.cc$',
# Too much math on one line?
r'bloom_filter\.cc$',
# Bogus ifdef tricks
r'renderer_webkitclient_impl\.cc$',
r'temp_scaffolding_stubs\.h$',
# Lines > 100 chars
r'gcapi\.cc$',
)
def CheckChangeOnUpload(input_api, output_api):
results = []
black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDE
sources = lambda x: input_api.FilterSourceFile(
x, white_list=INCLUDE_CPP_FILES_ONLY, black_list=black_list)
results.extend(input_api.canned_checks.CheckChangeLintsClean(
input_api, output_api, sources))
return results
|
<commit_before><commit_msg>Call the new presubmit checks from chrome/ code, with a blacklist.
Review URL: http://codereview.chromium.org/400014
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@32190 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that the chrome/ code is cpplint clean."""
INCLUDE_CPP_FILES_ONLY = (
r'.*\.cc$', r'.*\.h$'
)
EXCLUDE = (
# Objective C confuses everything.
r'.*cocoa.*',
r'.*_mac\.(cc|h)$',
r'.*_mac_.*',
# All the messages files do weird multiple include trickery
r'.*_messages_internal\.h$',
r'render_messages.h$',
# Autogenerated window resources files are off limits
r'.*resource.h$',
# GTK macros in C-ish header code cause false positives
r'gtk_.*\.h$',
# Header trickery
r'.*-inl\.h$',
# Templates
r'sigslotrepeater\.h$',
# GCC attribute trickery
r'sel_main\.cc$',
# Mozilla code
r'mork_reader\.h$',
r'mork_reader\.cc$',
r'nss_decryptor_linux\.cc$',
# Has safe printf usage that cpplint complains about
r'safe_browsing_util\.cc$',
# Too much math on one line?
r'bloom_filter\.cc$',
# Bogus ifdef tricks
r'renderer_webkitclient_impl\.cc$',
r'temp_scaffolding_stubs\.h$',
# Lines > 100 chars
r'gcapi\.cc$',
)
def CheckChangeOnUpload(input_api, output_api):
results = []
black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDE
sources = lambda x: input_api.FilterSourceFile(
x, white_list=INCLUDE_CPP_FILES_ONLY, black_list=black_list)
results.extend(input_api.canned_checks.CheckChangeLintsClean(
input_api, output_api, sources))
return results
|
Call the new presubmit checks from chrome/ code, with a blacklist.
Review URL: http://codereview.chromium.org/400014
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@32190 0039d316-1c4b-4281-b951-d872f2087c98# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that the chrome/ code is cpplint clean."""
INCLUDE_CPP_FILES_ONLY = (
r'.*\.cc$', r'.*\.h$'
)
EXCLUDE = (
# Objective C confuses everything.
r'.*cocoa.*',
r'.*_mac\.(cc|h)$',
r'.*_mac_.*',
# All the messages files do weird multiple include trickery
r'.*_messages_internal\.h$',
r'render_messages.h$',
# Autogenerated window resources files are off limits
r'.*resource.h$',
# GTK macros in C-ish header code cause false positives
r'gtk_.*\.h$',
# Header trickery
r'.*-inl\.h$',
# Templates
r'sigslotrepeater\.h$',
# GCC attribute trickery
r'sel_main\.cc$',
# Mozilla code
r'mork_reader\.h$',
r'mork_reader\.cc$',
r'nss_decryptor_linux\.cc$',
# Has safe printf usage that cpplint complains about
r'safe_browsing_util\.cc$',
# Too much math on one line?
r'bloom_filter\.cc$',
# Bogus ifdef tricks
r'renderer_webkitclient_impl\.cc$',
r'temp_scaffolding_stubs\.h$',
# Lines > 100 chars
r'gcapi\.cc$',
)
def CheckChangeOnUpload(input_api, output_api):
results = []
black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDE
sources = lambda x: input_api.FilterSourceFile(
x, white_list=INCLUDE_CPP_FILES_ONLY, black_list=black_list)
results.extend(input_api.canned_checks.CheckChangeLintsClean(
input_api, output_api, sources))
return results
|
<commit_before><commit_msg>Call the new presubmit checks from chrome/ code, with a blacklist.
Review URL: http://codereview.chromium.org/400014
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@32190 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that the chrome/ code is cpplint clean."""
INCLUDE_CPP_FILES_ONLY = (
r'.*\.cc$', r'.*\.h$'
)
EXCLUDE = (
# Objective C confuses everything.
r'.*cocoa.*',
r'.*_mac\.(cc|h)$',
r'.*_mac_.*',
# All the messages files do weird multiple include trickery
r'.*_messages_internal\.h$',
r'render_messages.h$',
# Autogenerated window resources files are off limits
r'.*resource.h$',
# GTK macros in C-ish header code cause false positives
r'gtk_.*\.h$',
# Header trickery
r'.*-inl\.h$',
# Templates
r'sigslotrepeater\.h$',
# GCC attribute trickery
r'sel_main\.cc$',
# Mozilla code
r'mork_reader\.h$',
r'mork_reader\.cc$',
r'nss_decryptor_linux\.cc$',
# Has safe printf usage that cpplint complains about
r'safe_browsing_util\.cc$',
# Too much math on one line?
r'bloom_filter\.cc$',
# Bogus ifdef tricks
r'renderer_webkitclient_impl\.cc$',
r'temp_scaffolding_stubs\.h$',
# Lines > 100 chars
r'gcapi\.cc$',
)
def CheckChangeOnUpload(input_api, output_api):
results = []
black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDE
sources = lambda x: input_api.FilterSourceFile(
x, white_list=INCLUDE_CPP_FILES_ONLY, black_list=black_list)
results.extend(input_api.canned_checks.CheckChangeLintsClean(
input_api, output_api, sources))
return results
|
|
9f46cf4836ad555a54dc9c47b8b2843643a878f2
|
migrations/versions/840_migrate_draft_dos1_briefs_to_draft_dos2.py
|
migrations/versions/840_migrate_draft_dos1_briefs_to_draft_dos2.py
|
"""Migrate draft DOS1 briefs to draft DOS2 briefs
Revision ID: 840
Revises: 830
Create Date: 2017-02-07 15:31:50.715832
"""
# revision identifiers, used by Alembic.
revision = '840'
down_revision = '830'
from alembic import op
def upgrade():
# Change framework of draft DOS1 briefs from DOS1 (framework_id == 5) to DOS2 (framework_id == 7)
op.execute("""
UPDATE briefs
SET framework_id = 7
WHERE framework_id = 5 AND published_at IS NULL
""")
def downgrade():
# No downgrade
pass
|
Create migration for draft dos1 briefs to dos2
|
Create migration for draft dos1 briefs to dos2
When DOS1 is closed for brief submissions and DOS2 opened, any supplier
who has a draft DOS1 brief will effectively lose it, as they'll be
unable to publish it.
This migration will move all draft DOS1 briefs to be draft DOS2 briefs,
allowing them to be published. Which framework version the brief
goes into doesn't affect buyers - they are eligible by default.
This will need to be run immediately after DOS1 is closed so that no
buyer attempts to publish their DOS1 brief in the period between DOS1
closing and the migration happening.
|
Python
|
mit
|
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
|
Create migration for draft dos1 briefs to dos2
When DOS1 is closed for brief submissions and DOS2 opened, any supplier
who has a draft DOS1 brief will effectively lose it, as they'll be
unable to publish it.
This migration will move all draft DOS1 briefs to be draft DOS2 briefs,
allowing them to be published. Which framework version the brief
goes into doesn't affect buyers - they are eligible by default.
This will need to be run immediately after DOS1 is closed so that no
buyer attempts to publish their DOS1 brief in the period between DOS1
closing and the migration happening.
|
"""Migrate draft DOS1 briefs to draft DOS2 briefs
Revision ID: 840
Revises: 830
Create Date: 2017-02-07 15:31:50.715832
"""
# revision identifiers, used by Alembic.
revision = '840'
down_revision = '830'
from alembic import op
def upgrade():
# Change framework of draft DOS1 briefs from DOS1 (framework_id == 5) to DOS2 (framework_id == 7)
op.execute("""
UPDATE briefs
SET framework_id = 7
WHERE framework_id = 5 AND published_at IS NULL
""")
def downgrade():
# No downgrade
pass
|
<commit_before><commit_msg>Create migration for draft dos1 briefs to dos2
When DOS1 is closed for brief submissions and DOS2 opened, any supplier
who has a draft DOS1 brief will effectively lose it, as they'll be
unable to publish it.
This migration will move all draft DOS1 briefs to be draft DOS2 briefs,
allowing them to be published. Which framework version the brief
goes into doesn't affect buyers - they are eligible by default.
This will need to be run immediately after DOS1 is closed so that no
buyer attempts to publish their DOS1 brief in the period between DOS1
closing and the migration happening.<commit_after>
|
"""Migrate draft DOS1 briefs to draft DOS2 briefs
Revision ID: 840
Revises: 830
Create Date: 2017-02-07 15:31:50.715832
"""
# revision identifiers, used by Alembic.
revision = '840'
down_revision = '830'
from alembic import op
def upgrade():
# Change framework of draft DOS1 briefs from DOS1 (framework_id == 5) to DOS2 (framework_id == 7)
op.execute("""
UPDATE briefs
SET framework_id = 7
WHERE framework_id = 5 AND published_at IS NULL
""")
def downgrade():
# No downgrade
pass
|
Create migration for draft dos1 briefs to dos2
When DOS1 is closed for brief submissions and DOS2 opened, any supplier
who has a draft DOS1 brief will effectively lose it, as they'll be
unable to publish it.
This migration will move all draft DOS1 briefs to be draft DOS2 briefs,
allowing them to be published. Which framework version the brief
goes into doesn't affect buyers - they are eligible by default.
This will need to be run immediately after DOS1 is closed so that no
buyer attempts to publish their DOS1 brief in the period between DOS1
closing and the migration happening."""Migrate draft DOS1 briefs to draft DOS2 briefs
Revision ID: 840
Revises: 830
Create Date: 2017-02-07 15:31:50.715832
"""
# revision identifiers, used by Alembic.
revision = '840'
down_revision = '830'
from alembic import op
def upgrade():
# Change framework of draft DOS1 briefs from DOS1 (framework_id == 5) to DOS2 (framework_id == 7)
op.execute("""
UPDATE briefs
SET framework_id = 7
WHERE framework_id = 5 AND published_at IS NULL
""")
def downgrade():
# No downgrade
pass
|
<commit_before><commit_msg>Create migration for draft dos1 briefs to dos2
When DOS1 is closed for brief submissions and DOS2 opened, any supplier
who has a draft DOS1 brief will effectively lose it, as they'll be
unable to publish it.
This migration will move all draft DOS1 briefs to be draft DOS2 briefs,
allowing them to be published. Which framework version the brief
goes into doesn't affect buyers - they are eligible by default.
This will need to be run immediately after DOS1 is closed so that no
buyer attempts to publish their DOS1 brief in the period between DOS1
closing and the migration happening.<commit_after>"""Migrate draft DOS1 briefs to draft DOS2 briefs
Revision ID: 840
Revises: 830
Create Date: 2017-02-07 15:31:50.715832
"""
# revision identifiers, used by Alembic.
revision = '840'
down_revision = '830'
from alembic import op
def upgrade():
# Change framework of draft DOS1 briefs from DOS1 (framework_id == 5) to DOS2 (framework_id == 7)
op.execute("""
UPDATE briefs
SET framework_id = 7
WHERE framework_id = 5 AND published_at IS NULL
""")
def downgrade():
# No downgrade
pass
|
|
d24e8c746359169058e9c0577c2f843695ca3b55
|
heat/tests/functional/test_WordPress_2_Instances_With_EBS.py
|
heat/tests/functional/test_WordPress_2_Instances_With_EBS.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import util
import verify
import nose
from nose.plugins.attrib import attr
from heat.common import context
from heat.engine import manager
import unittest
@attr(speed='slow')
@attr(tag=['func', 'wordpress', '2instance', 'ebs',
'WordPress_2_Instances_With_EBS.template'])
class WordPress2InstancesWithEBS(unittest.TestCase):
def setUp(self):
template = 'WordPress_2_Instances_With_EBS.template'
self.stack = util.Stack(template, 'F17', 'x86_64', 'cfntools')
self.WikiDatabase = util.Instance('WikiDatabase')
self.WikiDatabase.check_cfntools()
self.WikiDatabase.wait_for_provisioning()
self.WebServer = util.Instance('WebServer')
self.WebServer.check_cfntools()
self.WebServer.wait_for_provisioning()
def test_instance(self):
# ensure wordpress was installed
self.assertTrue(self.WebServer.file_present
('/etc/wordpress/wp-config.php'))
print "Wordpress installation detected"
# Verify the output URL parses as expected, ie check that
# the wordpress installation is operational
stack_url = self.stack.get_stack_output("WebsiteURL")
print "Got stack output WebsiteURL=%s, verifying" % stack_url
ver = verify.VerifyStack()
self.assertTrue(ver.verify_wordpress(stack_url))
# Check EBS volume is present and mounted
stdin, stdout, sterr = self.WikiDatabase.exec_command(
'grep vdc /proc/mounts')
result = stdout.readlines().pop().rstrip()
self.assertTrue(len(result))
print "Checking EBS volume is attached : %s" % result
devname = result.split()[0]
self.assertEqual(devname, '/dev/vdc1')
mountpoint = result.split()[1]
self.assertEqual(mountpoint, '/var/lib/mysql')
self.stack.cleanup()
|
Add 2 instance with EBS test.
|
Add 2 instance with EBS test.
Seems to work even.
Change-Id: Ie09d97dc86a68525783f1f9c080fed13e651ef78
Signed-off-by: Ian Main <87edab3227c32093d388ed00258c1a5e63cfd715@redhat.com>
|
Python
|
apache-2.0
|
citrix-openstack-build/heat,JioCloud/heat,dragorosson/heat,rh-s/heat,rickerc/heat_audit,noironetworks/heat,pshchelo/heat,jasondunsmore/heat,rh-s/heat,NeCTAR-RC/heat,cwolferh/heat-scratch,cryptickp/heat,miguelgrinberg/heat,varunarya10/heat,pratikmallya/heat,takeshineshiro/heat,ntt-sic/heat,pratikmallya/heat,redhat-openstack/heat,dims/heat,steveb/heat,Triv90/Heat,noironetworks/heat,rdo-management/heat,gonzolino/heat,miguelgrinberg/heat,Triv90/Heat,gonzolino/heat,rickerc/heat_audit,takeshineshiro/heat,steveb/heat,dims/heat,jasondunsmore/heat,redhat-openstack/heat,pshchelo/heat,maestro-hybrid-cloud/heat,varunarya10/heat,dragorosson/heat,cryptickp/heat,openstack/heat,openstack/heat,srznew/heat,NeCTAR-RC/heat,cwolferh/heat-scratch,maestro-hybrid-cloud/heat,citrix-openstack-build/heat,rdo-management/heat,ntt-sic/heat,srznew/heat,JioCloud/heat,Triv90/Heat
|
Add 2 instance with EBS test.
Seems to work even.
Change-Id: Ie09d97dc86a68525783f1f9c080fed13e651ef78
Signed-off-by: Ian Main <87edab3227c32093d388ed00258c1a5e63cfd715@redhat.com>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import util
import verify
import nose
from nose.plugins.attrib import attr
from heat.common import context
from heat.engine import manager
import unittest
@attr(speed='slow')
@attr(tag=['func', 'wordpress', '2instance', 'ebs',
'WordPress_2_Instances_With_EBS.template'])
class WordPress2InstancesWithEBS(unittest.TestCase):
def setUp(self):
template = 'WordPress_2_Instances_With_EBS.template'
self.stack = util.Stack(template, 'F17', 'x86_64', 'cfntools')
self.WikiDatabase = util.Instance('WikiDatabase')
self.WikiDatabase.check_cfntools()
self.WikiDatabase.wait_for_provisioning()
self.WebServer = util.Instance('WebServer')
self.WebServer.check_cfntools()
self.WebServer.wait_for_provisioning()
def test_instance(self):
# ensure wordpress was installed
self.assertTrue(self.WebServer.file_present
('/etc/wordpress/wp-config.php'))
print "Wordpress installation detected"
# Verify the output URL parses as expected, ie check that
# the wordpress installation is operational
stack_url = self.stack.get_stack_output("WebsiteURL")
print "Got stack output WebsiteURL=%s, verifying" % stack_url
ver = verify.VerifyStack()
self.assertTrue(ver.verify_wordpress(stack_url))
# Check EBS volume is present and mounted
stdin, stdout, sterr = self.WikiDatabase.exec_command(
'grep vdc /proc/mounts')
result = stdout.readlines().pop().rstrip()
self.assertTrue(len(result))
print "Checking EBS volume is attached : %s" % result
devname = result.split()[0]
self.assertEqual(devname, '/dev/vdc1')
mountpoint = result.split()[1]
self.assertEqual(mountpoint, '/var/lib/mysql')
self.stack.cleanup()
|
<commit_before><commit_msg>Add 2 instance with EBS test.
Seems to work even.
Change-Id: Ie09d97dc86a68525783f1f9c080fed13e651ef78
Signed-off-by: Ian Main <87edab3227c32093d388ed00258c1a5e63cfd715@redhat.com><commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import util
import verify
import nose
from nose.plugins.attrib import attr
from heat.common import context
from heat.engine import manager
import unittest
@attr(speed='slow')
@attr(tag=['func', 'wordpress', '2instance', 'ebs',
'WordPress_2_Instances_With_EBS.template'])
class WordPress2InstancesWithEBS(unittest.TestCase):
def setUp(self):
template = 'WordPress_2_Instances_With_EBS.template'
self.stack = util.Stack(template, 'F17', 'x86_64', 'cfntools')
self.WikiDatabase = util.Instance('WikiDatabase')
self.WikiDatabase.check_cfntools()
self.WikiDatabase.wait_for_provisioning()
self.WebServer = util.Instance('WebServer')
self.WebServer.check_cfntools()
self.WebServer.wait_for_provisioning()
def test_instance(self):
# ensure wordpress was installed
self.assertTrue(self.WebServer.file_present
('/etc/wordpress/wp-config.php'))
print "Wordpress installation detected"
# Verify the output URL parses as expected, ie check that
# the wordpress installation is operational
stack_url = self.stack.get_stack_output("WebsiteURL")
print "Got stack output WebsiteURL=%s, verifying" % stack_url
ver = verify.VerifyStack()
self.assertTrue(ver.verify_wordpress(stack_url))
# Check EBS volume is present and mounted
stdin, stdout, sterr = self.WikiDatabase.exec_command(
'grep vdc /proc/mounts')
result = stdout.readlines().pop().rstrip()
self.assertTrue(len(result))
print "Checking EBS volume is attached : %s" % result
devname = result.split()[0]
self.assertEqual(devname, '/dev/vdc1')
mountpoint = result.split()[1]
self.assertEqual(mountpoint, '/var/lib/mysql')
self.stack.cleanup()
|
Add 2 instance with EBS test.
Seems to work even.
Change-Id: Ie09d97dc86a68525783f1f9c080fed13e651ef78
Signed-off-by: Ian Main <87edab3227c32093d388ed00258c1a5e63cfd715@redhat.com># vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import util
import verify
import nose
from nose.plugins.attrib import attr
from heat.common import context
from heat.engine import manager
import unittest
@attr(speed='slow')
@attr(tag=['func', 'wordpress', '2instance', 'ebs',
'WordPress_2_Instances_With_EBS.template'])
class WordPress2InstancesWithEBS(unittest.TestCase):
def setUp(self):
template = 'WordPress_2_Instances_With_EBS.template'
self.stack = util.Stack(template, 'F17', 'x86_64', 'cfntools')
self.WikiDatabase = util.Instance('WikiDatabase')
self.WikiDatabase.check_cfntools()
self.WikiDatabase.wait_for_provisioning()
self.WebServer = util.Instance('WebServer')
self.WebServer.check_cfntools()
self.WebServer.wait_for_provisioning()
def test_instance(self):
# ensure wordpress was installed
self.assertTrue(self.WebServer.file_present
('/etc/wordpress/wp-config.php'))
print "Wordpress installation detected"
# Verify the output URL parses as expected, ie check that
# the wordpress installation is operational
stack_url = self.stack.get_stack_output("WebsiteURL")
print "Got stack output WebsiteURL=%s, verifying" % stack_url
ver = verify.VerifyStack()
self.assertTrue(ver.verify_wordpress(stack_url))
# Check EBS volume is present and mounted
stdin, stdout, sterr = self.WikiDatabase.exec_command(
'grep vdc /proc/mounts')
result = stdout.readlines().pop().rstrip()
self.assertTrue(len(result))
print "Checking EBS volume is attached : %s" % result
devname = result.split()[0]
self.assertEqual(devname, '/dev/vdc1')
mountpoint = result.split()[1]
self.assertEqual(mountpoint, '/var/lib/mysql')
self.stack.cleanup()
|
<commit_before><commit_msg>Add 2 instance with EBS test.
Seems to work even.
Change-Id: Ie09d97dc86a68525783f1f9c080fed13e651ef78
Signed-off-by: Ian Main <87edab3227c32093d388ed00258c1a5e63cfd715@redhat.com><commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import util
import verify
import nose
from nose.plugins.attrib import attr
from heat.common import context
from heat.engine import manager
import unittest
@attr(speed='slow')
@attr(tag=['func', 'wordpress', '2instance', 'ebs',
'WordPress_2_Instances_With_EBS.template'])
class WordPress2InstancesWithEBS(unittest.TestCase):
def setUp(self):
template = 'WordPress_2_Instances_With_EBS.template'
self.stack = util.Stack(template, 'F17', 'x86_64', 'cfntools')
self.WikiDatabase = util.Instance('WikiDatabase')
self.WikiDatabase.check_cfntools()
self.WikiDatabase.wait_for_provisioning()
self.WebServer = util.Instance('WebServer')
self.WebServer.check_cfntools()
self.WebServer.wait_for_provisioning()
def test_instance(self):
# ensure wordpress was installed
self.assertTrue(self.WebServer.file_present
('/etc/wordpress/wp-config.php'))
print "Wordpress installation detected"
# Verify the output URL parses as expected, ie check that
# the wordpress installation is operational
stack_url = self.stack.get_stack_output("WebsiteURL")
print "Got stack output WebsiteURL=%s, verifying" % stack_url
ver = verify.VerifyStack()
self.assertTrue(ver.verify_wordpress(stack_url))
# Check EBS volume is present and mounted
stdin, stdout, sterr = self.WikiDatabase.exec_command(
'grep vdc /proc/mounts')
result = stdout.readlines().pop().rstrip()
self.assertTrue(len(result))
print "Checking EBS volume is attached : %s" % result
devname = result.split()[0]
self.assertEqual(devname, '/dev/vdc1')
mountpoint = result.split()[1]
self.assertEqual(mountpoint, '/var/lib/mysql')
self.stack.cleanup()
|
|
673dac79cbab6de0be5650d46840a3bc9858b2b4
|
tests/clear_qiniu_bucket.py
|
tests/clear_qiniu_bucket.py
|
import os
from qiniu import Auth, BucketManager
QINIU_ACCESS_KEY = os.environ.get('QINIU_ACCESS_KEY')
QINIU_SECRET_KEY = os.environ.get('QINIU_SECRET_KEY')
QINIU_BUCKET_NAME = os.environ.get('QINIU_BUCKET_NAME')
QINIU_BUCKET_DOMAIN = os.environ.get('QINIU_BUCKET_DOMAIN')
def main():
auth = Auth(QINIU_ACCESS_KEY, QINIU_SECRET_KEY)
bucket = BucketManager(auth)
while True:
ret, eof, info = bucket.list(QINIU_BUCKET_NAME, limit=100)
if ret is None:
print info
break
for item in ret['items']:
name = item['key']
print "Deleting %s ..." % name
ret, info = bucket.delete(QINIU_BUCKET_NAME, name)
if ret is None:
print info
if eof:
break
if __name__ == '__main__':
main()
|
Add a helper script to clear the test bucket
|
Add a helper script to clear the test bucket
|
Python
|
mit
|
glasslion/django-qiniu-storage,jeffrey4l/django-qiniu-storage,Mark-Shine/django-qiniu-storage,jackeyGao/django-qiniu-storage
|
Add a helper script to clear the test bucket
|
import os
from qiniu import Auth, BucketManager
QINIU_ACCESS_KEY = os.environ.get('QINIU_ACCESS_KEY')
QINIU_SECRET_KEY = os.environ.get('QINIU_SECRET_KEY')
QINIU_BUCKET_NAME = os.environ.get('QINIU_BUCKET_NAME')
QINIU_BUCKET_DOMAIN = os.environ.get('QINIU_BUCKET_DOMAIN')
def main():
auth = Auth(QINIU_ACCESS_KEY, QINIU_SECRET_KEY)
bucket = BucketManager(auth)
while True:
ret, eof, info = bucket.list(QINIU_BUCKET_NAME, limit=100)
if ret is None:
print info
break
for item in ret['items']:
name = item['key']
print "Deleting %s ..." % name
ret, info = bucket.delete(QINIU_BUCKET_NAME, name)
if ret is None:
print info
if eof:
break
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a helper script to clear the test bucket<commit_after>
|
import os
from qiniu import Auth, BucketManager
QINIU_ACCESS_KEY = os.environ.get('QINIU_ACCESS_KEY')
QINIU_SECRET_KEY = os.environ.get('QINIU_SECRET_KEY')
QINIU_BUCKET_NAME = os.environ.get('QINIU_BUCKET_NAME')
QINIU_BUCKET_DOMAIN = os.environ.get('QINIU_BUCKET_DOMAIN')
def main():
auth = Auth(QINIU_ACCESS_KEY, QINIU_SECRET_KEY)
bucket = BucketManager(auth)
while True:
ret, eof, info = bucket.list(QINIU_BUCKET_NAME, limit=100)
if ret is None:
print info
break
for item in ret['items']:
name = item['key']
print "Deleting %s ..." % name
ret, info = bucket.delete(QINIU_BUCKET_NAME, name)
if ret is None:
print info
if eof:
break
if __name__ == '__main__':
main()
|
Add a helper script to clear the test bucketimport os
from qiniu import Auth, BucketManager
QINIU_ACCESS_KEY = os.environ.get('QINIU_ACCESS_KEY')
QINIU_SECRET_KEY = os.environ.get('QINIU_SECRET_KEY')
QINIU_BUCKET_NAME = os.environ.get('QINIU_BUCKET_NAME')
QINIU_BUCKET_DOMAIN = os.environ.get('QINIU_BUCKET_DOMAIN')
def main():
auth = Auth(QINIU_ACCESS_KEY, QINIU_SECRET_KEY)
bucket = BucketManager(auth)
while True:
ret, eof, info = bucket.list(QINIU_BUCKET_NAME, limit=100)
if ret is None:
print info
break
for item in ret['items']:
name = item['key']
print "Deleting %s ..." % name
ret, info = bucket.delete(QINIU_BUCKET_NAME, name)
if ret is None:
print info
if eof:
break
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a helper script to clear the test bucket<commit_after>import os
from qiniu import Auth, BucketManager
QINIU_ACCESS_KEY = os.environ.get('QINIU_ACCESS_KEY')
QINIU_SECRET_KEY = os.environ.get('QINIU_SECRET_KEY')
QINIU_BUCKET_NAME = os.environ.get('QINIU_BUCKET_NAME')
QINIU_BUCKET_DOMAIN = os.environ.get('QINIU_BUCKET_DOMAIN')
def main():
auth = Auth(QINIU_ACCESS_KEY, QINIU_SECRET_KEY)
bucket = BucketManager(auth)
while True:
ret, eof, info = bucket.list(QINIU_BUCKET_NAME, limit=100)
if ret is None:
print info
break
for item in ret['items']:
name = item['key']
print "Deleting %s ..." % name
ret, info = bucket.delete(QINIU_BUCKET_NAME, name)
if ret is None:
print info
if eof:
break
if __name__ == '__main__':
main()
|
|
666d9c467806782827edac4b2c0c13d494e41250
|
jobmon/test/test_status_server.py
|
jobmon/test/test_status_server.py
|
import os
import select
import socket
import time
import unittest
from jobmon.protocol import *
from jobmon import protocol, status_server, transport
PORT = 9999
class StatusRecorder:
def __init__(self):
self.records = []
def process_start(self, job):
self.records.append(('started', job))
def process_stop(self, job):
self.records.append(('stopped', job))
class TestCommandServer(unittest.TestCase):
def test_command_server(self):
status_recorder = StatusRecorder()
status_svr = status_server.StatusServer(status_recorder)
status_svr.start()
status_peer = status_svr.get_peer()
try:
status_peer.send(protocol.Event('some_job',
protocol.EVENT_STARTJOB))
status_peer.send(protocol.Event('some_job',
protocol.EVENT_STOPJOB))
time.sleep(5) # Give the server time to process all events
self.assertEqual(status_recorder.records,
[('started', 'some_job'),
('stopped', 'some_job')])
finally:
status_peer.close()
status_svr.terminate()
|
Add a test for the status server
|
Add a test for the status server
|
Python
|
bsd-2-clause
|
adamnew123456/jobmon
|
Add a test for the status server
|
import os
import select
import socket
import time
import unittest
from jobmon.protocol import *
from jobmon import protocol, status_server, transport
PORT = 9999
class StatusRecorder:
def __init__(self):
self.records = []
def process_start(self, job):
self.records.append(('started', job))
def process_stop(self, job):
self.records.append(('stopped', job))
class TestCommandServer(unittest.TestCase):
def test_command_server(self):
status_recorder = StatusRecorder()
status_svr = status_server.StatusServer(status_recorder)
status_svr.start()
status_peer = status_svr.get_peer()
try:
status_peer.send(protocol.Event('some_job',
protocol.EVENT_STARTJOB))
status_peer.send(protocol.Event('some_job',
protocol.EVENT_STOPJOB))
time.sleep(5) # Give the server time to process all events
self.assertEqual(status_recorder.records,
[('started', 'some_job'),
('stopped', 'some_job')])
finally:
status_peer.close()
status_svr.terminate()
|
<commit_before><commit_msg>Add a test for the status server<commit_after>
|
import os
import select
import socket
import time
import unittest
from jobmon.protocol import *
from jobmon import protocol, status_server, transport
PORT = 9999
class StatusRecorder:
def __init__(self):
self.records = []
def process_start(self, job):
self.records.append(('started', job))
def process_stop(self, job):
self.records.append(('stopped', job))
class TestCommandServer(unittest.TestCase):
def test_command_server(self):
status_recorder = StatusRecorder()
status_svr = status_server.StatusServer(status_recorder)
status_svr.start()
status_peer = status_svr.get_peer()
try:
status_peer.send(protocol.Event('some_job',
protocol.EVENT_STARTJOB))
status_peer.send(protocol.Event('some_job',
protocol.EVENT_STOPJOB))
time.sleep(5) # Give the server time to process all events
self.assertEqual(status_recorder.records,
[('started', 'some_job'),
('stopped', 'some_job')])
finally:
status_peer.close()
status_svr.terminate()
|
Add a test for the status serverimport os
import select
import socket
import time
import unittest
from jobmon.protocol import *
from jobmon import protocol, status_server, transport
PORT = 9999
class StatusRecorder:
def __init__(self):
self.records = []
def process_start(self, job):
self.records.append(('started', job))
def process_stop(self, job):
self.records.append(('stopped', job))
class TestCommandServer(unittest.TestCase):
def test_command_server(self):
status_recorder = StatusRecorder()
status_svr = status_server.StatusServer(status_recorder)
status_svr.start()
status_peer = status_svr.get_peer()
try:
status_peer.send(protocol.Event('some_job',
protocol.EVENT_STARTJOB))
status_peer.send(protocol.Event('some_job',
protocol.EVENT_STOPJOB))
time.sleep(5) # Give the server time to process all events
self.assertEqual(status_recorder.records,
[('started', 'some_job'),
('stopped', 'some_job')])
finally:
status_peer.close()
status_svr.terminate()
|
<commit_before><commit_msg>Add a test for the status server<commit_after>import os
import select
import socket
import time
import unittest
from jobmon.protocol import *
from jobmon import protocol, status_server, transport
PORT = 9999
class StatusRecorder:
def __init__(self):
self.records = []
def process_start(self, job):
self.records.append(('started', job))
def process_stop(self, job):
self.records.append(('stopped', job))
class TestCommandServer(unittest.TestCase):
def test_command_server(self):
status_recorder = StatusRecorder()
status_svr = status_server.StatusServer(status_recorder)
status_svr.start()
status_peer = status_svr.get_peer()
try:
status_peer.send(protocol.Event('some_job',
protocol.EVENT_STARTJOB))
status_peer.send(protocol.Event('some_job',
protocol.EVENT_STOPJOB))
time.sleep(5) # Give the server time to process all events
self.assertEqual(status_recorder.records,
[('started', 'some_job'),
('stopped', 'some_job')])
finally:
status_peer.close()
status_svr.terminate()
|
|
45b0e958aa377afed2c62bf1e6f7c4933ccde39b
|
test/test_main.py
|
test/test_main.py
|
from git_lang_guesser import main
from git_lang_guesser import git_requester
LANGUAGE = "language"
test_username = "TestUser"
example_data = [
{LANGUAGE: "HTML"},
{LANGUAGE: "Java"},
{LANGUAGE: "Python"},
{LANGUAGE: "Python"},
{LANGUAGE: "C"},
]
expected_count = {
"HTML": 1,
"Java": 1,
"Python": 2,
"C": 1,
}
expected_favourite = "Python"
class TestDoGuess(object):
def test_basic(self, monkeypatch, capsys):
"""Test that basic usage works"""
def mock_request(username):
assert(username == test_username)
return example_data
monkeypatch.setattr(git_requester, "get_public_repos_for_user", mock_request)
main.do_guess(username=test_username, list_all=False)
out, err = capsys.readouterr()
assert(out.strip() == expected_favourite)
|
Add a test for main
|
Add a test for main
More to follow
|
Python
|
mit
|
robbie-c/git-lang-guesser
|
Add a test for main
More to follow
|
from git_lang_guesser import main
from git_lang_guesser import git_requester
LANGUAGE = "language"
test_username = "TestUser"
example_data = [
{LANGUAGE: "HTML"},
{LANGUAGE: "Java"},
{LANGUAGE: "Python"},
{LANGUAGE: "Python"},
{LANGUAGE: "C"},
]
expected_count = {
"HTML": 1,
"Java": 1,
"Python": 2,
"C": 1,
}
expected_favourite = "Python"
class TestDoGuess(object):
def test_basic(self, monkeypatch, capsys):
"""Test that basic usage works"""
def mock_request(username):
assert(username == test_username)
return example_data
monkeypatch.setattr(git_requester, "get_public_repos_for_user", mock_request)
main.do_guess(username=test_username, list_all=False)
out, err = capsys.readouterr()
assert(out.strip() == expected_favourite)
|
<commit_before><commit_msg>Add a test for main
More to follow<commit_after>
|
from git_lang_guesser import main
from git_lang_guesser import git_requester
LANGUAGE = "language"
test_username = "TestUser"
example_data = [
{LANGUAGE: "HTML"},
{LANGUAGE: "Java"},
{LANGUAGE: "Python"},
{LANGUAGE: "Python"},
{LANGUAGE: "C"},
]
expected_count = {
"HTML": 1,
"Java": 1,
"Python": 2,
"C": 1,
}
expected_favourite = "Python"
class TestDoGuess(object):
def test_basic(self, monkeypatch, capsys):
"""Test that basic usage works"""
def mock_request(username):
assert(username == test_username)
return example_data
monkeypatch.setattr(git_requester, "get_public_repos_for_user", mock_request)
main.do_guess(username=test_username, list_all=False)
out, err = capsys.readouterr()
assert(out.strip() == expected_favourite)
|
Add a test for main
More to followfrom git_lang_guesser import main
from git_lang_guesser import git_requester
LANGUAGE = "language"
test_username = "TestUser"
example_data = [
{LANGUAGE: "HTML"},
{LANGUAGE: "Java"},
{LANGUAGE: "Python"},
{LANGUAGE: "Python"},
{LANGUAGE: "C"},
]
expected_count = {
"HTML": 1,
"Java": 1,
"Python": 2,
"C": 1,
}
expected_favourite = "Python"
class TestDoGuess(object):
def test_basic(self, monkeypatch, capsys):
"""Test that basic usage works"""
def mock_request(username):
assert(username == test_username)
return example_data
monkeypatch.setattr(git_requester, "get_public_repos_for_user", mock_request)
main.do_guess(username=test_username, list_all=False)
out, err = capsys.readouterr()
assert(out.strip() == expected_favourite)
|
<commit_before><commit_msg>Add a test for main
More to follow<commit_after>from git_lang_guesser import main
from git_lang_guesser import git_requester
LANGUAGE = "language"
test_username = "TestUser"
example_data = [
{LANGUAGE: "HTML"},
{LANGUAGE: "Java"},
{LANGUAGE: "Python"},
{LANGUAGE: "Python"},
{LANGUAGE: "C"},
]
expected_count = {
"HTML": 1,
"Java": 1,
"Python": 2,
"C": 1,
}
expected_favourite = "Python"
class TestDoGuess(object):
def test_basic(self, monkeypatch, capsys):
"""Test that basic usage works"""
def mock_request(username):
assert(username == test_username)
return example_data
monkeypatch.setattr(git_requester, "get_public_repos_for_user", mock_request)
main.do_guess(username=test_username, list_all=False)
out, err = capsys.readouterr()
assert(out.strip() == expected_favourite)
|
|
092bf8bc2e558420ca51384a3dd1019ab1115ad2
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from djangocms_link import __version__
INSTALL_REQUIRES = [
#'Django-Select2',
]
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
]
setup(
name='djangocms-link',
version=__version__,
description='Link Plugin for django CMS',
author='Divio AG',
author_email='info@divio.ch',
url='https://github.com/divio/djangocms-link',
packages=['djangocms_link', 'djangocms_link.migrations', 'djangocms_link.migrations_django'],
install_requires=INSTALL_REQUIRES,
license='LICENSE.txt',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
long_description=open('README.md').read(),
include_package_data=True,
zip_safe=False
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from djangocms_link import __version__
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
]
setup(
name='djangocms-link',
version=__version__,
description='Link Plugin for django CMS',
author='Divio AG',
author_email='info@divio.ch',
url='https://github.com/divio/djangocms-link',
packages=['djangocms_link', 'djangocms_link.migrations', 'djangocms_link.migrations_django'],
install_requires=[],
extras_require={
":python_version=='3.3'": ['django-select2-py3'],
":python_version=='3.4'": ['django-select2-py3'],
":python_version=='2.6'": ['django-select2'],
":python_version=='2.7'": ['django-select2'],
},
license='LICENSE.txt',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
long_description=open('README.md').read(),
include_package_data=True,
zip_safe=False
)
|
Fix conditional dependencies when using wheels
|
Fix conditional dependencies when using wheels
see also:
https://github.com/nephila/djangocms-blog/pull/80
|
Python
|
bsd-3-clause
|
grigno/djangocms-link,brente/djangocms-link,addgene/djangocms-link,garmoncheg/djangocms-link,yakky/djangocms-link,yakky/djangocms-link,brente/djangocms-link,addgene/djangocms-link,grigno/djangocms-link,garmoncheg/djangocms-link
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from djangocms_link import __version__
INSTALL_REQUIRES = [
#'Django-Select2',
]
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
]
setup(
name='djangocms-link',
version=__version__,
description='Link Plugin for django CMS',
author='Divio AG',
author_email='info@divio.ch',
url='https://github.com/divio/djangocms-link',
packages=['djangocms_link', 'djangocms_link.migrations', 'djangocms_link.migrations_django'],
install_requires=INSTALL_REQUIRES,
license='LICENSE.txt',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
long_description=open('README.md').read(),
include_package_data=True,
zip_safe=False
)
Fix conditional dependencies when using wheels
see also:
https://github.com/nephila/djangocms-blog/pull/80
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from djangocms_link import __version__
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
]
setup(
name='djangocms-link',
version=__version__,
description='Link Plugin for django CMS',
author='Divio AG',
author_email='info@divio.ch',
url='https://github.com/divio/djangocms-link',
packages=['djangocms_link', 'djangocms_link.migrations', 'djangocms_link.migrations_django'],
install_requires=[],
extras_require={
":python_version=='3.3'": ['django-select2-py3'],
":python_version=='3.4'": ['django-select2-py3'],
":python_version=='2.6'": ['django-select2'],
":python_version=='2.7'": ['django-select2'],
},
license='LICENSE.txt',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
long_description=open('README.md').read(),
include_package_data=True,
zip_safe=False
)
|
<commit_before>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from djangocms_link import __version__
INSTALL_REQUIRES = [
#'Django-Select2',
]
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
]
setup(
name='djangocms-link',
version=__version__,
description='Link Plugin for django CMS',
author='Divio AG',
author_email='info@divio.ch',
url='https://github.com/divio/djangocms-link',
packages=['djangocms_link', 'djangocms_link.migrations', 'djangocms_link.migrations_django'],
install_requires=INSTALL_REQUIRES,
license='LICENSE.txt',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
long_description=open('README.md').read(),
include_package_data=True,
zip_safe=False
)
<commit_msg>Fix conditional dependencies when using wheels
see also:
https://github.com/nephila/djangocms-blog/pull/80<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from djangocms_link import __version__
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
]
setup(
name='djangocms-link',
version=__version__,
description='Link Plugin for django CMS',
author='Divio AG',
author_email='info@divio.ch',
url='https://github.com/divio/djangocms-link',
packages=['djangocms_link', 'djangocms_link.migrations', 'djangocms_link.migrations_django'],
install_requires=[],
extras_require={
":python_version=='3.3'": ['django-select2-py3'],
":python_version=='3.4'": ['django-select2-py3'],
":python_version=='2.6'": ['django-select2'],
":python_version=='2.7'": ['django-select2'],
},
license='LICENSE.txt',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
long_description=open('README.md').read(),
include_package_data=True,
zip_safe=False
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from djangocms_link import __version__
INSTALL_REQUIRES = [
#'Django-Select2',
]
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
]
setup(
name='djangocms-link',
version=__version__,
description='Link Plugin for django CMS',
author='Divio AG',
author_email='info@divio.ch',
url='https://github.com/divio/djangocms-link',
packages=['djangocms_link', 'djangocms_link.migrations', 'djangocms_link.migrations_django'],
install_requires=INSTALL_REQUIRES,
license='LICENSE.txt',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
long_description=open('README.md').read(),
include_package_data=True,
zip_safe=False
)
Fix conditional dependencies when using wheels
see also:
https://github.com/nephila/djangocms-blog/pull/80#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from djangocms_link import __version__
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
]
setup(
name='djangocms-link',
version=__version__,
description='Link Plugin for django CMS',
author='Divio AG',
author_email='info@divio.ch',
url='https://github.com/divio/djangocms-link',
packages=['djangocms_link', 'djangocms_link.migrations', 'djangocms_link.migrations_django'],
install_requires=[],
extras_require={
":python_version=='3.3'": ['django-select2-py3'],
":python_version=='3.4'": ['django-select2-py3'],
":python_version=='2.6'": ['django-select2'],
":python_version=='2.7'": ['django-select2'],
},
license='LICENSE.txt',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
long_description=open('README.md').read(),
include_package_data=True,
zip_safe=False
)
|
<commit_before>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from djangocms_link import __version__
INSTALL_REQUIRES = [
#'Django-Select2',
]
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
]
setup(
name='djangocms-link',
version=__version__,
description='Link Plugin for django CMS',
author='Divio AG',
author_email='info@divio.ch',
url='https://github.com/divio/djangocms-link',
packages=['djangocms_link', 'djangocms_link.migrations', 'djangocms_link.migrations_django'],
install_requires=INSTALL_REQUIRES,
license='LICENSE.txt',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
long_description=open('README.md').read(),
include_package_data=True,
zip_safe=False
)
<commit_msg>Fix conditional dependencies when using wheels
see also:
https://github.com/nephila/djangocms-blog/pull/80<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from djangocms_link import __version__
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
]
setup(
name='djangocms-link',
version=__version__,
description='Link Plugin for django CMS',
author='Divio AG',
author_email='info@divio.ch',
url='https://github.com/divio/djangocms-link',
packages=['djangocms_link', 'djangocms_link.migrations', 'djangocms_link.migrations_django'],
install_requires=[],
extras_require={
":python_version=='3.3'": ['django-select2-py3'],
":python_version=='3.4'": ['django-select2-py3'],
":python_version=='2.6'": ['django-select2'],
":python_version=='2.7'": ['django-select2'],
},
license='LICENSE.txt',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
long_description=open('README.md').read(),
include_package_data=True,
zip_safe=False
)
|
b418ff779c79afd0eca85ed1479ba633f25ce73c
|
nova/tests/test_vmwareapi_vm_util.py
|
nova/tests/test_vmwareapi_vm_util.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
from nova import test
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import vm_util
class fake_session(object):
def __init__(self, ret=None):
self.ret = ret
def _call_method(self, *args):
return self.ret
class VMwareVMUtilTestCase(test.TestCase):
def setUp(self):
super(VMwareVMUtilTestCase, self).setUp()
def tearDown(self):
super(VMwareVMUtilTestCase, self).tearDown()
def test_get_datastore_ref_and_name(self):
result = vm_util.get_datastore_ref_and_name(
fake_session([fake.Datastore()]))
self.assertEquals(result[1], "fake-ds")
self.assertEquals(result[2], 1024 * 1024 * 1024)
self.assertEquals(result[3], 1024 * 1024 * 500)
def test_get_datastore_ref_and_name_without_datastore(self):
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), host="fake-host")
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), cluster="fake-cluster")
|
Fix variable referenced before assignment in vmwareapi code.
|
Fix variable referenced before assignment in vmwareapi code.
Add unit tests for VMwareapi vm_util.
fix bug #1177689
Change-Id: If16109ee626c197227affba122c2e4986d92d2df
|
Python
|
apache-2.0
|
n0ano/ganttclient
|
Fix variable referenced before assignment in vmwareapi code.
Add unit tests for VMwareapi vm_util.
fix bug #1177689
Change-Id: If16109ee626c197227affba122c2e4986d92d2df
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
from nova import test
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import vm_util
class fake_session(object):
def __init__(self, ret=None):
self.ret = ret
def _call_method(self, *args):
return self.ret
class VMwareVMUtilTestCase(test.TestCase):
def setUp(self):
super(VMwareVMUtilTestCase, self).setUp()
def tearDown(self):
super(VMwareVMUtilTestCase, self).tearDown()
def test_get_datastore_ref_and_name(self):
result = vm_util.get_datastore_ref_and_name(
fake_session([fake.Datastore()]))
self.assertEquals(result[1], "fake-ds")
self.assertEquals(result[2], 1024 * 1024 * 1024)
self.assertEquals(result[3], 1024 * 1024 * 500)
def test_get_datastore_ref_and_name_without_datastore(self):
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), host="fake-host")
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), cluster="fake-cluster")
|
<commit_before><commit_msg>Fix variable referenced before assignment in vmwareapi code.
Add unit tests for VMwareapi vm_util.
fix bug #1177689
Change-Id: If16109ee626c197227affba122c2e4986d92d2df<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
from nova import test
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import vm_util
class fake_session(object):
def __init__(self, ret=None):
self.ret = ret
def _call_method(self, *args):
return self.ret
class VMwareVMUtilTestCase(test.TestCase):
def setUp(self):
super(VMwareVMUtilTestCase, self).setUp()
def tearDown(self):
super(VMwareVMUtilTestCase, self).tearDown()
def test_get_datastore_ref_and_name(self):
result = vm_util.get_datastore_ref_and_name(
fake_session([fake.Datastore()]))
self.assertEquals(result[1], "fake-ds")
self.assertEquals(result[2], 1024 * 1024 * 1024)
self.assertEquals(result[3], 1024 * 1024 * 500)
def test_get_datastore_ref_and_name_without_datastore(self):
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), host="fake-host")
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), cluster="fake-cluster")
|
Fix variable referenced before assignment in vmwareapi code.
Add unit tests for VMwareapi vm_util.
fix bug #1177689
Change-Id: If16109ee626c197227affba122c2e4986d92d2df# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
from nova import test
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import vm_util
class fake_session(object):
def __init__(self, ret=None):
self.ret = ret
def _call_method(self, *args):
return self.ret
class VMwareVMUtilTestCase(test.TestCase):
def setUp(self):
super(VMwareVMUtilTestCase, self).setUp()
def tearDown(self):
super(VMwareVMUtilTestCase, self).tearDown()
def test_get_datastore_ref_and_name(self):
result = vm_util.get_datastore_ref_and_name(
fake_session([fake.Datastore()]))
self.assertEquals(result[1], "fake-ds")
self.assertEquals(result[2], 1024 * 1024 * 1024)
self.assertEquals(result[3], 1024 * 1024 * 500)
def test_get_datastore_ref_and_name_without_datastore(self):
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), host="fake-host")
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), cluster="fake-cluster")
|
<commit_before><commit_msg>Fix variable referenced before assignment in vmwareapi code.
Add unit tests for VMwareapi vm_util.
fix bug #1177689
Change-Id: If16109ee626c197227affba122c2e4986d92d2df<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
from nova import test
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import vm_util
class fake_session(object):
def __init__(self, ret=None):
self.ret = ret
def _call_method(self, *args):
return self.ret
class VMwareVMUtilTestCase(test.TestCase):
def setUp(self):
super(VMwareVMUtilTestCase, self).setUp()
def tearDown(self):
super(VMwareVMUtilTestCase, self).tearDown()
def test_get_datastore_ref_and_name(self):
result = vm_util.get_datastore_ref_and_name(
fake_session([fake.Datastore()]))
self.assertEquals(result[1], "fake-ds")
self.assertEquals(result[2], 1024 * 1024 * 1024)
self.assertEquals(result[3], 1024 * 1024 * 500)
def test_get_datastore_ref_and_name_without_datastore(self):
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), host="fake-host")
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), cluster="fake-cluster")
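For readers unfamiliar with the failure mode these tests guard against, here is a minimal, self-contained sketch of a "variable referenced before assignment" bug and the kind of guard the fix introduces (illustrative only — the function and exception names below are invented, not the actual nova vm_util code):

class DatastoreNotFound(Exception):
    pass

def pick_datastore(datastores):
    # Buggy shape: if `datastores` is empty the loop never runs, so `found`
    # is referenced before assignment and raises UnboundLocalError.
    for ds in datastores:
        found = ds
        break
    return found

def pick_datastore_fixed(datastores):
    # Fixed shape: initialise the result and raise a domain-specific error,
    # which is what the DatastoreNotFound assertions above expect.
    found = None
    for ds in datastores:
        found = ds
        break
    if found is None:
        raise DatastoreNotFound()
    return found

if __name__ == '__main__':
    print(pick_datastore_fixed(['fake-ds']))      # -> fake-ds
    try:
        pick_datastore_fixed([])
    except DatastoreNotFound:
        print('empty input reported cleanly')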
|
|
51e516f260858e699ee828ac6fc91af39c67254c
|
update-schemas.py
|
update-schemas.py
|
#!/usr/bin/env python
import os
import re
import sys
import subprocess as sp
def get_schemas(pattern):
cmd = ['git', 'grep', '--name-only']
output = sp.check_output(cmd + [pattern, '--', 'schemas']).decode('utf8')
names = output.split()
print(names)
dedupe = dict()
for name in names:
version = re.findall(r'\d\.\d.\d', name)[0]
basepath = name.split('-')[0]
if basepath in dedupe and dedupe[basepath] > version:
continue
dedupe[basepath] = version
return ['{}-{}.yaml'.format(x, y) for x,y in dedupe.items()]
def update_version(string):
groups = re.search(r'((\d)\.(\d)\.(\d))', string).groups()
bumped = int(groups[2]) + 1
new_version = '{}.{}.{}'.format(groups[1], bumped, groups[3])
return re.sub(r'((\d)\.(\d)\.(\d))', new_version, string)
def create_updated_schema(schema, pattern, new_pattern):
name = os.path.splitext(os.path.basename(schema))[0]
updated = update_version(name)
new_schema = re.sub(name, updated, schema)
with open(new_schema, 'w') as new_file:
with open(schema, 'r') as old_file:
for line in old_file:
line = line.replace(pattern, new_pattern)
line = line.replace(name, updated)
new_file.write(line)
def main():
if len(sys.argv) != 2:
name = os.path.basename(sys.argv[0])
sys.stderr.write('USAGE: {} <pattern>\n'.format(name))
exit(1)
pattern = sys.argv[1]
new_pattern = update_version(pattern)
schemas = get_schemas(pattern)
for s in schemas:
create_updated_schema(s, pattern, new_pattern)
if __name__ == '__main__':
main()
|
Add script for automatically updating schemas
|
Add script for automatically updating schemas
|
Python
|
bsd-3-clause
|
spacetelescope/asdf-standard
|
Add script for automatically updating schemas
|
#!/usr/bin/env python
import os
import re
import sys
import subprocess as sp
def get_schemas(pattern):
cmd = ['git', 'grep', '--name-only']
output = sp.check_output(cmd + [pattern, '--', 'schemas']).decode('utf8')
names = output.split()
print(names)
dedupe = dict()
for name in names:
version = re.findall(r'\d\.\d.\d', name)[0]
basepath = name.split('-')[0]
if basepath in dedupe and dedupe[basepath] > version:
continue
dedupe[basepath] = version
return ['{}-{}.yaml'.format(x, y) for x,y in dedupe.items()]
def update_version(string):
groups = re.search(r'((\d)\.(\d)\.(\d))', string).groups()
bumped = int(groups[2]) + 1
new_version = '{}.{}.{}'.format(groups[1], bumped, groups[3])
return re.sub(r'((\d)\.(\d)\.(\d))', new_version, string)
def create_updated_schema(schema, pattern, new_pattern):
name = os.path.splitext(os.path.basename(schema))[0]
updated = update_version(name)
new_schema = re.sub(name, updated, schema)
with open(new_schema, 'w') as new_file:
with open(schema, 'r') as old_file:
for line in old_file:
line = line.replace(pattern, new_pattern)
line = line.replace(name, updated)
new_file.write(line)
def main():
if len(sys.argv) != 2:
name = os.path.basename(sys.argv[0])
sys.stderr.write('USAGE: {} <pattern>\n'.format(name))
exit(1)
pattern = sys.argv[1]
new_pattern = update_version(pattern)
schemas = get_schemas(pattern)
for s in schemas:
create_updated_schema(s, pattern, new_pattern)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for automatically updating schemas<commit_after>
|
#!/usr/bin/env python
import os
import re
import sys
import subprocess as sp
def get_schemas(pattern):
cmd = ['git', 'grep', '--name-only']
output = sp.check_output(cmd + [pattern, '--', 'schemas']).decode('utf8')
names = output.split()
print(names)
dedupe = dict()
for name in names:
version = re.findall(r'\d\.\d.\d', name)[0]
basepath = name.split('-')[0]
if basepath in dedupe and dedupe[basepath] > version:
continue
dedupe[basepath] = version
return ['{}-{}.yaml'.format(x, y) for x,y in dedupe.items()]
def update_version(string):
groups = re.search(r'((\d)\.(\d)\.(\d))', string).groups()
bumped = int(groups[2]) + 1
new_version = '{}.{}.{}'.format(groups[1], bumped, groups[3])
return re.sub(r'((\d)\.(\d)\.(\d))', new_version, string)
def create_updated_schema(schema, pattern, new_pattern):
name = os.path.splitext(os.path.basename(schema))[0]
updated = update_version(name)
new_schema = re.sub(name, updated, schema)
with open(new_schema, 'w') as new_file:
with open(schema, 'r') as old_file:
for line in old_file:
line = line.replace(pattern, new_pattern)
line = line.replace(name, updated)
new_file.write(line)
def main():
if len(sys.argv) != 2:
name = os.path.basename(sys.argv[0])
sys.stderr.write('USAGE: {} <pattern>\n'.format(name))
exit(1)
pattern = sys.argv[1]
new_pattern = update_version(pattern)
schemas = get_schemas(pattern)
for s in schemas:
create_updated_schema(s, pattern, new_pattern)
if __name__ == '__main__':
main()
|
Add script for automatically updating schemas#!/usr/bin/env python
import os
import re
import sys
import subprocess as sp
def get_schemas(pattern):
cmd = ['git', 'grep', '--name-only']
output = sp.check_output(cmd + [pattern, '--', 'schemas']).decode('utf8')
names = output.split()
print(names)
dedupe = dict()
for name in names:
version = re.findall(r'\d\.\d.\d', name)[0]
basepath = name.split('-')[0]
if basepath in dedupe and dedupe[basepath] > version:
continue
dedupe[basepath] = version
return ['{}-{}.yaml'.format(x, y) for x,y in dedupe.items()]
def update_version(string):
groups = re.search(r'((\d)\.(\d)\.(\d))', string).groups()
bumped = int(groups[2]) + 1
new_version = '{}.{}.{}'.format(groups[1], bumped, groups[3])
return re.sub(r'((\d)\.(\d)\.(\d))', new_version, string)
def create_updated_schema(schema, pattern, new_pattern):
name = os.path.splitext(os.path.basename(schema))[0]
updated = update_version(name)
new_schema = re.sub(name, updated, schema)
with open(new_schema, 'w') as new_file:
with open(schema, 'r') as old_file:
for line in old_file:
line = line.replace(pattern, new_pattern)
line = line.replace(name, updated)
new_file.write(line)
def main():
if len(sys.argv) != 2:
name = os.path.basename(sys.argv[0])
sys.stderr.write('USAGE: {} <pattern>\n'.format(name))
exit(1)
pattern = sys.argv[1]
new_pattern = update_version(pattern)
schemas = get_schemas(pattern)
for s in schemas:
create_updated_schema(s, pattern, new_pattern)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for automatically updating schemas<commit_after>#!/usr/bin/env python
import os
import re
import sys
import subprocess as sp
def get_schemas(pattern):
cmd = ['git', 'grep', '--name-only']
output = sp.check_output(cmd + [pattern, '--', 'schemas']).decode('utf8')
names = output.split()
print(names)
dedupe = dict()
for name in names:
version = re.findall(r'\d\.\d.\d', name)[0]
basepath = name.split('-')[0]
if basepath in dedupe and dedupe[basepath] > version:
continue
dedupe[basepath] = version
return ['{}-{}.yaml'.format(x, y) for x,y in dedupe.items()]
def update_version(string):
groups = re.search(r'((\d)\.(\d)\.(\d))', string).groups()
bumped = int(groups[2]) + 1
new_version = '{}.{}.{}'.format(groups[1], bumped, groups[3])
return re.sub(r'((\d)\.(\d)\.(\d))', new_version, string)
def create_updated_schema(schema, pattern, new_pattern):
name = os.path.splitext(os.path.basename(schema))[0]
updated = update_version(name)
new_schema = re.sub(name, updated, schema)
with open(new_schema, 'w') as new_file:
with open(schema, 'r') as old_file:
for line in old_file:
line = line.replace(pattern, new_pattern)
line = line.replace(name, updated)
new_file.write(line)
def main():
if len(sys.argv) != 2:
name = os.path.basename(sys.argv[0])
sys.stderr.write('USAGE: {} <pattern>\n'.format(name))
exit(1)
pattern = sys.argv[1]
new_pattern = update_version(pattern)
schemas = get_schemas(pattern)
for s in schemas:
create_updated_schema(s, pattern, new_pattern)
if __name__ == '__main__':
main()
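As a quick, standalone sanity check of the version-bumping helper, update_version can be exercised on its own (the helper is reproduced from the script above with an added comment; the schema name is just an example, and note the pattern only handles single-digit version components):

import re

def update_version(string):
    # Bump the middle (minor) component of the first x.y.z found in `string`.
    groups = re.search(r'((\d)\.(\d)\.(\d))', string).groups()
    bumped = int(groups[2]) + 1
    new_version = '{}.{}.{}'.format(groups[1], bumped, groups[3])
    return re.sub(r'((\d)\.(\d)\.(\d))', new_version, string)

print(update_version('asdf-schema-1.1.0'))   # -> asdf-schema-1.2.0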
|
|
e1993d4d3c3199dce2be2b475a9236e95732a0f0
|
dodge.py
|
dodge.py
|
import platform
class OSXDodger(object):
allowed_version = "10.12.1"
def __init__(self, applications_dir):
self.app_dir = applications_dir
def load_applications(self):
"""
Read all applications in the `/Applications/` dir
"""
pass
def select_applications(self):
"""
Allow user to select an application they want
not to appear on the Dock
"""
pass
def load_dodger_filer(self):
"""
Load the file to modify for the application
chosen by the user in `select_applications`
The file to be loaded for is `info.plist`
"""
pass
def dodge_application(self):
"""
        Remove the application from the Dock
"""
pass
@classmethod
def pc_is_macintosh(cls):
"""
Check if it is an `Apple Computer` i.e a Mac
@return bool
"""
system = platform.system().lower()
sys_version = int((platform.mac_ver())[0].replace(".", ""))
allowed_version = int(cls.allowed_version.replace(".", ""))
if (system == "darwin") and (sys_version >= allowed_version):
return True
else:
print("\nSorry :(")
print("FAILED. OsX-dock-dodger is only applicable to computers " +
"running OS X {} or higher".format(cls.allowed_version))
return False
dodge = OSXDodger("/Applications/")
dodge.pc_is_macintosh()
|
Verify that the computer executing the script is running OS X 10.6.1 or higher
|
Verify that the computer executing the script is running OS X 10.6.1 or higher
|
Python
|
mit
|
yoda-yoda/osx-dock-dodger,denisKaranja/osx-dock-dodger
|
Verify that the computer executing the script is running OS X 10.6.1 or higher
|
import platform
class OSXDodger(object):
allowed_version = "10.12.1"
def __init__(self, applications_dir):
self.app_dir = applications_dir
def load_applications(self):
"""
Read all applications in the `/Applications/` dir
"""
pass
def select_applications(self):
"""
Allow user to select an application they want
not to appear on the Dock
"""
pass
def load_dodger_filer(self):
"""
Load the file to modify for the application
chosen by the user in `select_applications`
The file to be loaded for is `info.plist`
"""
pass
def dodge_application(self):
"""
        Remove the application from the Dock
"""
pass
@classmethod
def pc_is_macintosh(cls):
"""
Check if it is an `Apple Computer` i.e a Mac
@return bool
"""
system = platform.system().lower()
sys_version = int((platform.mac_ver())[0].replace(".", ""))
allowed_version = int(cls.allowed_version.replace(".", ""))
if (system == "darwin") and (sys_version >= allowed_version):
return True
else:
print("\nSorry :(")
print("FAILED. OsX-dock-dodger is only applicable to computers " +
"running OS X {} or higher".format(cls.allowed_version))
return False
dodge = OSXDodger("/Applications/")
dodge.pc_is_macintosh()
|
<commit_before><commit_msg>Verify that the computer executing the script is running OS X 10.6.1 or higher<commit_after>
|
import platform
class OSXDodger(object):
allowed_version = "10.12.1"
def __init__(self, applications_dir):
self.app_dir = applications_dir
def load_applications(self):
"""
Read all applications in the `/Applications/` dir
"""
pass
def select_applications(self):
"""
Allow user to select an application they want
not to appear on the Dock
"""
pass
def load_dodger_filer(self):
"""
Load the file to modify for the application
chosen by the user in `select_applications`
The file to be loaded for is `info.plist`
"""
pass
def dodge_application(self):
"""
        Remove the application from the Dock
"""
pass
@classmethod
def pc_is_macintosh(cls):
"""
Check if it is an `Apple Computer` i.e a Mac
@return bool
"""
system = platform.system().lower()
sys_version = int((platform.mac_ver())[0].replace(".", ""))
allowed_version = int(cls.allowed_version.replace(".", ""))
if (system == "darwin") and (sys_version >= allowed_version):
return True
else:
print("\nSorry :(")
print("FAILED. OsX-dock-dodger is only applicable to computers " +
"running OS X {} or higher".format(cls.allowed_version))
return False
dodge = OSXDodger("/Applications/")
dodge.pc_is_macintosh()
|
Verify that the computer executing the script is running OS X 10.6.1 or higherimport platform
class OSXDodger(object):
allowed_version = "10.12.1"
def __init__(self, applications_dir):
self.app_dir = applications_dir
def load_applications(self):
"""
Read all applications in the `/Applications/` dir
"""
pass
def select_applications(self):
"""
Allow user to select an application they want
not to appear on the Dock
"""
pass
def load_dodger_filer(self):
"""
Load the file to modify for the application
chosen by the user in `select_applications`
The file to be loaded for is `info.plist`
"""
pass
def dodge_application(self):
"""
        Remove the application from the Dock
"""
pass
@classmethod
def pc_is_macintosh(cls):
"""
Check if it is an `Apple Computer` i.e a Mac
@return bool
"""
system = platform.system().lower()
sys_version = int((platform.mac_ver())[0].replace(".", ""))
allowed_version = int(cls.allowed_version.replace(".", ""))
if (system == "darwin") and (sys_version >= allowed_version):
return True
else:
print("\nSorry :(")
print("FAILED. OsX-dock-dodger is only applicable to computers " +
"running OS X {} or higher".format(cls.allowed_version))
return False
dodge = OSXDodger("/Applications/")
dodge.pc_is_macintosh()
|
<commit_before><commit_msg>Verify that the computer executing the script is running OS X 10.6.1 or higher<commit_after>import platform
class OSXDodger(object):
allowed_version = "10.12.1"
def __init__(self, applications_dir):
self.app_dir = applications_dir
def load_applications(self):
"""
Read all applications in the `/Applications/` dir
"""
pass
def select_applications(self):
"""
Allow user to select an application they want
not to appear on the Dock
"""
pass
def load_dodger_filer(self):
"""
Load the file to modify for the application
chosen by the user in `select_applications`
The file to be loaded for is `info.plist`
"""
pass
def dodge_application(self):
"""
        Remove the application from the Dock
"""
pass
@classmethod
def pc_is_macintosh(cls):
"""
Check if it is an `Apple Computer` i.e a Mac
@return bool
"""
system = platform.system().lower()
sys_version = int((platform.mac_ver())[0].replace(".", ""))
allowed_version = int(cls.allowed_version.replace(".", ""))
if (system == "darwin") and (sys_version >= allowed_version):
return True
else:
print("\nSorry :(")
print("FAILED. OsX-dock-dodger is only applicable to computers " +
"running OS X {} or higher".format(cls.allowed_version))
return False
dodge = OSXDodger("/Applications/")
dodge.pc_is_macintosh()
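One fragile spot worth flagging: stripping the dots and comparing integers means a hypothetical "11.1" becomes 111, which compares as smaller than 10121 ("10.12.1"), so versions with different digit counts are not compared like-for-like. A tuple comparison is the usual, more robust sketch (illustrative, not part of the original commit):

def version_tuple(version):
    # '10.12.1' -> (10, 12, 1); tuples compare element by element.
    return tuple(int(part) for part in version.split('.'))

assert version_tuple('10.12.1') >= version_tuple('10.9.5')
assert version_tuple('10.6.1') < version_tuple('10.12.1')
assert version_tuple('11.1.0') >= version_tuple('10.12.1')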
|
|
ef8b909beb4de8435c20ed0b45bca9478d476ed8
|
geocode.py
|
geocode.py
|
#! /bin/python3
import csv
import time
from geopy.geocoders.googlev3 import GoogleV3
geocoder = GoogleV3(api_key="AIzaSyAy6XiyZG-6u99q-qacOz-dtT9ILbYzb-4")
with open("../ReadingBusesOrig.csv") as cf:
with open("../out.csv", "a") as cw:
reader = csv.DictReader(cf)
writer = csv.DictWriter(cw, ["latitude", "longitude", "date"])
startrow = 0
for i in range(0, startrow):
row = reader[i]
location = geocoder.geocode(row['Place of Event'], components={
"locality": "Reading",
"country": "GB"
})
print("Resolved Address: " + str(location.address))
print("Latitude: " + str(location.latitude))
print("Longitude: " + str(location.longitude))
print('\n')
writer.writerow({
"latitude": location.latitude, "longitude": location.longitude, "date": row['Accident Date']
})
time.sleep(0.2)
|
Add python script to get coordinates from the csv
|
Add python script to get coordinates from the csv
|
Python
|
mit
|
awensaunders/BuSHAX0rZ,awensaunders/BuSHAX0rZ,awensaunders/BuSHAX0rZ
|
Add python script to get coordinates from the csv
|
#! /bin/python3
import csv
import time
from geopy.geocoders.googlev3 import GoogleV3
geocoder = GoogleV3(api_key="AIzaSyAy6XiyZG-6u99q-qacOz-dtT9ILbYzb-4")
with open("../ReadingBusesOrig.csv") as cf:
with open("../out.csv", "a") as cw:
reader = csv.DictReader(cf)
writer = csv.DictWriter(cw, ["latitude", "longitude", "date"])
startrow = 0
for i in range(0, startrow):
row = reader[i]
location = geocoder.geocode(row['Place of Event'], components={
"locality": "Reading",
"country": "GB"
})
print("Resolved Address: " + str(location.address))
print("Latitude: " + str(location.latitude))
print("Longitude: " + str(location.longitude))
print('\n')
writer.writerow({
"latitude": location.latitude, "longitude": location.longitude, "date": row['Accident Date']
})
time.sleep(0.2)
|
<commit_before><commit_msg>Add python script to get coordinates from the csv<commit_after>
|
#! /bin/python3
import csv
import time
from geopy.geocoders.googlev3 import GoogleV3
geocoder = GoogleV3(api_key="AIzaSyAy6XiyZG-6u99q-qacOz-dtT9ILbYzb-4")
with open("../ReadingBusesOrig.csv") as cf:
with open("../out.csv", "a") as cw:
reader = csv.DictReader(cf)
writer = csv.DictWriter(cw, ["latitude", "longitude", "date"])
startrow = 0
for i in range(0, startrow):
row = reader[i]
location = geocoder.geocode(row['Place of Event'], components={
"locality": "Reading",
"country": "GB"
})
print("Resolved Address: " + str(location.address))
print("Latitude: " + str(location.latitude))
print("Longitude: " + str(location.longitude))
print('\n')
writer.writerow({
"latitude": location.latitude, "longitude": location.longitude, "date": row['Accident Date']
})
time.sleep(0.2)
|
Add python script to get coordinates from the csv#! /bin/python3
import csv
import time
from geopy.geocoders.googlev3 import GoogleV3
geocoder = GoogleV3(api_key="AIzaSyAy6XiyZG-6u99q-qacOz-dtT9ILbYzb-4")
with open("../ReadingBusesOrig.csv") as cf:
with open("../out.csv", "a") as cw:
reader = csv.DictReader(cf)
writer = csv.DictWriter(cw, ["latitude", "longitude", "date"])
startrow = 0
for i in range(0, startrow):
row = reader[i]
location = geocoder.geocode(row['Place of Event'], components={
"locality": "Reading",
"country": "GB"
})
print("Resolved Address: " + str(location.address))
print("Latitude: " + str(location.latitude))
print("Longitude: " + str(location.longitude))
print('\n')
writer.writerow({
"latitude": location.latitude, "longitude": location.longitude, "date": row['Accident Date']
})
time.sleep(0.2)
|
<commit_before><commit_msg>Add python script to get coordinates from the csv<commit_after>#! /bin/python3
import csv
import time
from geopy.geocoders.googlev3 import GoogleV3
geocoder = GoogleV3(api_key="AIzaSyAy6XiyZG-6u99q-qacOz-dtT9ILbYzb-4")
with open("../ReadingBusesOrig.csv") as cf:
with open("../out.csv", "a") as cw:
reader = csv.DictReader(cf)
writer = csv.DictWriter(cw, ["latitude", "longitude", "date"])
startrow = 0
for i in range(0, startrow):
row = reader[i]
location = geocoder.geocode(row['Place of Event'], components={
"locality": "Reading",
"country": "GB"
})
print("Resolved Address: " + str(location.address))
print("Latitude: " + str(location.latitude))
print("Longitude: " + str(location.longitude))
print('\n')
writer.writerow({
"latitude": location.latitude, "longitude": location.longitude, "date": row['Accident Date']
})
time.sleep(0.2)
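Two details in the loop are worth noting: `for i in range(0, startrow)` with startrow = 0 never executes, and csv.DictReader does not support indexing, so as written no rows would be geocoded. A corrected sketch of the presumed resume-from-row intent (self-contained here via an in-memory sample; the column names are taken from the script above):

import csv
import io

# Stand-in for ../ReadingBusesOrig.csv so the sketch runs on its own.
sample = io.StringIO('Place of Event,Accident Date\nStation Rd,2016-01-01\n')
startrow = 0   # number of rows already geocoded in a previous run
reader = csv.DictReader(sample)
for i, row in enumerate(reader):
    if i < startrow:
        continue   # skip rows handled previously
    # ... geocode row['Place of Event'] and write the result as above
    print(row['Place of Event'], row['Accident Date'])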
|
|
68e5bdc3c3a8a59f820ea15e706e85e14f2a654b
|
corehq/apps/locations/management/commands/fix_loc_type_reference.py
|
corehq/apps/locations/management/commands/fix_loc_type_reference.py
|
from optparse import make_option
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import SQLLocation, LocationType
class Command(BaseCommand):
help = "Make "
option_list = (
make_option('--dry_run',
action='store_true',
dest='dry_run',
default=False,
help='Just check what domains have problems'),
make_option('--noinput',
action='store_true',
dest='noinput',
default=False,
help='Skip important confirmation warnings.'),
)
def handle(self, *args, **options):
domains = (SQLLocation.objects
.order_by('domain')
.distinct('domain')
.values_list('domain', flat=True))
for domain in domains:
if has_bad_location_types(domain):
print "{} has bad location types".format(domain)
if not options['dry_run']:
if options['noinput'] or raw_input("fix? (y/N)").lower() == 'y':
fix_domain(domain)
def fix_domain(domain):
locs_w_bad_types = (SQLLocation.objects
.filter(domain=domain)
.exclude(location_type__domain=domain))
print "found {} locs with bad types".format(locs_w_bad_types.count())
bad_types = LocationType.objects.filter(sqllocation__in=locs_w_bad_types).distinct()
assert domain not in bad_types.values_list('domain', flat=True)
bad_to_good = {}
for bad_type in bad_types:
good_type = LocationType.objects.get(domain=domain, code=bad_type.code)
bad_to_good[bad_type.code] = good_type
print "successfully found corresponding loctypes on the domain for each misreferenced loctype"
for loc in locs_w_bad_types:
loc.location_type = bad_to_good[loc.location_type.code]
loc.save()
def has_bad_location_types(domain):
return (SQLLocation.objects
.filter(domain=domain)
.exclude(location_type__domain=domain)
.exists())
|
Add mgmt cmd to fix bad loc-type references
|
Add mgmt cmd to fix bad loc-type references
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add mgmt cmd to fix bad loc-type references
|
from optparse import make_option
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import SQLLocation, LocationType
class Command(BaseCommand):
help = "Make "
option_list = (
make_option('--dry_run',
action='store_true',
dest='dry_run',
default=False,
help='Just check what domains have problems'),
make_option('--noinput',
action='store_true',
dest='noinput',
default=False,
help='Skip important confirmation warnings.'),
)
def handle(self, *args, **options):
domains = (SQLLocation.objects
.order_by('domain')
.distinct('domain')
.values_list('domain', flat=True))
for domain in domains:
if has_bad_location_types(domain):
print "{} has bad location types".format(domain)
if not options['dry_run']:
if options['noinput'] or raw_input("fix? (y/N)").lower() == 'y':
fix_domain(domain)
def fix_domain(domain):
locs_w_bad_types = (SQLLocation.objects
.filter(domain=domain)
.exclude(location_type__domain=domain))
print "found {} locs with bad types".format(locs_w_bad_types.count())
bad_types = LocationType.objects.filter(sqllocation__in=locs_w_bad_types).distinct()
assert domain not in bad_types.values_list('domain', flat=True)
bad_to_good = {}
for bad_type in bad_types:
good_type = LocationType.objects.get(domain=domain, code=bad_type.code)
bad_to_good[bad_type.code] = good_type
print "successfully found corresponding loctypes on the domain for each misreferenced loctype"
for loc in locs_w_bad_types:
loc.location_type = bad_to_good[loc.location_type.code]
loc.save()
def has_bad_location_types(domain):
return (SQLLocation.objects
.filter(domain=domain)
.exclude(location_type__domain=domain)
.exists())
|
<commit_before><commit_msg>Add mgmt cmd to fix bad loc-type references<commit_after>
|
from optparse import make_option
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import SQLLocation, LocationType
class Command(BaseCommand):
help = "Make "
option_list = (
make_option('--dry_run',
action='store_true',
dest='dry_run',
default=False,
help='Just check what domains have problems'),
make_option('--noinput',
action='store_true',
dest='noinput',
default=False,
help='Skip important confirmation warnings.'),
)
def handle(self, *args, **options):
domains = (SQLLocation.objects
.order_by('domain')
.distinct('domain')
.values_list('domain', flat=True))
for domain in domains:
if has_bad_location_types(domain):
print "{} has bad location types".format(domain)
if not options['dry_run']:
if options['noinput'] or raw_input("fix? (y/N)").lower() == 'y':
fix_domain(domain)
def fix_domain(domain):
locs_w_bad_types = (SQLLocation.objects
.filter(domain=domain)
.exclude(location_type__domain=domain))
print "found {} locs with bad types".format(locs_w_bad_types.count())
bad_types = LocationType.objects.filter(sqllocation__in=locs_w_bad_types).distinct()
assert domain not in bad_types.values_list('domain', flat=True)
bad_to_good = {}
for bad_type in bad_types:
good_type = LocationType.objects.get(domain=domain, code=bad_type.code)
bad_to_good[bad_type.code] = good_type
print "successfully found corresponding loctypes on the domain for each misreferenced loctype"
for loc in locs_w_bad_types:
loc.location_type = bad_to_good[loc.location_type.code]
loc.save()
def has_bad_location_types(domain):
return (SQLLocation.objects
.filter(domain=domain)
.exclude(location_type__domain=domain)
.exists())
|
Add mgmt cmd to fix bad loc-type referencesfrom optparse import make_option
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import SQLLocation, LocationType
class Command(BaseCommand):
help = "Make "
option_list = (
make_option('--dry_run',
action='store_true',
dest='dry_run',
default=False,
help='Just check what domains have problems'),
make_option('--noinput',
action='store_true',
dest='noinput',
default=False,
help='Skip important confirmation warnings.'),
)
def handle(self, *args, **options):
domains = (SQLLocation.objects
.order_by('domain')
.distinct('domain')
.values_list('domain', flat=True))
for domain in domains:
if has_bad_location_types(domain):
print "{} has bad location types".format(domain)
if not options['dry_run']:
if options['noinput'] or raw_input("fix? (y/N)").lower() == 'y':
fix_domain(domain)
def fix_domain(domain):
locs_w_bad_types = (SQLLocation.objects
.filter(domain=domain)
.exclude(location_type__domain=domain))
print "found {} locs with bad types".format(locs_w_bad_types.count())
bad_types = LocationType.objects.filter(sqllocation__in=locs_w_bad_types).distinct()
assert domain not in bad_types.values_list('domain', flat=True)
bad_to_good = {}
for bad_type in bad_types:
good_type = LocationType.objects.get(domain=domain, code=bad_type.code)
bad_to_good[bad_type.code] = good_type
print "successfully found corresponding loctypes on the domain for each misreferenced loctype"
for loc in locs_w_bad_types:
loc.location_type = bad_to_good[loc.location_type.code]
loc.save()
def has_bad_location_types(domain):
return (SQLLocation.objects
.filter(domain=domain)
.exclude(location_type__domain=domain)
.exists())
|
<commit_before><commit_msg>Add mgmt cmd to fix bad loc-type references<commit_after>from optparse import make_option
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import SQLLocation, LocationType
class Command(BaseCommand):
help = "Make "
option_list = (
make_option('--dry_run',
action='store_true',
dest='dry_run',
default=False,
help='Just check what domains have problems'),
make_option('--noinput',
action='store_true',
dest='noinput',
default=False,
help='Skip important confirmation warnings.'),
)
def handle(self, *args, **options):
domains = (SQLLocation.objects
.order_by('domain')
.distinct('domain')
.values_list('domain', flat=True))
for domain in domains:
if has_bad_location_types(domain):
print "{} has bad location types".format(domain)
if not options['dry_run']:
if options['noinput'] or raw_input("fix? (y/N)").lower() == 'y':
fix_domain(domain)
def fix_domain(domain):
locs_w_bad_types = (SQLLocation.objects
.filter(domain=domain)
.exclude(location_type__domain=domain))
print "found {} locs with bad types".format(locs_w_bad_types.count())
bad_types = LocationType.objects.filter(sqllocation__in=locs_w_bad_types).distinct()
assert domain not in bad_types.values_list('domain', flat=True)
bad_to_good = {}
for bad_type in bad_types:
good_type = LocationType.objects.get(domain=domain, code=bad_type.code)
bad_to_good[bad_type.code] = good_type
print "successfully found corresponding loctypes on the domain for each misreferenced loctype"
for loc in locs_w_bad_types:
loc.location_type = bad_to_good[loc.location_type.code]
loc.save()
def has_bad_location_types(domain):
return (SQLLocation.objects
.filter(domain=domain)
.exclude(location_type__domain=domain)
.exists())
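The command is invoked like any Django management command — e.g. `python manage.py fix_loc_type_reference --dry_run` to only report affected domains — and the heart of fix_domain is a code-keyed lookup from each stray LocationType to its same-code counterpart on the correct domain. Stripped of the ORM, that mapping step reduces to something like this (purely illustrative stand-in data):

# Hypothetical stand-ins for LocationType rows, keyed by (domain, code).
bad_types = [('other-domain', 'state'), ('other-domain', 'village')]
good_types = {('my-domain', 'state'): 'state-type-1',
              ('my-domain', 'village'): 'village-type-2'}

# Build the bad-code -> good-type mapping the command relies on.
bad_to_good = {code: good_types[('my-domain', code)] for _, code in bad_types}
print(bad_to_good)   # {'state': 'state-type-1', 'village': 'village-type-2'}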
|
|
bf6cfcaa1ac20c1cb65d2d803f64f35026c099f3
|
event.py
|
event.py
|
class Event:
def __init__(self):
self.listeners = []
def connect(self, listener):
self.listeners.append(listener)
def fire(self, *args, **kwargs):
for listener in self.listeners:
listener(*args, **kwargs)
|
Add Event class as well as connect and fire methods.
|
Add Event class as well as connect and fire methods.
|
Python
|
mit
|
bsmukasa/stock_alerter
|
Add Event class as well as connect and fire methods.
|
class Event:
def __init__(self):
self.listeners = []
def connect(self, listener):
self.listeners.append(listener)
def fire(self, *args, **kwargs):
for listener in self.listeners:
listener(*args, **kwargs)
|
<commit_before><commit_msg>Add Event class as well as connect and fire methods.<commit_after>
|
class Event:
def __init__(self):
self.listeners = []
def connect(self, listener):
self.listeners.append(listener)
def fire(self, *args, **kwargs):
for listener in self.listeners:
listener(*args, **kwargs)
|
Add Event class as well as connect and fire methods.class Event:
def __init__(self):
self.listeners = []
def connect(self, listener):
self.listeners.append(listener)
def fire(self, *args, **kwargs):
for listener in self.listeners:
listener(*args, **kwargs)
|
<commit_before><commit_msg>Add Event class as well as connect and fire methods.<commit_after>class Event:
def __init__(self):
self.listeners = []
def connect(self, listener):
self.listeners.append(listener)
def fire(self, *args, **kwargs):
for listener in self.listeners:
listener(*args, **kwargs)
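A minimal usage sketch of the observer pattern above (the listener and its arguments are invented for the example; the class is repeated so the snippet runs on its own):

class Event:
    def __init__(self):
        self.listeners = []
    def connect(self, listener):
        self.listeners.append(listener)
    def fire(self, *args, **kwargs):
        for listener in self.listeners:
            listener(*args, **kwargs)

def on_price_change(symbol, price):
    # Listener: simply report whatever the event passed along.
    print('{} is now {}'.format(symbol, price))

event = Event()
event.connect(on_price_change)
event.fire('GOOG', 1234.5)   # -> GOOG is now 1234.5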
|
|
8d38a72548f3bfc62bac9f49d537fa2cdee7a6df
|
face1.py
|
face1.py
|
"""Sequential, vanilla face detection."""
import datetime
import sys
import cv2
import numpy as np
import util
import cascade
DEVICE = int(sys.argv[1])
WIDTH = int(sys.argv[2])
HEIGHT = int(sys.argv[3])
DURATION = float(sys.argv[4]) # In seconds.
# Create the OpenCV video capture object.
cap = cv2.VideoCapture(DEVICE)
cap.set(3, WIDTH)
cap.set(4, HEIGHT)
# Monitor framerates.
framerate = util.RateTicker((1,5,10))
# Allow view window to be resizeable.
cv2.namedWindow('face detection', cv2.cv.CV_WINDOW_NORMAL)
end = datetime.datetime.now() + datetime.timedelta(seconds=DURATION)
while end > datetime.datetime.now():
hello, image = cap.read()
size = np.shape(image)[:2]
result = list()
for classi in cascade.classifiers:
rects = classi.detectMultiScale(
image,
scaleFactor=1.3,
minNeighbors=3,
minSize=tuple([x/20 for x in size]),
maxSize=tuple([x/2 for x in size]),
)
if len(rects):
for a,b,c,d in rects:
result.append((a,b,c,d, cascade.colors[classi]))
for x1, y1, x2, y2, color in result:
cv2.rectangle(
image,
(x1, y1), (x1+x2, y1+y2),
color=color,
thickness=2,
)
scale = 0.85
for org, text in (
((20, int(30*scale)), '%dx%d'%(size[1], size[0])),
((20, int(60*scale)), '%.2f, %.2f, %.2f'%framerate.tick()),
):
cv2.putText(
image,
text=text,
org=org,
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=scale,
color=(0,255,0),
thickness=2,
)
cv2.imshow('face detection', image)
cv2.waitKey(1)
# The end.
|
Add vanilla sequential face detection example.
|
Add vanilla sequential face detection example.
|
Python
|
mit
|
vmlaker/sherlock
|
Add vanilla sequential face detection example.
|
"""Sequential, vanilla face detection."""
import datetime
import sys
import cv2
import numpy as np
import util
import cascade
DEVICE = int(sys.argv[1])
WIDTH = int(sys.argv[2])
HEIGHT = int(sys.argv[3])
DURATION = float(sys.argv[4]) # In seconds.
# Create the OpenCV video capture object.
cap = cv2.VideoCapture(DEVICE)
cap.set(3, WIDTH)
cap.set(4, HEIGHT)
# Monitor framerates.
framerate = util.RateTicker((1,5,10))
# Allow view window to be resizeable.
cv2.namedWindow('face detection', cv2.cv.CV_WINDOW_NORMAL)
end = datetime.datetime.now() + datetime.timedelta(seconds=DURATION)
while end > datetime.datetime.now():
hello, image = cap.read()
size = np.shape(image)[:2]
result = list()
for classi in cascade.classifiers:
rects = classi.detectMultiScale(
image,
scaleFactor=1.3,
minNeighbors=3,
minSize=tuple([x/20 for x in size]),
maxSize=tuple([x/2 for x in size]),
)
if len(rects):
for a,b,c,d in rects:
result.append((a,b,c,d, cascade.colors[classi]))
for x1, y1, x2, y2, color in result:
cv2.rectangle(
image,
(x1, y1), (x1+x2, y1+y2),
color=color,
thickness=2,
)
scale = 0.85
for org, text in (
((20, int(30*scale)), '%dx%d'%(size[1], size[0])),
((20, int(60*scale)), '%.2f, %.2f, %.2f'%framerate.tick()),
):
cv2.putText(
image,
text=text,
org=org,
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=scale,
color=(0,255,0),
thickness=2,
)
cv2.imshow('face detection', image)
cv2.waitKey(1)
# The end.
|
<commit_before><commit_msg>Add vanilla sequential face detection example.<commit_after>
|
"""Sequential, vanilla face detection."""
import datetime
import sys
import cv2
import numpy as np
import util
import cascade
DEVICE = int(sys.argv[1])
WIDTH = int(sys.argv[2])
HEIGHT = int(sys.argv[3])
DURATION = float(sys.argv[4]) # In seconds.
# Create the OpenCV video capture object.
cap = cv2.VideoCapture(DEVICE)
cap.set(3, WIDTH)
cap.set(4, HEIGHT)
# Monitor framerates.
framerate = util.RateTicker((1,5,10))
# Allow view window to be resizeable.
cv2.namedWindow('face detection', cv2.cv.CV_WINDOW_NORMAL)
end = datetime.datetime.now() + datetime.timedelta(seconds=DURATION)
while end > datetime.datetime.now():
hello, image = cap.read()
size = np.shape(image)[:2]
result = list()
for classi in cascade.classifiers:
rects = classi.detectMultiScale(
image,
scaleFactor=1.3,
minNeighbors=3,
minSize=tuple([x/20 for x in size]),
maxSize=tuple([x/2 for x in size]),
)
if len(rects):
for a,b,c,d in rects:
result.append((a,b,c,d, cascade.colors[classi]))
for x1, y1, x2, y2, color in result:
cv2.rectangle(
image,
(x1, y1), (x1+x2, y1+y2),
color=color,
thickness=2,
)
scale = 0.85
for org, text in (
((20, int(30*scale)), '%dx%d'%(size[1], size[0])),
((20, int(60*scale)), '%.2f, %.2f, %.2f'%framerate.tick()),
):
cv2.putText(
image,
text=text,
org=org,
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=scale,
color=(0,255,0),
thickness=2,
)
cv2.imshow('face detection', image)
cv2.waitKey(1)
# The end.
|
Add vanilla sequential face detection example."""Sequential, vanilla face detection."""
import datetime
import sys
import cv2
import numpy as np
import util
import cascade
DEVICE = int(sys.argv[1])
WIDTH = int(sys.argv[2])
HEIGHT = int(sys.argv[3])
DURATION = float(sys.argv[4]) # In seconds.
# Create the OpenCV video capture object.
cap = cv2.VideoCapture(DEVICE)
cap.set(3, WIDTH)
cap.set(4, HEIGHT)
# Monitor framerates.
framerate = util.RateTicker((1,5,10))
# Allow view window to be resizeable.
cv2.namedWindow('face detection', cv2.cv.CV_WINDOW_NORMAL)
end = datetime.datetime.now() + datetime.timedelta(seconds=DURATION)
while end > datetime.datetime.now():
hello, image = cap.read()
size = np.shape(image)[:2]
result = list()
for classi in cascade.classifiers:
rects = classi.detectMultiScale(
image,
scaleFactor=1.3,
minNeighbors=3,
minSize=tuple([x/20 for x in size]),
maxSize=tuple([x/2 for x in size]),
)
if len(rects):
for a,b,c,d in rects:
result.append((a,b,c,d, cascade.colors[classi]))
for x1, y1, x2, y2, color in result:
cv2.rectangle(
image,
(x1, y1), (x1+x2, y1+y2),
color=color,
thickness=2,
)
scale = 0.85
for org, text in (
((20, int(30*scale)), '%dx%d'%(size[1], size[0])),
((20, int(60*scale)), '%.2f, %.2f, %.2f'%framerate.tick()),
):
cv2.putText(
image,
text=text,
org=org,
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=scale,
color=(0,255,0),
thickness=2,
)
cv2.imshow('face detection', image)
cv2.waitKey(1)
# The end.
|
<commit_before><commit_msg>Add vanilla sequential face detection example.<commit_after>"""Sequential, vanilla face detection."""
import datetime
import sys
import cv2
import numpy as np
import util
import cascade
DEVICE = int(sys.argv[1])
WIDTH = int(sys.argv[2])
HEIGHT = int(sys.argv[3])
DURATION = float(sys.argv[4]) # In seconds.
# Create the OpenCV video capture object.
cap = cv2.VideoCapture(DEVICE)
cap.set(3, WIDTH)
cap.set(4, HEIGHT)
# Monitor framerates.
framerate = util.RateTicker((1,5,10))
# Allow view window to be resizeable.
cv2.namedWindow('face detection', cv2.cv.CV_WINDOW_NORMAL)
end = datetime.datetime.now() + datetime.timedelta(seconds=DURATION)
while end > datetime.datetime.now():
hello, image = cap.read()
size = np.shape(image)[:2]
result = list()
for classi in cascade.classifiers:
rects = classi.detectMultiScale(
image,
scaleFactor=1.3,
minNeighbors=3,
minSize=tuple([x/20 for x in size]),
maxSize=tuple([x/2 for x in size]),
)
if len(rects):
for a,b,c,d in rects:
result.append((a,b,c,d, cascade.colors[classi]))
for x1, y1, x2, y2, color in result:
cv2.rectangle(
image,
(x1, y1), (x1+x2, y1+y2),
color=color,
thickness=2,
)
scale = 0.85
for org, text in (
((20, int(30*scale)), '%dx%d'%(size[1], size[0])),
((20, int(60*scale)), '%.2f, %.2f, %.2f'%framerate.tick()),
):
cv2.putText(
image,
text=text,
org=org,
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=scale,
color=(0,255,0),
thickness=2,
)
cv2.imshow('face detection', image)
cv2.waitKey(1)
# The end.
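The script reads its configuration from positional arguments — capture device index, frame width, frame height, and run duration in seconds — so a typical invocation (device numbers vary per machine, and the cv2.cv namespace ties the code to OpenCV 2.x) would be:

python face1.py 0 640 480 30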
|
|
6988dab0256ce6b6e0d5cbb4b3ac06727956ee37
|
emission/analysis/point_features.py
|
emission/analysis/point_features.py
|
# Standard imports
import math
import logging
import numpy as np
import emission.core.common as ec
def calSpeed(point1, point2):
distanceDelta = ec.calDistance([point1.mLongitude, point1.mLatitude], [point2.mLongitude, point2.mLatitude])
timeDelta = point2.mTime - point1.mTime
# print "Distance delta = %s and time delta = %s" % (distanceDelta, timeDelta)
# assert(timeDelta != 0)
if (timeDelta == 0):
logging.debug("timeDelta = 0, distanceDelta = %s, returning speed = 0")
assert(distanceDelta < 0.01)
return 0
# TODO: Once we perform the conversions from ms to secs as part of the
# usercache -> timeseries switch, we need to remove this division by 1000
return distanceDelta/(float(timeDelta)/1000)
|
Create a new file to calculate features from sets of points
|
Create a new file to calculate features from sets of points
|
Python
|
bsd-3-clause
|
shankari/e-mission-server,shankari/e-mission-server,sunil07t/e-mission-server,e-mission/e-mission-server,yw374cornell/e-mission-server,joshzarrabi/e-mission-server,joshzarrabi/e-mission-server,shankari/e-mission-server,yw374cornell/e-mission-server,e-mission/e-mission-server,e-mission/e-mission-server,yw374cornell/e-mission-server,sunil07t/e-mission-server,e-mission/e-mission-server,sunil07t/e-mission-server,yw374cornell/e-mission-server,shankari/e-mission-server,joshzarrabi/e-mission-server,joshzarrabi/e-mission-server,sunil07t/e-mission-server
|
Create a new file to calculate features from sets of points
|
# Standard imports
import math
import logging
import numpy as np
import emission.core.common as ec
def calSpeed(point1, point2):
distanceDelta = ec.calDistance([point1.mLongitude, point1.mLatitude], [point2.mLongitude, point2.mLatitude])
timeDelta = point2.mTime - point1.mTime
# print "Distance delta = %s and time delta = %s" % (distanceDelta, timeDelta)
# assert(timeDelta != 0)
if (timeDelta == 0):
logging.debug("timeDelta = 0, distanceDelta = %s, returning speed = 0")
assert(distanceDelta < 0.01)
return 0
# TODO: Once we perform the conversions from ms to secs as part of the
# usercache -> timeseries switch, we need to remove this division by 1000
return distanceDelta/(float(timeDelta)/1000)
|
<commit_before><commit_msg>Create a new file to calculate features from sets of points<commit_after>
|
# Standard imports
import math
import logging
import numpy as np
import emission.core.common as ec
def calSpeed(point1, point2):
distanceDelta = ec.calDistance([point1.mLongitude, point1.mLatitude], [point2.mLongitude, point2.mLatitude])
timeDelta = point2.mTime - point1.mTime
# print "Distance delta = %s and time delta = %s" % (distanceDelta, timeDelta)
# assert(timeDelta != 0)
if (timeDelta == 0):
logging.debug("timeDelta = 0, distanceDelta = %s, returning speed = 0")
assert(distanceDelta < 0.01)
return 0
# TODO: Once we perform the conversions from ms to secs as part of the
# usercache -> timeseries switch, we need to remove this division by 1000
return distanceDelta/(float(timeDelta)/1000)
|
Create a new file to calculate features from sets of points# Standard imports
import math
import logging
import numpy as np
import emission.core.common as ec
def calSpeed(point1, point2):
distanceDelta = ec.calDistance([point1.mLongitude, point1.mLatitude], [point2.mLongitude, point2.mLatitude])
timeDelta = point2.mTime - point1.mTime
# print "Distance delta = %s and time delta = %s" % (distanceDelta, timeDelta)
# assert(timeDelta != 0)
if (timeDelta == 0):
logging.debug("timeDelta = 0, distanceDelta = %s, returning speed = 0")
assert(distanceDelta < 0.01)
return 0
# TODO: Once we perform the conversions from ms to secs as part of the
# usercache -> timeseries switch, we need to remove this division by 1000
return distanceDelta/(float(timeDelta)/1000)
|
<commit_before><commit_msg>Create a new file to calculate features from sets of points<commit_after># Standard imports
import math
import logging
import numpy as np
import emission.core.common as ec
def calSpeed(point1, point2):
distanceDelta = ec.calDistance([point1.mLongitude, point1.mLatitude], [point2.mLongitude, point2.mLatitude])
timeDelta = point2.mTime - point1.mTime
# print "Distance delta = %s and time delta = %s" % (distanceDelta, timeDelta)
# assert(timeDelta != 0)
if (timeDelta == 0):
logging.debug("timeDelta = 0, distanceDelta = %s, returning speed = 0")
assert(distanceDelta < 0.01)
return 0
# TODO: Once we perform the conversions from ms to secs as part of the
# usercache -> timeseries switch, we need to remove this division by 1000
return distanceDelta/(float(timeDelta)/1000)
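Note in passing that the logging.debug call above never fills its %s placeholder, so the distance value is not actually printed. For a self-contained check of the speed formula's shape, a made-up point type and a plain Euclidean stand-in for emission.core.common.calDistance are enough (the real helper computes a geographic distance):

import math
from collections import namedtuple

Point = namedtuple('Point', ['mLongitude', 'mLatitude', 'mTime'])

def cal_distance(a, b):
    # Stand-in for ec.calDistance: Euclidean distance is enough to show
    # the structure of the speed computation.
    return math.hypot(a[0] - b[0], a[1] - b[1])

def cal_speed(p1, p2):
    distance_delta = cal_distance((p1.mLongitude, p1.mLatitude),
                                  (p2.mLongitude, p2.mLatitude))
    time_delta = p2.mTime - p1.mTime   # stored in milliseconds upstream
    if time_delta == 0:
        return 0
    return distance_delta / (float(time_delta) / 1000)

p1 = Point(0.0, 0.0, 0)
p2 = Point(3.0, 4.0, 2000)      # 2000 ms later
print(cal_speed(p1, p2))        # -> 2.5 units per second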
|
|
a11d33f5e1df23f044cac709ebbbb5d369d0e6ca
|
tests/test_add_language/test_update_language_list.py
|
tests/test_add_language/test_update_language_list.py
|
# test_update_language_list
from __future__ import unicode_literals
import json
import os
import os.path
import nose.tools as nose
import yvs.shared as yvs
import utilities.add_language as add_lang
from tests.test_add_language import set_up, tear_down
from tests.test_add_language.decorators import redirect_stdout
@nose.with_setup(set_up, tear_down)
@redirect_stdout
def test_update_language_list_add(out):
"""should add new languages to language list"""
add_lang.update_language_list('kln', 'Klingon')
langs_path = os.path.join(yvs.PACKAGED_DATA_DIR_PATH, 'languages.json')
with open(langs_path, 'r') as langs_file:
langs = json.load(langs_file)
klingon_lang = None
for lang in langs:
if lang['id'] == 'kln':
klingon_lang = lang
nose.assert_is_not_none(klingon_lang)
nose.assert_equal(klingon_lang['name'], 'Klingon')
|
Add first test for update_language_list function
|
Add first test for update_language_list function
|
Python
|
mit
|
caleb531/youversion-suggest,caleb531/youversion-suggest
|
Add first test for update_language_list function
|
# test_update_language_list
from __future__ import unicode_literals
import json
import os
import os.path
import nose.tools as nose
import yvs.shared as yvs
import utilities.add_language as add_lang
from tests.test_add_language import set_up, tear_down
from tests.test_add_language.decorators import redirect_stdout
@nose.with_setup(set_up, tear_down)
@redirect_stdout
def test_update_language_list_add(out):
"""should add new languages to language list"""
add_lang.update_language_list('kln', 'Klingon')
langs_path = os.path.join(yvs.PACKAGED_DATA_DIR_PATH, 'languages.json')
with open(langs_path, 'r') as langs_file:
langs = json.load(langs_file)
klingon_lang = None
for lang in langs:
if lang['id'] == 'kln':
klingon_lang = lang
nose.assert_is_not_none(klingon_lang)
nose.assert_equal(klingon_lang['name'], 'Klingon')
|
<commit_before><commit_msg>Add first test for update_language_list function<commit_after>
|
# test_update_language_list
from __future__ import unicode_literals
import json
import os
import os.path
import nose.tools as nose
import yvs.shared as yvs
import utilities.add_language as add_lang
from tests.test_add_language import set_up, tear_down
from tests.test_add_language.decorators import redirect_stdout
@nose.with_setup(set_up, tear_down)
@redirect_stdout
def test_update_language_list_add(out):
"""should add new languages to language list"""
add_lang.update_language_list('kln', 'Klingon')
langs_path = os.path.join(yvs.PACKAGED_DATA_DIR_PATH, 'languages.json')
with open(langs_path, 'r') as langs_file:
langs = json.load(langs_file)
klingon_lang = None
for lang in langs:
if lang['id'] == 'kln':
klingon_lang = lang
nose.assert_is_not_none(klingon_lang)
nose.assert_equal(klingon_lang['name'], 'Klingon')
|
Add first test for update_language_list function# test_update_language_list
from __future__ import unicode_literals
import json
import os
import os.path
import nose.tools as nose
import yvs.shared as yvs
import utilities.add_language as add_lang
from tests.test_add_language import set_up, tear_down
from tests.test_add_language.decorators import redirect_stdout
@nose.with_setup(set_up, tear_down)
@redirect_stdout
def test_update_language_list_add(out):
"""should add new languages to language list"""
add_lang.update_language_list('kln', 'Klingon')
langs_path = os.path.join(yvs.PACKAGED_DATA_DIR_PATH, 'languages.json')
with open(langs_path, 'r') as langs_file:
langs = json.load(langs_file)
klingon_lang = None
for lang in langs:
if lang['id'] == 'kln':
klingon_lang = lang
nose.assert_is_not_none(klingon_lang)
nose.assert_equal(klingon_lang['name'], 'Klingon')
|
<commit_before><commit_msg>Add first test for update_language_list function<commit_after># test_update_language_list
from __future__ import unicode_literals
import json
import os
import os.path
import nose.tools as nose
import yvs.shared as yvs
import utilities.add_language as add_lang
from tests.test_add_language import set_up, tear_down
from tests.test_add_language.decorators import redirect_stdout
@nose.with_setup(set_up, tear_down)
@redirect_stdout
def test_update_language_list_add(out):
"""should add new languages to language list"""
add_lang.update_language_list('kln', 'Klingon')
langs_path = os.path.join(yvs.PACKAGED_DATA_DIR_PATH, 'languages.json')
with open(langs_path, 'r') as langs_file:
langs = json.load(langs_file)
klingon_lang = None
for lang in langs:
if lang['id'] == 'kln':
klingon_lang = lang
nose.assert_is_not_none(klingon_lang)
nose.assert_equal(klingon_lang['name'], 'Klingon')
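Assuming the package layout implied by the imports, the test is typically run through nose from the repository root, e.g. `nosetests tests/test_add_language/test_update_language_list.py`; the set_up/tear_down fixtures are presumably responsible for pointing PACKAGED_DATA_DIR_PATH at a scratch copy of languages.json.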
|
|
bd5fc565c5106d609a7dc65a663515920e29caa4
|
altair/vegalite/v2/examples/multiple_marks.py
|
altair/vegalite/v2/examples/multiple_marks.py
|
"""
Multiple Marks
==============
This example demonstrates creating a single chart with multiple markers
representing the same data.
"""
import altair as alt
from vega_datasets import data
stocks = data.stocks()
chart = alt.LayerChart(stocks).encode(
x='date:T',
y='price:Q',
color='symbol:N'
).add_layers(
alt.Chart().mark_point(),
alt.Chart().mark_line()
)
|
Add example of multi-layer chart
|
Add example of multi-layer chart
|
Python
|
bsd-3-clause
|
jakevdp/altair,altair-viz/altair,ellisonbg/altair
|
Add example of multi-layer chart
|
"""
Multiple Marks
==============
This example demonstrates creating a single chart with multiple markers
representing the same data.
"""
import altair as alt
from vega_datasets import data
stocks = data.stocks()
chart = alt.LayerChart(stocks).encode(
x='date:T',
y='price:Q',
color='symbol:N'
).add_layers(
alt.Chart().mark_point(),
alt.Chart().mark_line()
)
|
<commit_before><commit_msg>Add example of multi-layer chart<commit_after>
|
"""
Multiple Marks
==============
This example demonstrates creating a single chart with multiple markers
representing the same data.
"""
import altair as alt
from vega_datasets import data
stocks = data.stocks()
chart = alt.LayerChart(stocks).encode(
x='date:T',
y='price:Q',
color='symbol:N'
).add_layers(
alt.Chart().mark_point(),
alt.Chart().mark_line()
)
|
Add example of multi-layer chart"""
Multiple Marks
==============
This example demonstrates creating a single chart with multiple markers
representing the same data.
"""
import altair as alt
from vega_datasets import data
stocks = data.stocks()
chart = alt.LayerChart(stocks).encode(
x='date:T',
y='price:Q',
color='symbol:N'
).add_layers(
alt.Chart().mark_point(),
alt.Chart().mark_line()
)
|
<commit_before><commit_msg>Add example of multi-layer chart<commit_after>"""
Multiple Marks
==============
This example demonstrates creating a single chart with multiple markers
representing the same data.
"""
import altair as alt
from vega_datasets import data
stocks = data.stocks()
chart = alt.LayerChart(stocks).encode(
x='date:T',
y='price:Q',
color='symbol:N'
).add_layers(
alt.Chart().mark_point(),
alt.Chart().mark_line()
)
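Depending on the exact Altair 2.x revision installed, the same layered chart is more commonly written with a shared base chart and the layering operator rather than LayerChart.add_layers (hedged: the layering API moved around during 2.x development):

import altair as alt
from vega_datasets import data

stocks = data.stocks()

# Shared data and encodings; each mark_* call derives one layer from it.
base = alt.Chart(stocks).encode(
    x='date:T',
    y='price:Q',
    color='symbol:N'
)
chart = base.mark_point() + base.mark_line()   # '+' layers the two charts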
|
|
e7c6a1d5ca6c6ebd85976698e8c00ca761747b59
|
apps/simple_compiler.py
|
apps/simple_compiler.py
|
from apps.decorators import on_command
from apps.slackutils import cat_token
from subprocess import check_output, CalledProcessError, STDOUT
import os
import re
@on_command(['!์ปดํ์ผ'])
def run(robot, channel, tokens, user, command):
'''C, C++, Python ์์ค ์คํ์์ผ๋๋ฆผ'''
msg = ''
if len(tokens) < 2:
return channel, '์์ธํ ์ฌ์ฉ๋ฐฉ๋ฒ์...'
if tokens[0].lower() in ['c', 'c++']:
source = cat_token(tokens, 1)
source = re.sub('&', '&', source)
source = re.sub('<', '<', source)
source = re.sub('>', '>', source)
source = re.sub(r'(#.*>)', r'\1\n', source)
if tokens[0].lower() == 'c':
open(user + '.c', 'w').write(source)
msg += check_output(['gcc', user + '.c', '-o', user + '.out']).decode('utf-8')
os.remove(user + '.c')
else:
open(user + '.cpp', 'w').write(source)
try:
msg += check_output(['g++', '-std=c++11' ,user + '.cpp', '-o', user + '.out'], stderr=STDOUT).decode('utf-8')
except CalledProcessError as e:
msg += e.output.decode('utf-8')
return channel, msg
os.remove(user + '.cpp')
try:
msg += check_output(['./' + user + '.out']).decode('utf-8')
except CalledProcessError as e:
msg += '> :warning: WARNING : Your program returned exit status `' + str(e.args[0]) +'`\n'
msg += e.output.decode('utf-8')
os.remove(user + '.out')
return channel, msg
|
ADD FEATURE : simple C/C++ compiler
|
ADD FEATURE : simple C/C++ compiler
|
Python
|
mit
|
dgu-dna/DNA-Bot
|
ADD FEATURE : simple C/C++ compiler
|
from apps.decorators import on_command
from apps.slackutils import cat_token
from subprocess import check_output, CalledProcessError, STDOUT
import os
import re
@on_command(['!์ปดํ์ผ'])
def run(robot, channel, tokens, user, command):
'''C, C++, Python ์์ค ์คํ์์ผ๋๋ฆผ'''
msg = ''
if len(tokens) < 2:
return channel, '์์ธํ ์ฌ์ฉ๋ฐฉ๋ฒ์...'
if tokens[0].lower() in ['c', 'c++']:
source = cat_token(tokens, 1)
source = re.sub('&', '&', source)
source = re.sub('<', '<', source)
source = re.sub('>', '>', source)
source = re.sub(r'(#.*>)', r'\1\n', source)
if tokens[0].lower() == 'c':
open(user + '.c', 'w').write(source)
msg += check_output(['gcc', user + '.c', '-o', user + '.out']).decode('utf-8')
os.remove(user + '.c')
else:
open(user + '.cpp', 'w').write(source)
try:
msg += check_output(['g++', '-std=c++11' ,user + '.cpp', '-o', user + '.out'], stderr=STDOUT).decode('utf-8')
except CalledProcessError as e:
msg += e.output.decode('utf-8')
return channel, msg
os.remove(user + '.cpp')
try:
msg += check_output(['./' + user + '.out']).decode('utf-8')
except CalledProcessError as e:
msg += '> :warning: WARNING : Your program returned exit status `' + str(e.args[0]) +'`\n'
msg += e.output.decode('utf-8')
os.remove(user + '.out')
return channel, msg
|
<commit_before><commit_msg>ADD FEATURE : simple C/C++ compiler<commit_after>
|
from apps.decorators import on_command
from apps.slackutils import cat_token
from subprocess import check_output, CalledProcessError, STDOUT
import os
import re
@on_command(['!compile'])
def run(robot, channel, tokens, user, command):
'''Compiles and runs C, C++, Python source code'''
msg = ''
if len(tokens) < 2:
return channel, 'For detailed usage instructions...'
if tokens[0].lower() in ['c', 'c++']:
source = cat_token(tokens, 1)
source = re.sub('&', '&', source)
source = re.sub('<', '<', source)
source = re.sub('>', '>', source)
source = re.sub(r'(#.*>)', r'\1\n', source)
if tokens[0].lower() == 'c':
open(user + '.c', 'w').write(source)
msg += check_output(['gcc', user + '.c', '-o', user + '.out']).decode('utf-8')
os.remove(user + '.c')
else:
open(user + '.cpp', 'w').write(source)
try:
msg += check_output(['g++', '-std=c++11' ,user + '.cpp', '-o', user + '.out'], stderr=STDOUT).decode('utf-8')
except CalledProcessError as e:
msg += e.output.decode('utf-8')
return channel, msg
os.remove(user + '.cpp')
try:
msg += check_output(['./' + user + '.out']).decode('utf-8')
except CalledProcessError as e:
msg += '> :warning: WARNING : Your program returned exit status `' + str(e.args[0]) +'`\n'
msg += e.output.decode('utf-8')
os.remove(user + '.out')
return channel, msg
|
ADD FEATURE : simple C/C++ compilerfrom apps.decorators import on_command
from apps.slackutils import cat_token
from subprocess import check_output, CalledProcessError, STDOUT
import os
import re
@on_command(['!compile'])
def run(robot, channel, tokens, user, command):
'''Compiles and runs C, C++, Python source code'''
msg = ''
if len(tokens) < 2:
return channel, 'For detailed usage instructions...'
if tokens[0].lower() in ['c', 'c++']:
source = cat_token(tokens, 1)
source = re.sub('&', '&', source)
source = re.sub('<', '<', source)
source = re.sub('>', '>', source)
source = re.sub(r'(#.*>)', r'\1\n', source)
if tokens[0].lower() == 'c':
open(user + '.c', 'w').write(source)
msg += check_output(['gcc', user + '.c', '-o', user + '.out']).decode('utf-8')
os.remove(user + '.c')
else:
open(user + '.cpp', 'w').write(source)
try:
msg += check_output(['g++', '-std=c++11' ,user + '.cpp', '-o', user + '.out'], stderr=STDOUT).decode('utf-8')
except CalledProcessError as e:
msg += e.output.decode('utf-8')
return channel, msg
os.remove(user + '.cpp')
try:
msg += check_output(['./' + user + '.out']).decode('utf-8')
except CalledProcessError as e:
msg += '> :warning: WARNING : Your program returned exit status `' + str(e.args[0]) +'`\n'
msg += e.output.decode('utf-8')
os.remove(user + '.out')
return channel, msg
|
<commit_before><commit_msg>ADD FEATURE : simple C/C++ compiler<commit_after>from apps.decorators import on_command
from apps.slackutils import cat_token
from subprocess import check_output, CalledProcessError, STDOUT
import os
import re
@on_command(['!compile'])
def run(robot, channel, tokens, user, command):
'''Compiles and runs C, C++, Python source code'''
msg = ''
if len(tokens) < 2:
return channel, 'For detailed usage instructions...'
if tokens[0].lower() in ['c', 'c++']:
source = cat_token(tokens, 1)
source = re.sub('&', '&', source)
source = re.sub('<', '<', source)
source = re.sub('>', '>', source)
source = re.sub(r'(#.*>)', r'\1\n', source)
if tokens[0].lower() == 'c':
open(user + '.c', 'w').write(source)
msg += check_output(['gcc', user + '.c', '-o', user + '.out']).decode('utf-8')
os.remove(user + '.c')
else:
open(user + '.cpp', 'w').write(source)
try:
msg += check_output(['g++', '-std=c++11' ,user + '.cpp', '-o', user + '.out'], stderr=STDOUT).decode('utf-8')
except CalledProcessError as e:
msg += e.output.decode('utf-8')
return channel, msg
os.remove(user + '.cpp')
try:
msg += check_output(['./' + user + '.out']).decode('utf-8')
except CalledProcessError as e:
msg += '> :warning: WARNING : Your program returned exit status `' + str(e.args[0]) +'`\n'
msg += e.output.decode('utf-8')
os.remove(user + '.out')
return channel, msg
|
|
852ecb67e11f4ad9662c832d8be5f5bf1b8327b1
|
pyface/action/tests/test_group.py
|
pyface/action/tests/test_group.py
|
from __future__ import absolute_import
from traits.testing.unittest_tools import UnittestTools, unittest
from ...image_cache import ImageCache
from ...window import Window
from ..action import Action
from ..action_item import ActionItem
from ..group import Group
class TestActionItem(unittest.TestCase, UnittestTools):
def setUp(self):
# test whether function is called by updating list
# XXX should really use mock
self.memo = []
def perform():
self.memo.append('called')
self.perform = perform
self.action = Action(name='Test', on_perform=perform)
self.action_item = ActionItem(action=self.action)
def test_init_action_item(self):
group = Group(self.action_item)
self.assertEqual(group.items, [self.action_item])
def test_init_action(self):
group = Group(self.action)
self.assertEqual(len(group.items), 1)
self.assertEqual(group.items[0].action, self.action)
def test_init_callable(self):
group = Group(self.perform)
self.assertEqual(len(group.items), 1)
self.assertEqual(group.items[0].action.on_perform, self.perform)
self.assertEqual(group.items[0].action.name, "Perform")
|
Add tests for action groups.
|
Add tests for action groups.
|
Python
|
bsd-3-clause
|
geggo/pyface,brett-patterson/pyface,geggo/pyface
|
Add tests for action groups.
|
from __future__ import absolute_import
from traits.testing.unittest_tools import UnittestTools, unittest
from ...image_cache import ImageCache
from ...window import Window
from ..action import Action
from ..action_item import ActionItem
from ..group import Group
class TestActionItem(unittest.TestCase, UnittestTools):
def setUp(self):
# test whether function is called by updating list
# XXX should really use mock
self.memo = []
def perform():
self.memo.append('called')
self.perform = perform
self.action = Action(name='Test', on_perform=perform)
self.action_item = ActionItem(action=self.action)
def test_init_action_item(self):
group = Group(self.action_item)
self.assertEqual(group.items, [self.action_item])
def test_init_action(self):
group = Group(self.action)
self.assertEqual(len(group.items), 1)
self.assertEqual(group.items[0].action, self.action)
def test_init_callable(self):
group = Group(self.perform)
self.assertEqual(len(group.items), 1)
self.assertEqual(group.items[0].action.on_perform, self.perform)
self.assertEqual(group.items[0].action.name, "Perform")
|
<commit_before><commit_msg>Add tests for action groups.<commit_after>
|
from __future__ import absolute_import
from traits.testing.unittest_tools import UnittestTools, unittest
from ...image_cache import ImageCache
from ...window import Window
from ..action import Action
from ..action_item import ActionItem
from ..group import Group
class TestActionItem(unittest.TestCase, UnittestTools):
def setUp(self):
# test whether function is called by updating list
# XXX should really use mock
self.memo = []
def perform():
self.memo.append('called')
self.perform = perform
self.action = Action(name='Test', on_perform=perform)
self.action_item = ActionItem(action=self.action)
def test_init_action_item(self):
group = Group(self.action_item)
self.assertEqual(group.items, [self.action_item])
def test_init_action(self):
group = Group(self.action)
self.assertEqual(len(group.items), 1)
self.assertEqual(group.items[0].action, self.action)
def test_init_callable(self):
group = Group(self.perform)
self.assertEqual(len(group.items), 1)
self.assertEqual(group.items[0].action.on_perform, self.perform)
self.assertEqual(group.items[0].action.name, "Perform")
|
Add tests for action groups.from __future__ import absolute_import
from traits.testing.unittest_tools import UnittestTools, unittest
from ...image_cache import ImageCache
from ...window import Window
from ..action import Action
from ..action_item import ActionItem
from ..group import Group
class TestActionItem(unittest.TestCase, UnittestTools):
def setUp(self):
# test whether function is called by updating list
# XXX should really use mock
self.memo = []
def perform():
self.memo.append('called')
self.perform = perform
self.action = Action(name='Test', on_perform=perform)
self.action_item = ActionItem(action=self.action)
def test_init_action_item(self):
group = Group(self.action_item)
self.assertEqual(group.items, [self.action_item])
def test_init_action(self):
group = Group(self.action)
self.assertEqual(len(group.items), 1)
self.assertEqual(group.items[0].action, self.action)
def test_init_callable(self):
group = Group(self.perform)
self.assertEqual(len(group.items), 1)
self.assertEqual(group.items[0].action.on_perform, self.perform)
self.assertEqual(group.items[0].action.name, "Perform")
|
<commit_before><commit_msg>Add tests for action groups.<commit_after>from __future__ import absolute_import
from traits.testing.unittest_tools import UnittestTools, unittest
from ...image_cache import ImageCache
from ...window import Window
from ..action import Action
from ..action_item import ActionItem
from ..group import Group
class TestActionItem(unittest.TestCase, UnittestTools):
def setUp(self):
# test whether function is called by updating list
# XXX should really use mock
self.memo = []
def perform():
self.memo.append('called')
self.perform = perform
self.action = Action(name='Test', on_perform=perform)
self.action_item = ActionItem(action=self.action)
def test_init_action_item(self):
group = Group(self.action_item)
self.assertEqual(group.items, [self.action_item])
def test_init_action(self):
group = Group(self.action)
self.assertEqual(len(group.items), 1)
self.assertEqual(group.items[0].action, self.action)
def test_init_callable(self):
group = Group(self.perform)
self.assertEqual(len(group.items), 1)
self.assertEqual(group.items[0].action.on_perform, self.perform)
self.assertEqual(group.items[0].action.name, "Perform")
|
|
76f636d38d6f3947efe6d58eacbd655027fc1a0e
|
hooks/post_gen_project.py
|
hooks/post_gen_project.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
# Use symlink to handle the difference between project name and role name.
os.symlink('../../{{ cookiecutter.role_project_name }}', 'roles/{{ cookiecutter.role_name }}')
|
Add post-hook to handle the difference between project name and role name.
|
Add post-hook to handle the difference between project name and role name.
|
Python
|
mit
|
FGtatsuro/cookiecutter-ansible-role,FGtatsuro/cookiecutter-ansible-role
|
Add post-hook to handle the difference between project name and role name.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
# Use symlink to handle the difference between project name and role name.
os.symlink('../../{{ cookiecutter.role_project_name }}', 'roles/{{ cookiecutter.role_name }}')
|
<commit_before><commit_msg>Add post-hook to handle the difference between project name and role name.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
# Use symlink to handle the difference between project name and role name.
os.symlink('../../{{ cookiecutter.role_project_name }}', 'roles/{{ cookiecutter.role_name }}')
|
Add post-hook to handle the difference between project name and role name.#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
# Use symlink to handle the difference between project name and role name.
os.symlink('../../{{ cookiecutter.role_project_name }}', 'roles/{{ cookiecutter.role_name }}')
|
<commit_before><commit_msg>Add post-hook to handle the difference between project name and role name.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
# Use symlink to handle the difference between project name and role name.
os.symlink('../../{{ cookiecutter.role_project_name }}', 'roles/{{ cookiecutter.role_name }}')
|
|
1ace5845055050c0ac3d9e14a5f76ad78f6778bf
|
python/calc_fuel_pitch_fraction.py
|
python/calc_fuel_pitch_fraction.py
|
import sympy as sp
import sys
def calc_spacing(R, n):
x = sp.symbols('x')
Af = 0
Am = 0
for m in range(2*n - 1):
if m % 2 == 0:
Af += sp.pi * (R/n * (m/2 + x))**2
if m % 2 == 1:
Af -= sp.pi * (R/n * (m+1)/2)**2
for m in range(2*n):
if m % 2 == 0:
Am -= sp.pi * (R/n * (m/2 + x))**2
if m % 2 == 1:
Am += sp.pi * (R/n * (m+1)/2)**2
return sp.solve(Af / (Af + Am) - .225, x)
print(calc_spacing(float(sys.argv[1]), int(sys.argv[2])))
|
Add python script for calculating fuel pitch fraction.
|
Add python script for calculating fuel pitch fraction.
|
Python
|
lgpl-2.1
|
gridley/moltres,arfc/moltres,arfc/moltres,arfc/moltres,arfc/moltres,lindsayad/moltres,lindsayad/moltres,lindsayad/moltres,lindsayad/moltres,gridley/moltres,gridley/moltres,gridley/moltres
|
Add python script for calculating fuel pitch fraction.
|
import sympy as sp
import sys
def calc_spacing(R, n):
x = sp.symbols('x')
Af = 0
Am = 0
for m in range(2*n - 1):
if m % 2 == 0:
Af += sp.pi * (R/n * (m/2 + x))**2
if m % 2 == 1:
Af -= sp.pi * (R/n * (m+1)/2)**2
for m in range(2*n):
if m % 2 == 0:
Am -= sp.pi * (R/n * (m/2 + x))**2
if m % 2 == 1:
Am += sp.pi * (R/n * (m+1)/2)**2
return sp.solve(Af / (Af + Am) - .225, x)
print(calc_spacing(float(sys.argv[1]), int(sys.argv[2])))
|
<commit_before><commit_msg>Add python script for calculating fuel pitch fraction.<commit_after>
|
import sympy as sp
import sys
def calc_spacing(R, n):
x = sp.symbols('x')
Af = 0
Am = 0
for m in range(2*n - 1):
if m % 2 == 0:
Af += sp.pi * (R/n * (m/2 + x))**2
if m % 2 == 1:
Af -= sp.pi * (R/n * (m+1)/2)**2
for m in range(2*n):
if m % 2 == 0:
Am -= sp.pi * (R/n * (m/2 + x))**2
if m % 2 == 1:
Am += sp.pi * (R/n * (m+1)/2)**2
return sp.solve(Af / (Af + Am) - .225, x)
print(calc_spacing(float(sys.argv[1]), int(sys.argv[2])))
|
Add python script for calculating fuel pitch fraction.import sympy as sp
import sys
def calc_spacing(R, n):
x = sp.symbols('x')
Af = 0
Am = 0
for m in range(2*n - 1):
if m % 2 == 0:
Af += sp.pi * (R/n * (m/2 + x))**2
if m % 2 == 1:
Af -= sp.pi * (R/n * (m+1)/2)**2
for m in range(2*n):
if m % 2 == 0:
Am -= sp.pi * (R/n * (m/2 + x))**2
if m % 2 == 1:
Am += sp.pi * (R/n * (m+1)/2)**2
return sp.solve(Af / (Af + Am) - .225, x)
print(calc_spacing(float(sys.argv[1]), int(sys.argv[2])))
|
<commit_before><commit_msg>Add python script for calculating fuel pitch fraction.<commit_after>import sympy as sp
import sys
def calc_spacing(R, n):
x = sp.symbols('x')
Af = 0
Am = 0
for m in range(2*n - 1):
if m % 2 == 0:
Af += sp.pi * (R/n * (m/2 + x))**2
if m % 2 == 1:
Af -= sp.pi * (R/n * (m+1)/2)**2
for m in range(2*n):
if m % 2 == 0:
Am -= sp.pi * (R/n * (m/2 + x))**2
if m % 2 == 1:
Am += sp.pi * (R/n * (m+1)/2)**2
return sp.solve(Af / (Af + Am) - .225, x)
print(calc_spacing(float(sys.argv[1]), int(sys.argv[2])))
|
|
bfd85c18e788c4e89569f7a35fa85d80d3bcd031
|
reducer/ipython_version_helper.py
|
reducer/ipython_version_helper.py
|
from __future__ import (division, print_function, absolute_import,
unicode_literals)
from IPython import version_info
def ipython_version_as_string():
"""
The IPython version is a tuple (major, minor, patch, vendor). We only
need major, minor, patch.
"""
return ''.join([str(s) for s in version_info[0:3]])
|
Add the IPython version helper
|
Add the IPython version helper
|
Python
|
bsd-3-clause
|
astrofrog/reducer,astrofrog/reducer,StuartLittlefair/reducer,StuartLittlefair/reducer,mwcraig/reducer
|
Add the IPython version helper
|
from __future__ import (division, print_function, absolute_import,
unicode_literals)
from IPython import version_info
def ipython_version_as_string():
"""
The IPython version is a tuple (major, minor, patch, vendor). We only
need major, minor, patch.
"""
return ''.join([str(s) for s in version_info[0:3]])
|
<commit_before><commit_msg>Add the IPython version helper<commit_after>
|
from __future__ import (division, print_function, absolute_import,
unicode_literals)
from IPython import version_info
def ipython_version_as_string():
"""
The IPython version is a tuple (major, minor, patch, vendor). We only
need major, minor, patch.
"""
return ''.join([str(s) for s in version_info[0:3]])
|
Add the IPython version helperfrom __future__ import (division, print_function, absolute_import,
unicode_literals)
from IPython import version_info
def ipython_version_as_string():
"""
The IPython version is a tuple (major, minor, patch, vendor). We only
need major, minor, patch.
"""
return ''.join([str(s) for s in version_info[0:3]])
|
<commit_before><commit_msg>Add the IPython version helper<commit_after>from __future__ import (division, print_function, absolute_import,
unicode_literals)
from IPython import version_info
def ipython_version_as_string():
"""
The IPython version is a tuple (major, minor, patch, vendor). We only
need major, minor, patch.
"""
return ''.join([str(s) for s in version_info[0:3]])
|
|
f2044b0771f278c2a0de5cb69783b264a2d2363d
|
cluster_metrics.py
|
cluster_metrics.py
|
"Calculates Silhouette coefficient and Calinski-Harabaz index for a kmeans model."
import os, sys
import argparse, joblib
from sklearn import metrics
def cluster_metrics(data_file_path):
if not os.path.exists(data_file_path + '/kmodel.txt'):
print('No k-means model file found.')
sys.exit(0)
kmodel = joblib.load(data_file_path + '/kmodel.txt')
# If no topic_space.txt file exists, the clustering was performed on the
# Tf-Idf matrix so load that instead.
if os.path.exists(data_file_path + '/topic_space.txt'):
vector_space = joblib.load(data_file_path + '/topic_space.txt')
print('Calculating metrics for topic vector space.')
else:
vector_space = joblib.load(data_file_path + '/tfidf_sparse.txt')
print('Calculating metrics for Tf-Idf vector space.')
silhouette = metrics.silhouette_score(vector_space, kmodel.labels_,
metric='euclidean')
calhar = metrics.calinski_harabaz_score(vector_space.toarray(), kmodel.labels_)
with open(data_file_path + '/metric_results.txt', 'w+') as output:
output.write('Silhouette coefficient: ' + str(silhouette))
output.write('\nCaliski-Harabaz index: ' + str(calhar))
if __name__ == '__main__':
parser = argparse.ArgumentParser('Parses data file path.')
parser.add_argument('data_file_path', type=str,
help='The file to the data directory.')
args = parser.parse_args()
cluster_metrics(args.data_file_path)
|
Add clustering metrics calculator file.
|
Add clustering metrics calculator file.
|
Python
|
mit
|
theovasi/browsewiki,theovasi/browsewiki,theovasi/browsewiki
|
Add clustering metrics calculator file.
|
"Calculates Silhouette coefficient and Calinski-Harabaz index for a kmeans model."
import os, sys
import argparse, joblib
from sklearn import metrics
def cluster_metrics(data_file_path):
if not os.path.exists(data_file_path + '/kmodel.txt'):
print('No k-means model file found.')
sys.exit(0)
kmodel = joblib.load(data_file_path + '/kmodel.txt')
# If no topic_space.txt file exists, the clustering was performed on the
# Tf-Idf matrix so load that instead.
if os.path.exists(data_file_path + '/topic_space.txt'):
vector_space = joblib.load(data_file_path + '/topic_space.txt')
print('Calculating metrics for topic vector space.')
else:
vector_space = joblib.load(data_file_path + '/tfidf_sparse.txt')
print('Calculating metrics for Tf-Idf vector space.')
silhouette = metrics.silhouette_score(vector_space, kmodel.labels_,
metric='euclidean')
calhar = metrics.calinski_harabaz_score(vector_space.toarray(), kmodel.labels_)
with open(data_file_path + '/metric_results.txt', 'w+') as output:
output.write('Silhouette coefficient: ' + str(silhouette))
output.write('\nCaliski-Harabaz index: ' + str(calhar))
if __name__ == '__main__':
parser = argparse.ArgumentParser('Parses data file path.')
parser.add_argument('data_file_path', type=str,
help='The file to the data directory.')
args = parser.parse_args()
cluster_metrics(args.data_file_path)
|
<commit_before><commit_msg>Add clustering metrics calculator file.<commit_after>
|
"Calculates Silhouette coefficient and Calinski-Harabaz index for a kmeans model."
import os, sys
import argparse, joblib
from sklearn import metrics
def cluster_metrics(data_file_path):
if not os.path.exists(data_file_path + '/kmodel.txt'):
print('No k-means model file found.')
sys.exit(0)
kmodel = joblib.load(data_file_path + '/kmodel.txt')
# If no topic_space.txt file exists, the clustering was performed on the
# Tf-Idf matrix so load that instead.
if os.path.exists(data_file_path + '/topic_space.txt'):
vector_space = joblib.load(data_file_path + '/topic_space.txt')
print('Calculating metrics for topic vector space.')
else:
vector_space = joblib.load(data_file_path + '/tfidf_sparse.txt')
print('Calculating metrics for Tf-Idf vector space.')
silhouette = metrics.silhouette_score(vector_space, kmodel.labels_,
metric='euclidean')
calhar = metrics.calinski_harabaz_score(vector_space.toarray(), kmodel.labels_)
with open(data_file_path + '/metric_results.txt', 'w+') as output:
output.write('Silhouette coefficient: ' + str(silhouette))
output.write('\nCaliski-Harabaz index: ' + str(calhar))
if __name__ == '__main__':
parser = argparse.ArgumentParser('Parses data file path.')
parser.add_argument('data_file_path', type=str,
help='The file to the data directory.')
args = parser.parse_args()
cluster_metrics(args.data_file_path)
|
Add clustering metrics calculator file."Calculates Silhouette coefficient and Calinski-Harabaz index for a kmeans model."
import os, sys
import argparse, joblib
from sklearn import metrics
def cluster_metrics(data_file_path):
if not os.path.exists(data_file_path + '/kmodel.txt'):
print('No k-means model file found.')
sys.exit(0)
kmodel = joblib.load(data_file_path + '/kmodel.txt')
# If no topic_space.txt file exists, the clustering was performed on the
# Tf-Idf matrix so load that instead.
if os.path.exists(data_file_path + '/topic_space.txt'):
vector_space = joblib.load(data_file_path + '/topic_space.txt')
print('Calculating metrics for topic vector space.')
else:
vector_space = joblib.load(data_file_path + '/tfidf_sparse.txt')
print('Calculating metrics for Tf-Idf vector space.')
silhouette = metrics.silhouette_score(vector_space, kmodel.labels_,
metric='euclidean')
calhar = metrics.calinski_harabaz_score(vector_space.toarray(), kmodel.labels_)
with open(data_file_path + '/metric_results.txt', 'w+') as output:
output.write('Silhouette coefficient: ' + str(silhouette))
output.write('\nCaliski-Harabaz index: ' + str(calhar))
if __name__ == '__main__':
parser = argparse.ArgumentParser('Parses data file path.')
parser.add_argument('data_file_path', type=str,
help='The file to the data directory.')
args = parser.parse_args()
cluster_metrics(args.data_file_path)
|
<commit_before><commit_msg>Add clustering metrics calculator file.<commit_after>"Calculates Silhouette coefficient and Calinski-Harabaz index for a kmeans model."
import os, sys
import argparse, joblib
from sklearn import metrics
def cluster_metrics(data_file_path):
if not os.path.exists(data_file_path + '/kmodel.txt'):
print('No k-means model file found.')
sys.exit(0)
kmodel = joblib.load(data_file_path + '/kmodel.txt')
# If no topic_space.txt file exists, the clustering was performed on the
# Tf-Idf matrix so load that instead.
if os.path.exists(data_file_path + '/topic_space.txt'):
vector_space = joblib.load(data_file_path + '/topic_space.txt')
print('Calculating metrics for topic vector space.')
else:
vector_space = joblib.load(data_file_path + '/tfidf_sparse.txt')
print('Calculating metrics for Tf-Idf vector space.')
silhouette = metrics.silhouette_score(vector_space, kmodel.labels_,
metric='euclidean')
calhar = metrics.calinski_harabaz_score(vector_space.toarray(), kmodel.labels_)
with open(data_file_path + '/metric_results.txt', 'w+') as output:
output.write('Silhouette coefficient: ' + str(silhouette))
output.write('\nCaliski-Harabaz index: ' + str(calhar))
if __name__ == '__main__':
parser = argparse.ArgumentParser('Parses data file path.')
parser.add_argument('data_file_path', type=str,
help='The file to the data directory.')
args = parser.parse_args()
cluster_metrics(args.data_file_path)
|