| column | type | string length (min–max) |
|---|---|---|
| commit | string | 40–40 |
| old_file | string | 4–118 |
| new_file | string | 4–118 |
| old_contents | string | 0–2.94k |
| new_contents | string | 1–4.43k |
| subject | string | 15–444 |
| message | string | 16–3.45k |
| lang | string (1 distinct value) | – |
| license | string (13 distinct values) | – |
| repos | string | 5–43.2k |
| prompt | string | 17–4.58k |
| response | string | 1–4.43k |
| prompt_tagged | string | 58–4.62k |
| response_tagged | string | 1–4.43k |
| text | string | 132–7.29k |
| text_tagged | string | 173–7.33k |
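The columns above can be read directly with the Hugging Face `datasets` library. The sketch below is a minimal, illustrative example, not the dataset's documented API: the repository id `your-org/python-commit-dataset` and the `train` split name are placeholders, and only the column names and the `<commit_before>`/`<commit_msg>`/`<commit_after>` markers are taken from the rows shown here.

```python
# Minimal sketch of loading and inspecting the dataset with the Hugging Face
# `datasets` library. The repo id and split name below are placeholders.
from datasets import load_dataset

ds = load_dataset("your-org/python-commit-dataset", split="train")  # hypothetical id

row = ds[0]
print(row["commit"])    # 40-character commit SHA
print(row["subject"])   # one-line commit subject
print(row["lang"])      # "Python" (the single value of the `lang` column)

# The *_tagged columns wrap each row in explicit markers:
#   <commit_before>{old_contents}<commit_msg>{message}<commit_after>{new_contents}
tagged = row["text_tagged"]
before, rest = tagged.split("<commit_msg>", 1)
message, new_contents = rest.split("<commit_after>", 1)
old_contents = before[len("<commit_before>"):]
print(message.strip())
```

In the rows shown here, `prompt` carries the commit message alone and `response` the new file contents, while `text` concatenates the two without markers and `text_tagged` adds the markers parsed above.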
dd4035fc8dcb5b582ca154670b77d719a11f44cf
|
src/webapp/dummy_data.py
|
src/webapp/dummy_data.py
|
from math import sqrt
from random import random
import database as db
from database.model import Team, Members, Location
def make_dummy_data(num_teams, confirmed=True):
for idx in range(num_teams):
team = Team(name="Team %d" % idx,
confirmed=confirmed)
db.session.add(team)
for member_idx in range(3):
member = Members(name="Member%d from team%d" % (member_idx, idx),
team = team)
db.session.add(member)
lat_rand = (0.5 - random()) * 0.1
lon_rand = (0.5 - random()) * 0.1
pseudo_dist = sqrt(lat_rand ** 2 + lon_rand **2)
lat = 51.0322627 + lat_rand
lon = 13.7071665 + lon_rand
location = Location(street="Teststreet %d" % idx,
zip_no="01217",
extra="",
lat=lat,
lon=lon,
center_distance=pseudo_dist,
team=team)
db.session.add(location)
db.session.commit()
|
Add a function to insert dummy data in the database
|
Add a function to insert dummy data in the database
|
Python
|
bsd-3-clause
|
janLo/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system
|
Add a function to insert dummy data in the database
|
from math import sqrt
from random import random
import database as db
from database.model import Team, Members, Location
def make_dummy_data(num_teams, confirmed=True):
for idx in range(num_teams):
team = Team(name="Team %d" % idx,
confirmed=confirmed)
db.session.add(team)
for member_idx in range(3):
member = Members(name="Member%d from team%d" % (member_idx, idx),
team = team)
db.session.add(member)
lat_rand = (0.5 - random()) * 0.1
lon_rand = (0.5 - random()) * 0.1
pseudo_dist = sqrt(lat_rand ** 2 + lon_rand **2)
lat = 51.0322627 + lat_rand
lon = 13.7071665 + lon_rand
location = Location(street="Teststreet %d" % idx,
zip_no="01217",
extra="",
lat=lat,
lon=lon,
center_distance=pseudo_dist,
team=team)
db.session.add(location)
db.session.commit()
|
<commit_before><commit_msg>Add a function to insert dummy data in the database<commit_after>
|
from math import sqrt
from random import random
import database as db
from database.model import Team, Members, Location
def make_dummy_data(num_teams, confirmed=True):
for idx in range(num_teams):
team = Team(name="Team %d" % idx,
confirmed=confirmed)
db.session.add(team)
for member_idx in range(3):
member = Members(name="Member%d from team%d" % (member_idx, idx),
team = team)
db.session.add(member)
lat_rand = (0.5 - random()) * 0.1
lon_rand = (0.5 - random()) * 0.1
pseudo_dist = sqrt(lat_rand ** 2 + lon_rand **2)
lat = 51.0322627 + lat_rand
lon = 13.7071665 + lon_rand
location = Location(street="Teststreet %d" % idx,
zip_no="01217",
extra="",
lat=lat,
lon=lon,
center_distance=pseudo_dist,
team=team)
db.session.add(location)
db.session.commit()
|
Add a function to insert dummy data in the databasefrom math import sqrt
from random import random
import database as db
from database.model import Team, Members, Location
def make_dummy_data(num_teams, confirmed=True):
for idx in range(num_teams):
team = Team(name="Team %d" % idx,
confirmed=confirmed)
db.session.add(team)
for member_idx in range(3):
member = Members(name="Member%d from team%d" % (member_idx, idx),
team = team)
db.session.add(member)
lat_rand = (0.5 - random()) * 0.1
lon_rand = (0.5 - random()) * 0.1
pseudo_dist = sqrt(lat_rand ** 2 + lon_rand **2)
lat = 51.0322627 + lat_rand
lon = 13.7071665 + lon_rand
location = Location(street="Teststreet %d" % idx,
zip_no="01217",
extra="",
lat=lat,
lon=lon,
center_distance=pseudo_dist,
team=team)
db.session.add(location)
db.session.commit()
|
<commit_before><commit_msg>Add a function to insert dummy data in the database<commit_after>from math import sqrt
from random import random
import database as db
from database.model import Team, Members, Location
def make_dummy_data(num_teams, confirmed=True):
for idx in range(num_teams):
team = Team(name="Team %d" % idx,
confirmed=confirmed)
db.session.add(team)
for member_idx in range(3):
member = Members(name="Member%d from team%d" % (member_idx, idx),
team = team)
db.session.add(member)
lat_rand = (0.5 - random()) * 0.1
lon_rand = (0.5 - random()) * 0.1
pseudo_dist = sqrt(lat_rand ** 2 + lon_rand **2)
lat = 51.0322627 + lat_rand
lon = 13.7071665 + lon_rand
location = Location(street="Teststreet %d" % idx,
zip_no="01217",
extra="",
lat=lat,
lon=lon,
center_distance=pseudo_dist,
team=team)
db.session.add(location)
db.session.commit()
|
|
82e2da1363441177216b0c230232aa061f18714e
|
alembic/versions/7c9bbf3a039a_game_stats_stat_id.py
|
alembic/versions/7c9bbf3a039a_game_stats_stat_id.py
|
revision = '7c9bbf3a039a'
down_revision = '89c5cb66426d'
branch_labels = None
depends_on = None
import alembic
import sqlalchemy
def upgrade():
conn = alembic.context.get_context().bind
meta = sqlalchemy.MetaData(bind=conn)
meta.reflect()
game_stats = meta.tables["game_stats"]
shows = meta.tables["shows"]
constraint_name = None
for fk in game_stats.c.stat_id.foreign_keys:
if fk.column.table is shows and fk.column.name == "id":
constraint_name = fk.name
break
else:
raise Exception("Failed to find a foreign key on `game_stats.stat_id` that references `shows.id`")
alembic.op.drop_constraint(constraint_name, 'game_stats')
alembic.op.create_foreign_key(constraint_name, 'game_stats', 'stats', ["stat_id"], ["id"], onupdate="CASCADE", ondelete="CASCADE")
def downgrade():
conn = alembic.context.get_context().bind
meta = sqlalchemy.MetaData(bind=conn)
meta.reflect()
game_stats = meta.tables["game_stats"]
stats = meta.tables["stats"]
constraint_name = None
for fk in game_stats.c.stat_id.foreign_keys:
if fk.column.table is stats and fk.column.name == "id":
constraint_name = fk.name
break
else:
raise Exception("Failed to find a foreign key on `game_stats.stat_id` that references `stats.id`")
alembic.op.drop_constraint(constraint_name, 'game_stats')
alembic.op.create_foreign_key(constraint_name, 'game_stats', 'shows', ["stat_id"], ["id"], onupdate="CASCADE", ondelete="CASCADE")
|
Update the foreign key on `game_stats.stat_id` to reference `stats.id` instead of `shows.id`
|
Update the foreign key on `game_stats.stat_id` to reference `stats.id` instead of `shows.id`
Closes #250.
|
Python
|
apache-2.0
|
andreasots/lrrbot,mrphlip/lrrbot,andreasots/lrrbot,andreasots/lrrbot,mrphlip/lrrbot,mrphlip/lrrbot
|
Update the foreign key on `game_stats.stat_id` to reference `stats.id` instead of `shows.id`
Closes #250.
|
revision = '7c9bbf3a039a'
down_revision = '89c5cb66426d'
branch_labels = None
depends_on = None
import alembic
import sqlalchemy
def upgrade():
conn = alembic.context.get_context().bind
meta = sqlalchemy.MetaData(bind=conn)
meta.reflect()
game_stats = meta.tables["game_stats"]
shows = meta.tables["shows"]
constraint_name = None
for fk in game_stats.c.stat_id.foreign_keys:
if fk.column.table is shows and fk.column.name == "id":
constraint_name = fk.name
break
else:
raise Exception("Failed to find a foreign key on `game_stats.stat_id` that references `shows.id`")
alembic.op.drop_constraint(constraint_name, 'game_stats')
alembic.op.create_foreign_key(constraint_name, 'game_stats', 'stats', ["stat_id"], ["id"], onupdate="CASCADE", ondelete="CASCADE")
def downgrade():
conn = alembic.context.get_context().bind
meta = sqlalchemy.MetaData(bind=conn)
meta.reflect()
game_stats = meta.tables["game_stats"]
stats = meta.tables["stats"]
constraint_name = None
for fk in game_stats.c.stat_id.foreign_keys:
if fk.column.table is stats and fk.column.name == "id":
constraint_name = fk.name
break
else:
raise Exception("Failed to find a foreign key on `game_stats.stat_id` that references `stats.id`")
alembic.op.drop_constraint(constraint_name, 'game_stats')
alembic.op.create_foreign_key(constraint_name, 'game_stats', 'shows', ["stat_id"], ["id"], onupdate="CASCADE", ondelete="CASCADE")
|
<commit_before><commit_msg>Update the foreign key on `game_stats.stat_id` to reference `stats.id` instead of `shows.id`
Closes #250.<commit_after>
|
revision = '7c9bbf3a039a'
down_revision = '89c5cb66426d'
branch_labels = None
depends_on = None
import alembic
import sqlalchemy
def upgrade():
conn = alembic.context.get_context().bind
meta = sqlalchemy.MetaData(bind=conn)
meta.reflect()
game_stats = meta.tables["game_stats"]
shows = meta.tables["shows"]
constraint_name = None
for fk in game_stats.c.stat_id.foreign_keys:
if fk.column.table is shows and fk.column.name == "id":
constraint_name = fk.name
break
else:
raise Exception("Failed to find a foreign key on `game_stats.stat_id` that references `shows.id`")
alembic.op.drop_constraint(constraint_name, 'game_stats')
alembic.op.create_foreign_key(constraint_name, 'game_stats', 'stats', ["stat_id"], ["id"], onupdate="CASCADE", ondelete="CASCADE")
def downgrade():
conn = alembic.context.get_context().bind
meta = sqlalchemy.MetaData(bind=conn)
meta.reflect()
game_stats = meta.tables["game_stats"]
stats = meta.tables["stats"]
constraint_name = None
for fk in game_stats.c.stat_id.foreign_keys:
if fk.column.table is stats and fk.column.name == "id":
constraint_name = fk.name
break
else:
raise Exception("Failed to find a foreign key on `game_stats.stat_id` that references `stats.id`")
alembic.op.drop_constraint(constraint_name, 'game_stats')
alembic.op.create_foreign_key(constraint_name, 'game_stats', 'shows', ["stat_id"], ["id"], onupdate="CASCADE", ondelete="CASCADE")
|
Update the foreign key on `game_stats.stat_id` to reference `stats.id` instead of `shows.id`
Closes #250.revision = '7c9bbf3a039a'
down_revision = '89c5cb66426d'
branch_labels = None
depends_on = None
import alembic
import sqlalchemy
def upgrade():
conn = alembic.context.get_context().bind
meta = sqlalchemy.MetaData(bind=conn)
meta.reflect()
game_stats = meta.tables["game_stats"]
shows = meta.tables["shows"]
constraint_name = None
for fk in game_stats.c.stat_id.foreign_keys:
if fk.column.table is shows and fk.column.name == "id":
constraint_name = fk.name
break
else:
raise Exception("Failed to find a foreign key on `game_stats.stat_id` that references `shows.id`")
alembic.op.drop_constraint(constraint_name, 'game_stats')
alembic.op.create_foreign_key(constraint_name, 'game_stats', 'stats', ["stat_id"], ["id"], onupdate="CASCADE", ondelete="CASCADE")
def downgrade():
conn = alembic.context.get_context().bind
meta = sqlalchemy.MetaData(bind=conn)
meta.reflect()
game_stats = meta.tables["game_stats"]
stats = meta.tables["stats"]
constraint_name = None
for fk in game_stats.c.stat_id.foreign_keys:
if fk.column.table is stats and fk.column.name == "id":
constraint_name = fk.name
break
else:
raise Exception("Failed to find a foreign key on `game_stats.stat_id` that references `stats.id`")
alembic.op.drop_constraint(constraint_name, 'game_stats')
alembic.op.create_foreign_key(constraint_name, 'game_stats', 'shows', ["stat_id"], ["id"], onupdate="CASCADE", ondelete="CASCADE")
|
<commit_before><commit_msg>Update the foreign key on `game_stats.stat_id` to reference `stats.id` instead of `shows.id`
Closes #250.<commit_after>revision = '7c9bbf3a039a'
down_revision = '89c5cb66426d'
branch_labels = None
depends_on = None
import alembic
import sqlalchemy
def upgrade():
conn = alembic.context.get_context().bind
meta = sqlalchemy.MetaData(bind=conn)
meta.reflect()
game_stats = meta.tables["game_stats"]
shows = meta.tables["shows"]
constraint_name = None
for fk in game_stats.c.stat_id.foreign_keys:
if fk.column.table is shows and fk.column.name == "id":
constraint_name = fk.name
break
else:
raise Exception("Failed to find a foreign key on `game_stats.stat_id` that references `shows.id`")
alembic.op.drop_constraint(constraint_name, 'game_stats')
alembic.op.create_foreign_key(constraint_name, 'game_stats', 'stats', ["stat_id"], ["id"], onupdate="CASCADE", ondelete="CASCADE")
def downgrade():
conn = alembic.context.get_context().bind
meta = sqlalchemy.MetaData(bind=conn)
meta.reflect()
game_stats = meta.tables["game_stats"]
stats = meta.tables["stats"]
constraint_name = None
for fk in game_stats.c.stat_id.foreign_keys:
if fk.column.table is stats and fk.column.name == "id":
constraint_name = fk.name
break
else:
raise Exception("Failed to find a foreign key on `game_stats.stat_id` that references `stats.id`")
alembic.op.drop_constraint(constraint_name, 'game_stats')
alembic.op.create_foreign_key(constraint_name, 'game_stats', 'shows', ["stat_id"], ["id"], onupdate="CASCADE", ondelete="CASCADE")
|
|
58316a5823e2e136b2b5687d4aef323ad8a86cee
|
senlin/tests/functional/drivers/openstack/sdk.py
|
senlin/tests/functional/drivers/openstack/sdk.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class FakeResourceObject(object):
'''Generate a fake SDK resource object based on given dictionary'''
def __init__(self, params):
for key in params:
setattr(self, key, params[key])
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class FakeResourceObject(object):
'''Generate a fake SDK resource object based on given dictionary'''
def __init__(self, params):
for key in params:
setattr(self, key, params[key])
def to_dict(self):
data = {
'image': {
'id': 'FAKE_IMAGE'
},
'flavor': {
'id': 'FAKE_FLAVOR'
},
'addresses': {
'private': [
{
'OS-EXT-IPS:type': 'fixed',
'addr': '10.0.0.5',
'version': 4
}
]
},
'security_groups': [
{
'name': 'default'
}
]
}
return data
|
Add to_dict() method for faked resource
|
Add to_dict() method for faked resource
When testing node_get with details, we expect the profile to return a
resource that has a to_dict() method. The existing faked resource
doesn't support this yet. This patch fixes it.
Change-Id: I52e0dad74a1140f8233280ff10a9c14ff1760f72
|
Python
|
apache-2.0
|
stackforge/senlin,openstack/senlin,openstack/senlin,openstack/senlin,tengqm/senlin-container,stackforge/senlin,tengqm/senlin-container
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class FakeResourceObject(object):
'''Generate a fake SDK resource object based on given dictionary'''
def __init__(self, params):
for key in params:
setattr(self, key, params[key])
Add to_dict() method for faked resource
When testing node_get with details, we expect the profile to return a
resource that has a to_dict() method. The existing faked resource
doesn't support this yet. This patch fixes it.
Change-Id: I52e0dad74a1140f8233280ff10a9c14ff1760f72
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class FakeResourceObject(object):
'''Generate a fake SDK resource object based on given dictionary'''
def __init__(self, params):
for key in params:
setattr(self, key, params[key])
def to_dict(self):
data = {
'image': {
'id': 'FAKE_IMAGE'
},
'flavor': {
'id': 'FAKE_FLAVOR'
},
'addresses': {
'private': [
{
'OS-EXT-IPS:type': 'fixed',
'addr': '10.0.0.5',
'version': 4
}
]
},
'security_groups': [
{
'name': 'default'
}
]
}
return data
|
<commit_before># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class FakeResourceObject(object):
'''Generate a fake SDK resource object based on given dictionary'''
def __init__(self, params):
for key in params:
setattr(self, key, params[key])
<commit_msg>Add to_dict() method for faked resource
When testing node_get with details, we expect the profile to return a
resource that has a to_dict() method. The existing faked resource
doesn't support this yet. This patch fixes it.
Change-Id: I52e0dad74a1140f8233280ff10a9c14ff1760f72<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class FakeResourceObject(object):
'''Generate a fake SDK resource object based on given dictionary'''
def __init__(self, params):
for key in params:
setattr(self, key, params[key])
def to_dict(self):
data = {
'image': {
'id': 'FAKE_IMAGE'
},
'flavor': {
'id': 'FAKE_FLAVOR'
},
'addresses': {
'private': [
{
'OS-EXT-IPS:type': 'fixed',
'addr': '10.0.0.5',
'version': 4
}
]
},
'security_groups': [
{
'name': 'default'
}
]
}
return data
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class FakeResourceObject(object):
'''Generate a fake SDK resource object based on given dictionary'''
def __init__(self, params):
for key in params:
setattr(self, key, params[key])
Add to_dict() method for faked resource
When testing node_get with details, we expect the profile to return a
resource that has a to_dict() method. The existing faked resource
doesn't support this yet. This patch fixes it.
Change-Id: I52e0dad74a1140f8233280ff10a9c14ff1760f72# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class FakeResourceObject(object):
'''Generate a fake SDK resource object based on given dictionary'''
def __init__(self, params):
for key in params:
setattr(self, key, params[key])
def to_dict(self):
data = {
'image': {
'id': 'FAKE_IMAGE'
},
'flavor': {
'id': 'FAKE_FLAVOR'
},
'addresses': {
'private': [
{
'OS-EXT-IPS:type': 'fixed',
'addr': '10.0.0.5',
'version': 4
}
]
},
'security_groups': [
{
'name': 'default'
}
]
}
return data
|
<commit_before># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class FakeResourceObject(object):
'''Generate a fake SDK resource object based on given dictionary'''
def __init__(self, params):
for key in params:
setattr(self, key, params[key])
<commit_msg>Add to_dict() method for faked resource
When testing node_get with details, we expect the profile to return a
resource that has a to_dict() method. The existing faked resource
doesn't support this yet. This patch fixes it.
Change-Id: I52e0dad74a1140f8233280ff10a9c14ff1760f72<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class FakeResourceObject(object):
'''Generate a fake SDK resource object based on given dictionary'''
def __init__(self, params):
for key in params:
setattr(self, key, params[key])
def to_dict(self):
data = {
'image': {
'id': 'FAKE_IMAGE'
},
'flavor': {
'id': 'FAKE_FLAVOR'
},
'addresses': {
'private': [
{
'OS-EXT-IPS:type': 'fixed',
'addr': '10.0.0.5',
'version': 4
}
]
},
'security_groups': [
{
'name': 'default'
}
]
}
return data
|
68e79e2c45d173d950f203e11a95452fc40b2b8e
|
tests/grammar_creation_test/RulesCopyTest.py
|
tests/grammar_creation_test/RulesCopyTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 20.08.2017 18:45
:Licence GNUv3
Part of grammpy
"""
from copy import deepcopy
from unittest import TestCase, main
from grammpy import *
class RulesAddingTest(TestCase):
def test_copyOfSingleRule(self):
class A(Nonterminal): pass
class B(Nonterminal): pass
class R(Rule):
rule=([A], [0, B])
attr = True
first = Grammar(terminals=[0], nonterminals=[A, B], rules=[R])
second = deepcopy(first)
fR = first.get_rule(R)
sR = second.get_rule(R)
self.assertTrue(fR.attr)
self.assertTrue(sR.attr)
fR.attr = False
self.assertFalse(fR.attr)
self.assertTrue(sR.attr)
def test_copyOfMoreRules(self):
class A(Nonterminal): pass
class B(Nonterminal): pass
class R1(Rule):
rule=([A], [0, B])
attr = True
class R2(Rule):
rule=([A], [0, B])
attr = 0
first = Grammar(terminals=[0], nonterminals=[A, B], rules=[R1, R2])
second = deepcopy(first)
fR1 = first.get_rule(R1)
sR1 = second.get_rule(R1)
self.assertTrue(fR1.attr)
self.assertTrue(sR1.attr)
fR1.attr = False
self.assertFalse(fR1.attr)
self.assertTrue(sR1.attr)
fR2 = first.get_rule(R2)
sR2 = second.get_rule(R2)
self.assertFalse(fR2.attr)
self.assertFalse(sR2.attr)
sR2.attr = True
self.assertFalse(fR2.attr)
self.assertTrue(sR2.attr)
def test_globalChange(self):
class A(Nonterminal): pass
class B(Nonterminal): pass
class R(Rule):
rule=([A], [0, B])
attr = True
first = Grammar(terminals=[0], nonterminals=[A, B], rules=[R])
second = deepcopy(first)
fR = first.get_rule(R)
sR = second.get_rule(R)
self.assertTrue(fR.attr)
self.assertTrue(sR.attr)
R.attr = False
self.assertFalse(fR.attr)
self.assertTrue(sR.attr)
if __name__ == '__main__':
main()
|
Add tests of deep copy
|
Add tests of deep copy
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add tests of deep copy
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 20.08.2017 18:45
:Licence GNUv3
Part of grammpy
"""
from copy import deepcopy
from unittest import TestCase, main
from grammpy import *
class RulesAddingTest(TestCase):
def test_copyOfSingleRule(self):
class A(Nonterminal): pass
class B(Nonterminal): pass
class R(Rule):
rule=([A], [0, B])
attr = True
first = Grammar(terminals=[0], nonterminals=[A, B], rules=[R])
second = deepcopy(first)
fR = first.get_rule(R)
sR = second.get_rule(R)
self.assertTrue(fR.attr)
self.assertTrue(sR.attr)
fR.attr = False
self.assertFalse(fR.attr)
self.assertTrue(sR.attr)
def test_copyOfMoreRules(self):
class A(Nonterminal): pass
class B(Nonterminal): pass
class R1(Rule):
rule=([A], [0, B])
attr = True
class R2(Rule):
rule=([A], [0, B])
attr = 0
first = Grammar(terminals=[0], nonterminals=[A, B], rules=[R1, R2])
second = deepcopy(first)
fR1 = first.get_rule(R1)
sR1 = second.get_rule(R1)
self.assertTrue(fR1.attr)
self.assertTrue(sR1.attr)
fR1.attr = False
self.assertFalse(fR1.attr)
self.assertTrue(sR1.attr)
fR2 = first.get_rule(R2)
sR2 = second.get_rule(R2)
self.assertFalse(fR2.attr)
self.assertFalse(sR2.attr)
sR2.attr = True
self.assertFalse(fR2.attr)
self.assertTrue(sR2.attr)
def test_globalChange(self):
class A(Nonterminal): pass
class B(Nonterminal): pass
class R(Rule):
rule=([A], [0, B])
attr = True
first = Grammar(terminals=[0], nonterminals=[A, B], rules=[R])
second = deepcopy(first)
fR = first.get_rule(R)
sR = second.get_rule(R)
self.assertTrue(fR.attr)
self.assertTrue(sR.attr)
R.attr = False
self.assertFalse(fR.attr)
self.assertTrue(sR.attr)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add tests of deep copy<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 20.08.2017 18:45
:Licence GNUv3
Part of grammpy
"""
from copy import deepcopy
from unittest import TestCase, main
from grammpy import *
class RulesAddingTest(TestCase):
def test_copyOfSingleRule(self):
class A(Nonterminal): pass
class B(Nonterminal): pass
class R(Rule):
rule=([A], [0, B])
attr = True
first = Grammar(terminals=[0], nonterminals=[A, B], rules=[R])
second = deepcopy(first)
fR = first.get_rule(R)
sR = second.get_rule(R)
self.assertTrue(fR.attr)
self.assertTrue(sR.attr)
fR.attr = False
self.assertFalse(fR.attr)
self.assertTrue(sR.attr)
def test_copyOfMoreRules(self):
class A(Nonterminal): pass
class B(Nonterminal): pass
class R1(Rule):
rule=([A], [0, B])
attr = True
class R2(Rule):
rule=([A], [0, B])
attr = 0
first = Grammar(terminals=[0], nonterminals=[A, B], rules=[R1, R2])
second = deepcopy(first)
fR1 = first.get_rule(R1)
sR1 = second.get_rule(R1)
self.assertTrue(fR1.attr)
self.assertTrue(sR1.attr)
fR1.attr = False
self.assertFalse(fR1.attr)
self.assertTrue(sR1.attr)
fR2 = first.get_rule(R2)
sR2 = second.get_rule(R2)
self.assertFalse(fR2.attr)
self.assertFalse(sR2.attr)
sR2.attr = True
self.assertFalse(fR2.attr)
self.assertTrue(sR2.attr)
def test_globalChange(self):
class A(Nonterminal): pass
class B(Nonterminal): pass
class R(Rule):
rule=([A], [0, B])
attr = True
first = Grammar(terminals=[0], nonterminals=[A, B], rules=[R])
second = deepcopy(first)
fR = first.get_rule(R)
sR = second.get_rule(R)
self.assertTrue(fR.attr)
self.assertTrue(sR.attr)
R.attr = False
self.assertFalse(fR.attr)
self.assertTrue(sR.attr)
if __name__ == '__main__':
main()
|
Add tests of deep copy#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 20.08.2017 18:45
:Licence GNUv3
Part of grammpy
"""
from copy import deepcopy
from unittest import TestCase, main
from grammpy import *
class RulesAddingTest(TestCase):
def test_copyOfSingleRule(self):
class A(Nonterminal): pass
class B(Nonterminal): pass
class R(Rule):
rule=([A], [0, B])
attr = True
first = Grammar(terminals=[0], nonterminals=[A, B], rules=[R])
second = deepcopy(first)
fR = first.get_rule(R)
sR = second.get_rule(R)
self.assertTrue(fR.attr)
self.assertTrue(sR.attr)
fR.attr = False
self.assertFalse(fR.attr)
self.assertTrue(sR.attr)
def test_copyOfMoreRules(self):
class A(Nonterminal): pass
class B(Nonterminal): pass
class R1(Rule):
rule=([A], [0, B])
attr = True
class R2(Rule):
rule=([A], [0, B])
attr = 0
first = Grammar(terminals=[0], nonterminals=[A, B], rules=[R1, R2])
second = deepcopy(first)
fR1 = first.get_rule(R1)
sR1 = second.get_rule(R1)
self.assertTrue(fR1.attr)
self.assertTrue(sR1.attr)
fR1.attr = False
self.assertFalse(fR1.attr)
self.assertTrue(sR1.attr)
fR2 = first.get_rule(R2)
sR2 = second.get_rule(R2)
self.assertFalse(fR2.attr)
self.assertFalse(sR2.attr)
sR2.attr = True
self.assertFalse(fR2.attr)
self.assertTrue(sR2.attr)
def test_globalChange(self):
class A(Nonterminal): pass
class B(Nonterminal): pass
class R(Rule):
rule=([A], [0, B])
attr = True
first = Grammar(terminals=[0], nonterminals=[A, B], rules=[R])
second = deepcopy(first)
fR = first.get_rule(R)
sR = second.get_rule(R)
self.assertTrue(fR.attr)
self.assertTrue(sR.attr)
R.attr = False
self.assertFalse(fR.attr)
self.assertTrue(sR.attr)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add tests of deep copy<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 20.08.2017 18:45
:Licence GNUv3
Part of grammpy
"""
from copy import deepcopy
from unittest import TestCase, main
from grammpy import *
class RulesAddingTest(TestCase):
def test_copyOfSingleRule(self):
class A(Nonterminal): pass
class B(Nonterminal): pass
class R(Rule):
rule=([A], [0, B])
attr = True
first = Grammar(terminals=[0], nonterminals=[A, B], rules=[R])
second = deepcopy(first)
fR = first.get_rule(R)
sR = second.get_rule(R)
self.assertTrue(fR.attr)
self.assertTrue(sR.attr)
fR.attr = False
self.assertFalse(fR.attr)
self.assertTrue(sR.attr)
def test_copyOfMoreRules(self):
class A(Nonterminal): pass
class B(Nonterminal): pass
class R1(Rule):
rule=([A], [0, B])
attr = True
class R2(Rule):
rule=([A], [0, B])
attr = 0
first = Grammar(terminals=[0], nonterminals=[A, B], rules=[R1, R2])
second = deepcopy(first)
fR1 = first.get_rule(R1)
sR1 = second.get_rule(R1)
self.assertTrue(fR1.attr)
self.assertTrue(sR1.attr)
fR1.attr = False
self.assertFalse(fR1.attr)
self.assertTrue(sR1.attr)
fR2 = first.get_rule(R2)
sR2 = second.get_rule(R2)
self.assertFalse(fR2.attr)
self.assertFalse(sR2.attr)
sR2.attr = True
self.assertFalse(fR2.attr)
self.assertTrue(sR2.attr)
def test_globalChange(self):
class A(Nonterminal): pass
class B(Nonterminal): pass
class R(Rule):
rule=([A], [0, B])
attr = True
first = Grammar(terminals=[0], nonterminals=[A, B], rules=[R])
second = deepcopy(first)
fR = first.get_rule(R)
sR = second.get_rule(R)
self.assertTrue(fR.attr)
self.assertTrue(sR.attr)
R.attr = False
self.assertFalse(fR.attr)
self.assertTrue(sR.attr)
if __name__ == '__main__':
main()
|
|
aeca35135975dbfe9bc807181252754cc08a1f16
|
paasta_tools/firewall.py
|
paasta_tools/firewall.py
|
#!/usr/bin/env python2.7
from __future__ import absolute_import
from __future__ import unicode_literals
import re
import shlex
import subprocess
class ChainDoesNotExist(Exception):
pass
def ensure_chain(chain, rules):
"""Idempotently ensure a chain exists and has a set of rules.
This function creates or updates an existing chain to match the rules
passed in.
This function will not reorder existing rules, but any new rules are always
inserted at the front of the chain.
"""
try:
current_rules = list_chain(chain)
except ChainDoesNotExist:
create_chain(chain)
current_rules = set()
for rule in rules:
if rule not in current_rules:
print('adding rule: {}'.format(rule))
add_rule(chain, rule)
for rule in current_rules:
if rule not in rules:
print('deleting rule: {}'.format(rule))
delete_rule(chain, rule)
def add_rule(chain, rule):
subprocess.check_call(['iptables', '-t', 'filter', '-I', chain] + shlex.split(rule))
def delete_rule(chain, rule):
subprocess.check_call(['iptables', '-t', 'filter', '-I', chain] + shlex.split(rule))
def create_chain(chain):
subprocess.check_call(('iptables', '-t', 'filter', '-N', chain))
def list_chain(chain):
"""List rules in a chain.
Returns a list of iptables rules, or raises ChainDoesNotExist.
"""
cmd = ('iptables', '-t', 'filter', '--list-rules', chain)
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = proc.communicate()
if proc.returncode != 0:
if b'No chain/target/match by that name.\n' in err:
raise ChainDoesNotExist(chain)
else:
raise subprocess.CalledProcessError(proc.returncode, cmd, output=(out, err))
# Parse rules into something usable
rule_regex = re.compile(b'-A {chain} (.+)$'.format(chain=re.escape(chain)))
rules = out.splitlines()
parsed = set()
for rule in rules:
if rule == '-N {}'.format(chain):
continue
m = rule_regex.match(rule)
if not m:
raise ValueError(
'Unable to parse iptables rule: {}'.format(rule),
)
else:
parsed.add(m.group(1))
return parsed
ensure_chain('ckuehl-test-service', [
'-j REJECT --reject-with icmp-port-unreachable',
'-d 169.254.255.254/32 -p tcp -m tcp --dport 20666 -j ACCEPT',
'-d 169.254.255.254/32 -p tcp -m tcp --dport 20641 -j ACCEPT',
])
|
Add some simple iptables managing functions
|
Add some simple iptables managing functions
The most important of these is `ensure_chain` for creating and managing
the rules on an entire chain.
|
Python
|
apache-2.0
|
somic/paasta,somic/paasta,Yelp/paasta,Yelp/paasta
|
Add some simple iptables managing functions
The most important of these is `ensure_chain` for creating and managing
the rules on an entire chain.
|
#!/usr/bin/env python2.7
from __future__ import absolute_import
from __future__ import unicode_literals
import re
import shlex
import subprocess
class ChainDoesNotExist(Exception):
pass
def ensure_chain(chain, rules):
"""Idempotently ensure a chain exists and has a set of rules.
This function creates or updates an existing chain to match the rules
passed in.
This function will not reorder existing rules, but any new rules are always
inserted at the front of the chain.
"""
try:
current_rules = list_chain(chain)
except ChainDoesNotExist:
create_chain(chain)
current_rules = set()
for rule in rules:
if rule not in current_rules:
print('adding rule: {}'.format(rule))
add_rule(chain, rule)
for rule in current_rules:
if rule not in rules:
print('deleting rule: {}'.format(rule))
delete_rule(chain, rule)
def add_rule(chain, rule):
subprocess.check_call(['iptables', '-t', 'filter', '-I', chain] + shlex.split(rule))
def delete_rule(chain, rule):
subprocess.check_call(['iptables', '-t', 'filter', '-I', chain] + shlex.split(rule))
def create_chain(chain):
subprocess.check_call(('iptables', '-t', 'filter', '-N', chain))
def list_chain(chain):
"""List rules in a chain.
Returns a list of iptables rules, or raises ChainDoesNotExist.
"""
cmd = ('iptables', '-t', 'filter', '--list-rules', chain)
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = proc.communicate()
if proc.returncode != 0:
if b'No chain/target/match by that name.\n' in err:
raise ChainDoesNotExist(chain)
else:
raise subprocess.CalledProcessError(proc.returncode, cmd, output=(out, err))
# Parse rules into something usable
rule_regex = re.compile(b'-A {chain} (.+)$'.format(chain=re.escape(chain)))
rules = out.splitlines()
parsed = set()
for rule in rules:
if rule == '-N {}'.format(chain):
continue
m = rule_regex.match(rule)
if not m:
raise ValueError(
'Unable to parse iptables rule: {}'.format(rule),
)
else:
parsed.add(m.group(1))
return parsed
ensure_chain('ckuehl-test-service', [
'-j REJECT --reject-with icmp-port-unreachable',
'-d 169.254.255.254/32 -p tcp -m tcp --dport 20666 -j ACCEPT',
'-d 169.254.255.254/32 -p tcp -m tcp --dport 20641 -j ACCEPT',
])
|
<commit_before><commit_msg>Add some simple iptables managing functions
The most important of these is `ensure_chain` for creating and managing
the rules on an entire chain.<commit_after>
|
#!/usr/bin/env python2.7
from __future__ import absolute_import
from __future__ import unicode_literals
import re
import shlex
import subprocess
class ChainDoesNotExist(Exception):
pass
def ensure_chain(chain, rules):
"""Idempotently ensure a chain exists and has a set of rules.
This function creates or updates an existing chain to match the rules
passed in.
This function will not reorder existing rules, but any new rules are always
inserted at the front of the chain.
"""
try:
current_rules = list_chain(chain)
except ChainDoesNotExist:
create_chain(chain)
current_rules = set()
for rule in rules:
if rule not in current_rules:
print('adding rule: {}'.format(rule))
add_rule(chain, rule)
for rule in current_rules:
if rule not in rules:
print('deleting rule: {}'.format(rule))
delete_rule(chain, rule)
def add_rule(chain, rule):
subprocess.check_call(['iptables', '-t', 'filter', '-I', chain] + shlex.split(rule))
def delete_rule(chain, rule):
subprocess.check_call(['iptables', '-t', 'filter', '-I', chain] + shlex.split(rule))
def create_chain(chain):
subprocess.check_call(('iptables', '-t', 'filter', '-N', chain))
def list_chain(chain):
"""List rules in a chain.
Returns a list of iptables rules, or raises ChainDoesNotExist.
"""
cmd = ('iptables', '-t', 'filter', '--list-rules', chain)
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = proc.communicate()
if proc.returncode != 0:
if b'No chain/target/match by that name.\n' in err:
raise ChainDoesNotExist(chain)
else:
raise subprocess.CalledProcessError(proc.returncode, cmd, output=(out, err))
# Parse rules into something usable
rule_regex = re.compile(b'-A {chain} (.+)$'.format(chain=re.escape(chain)))
rules = out.splitlines()
parsed = set()
for rule in rules:
if rule == '-N {}'.format(chain):
continue
m = rule_regex.match(rule)
if not m:
raise ValueError(
'Unable to parse iptables rule: {}'.format(rule),
)
else:
parsed.add(m.group(1))
return parsed
ensure_chain('ckuehl-test-service', [
'-j REJECT --reject-with icmp-port-unreachable',
'-d 169.254.255.254/32 -p tcp -m tcp --dport 20666 -j ACCEPT',
'-d 169.254.255.254/32 -p tcp -m tcp --dport 20641 -j ACCEPT',
])
|
Add some simple iptables managing functions
The most important of these is `ensure_chain` for creating and managing
the rules on an entire chain.#!/usr/bin/env python2.7
from __future__ import absolute_import
from __future__ import unicode_literals
import re
import shlex
import subprocess
class ChainDoesNotExist(Exception):
pass
def ensure_chain(chain, rules):
"""Idempotently ensure a chain exists and has a set of rules.
This function creates or updates an existing chain to match the rules
passed in.
This function will not reorder existing rules, but any new rules are always
inserted at the front of the chain.
"""
try:
current_rules = list_chain(chain)
except ChainDoesNotExist:
create_chain(chain)
current_rules = set()
for rule in rules:
if rule not in current_rules:
print('adding rule: {}'.format(rule))
add_rule(chain, rule)
for rule in current_rules:
if rule not in rules:
print('deleting rule: {}'.format(rule))
delete_rule(chain, rule)
def add_rule(chain, rule):
subprocess.check_call(['iptables', '-t', 'filter', '-I', chain] + shlex.split(rule))
def delete_rule(chain, rule):
subprocess.check_call(['iptables', '-t', 'filter', '-I', chain] + shlex.split(rule))
def create_chain(chain):
subprocess.check_call(('iptables', '-t', 'filter', '-N', chain))
def list_chain(chain):
"""List rules in a chain.
Returns a list of iptables rules, or raises ChainDoesNotExist.
"""
cmd = ('iptables', '-t', 'filter', '--list-rules', chain)
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = proc.communicate()
if proc.returncode != 0:
if b'No chain/target/match by that name.\n' in err:
raise ChainDoesNotExist(chain)
else:
raise subprocess.CalledProcessError(proc.returncode, cmd, output=(out, err))
# Parse rules into something usable
rule_regex = re.compile(b'-A {chain} (.+)$'.format(chain=re.escape(chain)))
rules = out.splitlines()
parsed = set()
for rule in rules:
if rule == '-N {}'.format(chain):
continue
m = rule_regex.match(rule)
if not m:
raise ValueError(
'Unable to parse iptables rule: {}'.format(rule),
)
else:
parsed.add(m.group(1))
return parsed
ensure_chain('ckuehl-test-service', [
'-j REJECT --reject-with icmp-port-unreachable',
'-d 169.254.255.254/32 -p tcp -m tcp --dport 20666 -j ACCEPT',
'-d 169.254.255.254/32 -p tcp -m tcp --dport 20641 -j ACCEPT',
])
|
<commit_before><commit_msg>Add some simple iptables managing functions
The most important of these is `ensure_chain` for creating and managing
the rules on an entire chain.<commit_after>#!/usr/bin/env python2.7
from __future__ import absolute_import
from __future__ import unicode_literals
import re
import shlex
import subprocess
class ChainDoesNotExist(Exception):
pass
def ensure_chain(chain, rules):
"""Idempotently ensure a chain exists and has a set of rules.
This function creates or updates an existing chain to match the rules
passed in.
This function will not reorder existing rules, but any new rules are always
inserted at the front of the chain.
"""
try:
current_rules = list_chain(chain)
except ChainDoesNotExist:
create_chain(chain)
current_rules = set()
for rule in rules:
if rule not in current_rules:
print('adding rule: {}'.format(rule))
add_rule(chain, rule)
for rule in current_rules:
if rule not in rules:
print('deleting rule: {}'.format(rule))
delete_rule(chain, rule)
def add_rule(chain, rule):
subprocess.check_call(['iptables', '-t', 'filter', '-I', chain] + shlex.split(rule))
def delete_rule(chain, rule):
subprocess.check_call(['iptables', '-t', 'filter', '-I', chain] + shlex.split(rule))
def create_chain(chain):
subprocess.check_call(('iptables', '-t', 'filter', '-N', chain))
def list_chain(chain):
"""List rules in a chain.
Returns a list of iptables rules, or raises ChainDoesNotExist.
"""
cmd = ('iptables', '-t', 'filter', '--list-rules', chain)
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = proc.communicate()
if proc.returncode != 0:
if b'No chain/target/match by that name.\n' in err:
raise ChainDoesNotExist(chain)
else:
raise subprocess.CalledProcessError(proc.returncode, cmd, output=(out, err))
# Parse rules into something usable
rule_regex = re.compile(b'-A {chain} (.+)$'.format(chain=re.escape(chain)))
rules = out.splitlines()
parsed = set()
for rule in rules:
if rule == '-N {}'.format(chain):
continue
m = rule_regex.match(rule)
if not m:
raise ValueError(
'Unable to parse iptables rule: {}'.format(rule),
)
else:
parsed.add(m.group(1))
return parsed
ensure_chain('ckuehl-test-service', [
'-j REJECT --reject-with icmp-port-unreachable',
'-d 169.254.255.254/32 -p tcp -m tcp --dport 20666 -j ACCEPT',
'-d 169.254.255.254/32 -p tcp -m tcp --dport 20641 -j ACCEPT',
])
|
|
1f50ebe397b692356dc4b34c646685badae85223
|
nipype/algorithms/tests/test_auto_Overlap.py
|
nipype/algorithms/tests/test_auto_Overlap.py
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.algorithms.misc import Overlap
def test_Overlap_inputs():
input_map = dict(bg_overlap=dict(mandatory=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
mask_volume=dict(),
out_file=dict(usedefault=True,
),
vol_units=dict(mandatory=True,
usedefault=True,
),
volume1=dict(mandatory=True,
),
volume2=dict(mandatory=True,
),
weighting=dict(usedefault=True,
),
)
inputs = Overlap.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Overlap_outputs():
output_map = dict(dice=dict(),
diff_file=dict(),
jaccard=dict(),
labels=dict(),
roi_di=dict(),
roi_ji=dict(),
roi_voldiff=dict(),
volume_difference=dict(),
)
outputs = Overlap.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
Test Overlap not included before
|
Test Overlap not included before
|
Python
|
bsd-3-clause
|
gerddie/nipype,blakedewey/nipype,mick-d/nipype,wanderine/nipype,mick-d/nipype,dgellis90/nipype,JohnGriffiths/nipype,pearsonlab/nipype,mick-d/nipype,pearsonlab/nipype,arokem/nipype,iglpdc/nipype,blakedewey/nipype,carolFrohlich/nipype,carolFrohlich/nipype,sgiavasis/nipype,arokem/nipype,iglpdc/nipype,carolFrohlich/nipype,dgellis90/nipype,sgiavasis/nipype,JohnGriffiths/nipype,iglpdc/nipype,gerddie/nipype,arokem/nipype,Leoniela/nipype,Leoniela/nipype,carolFrohlich/nipype,gerddie/nipype,glatard/nipype,sgiavasis/nipype,Leoniela/nipype,glatard/nipype,pearsonlab/nipype,JohnGriffiths/nipype,JohnGriffiths/nipype,gerddie/nipype,FCP-INDI/nipype,glatard/nipype,grlee77/nipype,arokem/nipype,FCP-INDI/nipype,dgellis90/nipype,wanderine/nipype,wanderine/nipype,iglpdc/nipype,grlee77/nipype,grlee77/nipype,sgiavasis/nipype,glatard/nipype,grlee77/nipype,dgellis90/nipype,blakedewey/nipype,wanderine/nipype,FCP-INDI/nipype,blakedewey/nipype,FCP-INDI/nipype,pearsonlab/nipype,mick-d/nipype
|
Test Overlap not included before
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.algorithms.misc import Overlap
def test_Overlap_inputs():
input_map = dict(bg_overlap=dict(mandatory=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
mask_volume=dict(),
out_file=dict(usedefault=True,
),
vol_units=dict(mandatory=True,
usedefault=True,
),
volume1=dict(mandatory=True,
),
volume2=dict(mandatory=True,
),
weighting=dict(usedefault=True,
),
)
inputs = Overlap.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Overlap_outputs():
output_map = dict(dice=dict(),
diff_file=dict(),
jaccard=dict(),
labels=dict(),
roi_di=dict(),
roi_ji=dict(),
roi_voldiff=dict(),
volume_difference=dict(),
)
outputs = Overlap.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
<commit_before><commit_msg>Test Overlap not included before<commit_after>
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.algorithms.misc import Overlap
def test_Overlap_inputs():
input_map = dict(bg_overlap=dict(mandatory=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
mask_volume=dict(),
out_file=dict(usedefault=True,
),
vol_units=dict(mandatory=True,
usedefault=True,
),
volume1=dict(mandatory=True,
),
volume2=dict(mandatory=True,
),
weighting=dict(usedefault=True,
),
)
inputs = Overlap.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Overlap_outputs():
output_map = dict(dice=dict(),
diff_file=dict(),
jaccard=dict(),
labels=dict(),
roi_di=dict(),
roi_ji=dict(),
roi_voldiff=dict(),
volume_difference=dict(),
)
outputs = Overlap.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
Test Overlap not included before# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.algorithms.misc import Overlap
def test_Overlap_inputs():
input_map = dict(bg_overlap=dict(mandatory=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
mask_volume=dict(),
out_file=dict(usedefault=True,
),
vol_units=dict(mandatory=True,
usedefault=True,
),
volume1=dict(mandatory=True,
),
volume2=dict(mandatory=True,
),
weighting=dict(usedefault=True,
),
)
inputs = Overlap.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Overlap_outputs():
output_map = dict(dice=dict(),
diff_file=dict(),
jaccard=dict(),
labels=dict(),
roi_di=dict(),
roi_ji=dict(),
roi_voldiff=dict(),
volume_difference=dict(),
)
outputs = Overlap.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
<commit_before><commit_msg>Test Overlap not included before<commit_after># AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.algorithms.misc import Overlap
def test_Overlap_inputs():
input_map = dict(bg_overlap=dict(mandatory=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
mask_volume=dict(),
out_file=dict(usedefault=True,
),
vol_units=dict(mandatory=True,
usedefault=True,
),
volume1=dict(mandatory=True,
),
volume2=dict(mandatory=True,
),
weighting=dict(usedefault=True,
),
)
inputs = Overlap.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Overlap_outputs():
output_map = dict(dice=dict(),
diff_file=dict(),
jaccard=dict(),
labels=dict(),
roi_di=dict(),
roi_ji=dict(),
roi_voldiff=dict(),
volume_difference=dict(),
)
outputs = Overlap.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
|
1b7c3b829569acc163f98a93ced6f232c8ab0045
|
users/migrations/0002_user_protected.py
|
users/migrations/0002_user_protected.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='protected',
field=models.BooleanField(default=0),
),
]
|
Add migrations for protected attribute
|
Add migrations for protected attribute
|
Python
|
mit
|
jonathanstallings/cf-django,jonathanstallings/cf-django,jonathanstallings/cf-django
|
Add migrations for protected attribute
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='protected',
field=models.BooleanField(default=0),
),
]
|
<commit_before><commit_msg>Add migrations for protected attribute<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='protected',
field=models.BooleanField(default=0),
),
]
|
Add migrations for protected attribute# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='protected',
field=models.BooleanField(default=0),
),
]
|
<commit_before><commit_msg>Add migrations for protected attribute<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='protected',
field=models.BooleanField(default=0),
),
]
|
|
1ca2f6fa7f07bd043b2d27f5313fa7218700a502
|
zephyr/management/commands/reset_colors.py
|
zephyr/management/commands/reset_colors.py
|
from django.core.management.base import BaseCommand
from zephyr.models import StreamColor, UserProfile, Subscription, Recipient
class Command(BaseCommand):
help = """Reset all colors for a person to the default grey"""
def handle(self, *args, **options):
if not args:
self.print_help("python manage.py", "reset_colors")
exit(1)
for email in args:
user_profile = UserProfile.objects.get(user__email__iexact=email)
subs = Subscription.objects.filter(user_profile=user_profile,
active=True,
recipient__type=Recipient.STREAM)
for sub in subs:
stream_color, _ = StreamColor.objects.get_or_create(subscription=sub)
stream_color.color = StreamColor.DEFAULT_STREAM_COLOR
stream_color.save()
|
Add a management command to reset your stream colors to the default.
|
Add a management command to reset your stream colors to the default.
(imported from commit f6891ad40088bf34686a7d8a2d910a9a0f3be7c2)
|
Python
|
apache-2.0
|
suxinde2009/zulip,eastlhu/zulip,dattatreya303/zulip,zulip/zulip,nicholasbs/zulip,ikasumiwt/zulip,jphilipsen05/zulip,babbage/zulip,dxq-git/zulip,udxxabp/zulip,calvinleenyc/zulip,zhaoweigg/zulip,armooo/zulip,jonesgithub/zulip,pradiptad/zulip,levixie/zulip,paxapy/zulip,wangdeshui/zulip,LAndreas/zulip,PaulPetring/zulip,MayB/zulip,xuxiao/zulip,moria/zulip,brockwhittaker/zulip,eeshangarg/zulip,Qgap/zulip,ericzhou2008/zulip,j831/zulip,Suninus/zulip,suxinde2009/zulip,peiwei/zulip,mdavid/zulip,tiansiyuan/zulip,mdavid/zulip,mahim97/zulip,zwily/zulip,showell/zulip,qq1012803704/zulip,littledogboy/zulip,cosmicAsymmetry/zulip,vakila/zulip,bitemyapp/zulip,tbutter/zulip,hayderimran7/zulip,themass/zulip,Drooids/zulip,Drooids/zulip,wavelets/zulip,cosmicAsymmetry/zulip,kou/zulip,christi3k/zulip,eastlhu/zulip,JPJPJPOPOP/zulip,dnmfarrell/zulip,natanovia/zulip,zachallaun/zulip,alliejones/zulip,xuxiao/zulip,hustlzp/zulip,gkotian/zulip,vakila/zulip,mdavid/zulip,ryanbackman/zulip,mansilladev/zulip,codeKonami/zulip,vakila/zulip,bluesea/zulip,themass/zulip,zorojean/zulip,jphilipsen05/zulip,shrikrishnaholla/zulip,stamhe/zulip,luyifan/zulip,deer-hope/zulip,stamhe/zulip,johnnygaddarr/zulip,fw1121/zulip,ryansnowboarder/zulip,souravbadami/zulip,glovebx/zulip,brainwane/zulip,codeKonami/zulip,JanzTam/zulip,aliceriot/zulip,blaze225/zulip,akuseru/zulip,DazWorrall/zulip,itnihao/zulip,niftynei/zulip,esander91/zulip,MayB/zulip,krtkmj/zulip,arpith/zulip,jrowan/zulip,Galexrt/zulip,atomic-labs/zulip,levixie/zulip,aliceriot/zulip,dhcrzf/zulip,moria/zulip,ashwinirudrappa/zulip,moria/zulip,zorojean/zulip,souravbadami/zulip,dhcrzf/zulip,jimmy54/zulip,shrikrishnaholla/zulip,suxinde2009/zulip,MariaFaBella85/zulip,voidException/zulip,praveenaki/zulip,seapasulli/zulip,nicholasbs/zulip,Gabriel0402/zulip,LeeRisk/zulip,praveenaki/zulip,schatt/zulip,RobotCaleb/zulip,fw1121/zulip,so0k/zulip,willingc/zulip,codeKonami/zulip,moria/zulip,seapasulli/zulip,aliceriot/zulip,brockwhittaker/zulip,JanzTam/zulip,brainwane/zulip,akuseru/zulip,mahim97/zulip,AZtheAsian/zulip,ahmadassaf/zulip,krtkmj/zulip,itnihao/zulip,hafeez3000/zulip,saitodisse/zulip,dotcool/zulip,tiansiyuan/zulip,sup95/zulip,Qgap/zulip,dxq-git/zulip,wdaher/zulip,hafeez3000/zulip,babbage/zulip,Drooids/zulip,wangdeshui/zulip,amanharitsh123/zulip,gigawhitlocks/zulip,xuanhan863/zulip,voidException/zulip,souravbadami/zulip,Jianchun1/zulip,peiwei/zulip,dwrpayne/zulip,sonali0901/zulip,bowlofstew/zulip,rht/zulip,KJin99/zulip,punchagan/zulip,DazWorrall/zulip,kou/zulip,kokoar/zulip,rht/zulip,arpitpanwar/zulip,itnihao/zulip,hackerkid/zulip,swinghu/zulip,swinghu/zulip,rishig/zulip,stamhe/zulip,AZtheAsian/zulip,JanzTam/zulip,timabbott/zulip,dhcrzf/zulip,noroot/zulip,zwily/zulip,wdaher/zulip,tiansiyuan/zulip,xuanhan863/zulip,karamcnair/zulip,ApsOps/zulip,zhaoweigg/zulip,ryansnowboarder/zulip,eastlhu/zulip,zulip/zulip,dattatreya303/zulip,zacps/zulip,huangkebo/zulip,glovebx/zulip,susansls/zulip,JanzTam/zulip,wavelets/zulip,m1ssou/zulip,easyfmxu/zulip,lfranchi/zulip,jimmy54/zulip,calvinleenyc/zulip,EasonYi/zulip,developerfm/zulip,blaze225/zulip,calvinleenyc/zulip,hustlzp/zulip,ahmadassaf/zulip,samatdav/zulip,Diptanshu8/zulip,reyha/zulip,jessedhillon/zulip,xuxiao/zulip,lfranchi/zulip,ikasumiwt/zulip,johnnygaddarr/zulip,Juanvulcano/zulip,easyfmxu/zulip,armooo/zulip,kou/zulip,cosmicAsymmetry/zulip,KJin99/zulip,Cheppers/zulip,aps-sids/zulip,bastianh/zulip,krtkmj/zulip,JPJPJPOPOP/zulip,rht/zulip,m1ssou/zulip,Qgap/zulip,pradiptad/zulip,nicholasbs/zulip,aakash-cr7/zulip,ericzhou2008/zulip,Jianchun1/zulip,zachallau
n/zulip,pradiptad/zulip,jeffcao/zulip,eeshangarg/zulip,mansilladev/zulip,krtkmj/zulip,Vallher/zulip,Frouk/zulip,SmartPeople/zulip,wavelets/zulip,amallia/zulip,deer-hope/zulip,jrowan/zulip,he15his/zulip,blaze225/zulip,andersk/zulip,tbutter/zulip,MayB/zulip,dawran6/zulip,moria/zulip,hustlzp/zulip,Diptanshu8/zulip,ipernet/zulip,schatt/zulip,amallia/zulip,Vallher/zulip,udxxabp/zulip,bastianh/zulip,luyifan/zulip,eeshangarg/zulip,Qgap/zulip,stamhe/zulip,themass/zulip,shaunstanislaus/zulip,zacps/zulip,aps-sids/zulip,amallia/zulip,thomasboyt/zulip,DazWorrall/zulip,xuanhan863/zulip,codeKonami/zulip,Gabriel0402/zulip,bluesea/zulip,ryanbackman/zulip,christi3k/zulip,tommyip/zulip,Suninus/zulip,esander91/zulip,Batterfii/zulip,dattatreya303/zulip,zhaoweigg/zulip,vikas-parashar/zulip,yocome/zulip,tdr130/zulip,kaiyuanheshang/zulip,dattatreya303/zulip,Galexrt/zulip,jrowan/zulip,jackrzhang/zulip,aakash-cr7/zulip,zacps/zulip,rishig/zulip,susansls/zulip,dxq-git/zulip,aps-sids/zulip,Frouk/zulip,punchagan/zulip,ApsOps/zulip,ericzhou2008/zulip,mahim97/zulip,calvinleenyc/zulip,glovebx/zulip,jackrzhang/zulip,thomasboyt/zulip,hustlzp/zulip,christi3k/zulip,tommyip/zulip,zwily/zulip,eastlhu/zulip,johnny9/zulip,willingc/zulip,timabbott/zulip,bssrdf/zulip,Juanvulcano/zulip,calvinleenyc/zulip,tbutter/zulip,so0k/zulip,hafeez3000/zulip,willingc/zulip,ryansnowboarder/zulip,joyhchen/zulip,gkotian/zulip,seapasulli/zulip,mansilladev/zulip,yuvipanda/zulip,bastianh/zulip,sonali0901/zulip,ApsOps/zulip,j831/zulip,timabbott/zulip,sup95/zulip,dwrpayne/zulip,peguin40/zulip,Cheppers/zulip,PhilSk/zulip,reyha/zulip,synicalsyntax/zulip,jessedhillon/zulip,codeKonami/zulip,praveenaki/zulip,aliceriot/zulip,dattatreya303/zulip,LAndreas/zulip,hj3938/zulip,adnanh/zulip,natanovia/zulip,MayB/zulip,shubhamdhama/zulip,arpith/zulip,atomic-labs/zulip,bluesea/zulip,dxq-git/zulip,schatt/zulip,grave-w-grave/zulip,amyliu345/zulip,levixie/zulip,esander91/zulip,praveenaki/zulip,bitemyapp/zulip,lfranchi/zulip,dwrpayne/zulip,wweiradio/zulip,wangdeshui/zulip,vabs22/zulip,umkay/zulip,sonali0901/zulip,hj3938/zulip,ryanbackman/zulip,avastu/zulip,shaunstanislaus/zulip,themass/zulip,jerryge/zulip,krtkmj/zulip,timabbott/zulip,jainayush975/zulip,fw1121/zulip,guiquanz/zulip,gigawhitlocks/zulip,TigorC/zulip,levixie/zulip,xuxiao/zulip,sonali0901/zulip,proliming/zulip,amyliu345/zulip,jackrzhang/zulip,ikasumiwt/zulip,gkotian/zulip,jimmy54/zulip,dxq-git/zulip,guiquanz/zulip,willingc/zulip,avastu/zulip,bitemyapp/zulip,johnnygaddarr/zulip,dawran6/zulip,zhaoweigg/zulip,qq1012803704/zulip,dnmfarrell/zulip,LAndreas/zulip,gkotian/zulip,hafeez3000/zulip,alliejones/zulip,easyfmxu/zulip,shrikrishnaholla/zulip,jonesgithub/zulip,Frouk/zulip,shaunstanislaus/zulip,themass/zulip,he15his/zulip,itnihao/zulip,KingxBanana/zulip,ikasumiwt/zulip,zorojean/zulip,bastianh/zulip,armooo/zulip,synicalsyntax/zulip,Suninus/zulip,johnnygaddarr/zulip,brainwane/zulip,arpith/zulip,jackrzhang/zulip,hengqujushi/zulip,dotcool/zulip,karamcnair/zulip,EasonYi/zulip,m1ssou/zulip,joyhchen/zulip,schatt/zulip,jainayush975/zulip,jonesgithub/zulip,paxapy/zulip,johnny9/zulip,developerfm/zulip,jackrzhang/zulip,fw1121/zulip,zwily/zulip,Suninus/zulip,jeffcao/zulip,umkay/zulip,kou/zulip,amanharitsh123/zulip,joshisa/zulip,jrowan/zulip,Galexrt/zulip,developerfm/zulip,MariaFaBella85/zulip,so0k/zulip,huangkebo/zulip,dwrpayne/zulip,akuseru/zulip,hayderimran7/zulip,vabs22/zulip,kokoar/zulip,atomic-labs/zulip,adnanh/zulip,johnny9/zulip,KJin99/zulip,cosmicAsymmetry/zulip,peguin40/zulip,ufosky-server/zulip,dxq-git/zulip,johnnyg
addarr/zulip,xuanhan863/zulip,punchagan/zulip,rht/zulip,shaunstanislaus/zulip,KingxBanana/zulip,qq1012803704/zulip,noroot/zulip,joyhchen/zulip,paxapy/zulip,Frouk/zulip,j831/zulip,hackerkid/zulip,ericzhou2008/zulip,wdaher/zulip,umkay/zulip,vikas-parashar/zulip,wdaher/zulip,technicalpickles/zulip,zhaoweigg/zulip,huangkebo/zulip,ahmadassaf/zulip,alliejones/zulip,KingxBanana/zulip,peiwei/zulip,dawran6/zulip,karamcnair/zulip,stamhe/zulip,Diptanshu8/zulip,saitodisse/zulip,natanovia/zulip,Batterfii/zulip,paxapy/zulip,esander91/zulip,jessedhillon/zulip,Gabriel0402/zulip,armooo/zulip,ryanbackman/zulip,arpith/zulip,so0k/zulip,karamcnair/zulip,showell/zulip,arpitpanwar/zulip,Frouk/zulip,swinghu/zulip,adnanh/zulip,brockwhittaker/zulip,yuvipanda/zulip,blaze225/zulip,karamcnair/zulip,ashwinirudrappa/zulip,hayderimran7/zulip,babbage/zulip,gigawhitlocks/zulip,EasonYi/zulip,amanharitsh123/zulip,nicholasbs/zulip,rishig/zulip,vabs22/zulip,amallia/zulip,Diptanshu8/zulip,amyliu345/zulip,showell/zulip,zorojean/zulip,RobotCaleb/zulip,jainayush975/zulip,verma-varsha/zulip,dxq-git/zulip,JanzTam/zulip,vaidap/zulip,glovebx/zulip,zofuthan/zulip,sharmaeklavya2/zulip,so0k/zulip,ahmadassaf/zulip,rht/zulip,wavelets/zulip,yocome/zulip,pradiptad/zulip,dwrpayne/zulip,tbutter/zulip,jeffcao/zulip,adnanh/zulip,samatdav/zulip,babbage/zulip,atomic-labs/zulip,MariaFaBella85/zulip,mdavid/zulip,tiansiyuan/zulip,verma-varsha/zulip,bitemyapp/zulip,firstblade/zulip,bastianh/zulip,lfranchi/zulip,arpitpanwar/zulip,tdr130/zulip,bastianh/zulip,shaunstanislaus/zulip,tommyip/zulip,calvinleenyc/zulip,blaze225/zulip,amallia/zulip,Frouk/zulip,jonesgithub/zulip,akuseru/zulip,vaidap/zulip,ApsOps/zulip,thomasboyt/zulip,noroot/zulip,zulip/zulip,KJin99/zulip,sup95/zulip,mahim97/zulip,Drooids/zulip,hayderimran7/zulip,ApsOps/zulip,bastianh/zulip,zacps/zulip,mohsenSy/zulip,Gabriel0402/zulip,zachallaun/zulip,hj3938/zulip,souravbadami/zulip,rishig/zulip,PhilSk/zulip,LeeRisk/zulip,alliejones/zulip,nicholasbs/zulip,blaze225/zulip,hj3938/zulip,codeKonami/zulip,paxapy/zulip,armooo/zulip,qq1012803704/zulip,hengqujushi/zulip,ericzhou2008/zulip,udxxabp/zulip,he15his/zulip,zorojean/zulip,punchagan/zulip,stamhe/zulip,LAndreas/zulip,zofuthan/zulip,nicholasbs/zulip,technicalpickles/zulip,easyfmxu/zulip,synicalsyntax/zulip,showell/zulip,aakash-cr7/zulip,samatdav/zulip,verma-varsha/zulip,sharmaeklavya2/zulip,Vallher/zulip,wdaher/zulip,dwrpayne/zulip,hackerkid/zulip,bssrdf/zulip,joyhchen/zulip,j831/zulip,dhcrzf/zulip,gkotian/zulip,easyfmxu/zulip,dawran6/zulip,punchagan/zulip,ikasumiwt/zulip,atomic-labs/zulip,xuanhan863/zulip,brainwane/zulip,KJin99/zulip,developerfm/zulip,yocome/zulip,xuanhan863/zulip,luyifan/zulip,tbutter/zulip,Vallher/zulip,yuvipanda/zulip,babbage/zulip,thomasboyt/zulip,adnanh/zulip,deer-hope/zulip,saitodisse/zulip,sup95/zulip,hayderimran7/zulip,saitodisse/zulip,eeshangarg/zulip,arpitpanwar/zulip,jeffcao/zulip,ufosky-server/zulip,ashwinirudrappa/zulip,souravbadami/zulip,xuxiao/zulip,showell/zulip,ryansnowboarder/zulip,kaiyuanheshang/zulip,proliming/zulip,wavelets/zulip,ApsOps/zulip,niftynei/zulip,vikas-parashar/zulip,PaulPetring/zulip,samatdav/zulip,bssrdf/zulip,zofuthan/zulip,stamhe/zulip,christi3k/zulip,jimmy54/zulip,zofuthan/zulip,ipernet/zulip,bluesea/zulip,glovebx/zulip,tdr130/zulip,gigawhitlocks/zulip,jimmy54/zulip,bowlofstew/zulip,qq1012803704/zulip,gkotian/zulip,aps-sids/zulip,technicalpickles/zulip,alliejones/zulip,sharmaeklavya2/zulip,dotcool/zulip,swinghu/zulip,mohsenSy/zulip,sup95/zulip,ahmadassaf/zulip,technicalpickles/zulip,jerryge/zuli
p,brainwane/zulip,bitemyapp/zulip,samatdav/zulip,amyliu345/zulip,hengqujushi/zulip,LeeRisk/zulip,umkay/zulip,ufosky-server/zulip,PaulPetring/zulip,yuvipanda/zulip,aps-sids/zulip,verma-varsha/zulip,bluesea/zulip,hackerkid/zulip,eeshangarg/zulip,Batterfii/zulip,m1ssou/zulip,zofuthan/zulip,dnmfarrell/zulip,saitodisse/zulip,johnny9/zulip,Galexrt/zulip,nicholasbs/zulip,EasonYi/zulip,aliceriot/zulip,punchagan/zulip,KingxBanana/zulip,gigawhitlocks/zulip,susansls/zulip,hj3938/zulip,babbage/zulip,jeffcao/zulip,akuseru/zulip,noroot/zulip,Cheppers/zulip,cosmicAsymmetry/zulip,lfranchi/zulip,guiquanz/zulip,suxinde2009/zulip,m1ssou/zulip,avastu/zulip,willingc/zulip,proliming/zulip,willingc/zulip,technicalpickles/zulip,ashwinirudrappa/zulip,kou/zulip,joshisa/zulip,hustlzp/zulip,praveenaki/zulip,fw1121/zulip,codeKonami/zulip,jackrzhang/zulip,zwily/zulip,itnihao/zulip,luyifan/zulip,seapasulli/zulip,firstblade/zulip,jerryge/zulip,lfranchi/zulip,Batterfii/zulip,kaiyuanheshang/zulip,jrowan/zulip,fw1121/zulip,DazWorrall/zulip,amyliu345/zulip,shaunstanislaus/zulip,eeshangarg/zulip,MariaFaBella85/zulip,bssrdf/zulip,bitemyapp/zulip,brockwhittaker/zulip,karamcnair/zulip,deer-hope/zulip,Jianchun1/zulip,tiansiyuan/zulip,hj3938/zulip,hackerkid/zulip,ryansnowboarder/zulip,peiwei/zulip,proliming/zulip,LeeRisk/zulip,Jianchun1/zulip,vakila/zulip,isht3/zulip,sharmaeklavya2/zulip,jessedhillon/zulip,hengqujushi/zulip,kaiyuanheshang/zulip,synicalsyntax/zulip,fw1121/zulip,luyifan/zulip,littledogboy/zulip,dawran6/zulip,sup95/zulip,Jianchun1/zulip,dhcrzf/zulip,grave-w-grave/zulip,arpith/zulip,kaiyuanheshang/zulip,amanharitsh123/zulip,zachallaun/zulip,samatdav/zulip,umkay/zulip,punchagan/zulip,KingxBanana/zulip,levixie/zulip,gigawhitlocks/zulip,armooo/zulip,brainwane/zulip,ryanbackman/zulip,amallia/zulip,reyha/zulip,grave-w-grave/zulip,seapasulli/zulip,grave-w-grave/zulip,natanovia/zulip,dhcrzf/zulip,timabbott/zulip,PaulPetring/zulip,littledogboy/zulip,yocome/zulip,themass/zulip,EasonYi/zulip,moria/zulip,proliming/zulip,vaidap/zulip,hustlzp/zulip,Drooids/zulip,dnmfarrell/zulip,PaulPetring/zulip,vikas-parashar/zulip,vakila/zulip,swinghu/zulip,dnmfarrell/zulip,vaidap/zulip,so0k/zulip,Juanvulcano/zulip,timabbott/zulip,isht3/zulip,tommyip/zulip,thomasboyt/zulip,akuseru/zulip,EasonYi/zulip,showell/zulip,ryansnowboarder/zulip,TigorC/zulip,DazWorrall/zulip,Cheppers/zulip,zacps/zulip,kou/zulip,shubhamdhama/zulip,jerryge/zulip,dwrpayne/zulip,developerfm/zulip,sonali0901/zulip,zachallaun/zulip,joyhchen/zulip,babbage/zulip,ipernet/zulip,wavelets/zulip,verma-varsha/zulip,shubhamdhama/zulip,dotcool/zulip,aps-sids/zulip,firstblade/zulip,RobotCaleb/zulip,jphilipsen05/zulip,praveenaki/zulip,MariaFaBella85/zulip,Juanvulcano/zulip,dotcool/zulip,kokoar/zulip,jessedhillon/zulip,shubhamdhama/zulip,xuxiao/zulip,noroot/zulip,shrikrishnaholla/zulip,schatt/zulip,mansilladev/zulip,jessedhillon/zulip,reyha/zulip,j831/zulip,umkay/zulip,bowlofstew/zulip,aps-sids/zulip,hayderimran7/zulip,vikas-parashar/zulip,kaiyuanheshang/zulip,vakila/zulip,peiwei/zulip,KingxBanana/zulip,vakila/zulip,arpith/zulip,he15his/zulip,guiquanz/zulip,Gabriel0402/zulip,bitemyapp/zulip,showell/zulip,arpitpanwar/zulip,tdr130/zulip,suxinde2009/zulip,jainayush975/zulip,Batterfii/zulip,glovebx/zulip,kokoar/zulip,mansilladev/zulip,wweiradio/zulip,zorojean/zulip,susansls/zulip,wangdeshui/zulip,zorojean/zulip,jackrzhang/zulip,wweiradio/zulip,Suninus/zulip,jeffcao/zulip,zwily/zulip,dnmfarrell/zulip,zofuthan/zulip,amallia/zulip,peguin40/zulip,shubhamdhama/zulip,EasonYi/zulip,bowlofstew/zulip,su
xinde2009/zulip,noroot/zulip,umkay/zulip,schatt/zulip,ahmadassaf/zulip,littledogboy/zulip,shubhamdhama/zulip,voidException/zulip,mohsenSy/zulip,rishig/zulip,christi3k/zulip,easyfmxu/zulip,ikasumiwt/zulip,paxapy/zulip,huangkebo/zulip,jerryge/zulip,Galexrt/zulip,atomic-labs/zulip,zachallaun/zulip,johnny9/zulip,yuvipanda/zulip,hj3938/zulip,JanzTam/zulip,LeeRisk/zulip,tiansiyuan/zulip,hengqujushi/zulip,mansilladev/zulip,qq1012803704/zulip,vabs22/zulip,peguin40/zulip,littledogboy/zulip,praveenaki/zulip,kokoar/zulip,brockwhittaker/zulip,firstblade/zulip,rishig/zulip,xuanhan863/zulip,udxxabp/zulip,amanharitsh123/zulip,eastlhu/zulip,Suninus/zulip,ashwinirudrappa/zulip,PhilSk/zulip,natanovia/zulip,shrikrishnaholla/zulip,guiquanz/zulip,RobotCaleb/zulip,jimmy54/zulip,Vallher/zulip,wweiradio/zulip,arpitpanwar/zulip,wangdeshui/zulip,rht/zulip,qq1012803704/zulip,andersk/zulip,tommyip/zulip,sharmaeklavya2/zulip,hackerkid/zulip,thomasboyt/zulip,Cheppers/zulip,tbutter/zulip,vaidap/zulip,j831/zulip,seapasulli/zulip,yocome/zulip,hafeez3000/zulip,deer-hope/zulip,yuvipanda/zulip,JanzTam/zulip,levixie/zulip,jerryge/zulip,Juanvulcano/zulip,synicalsyntax/zulip,pradiptad/zulip,Suninus/zulip,TigorC/zulip,LAndreas/zulip,krtkmj/zulip,DazWorrall/zulip,dattatreya303/zulip,LeeRisk/zulip,susansls/zulip,alliejones/zulip,jeffcao/zulip,RobotCaleb/zulip,zhaoweigg/zulip,bluesea/zulip,ashwinirudrappa/zulip,Gabriel0402/zulip,krtkmj/zulip,andersk/zulip,alliejones/zulip,mahim97/zulip,luyifan/zulip,kokoar/zulip,tbutter/zulip,zhaoweigg/zulip,tdr130/zulip,aliceriot/zulip,esander91/zulip,vabs22/zulip,adnanh/zulip,jerryge/zulip,sharmaeklavya2/zulip,aliceriot/zulip,akuseru/zulip,mdavid/zulip,udxxabp/zulip,hafeez3000/zulip,bssrdf/zulip,ufosky-server/zulip,mdavid/zulip,dawran6/zulip,AZtheAsian/zulip,bssrdf/zulip,TigorC/zulip,thomasboyt/zulip,itnihao/zulip,littledogboy/zulip,wangdeshui/zulip,zachallaun/zulip,LAndreas/zulip,zulip/zulip,aakash-cr7/zulip,Galexrt/zulip,mohsenSy/zulip,zulip/zulip,wweiradio/zulip,ahmadassaf/zulip,kokoar/zulip,bowlofstew/zulip,shrikrishnaholla/zulip,ipernet/zulip,peguin40/zulip,Drooids/zulip,SmartPeople/zulip,ufosky-server/zulip,joshisa/zulip,Diptanshu8/zulip,tommyip/zulip,wweiradio/zulip,mohsenSy/zulip,littledogboy/zulip,wavelets/zulip,firstblade/zulip,Frouk/zulip,Drooids/zulip,easyfmxu/zulip,udxxabp/zulip,zulip/zulip,PhilSk/zulip,huangkebo/zulip,hengqujushi/zulip,seapasulli/zulip,christi3k/zulip,bssrdf/zulip,Batterfii/zulip,tdr130/zulip,zulip/zulip,Gabriel0402/zulip,mdavid/zulip,AZtheAsian/zulip,isht3/zulip,he15his/zulip,wdaher/zulip,rishig/zulip,suxinde2009/zulip,susansls/zulip,atomic-labs/zulip,cosmicAsymmetry/zulip,m1ssou/zulip,Cheppers/zulip,zofuthan/zulip,MayB/zulip,schatt/zulip,voidException/zulip,esander91/zulip,JPJPJPOPOP/zulip,hayderimran7/zulip,Vallher/zulip,niftynei/zulip,reyha/zulip,yocome/zulip,wweiradio/zulip,tommyip/zulip,armooo/zulip,itnihao/zulip,SmartPeople/zulip,proliming/zulip,he15his/zulip,johnny9/zulip,kou/zulip,johnny9/zulip,yocome/zulip,SmartPeople/zulip,hengqujushi/zulip,kaiyuanheshang/zulip,eastlhu/zulip,glovebx/zulip,wdaher/zulip,deer-hope/zulip,ikasumiwt/zulip,joshisa/zulip,souravbadami/zulip,firstblade/zulip,grave-w-grave/zulip,SmartPeople/zulip,Galexrt/zulip,themass/zulip,proliming/zulip,developerfm/zulip,noroot/zulip,LeeRisk/zulip,shubhamdhama/zulip,natanovia/zulip,joshisa/zulip,timabbott/zulip,andersk/zulip,ryansnowboarder/zulip,shaunstanislaus/zulip,johnnygaddarr/zulip,synicalsyntax/zulip,jphilipsen05/zulip,peiwei/zulip,jainayush975/zulip,TigorC/zulip,PhilSk/zulip,zacps/zulip,yu
vipanda/zulip,ericzhou2008/zulip,jonesgithub/zulip,jphilipsen05/zulip,aakash-cr7/zulip,arpitpanwar/zulip,developerfm/zulip,jimmy54/zulip,MayB/zulip,peiwei/zulip,swinghu/zulip,Cheppers/zulip,vaidap/zulip,ipernet/zulip,niftynei/zulip,KJin99/zulip,vabs22/zulip,zwily/zulip,Qgap/zulip,deer-hope/zulip,pradiptad/zulip,andersk/zulip,shrikrishnaholla/zulip,brockwhittaker/zulip,adnanh/zulip,Qgap/zulip,KJin99/zulip,aakash-cr7/zulip,ApsOps/zulip,mohsenSy/zulip,voidException/zulip,niftynei/zulip,jainayush975/zulip,RobotCaleb/zulip,dhcrzf/zulip,hustlzp/zulip,grave-w-grave/zulip,niftynei/zulip,eeshangarg/zulip,bowlofstew/zulip,ufosky-server/zulip,gkotian/zulip,gigawhitlocks/zulip,johnnygaddarr/zulip,voidException/zulip,joyhchen/zulip,LAndreas/zulip,andersk/zulip,firstblade/zulip,esander91/zulip,Batterfii/zulip,ipernet/zulip,bluesea/zulip,he15his/zulip,DazWorrall/zulip,amyliu345/zulip,TigorC/zulip,tdr130/zulip,pradiptad/zulip,joshisa/zulip,isht3/zulip,ufosky-server/zulip,amanharitsh123/zulip,vikas-parashar/zulip,technicalpickles/zulip,mahim97/zulip,brainwane/zulip,andersk/zulip,eastlhu/zulip,swinghu/zulip,hackerkid/zulip,PaulPetring/zulip,JPJPJPOPOP/zulip,tiansiyuan/zulip,MayB/zulip,MariaFaBella85/zulip,reyha/zulip,SmartPeople/zulip,JPJPJPOPOP/zulip,MariaFaBella85/zulip,PhilSk/zulip,lfranchi/zulip,Jianchun1/zulip,jphilipsen05/zulip,karamcnair/zulip,xuxiao/zulip,Qgap/zulip,guiquanz/zulip,sonali0901/zulip,saitodisse/zulip,dotcool/zulip,joshisa/zulip,natanovia/zulip,peguin40/zulip,wangdeshui/zulip,voidException/zulip,jonesgithub/zulip,avastu/zulip,avastu/zulip,levixie/zulip,moria/zulip,so0k/zulip,udxxabp/zulip,luyifan/zulip,huangkebo/zulip,Diptanshu8/zulip,ryanbackman/zulip,Juanvulcano/zulip,Vallher/zulip,dnmfarrell/zulip,bowlofstew/zulip,rht/zulip,avastu/zulip,m1ssou/zulip,jessedhillon/zulip,AZtheAsian/zulip,isht3/zulip,AZtheAsian/zulip,dotcool/zulip,jrowan/zulip,willingc/zulip,saitodisse/zulip,JPJPJPOPOP/zulip,mansilladev/zulip,hafeez3000/zulip,technicalpickles/zulip,avastu/zulip,PaulPetring/zulip,ipernet/zulip,isht3/zulip,ericzhou2008/zulip,verma-varsha/zulip,RobotCaleb/zulip,guiquanz/zulip,huangkebo/zulip,ashwinirudrappa/zulip,synicalsyntax/zulip,jonesgithub/zulip
|
Add a management command to reset your stream colors to the default.
(imported from commit f6891ad40088bf34686a7d8a2d910a9a0f3be7c2)
|
from django.core.management.base import BaseCommand
from zephyr.models import StreamColor, UserProfile, Subscription, Recipient
class Command(BaseCommand):
help = """Reset all colors for a person to the default grey"""
def handle(self, *args, **options):
if not args:
self.print_help("python manage.py", "reset_colors")
exit(1)
for email in args:
user_profile = UserProfile.objects.get(user__email__iexact=email)
subs = Subscription.objects.filter(user_profile=user_profile,
active=True,
recipient__type=Recipient.STREAM)
for sub in subs:
stream_color, _ = StreamColor.objects.get_or_create(subscription=sub)
stream_color.color = StreamColor.DEFAULT_STREAM_COLOR
stream_color.save()
|
<commit_before><commit_msg>Add a management command to reset your stream colors to the default.
(imported from commit f6891ad40088bf34686a7d8a2d910a9a0f3be7c2)<commit_after>
|
from django.core.management.base import BaseCommand
from zephyr.models import StreamColor, UserProfile, Subscription, Recipient
class Command(BaseCommand):
help = """Reset all colors for a person to the default grey"""
def handle(self, *args, **options):
if not args:
self.print_help("python manage.py", "reset_colors")
exit(1)
for email in args:
user_profile = UserProfile.objects.get(user__email__iexact=email)
subs = Subscription.objects.filter(user_profile=user_profile,
active=True,
recipient__type=Recipient.STREAM)
for sub in subs:
stream_color, _ = StreamColor.objects.get_or_create(subscription=sub)
stream_color.color = StreamColor.DEFAULT_STREAM_COLOR
stream_color.save()
|
Add a management command to reset your stream colors to the default.
(imported from commit f6891ad40088bf34686a7d8a2d910a9a0f3be7c2)from django.core.management.base import BaseCommand
from zephyr.models import StreamColor, UserProfile, Subscription, Recipient
class Command(BaseCommand):
help = """Reset all colors for a person to the default grey"""
def handle(self, *args, **options):
if not args:
self.print_help("python manage.py", "reset_colors")
exit(1)
for email in args:
user_profile = UserProfile.objects.get(user__email__iexact=email)
subs = Subscription.objects.filter(user_profile=user_profile,
active=True,
recipient__type=Recipient.STREAM)
for sub in subs:
stream_color, _ = StreamColor.objects.get_or_create(subscription=sub)
stream_color.color = StreamColor.DEFAULT_STREAM_COLOR
stream_color.save()
|
<commit_before><commit_msg>Add a management command to reset your stream colors to the default.
(imported from commit f6891ad40088bf34686a7d8a2d910a9a0f3be7c2)<commit_after>from django.core.management.base import BaseCommand
from zephyr.models import StreamColor, UserProfile, Subscription, Recipient
class Command(BaseCommand):
help = """Reset all colors for a person to the default grey"""
def handle(self, *args, **options):
if not args:
self.print_help("python manage.py", "reset_colors")
exit(1)
for email in args:
user_profile = UserProfile.objects.get(user__email__iexact=email)
subs = Subscription.objects.filter(user_profile=user_profile,
active=True,
recipient__type=Recipient.STREAM)
for sub in subs:
stream_color, _ = StreamColor.objects.get_or_create(subscription=sub)
stream_color.color = StreamColor.DEFAULT_STREAM_COLOR
stream_color.save()
|
|
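A brief invocation sketch for the reset_colors management command in the record above; it assumes a configured Django project for this codebase, and the email address is only a placeholder:
from django.core.management import call_command
call_command("reset_colors", "user@example.com")  # placeholder address; resets that user's stream colors to the default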
8e65780c50dd97eb9453f6f1d9dc4b74ba4c8e3f
|
backend/simulator/random_series_functions.py
|
backend/simulator/random_series_functions.py
|
import numpy as np
from simulator.series_functions import SinSeries, ConstantSeries
class RandomSinSeries(SinSeries):
AMPLITUDE_LOW = 1.0
AMPLITUDE_HIGH = 10.0
FREQUENCY_LOW = 0.1
FREQUENCY_HIGH = 2.0
def __init__(self, create_ts, update_period):
amplitude = np.random.uniform(self.AMPLITUDE_LOW, self.AMPLITUDE_HIGH, 1)[0]
frequency = np.random.uniform(self.FREQUENCY_LOW, self.FREQUENCY_HIGH, 1)[0]
phase = np.random.uniform(0.0, 2 * np.pi, 1)[0]
super(RandomSinSeries, self).__init__(create_ts, update_period, amplitude, frequency, phase)
class RandomConstantSeries(ConstantSeries):
CONSTANT_LOW = -5.0
CONSTANT_HIGH = 5.0
def __init__(self, create_ts, update_period):
constant = np.random.uniform(self.CONSTANT_LOW, self.CONSTANT_HIGH, 1)[0]
super(RandomConstantSeries, self).__init__(create_ts, update_period, constant)
|
Add random versions of series_functions
|
Add random versions of series_functions
|
Python
|
mit
|
qiubit/luminis,qiubit/luminis,qiubit/luminis,qiubit/luminis
|
Add random versions of series_functions
|
import numpy as np
from simulator.series_functions import SinSeries, ConstantSeries
class RandomSinSeries(SinSeries):
AMPLITUDE_LOW = 1.0
AMPLITUDE_HIGH = 10.0
FREQUENCY_LOW = 0.1
FREQUENCY_HIGH = 2.0
def __init__(self, create_ts, update_period):
amplitude = np.random.uniform(self.AMPLITUDE_LOW, self.AMPLITUDE_HIGH, 1)[0]
frequency = np.random.uniform(self.FREQUENCY_LOW, self.FREQUENCY_HIGH, 1)[0]
phase = np.random.uniform(0.0, 2 * np.pi, 1)[0]
super(RandomSinSeries, self).__init__(create_ts, update_period, amplitude, frequency, phase)
class RandomConstantSeries(ConstantSeries):
CONSTANT_LOW = -5.0
CONSTANT_HIGH = 5.0
def __init__(self, create_ts, update_period):
constant = np.random.uniform(self.CONSTANT_LOW, self.CONSTANT_HIGH, 1)[0]
super(RandomConstantSeries, self).__init__(create_ts, update_period, constant)
|
<commit_before><commit_msg>Add random versions of series_functions<commit_after>
|
import numpy as np
from simulator.series_functions import SinSeries, ConstantSeries
class RandomSinSeries(SinSeries):
AMPLITUDE_LOW = 1.0
AMPLITUDE_HIGH = 10.0
FREQUENCY_LOW = 0.1
FREQUENCY_HIGH = 2.0
def __init__(self, create_ts, update_period):
amplitude = np.random.uniform(self.AMPLITUDE_LOW, self.AMPLITUDE_HIGH, 1)[0]
frequency = np.random.uniform(self.FREQUENCY_LOW, self.FREQUENCY_HIGH, 1)[0]
phase = np.random.uniform(0.0, 2 * np.pi, 1)[0]
super(RandomSinSeries, self).__init__(create_ts, update_period, amplitude, frequency, phase)
class RandomConstantSeries(ConstantSeries):
CONSTANT_LOW = -5.0
CONSTANT_HIGH = 5.0
def __init__(self, create_ts, update_period):
constant = np.random.uniform(self.CONSTANT_LOW, self.CONSTANT_HIGH, 1)[0]
super(RandomConstantSeries, self).__init__(create_ts, update_period, constant)
|
Add random versions of series_functionsimport numpy as np
from simulator.series_functions import SinSeries, ConstantSeries
class RandomSinSeries(SinSeries):
AMPLITUDE_LOW = 1.0
AMPLITUDE_HIGH = 10.0
FREQUENCY_LOW = 0.1
FREQUENCY_HIGH = 2.0
def __init__(self, create_ts, update_period):
amplitude = np.random.uniform(self.AMPLITUDE_LOW, self.AMPLITUDE_HIGH, 1)[0]
frequency = np.random.uniform(self.FREQUENCY_LOW, self.FREQUENCY_HIGH, 1)[0]
phase = np.random.uniform(0.0, 2 * np.pi, 1)[0]
super(RandomSinSeries, self).__init__(create_ts, update_period, amplitude, frequency, phase)
class RandomConstantSeries(ConstantSeries):
CONSTANT_LOW = -5.0
CONSTANT_HIGH = 5.0
def __init__(self, create_ts, update_period):
constant = np.random.uniform(self.CONSTANT_LOW, self.CONSTANT_HIGH, 1)[0]
super(RandomConstantSeries, self).__init__(create_ts, update_period, constant)
|
<commit_before><commit_msg>Add random versions of series_functions<commit_after>import numpy as np
from simulator.series_functions import SinSeries, ConstantSeries
class RandomSinSeries(SinSeries):
AMPLITUDE_LOW = 1.0
AMPLITUDE_HIGH = 10.0
FREQUENCY_LOW = 0.1
FREQUENCY_HIGH = 2.0
def __init__(self, create_ts, update_period):
amplitude = np.random.uniform(self.AMPLITUDE_LOW, self.AMPLITUDE_HIGH, 1)[0]
frequency = np.random.uniform(self.FREQUENCY_LOW, self.FREQUENCY_HIGH, 1)[0]
phase = np.random.uniform(0.0, 2 * np.pi, 1)[0]
super(RandomSinSeries, self).__init__(create_ts, update_period, amplitude, frequency, phase)
class RandomConstantSeries(ConstantSeries):
CONSTANT_LOW = -5.0
CONSTANT_HIGH = 5.0
def __init__(self, create_ts, update_period):
constant = np.random.uniform(self.CONSTANT_LOW, self.CONSTANT_HIGH, 1)[0]
super(RandomConstantSeries, self).__init__(create_ts, update_period, constant)
|
|
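A standalone sketch of the parameter sampling used by the classes in the record above, for readers without the simulator package on hand; the sine evaluation is an assumption, since SinSeries itself is not shown in this record:
import numpy as np
amplitude = np.random.uniform(1.0, 10.0)   # AMPLITUDE_LOW .. AMPLITUDE_HIGH
frequency = np.random.uniform(0.1, 2.0)    # FREQUENCY_LOW .. FREQUENCY_HIGH
phase = np.random.uniform(0.0, 2 * np.pi)  # uniform phase in [0, 2*pi)
def value_at(t):
    # Hypothetical evaluation of the resulting series at time t; the real
    # formula lives in simulator.series_functions.SinSeries, not shown here.
    return amplitude * np.sin(2 * np.pi * frequency * t + phase)
print(value_at(0.0), value_at(0.5))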
a3c5becc0e2268714228c0a9db613cbfa46de7f7
|
calaccess_raw/migrations/0014_auto_20170421_1821.py
|
calaccess_raw/migrations/0014_auto_20170421_1821.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-21 18:21
from __future__ import unicode_literals
import calaccess_raw.annotations
import calaccess_raw.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('calaccess_raw', '0013_auto_20161123_2219'),
]
operations = [
migrations.AlterModelOptions(
name='rawdatafile',
options={'ordering': ('-version_id', 'file_name'), 'verbose_name': 'TRACKING: CAL-ACCESS raw data file'},
),
migrations.AlterModelOptions(
name='rawdataversion',
options={'get_latest_by': 'release_datetime', 'ordering': ('-release_datetime',), 'verbose_name': 'TRACKING: CAL-ACCESS raw data version'},
),
migrations.AlterField(
model_name='rcptcd',
name='tran_type',
field=calaccess_raw.fields.CharField(blank=True, choices=[('F', 'Forgiven Loan'), ('I', 'Intermediary'), ('R', 'Returned'), ('T', 'Third Party Repayment'), ('X', 'Transfer'), ('0', 'Unknown'), ('M', 'Unknown'), ('N', 'Unknown')], db_column='TRAN_TYPE', documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2711616-MapCalFormat2Fields', start_page=72), calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=29), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=38)], help_text='Transaction Type', max_length=1, verbose_name='transaction type'),
),
]
|
Add migration for RawDataVersion/File options, rcptcd.tran_type field options
|
Add migration for RawDataVersion/File options, rcptcd.tran_type field options
|
Python
|
mit
|
california-civic-data-coalition/django-calaccess-raw-data
|
Add migration for RawDataVersion/File options, rcptcd.tran_type field options
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-21 18:21
from __future__ import unicode_literals
import calaccess_raw.annotations
import calaccess_raw.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('calaccess_raw', '0013_auto_20161123_2219'),
]
operations = [
migrations.AlterModelOptions(
name='rawdatafile',
options={'ordering': ('-version_id', 'file_name'), 'verbose_name': 'TRACKING: CAL-ACCESS raw data file'},
),
migrations.AlterModelOptions(
name='rawdataversion',
options={'get_latest_by': 'release_datetime', 'ordering': ('-release_datetime',), 'verbose_name': 'TRACKING: CAL-ACCESS raw data version'},
),
migrations.AlterField(
model_name='rcptcd',
name='tran_type',
field=calaccess_raw.fields.CharField(blank=True, choices=[('F', 'Forgiven Loan'), ('I', 'Intermediary'), ('R', 'Returned'), ('T', 'Third Party Repayment'), ('X', 'Transfer'), ('0', 'Unknown'), ('M', 'Unknown'), ('N', 'Unknown')], db_column='TRAN_TYPE', documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2711616-MapCalFormat2Fields', start_page=72), calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=29), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=38)], help_text='Transaction Type', max_length=1, verbose_name='transaction type'),
),
]
|
<commit_before><commit_msg>Add migration for RawDataVersion/File options, rcptcd.tran_type field options<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-21 18:21
from __future__ import unicode_literals
import calaccess_raw.annotations
import calaccess_raw.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('calaccess_raw', '0013_auto_20161123_2219'),
]
operations = [
migrations.AlterModelOptions(
name='rawdatafile',
options={'ordering': ('-version_id', 'file_name'), 'verbose_name': 'TRACKING: CAL-ACCESS raw data file'},
),
migrations.AlterModelOptions(
name='rawdataversion',
options={'get_latest_by': 'release_datetime', 'ordering': ('-release_datetime',), 'verbose_name': 'TRACKING: CAL-ACCESS raw data version'},
),
migrations.AlterField(
model_name='rcptcd',
name='tran_type',
field=calaccess_raw.fields.CharField(blank=True, choices=[('F', 'Forgiven Loan'), ('I', 'Intermediary'), ('R', 'Returned'), ('T', 'Third Party Repayment'), ('X', 'Transfer'), ('0', 'Unknown'), ('M', 'Unknown'), ('N', 'Unknown')], db_column='TRAN_TYPE', documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2711616-MapCalFormat2Fields', start_page=72), calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=29), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=38)], help_text='Transaction Type', max_length=1, verbose_name='transaction type'),
),
]
|
Add migration for RawDataVersion/File options, rcptcd.tran_type field options# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-21 18:21
from __future__ import unicode_literals
import calaccess_raw.annotations
import calaccess_raw.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('calaccess_raw', '0013_auto_20161123_2219'),
]
operations = [
migrations.AlterModelOptions(
name='rawdatafile',
options={'ordering': ('-version_id', 'file_name'), 'verbose_name': 'TRACKING: CAL-ACCESS raw data file'},
),
migrations.AlterModelOptions(
name='rawdataversion',
options={'get_latest_by': 'release_datetime', 'ordering': ('-release_datetime',), 'verbose_name': 'TRACKING: CAL-ACCESS raw data version'},
),
migrations.AlterField(
model_name='rcptcd',
name='tran_type',
field=calaccess_raw.fields.CharField(blank=True, choices=[('F', 'Forgiven Loan'), ('I', 'Intermediary'), ('R', 'Returned'), ('T', 'Third Party Repayment'), ('X', 'Transfer'), ('0', 'Unknown'), ('M', 'Unknown'), ('N', 'Unknown')], db_column='TRAN_TYPE', documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2711616-MapCalFormat2Fields', start_page=72), calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=29), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=38)], help_text='Transaction Type', max_length=1, verbose_name='transaction type'),
),
]
|
<commit_before><commit_msg>Add migration for RawDataVersion/File options, rcptcd.tran_type field options<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-21 18:21
from __future__ import unicode_literals
import calaccess_raw.annotations
import calaccess_raw.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('calaccess_raw', '0013_auto_20161123_2219'),
]
operations = [
migrations.AlterModelOptions(
name='rawdatafile',
options={'ordering': ('-version_id', 'file_name'), 'verbose_name': 'TRACKING: CAL-ACCESS raw data file'},
),
migrations.AlterModelOptions(
name='rawdataversion',
options={'get_latest_by': 'release_datetime', 'ordering': ('-release_datetime',), 'verbose_name': 'TRACKING: CAL-ACCESS raw data version'},
),
migrations.AlterField(
model_name='rcptcd',
name='tran_type',
field=calaccess_raw.fields.CharField(blank=True, choices=[('F', 'Forgiven Loan'), ('I', 'Intermediary'), ('R', 'Returned'), ('T', 'Third Party Repayment'), ('X', 'Transfer'), ('0', 'Unknown'), ('M', 'Unknown'), ('N', 'Unknown')], db_column='TRAN_TYPE', documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2711616-MapCalFormat2Fields', start_page=72), calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=29), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=38)], help_text='Transaction Type', max_length=1, verbose_name='transaction type'),
),
]
|
|
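A minimal sketch of applying the migration above, assuming a configured Django project with the calaccess_raw app installed (equivalent to running the migrate management command from the shell):
from django.core.management import call_command
call_command("migrate", "calaccess_raw")  # applies 0014_auto_20170421_1821 and any earlier pending migrations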
5bb96cd91809669b200d8a97ef80a6dcb6787781
|
scripts/anonscrobbles.py
|
scripts/anonscrobbles.py
|
#!/usr/bin/env python
import random
s = open("scrobbledump.sql", "r")
o = open("scrobbles.anonymous.sql", "w")
datasection = False
usermap = {}
#track, artist, "time", mbid, album, source, rating, length, stid, userid, track_tsv, artist_tsv
for line in s.readlines():
if line.rstrip() == "\.":
datasection = False
if datasection:
data = line.split("\t")
uid = data[9]
if uid in usermap:
data[9] = str(usermap[uid])
else:
newid = random.randint(0, 1000000)
while newid in usermap:
newid = random.randint(0, 1000000)
usermap[uid] = newid
data[9] = str(newid)
o.write("\t".join(data))
else:
o.write(line)
if line[:4] == "COPY":
datasection = True
s.close()
o.close()
|
Add hacky script for anonymising dumps of the Scrobbles table whilst still maintaining internal consistency
|
Add hacky script for anonymising dumps of the Scrobbles table whilst still maintaining internal consistency
|
Python
|
agpl-3.0
|
foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm
|
Add hacky script for anonymising dumps of the Scrobbles table whilst still maintaining internal consistency
|
#!/usr/bin/env python
import random
s = open("scrobbledump.sql", "r")
o = open("scrobbles.anonymous.sql", "w")
datasection = False
usermap = {}
#track, artist, "time", mbid, album, source, rating, length, stid, userid, track_tsv, artist_tsv
for line in s.readlines():
if line.rstrip() == "\.":
datasection = False
if datasection:
data = line.split("\t")
uid = data[9]
if uid in usermap:
data[9] = str(usermap[uid])
else:
newid = random.randint(0, 1000000)
while newid in usermap:
newid = random.randint(0, 1000000)
usermap[uid] = newid
data[9] = str(newid)
o.write("\t".join(data))
else:
o.write(line)
if line[:4] == "COPY":
datasection = True
s.close()
o.close()
|
<commit_before><commit_msg>Add hacky script for anonymising dumps of the Scrobbles table whilst still maintaining internal consistency<commit_after>
|
#!/usr/bin/env python
import random
s = open("scrobbledump.sql", "r")
o = open("scrobbles.anonymous.sql", "w")
datasection = False
usermap = {}
#track, artist, "time", mbid, album, source, rating, length, stid, userid, track_tsv, artist_tsv
for line in s.readlines():
if line.rstrip() == "\.":
datasection = False
if datasection:
data = line.split("\t")
uid = data[9]
if uid in usermap:
data[9] = str(usermap[uid])
else:
newid = random.randint(0, 1000000)
while newid in usermap:
newid = random.randint(0, 1000000)
usermap[uid] = newid
data[9] = str(newid)
o.write("\t".join(data))
else:
o.write(line)
if line[:4] == "COPY":
datasection = True
s.close()
o.close()
|
Add hacky script for anonymising dumps of the Scrobbles table whilst still maintaining internal consistency#!/usr/bin/env python
import random
s = open("scrobbledump.sql", "r")
o = open("scrobbles.anonymous.sql", "w")
datasection = False
usermap = {}
#track, artist, "time", mbid, album, source, rating, length, stid, userid, track_tsv, artist_tsv
for line in s.readlines():
if line.rstrip() == "\.":
datasection = False
if datasection:
data = line.split("\t")
uid = data[9]
if uid in usermap:
data[9] = str(usermap[uid])
else:
newid = random.randint(0, 1000000)
while newid in usermap:
newid = random.randint(0, 1000000)
usermap[uid] = newid
data[9] = str(newid)
o.write("\t".join(data))
else:
o.write(line)
if line[:4] == "COPY":
datasection = True
s.close()
o.close()
|
<commit_before><commit_msg>Add hacky script for anonymising dumps of the Scrobbles table whilst still maintaining internal consistency<commit_after>#!/usr/bin/env python
import random
s = open("scrobbledump.sql", "r")
o = open("scrobbles.anonymous.sql", "w")
datasection = False
usermap = {}
#track, artist, "time", mbid, album, source, rating, length, stid, userid, track_tsv, artist_tsv
for line in s.readlines():
if line.rstrip() == "\.":
datasection = False
if datasection:
data = line.split("\t")
uid = data[9]
if uid in usermap:
data[9] = str(usermap[uid])
else:
newid = random.randint(0, 1000000)
while newid in usermap:
newid = random.randint(0, 1000000)
usermap[uid] = newid
data[9] = str(newid)
o.write("\t".join(data))
else:
o.write(line)
if line[:4] == "COPY":
datasection = True
s.close()
o.close()
|
|
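A self-contained sketch of the user-id remapping idea from the script above, run on in-memory rows instead of a SQL dump; the two-column row layout is a simplification for illustration, and new ids are checked against the already-assigned values:
import random
usermap = {}
def anon_uid(uid):
    # The same old id always maps to the same randomly chosen new id.
    if uid not in usermap:
        newid = random.randint(0, 1000000)
        while newid in usermap.values():
            newid = random.randint(0, 1000000)
        usermap[uid] = newid
    return usermap[uid]
rows = [("track-1", "42"), ("track-2", "42"), ("track-3", "7")]
print([(track, str(anon_uid(uid))) for track, uid in rows])  # the two "42" rows share one new id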
7da0a22a6533dc93da23dfa5025cb5172496c97f
|
sorbic/utils/traverse.py
|
sorbic/utils/traverse.py
|
# -*- coding: utf-8 -*-
'''
Traversal algorithms, used to traverse data structures such as can be found in
database documents
'''
DEFAULT_TARGET_DELIM = ':'
def traverse_dict_and_list(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM):
'''
Traverse a dict or list using a colon-delimited (or otherwise delimited,
using the 'delimiter' param) target string. The target 'foo:bar:0' will
return data['foo']['bar'][0] if this value exists, and will otherwise
return the dict in the default argument.
Function will automatically determine the target type.
The target 'foo:bar:0' will return data['foo']['bar'][0] if data like
{'foo':{'bar':['baz']}} , if data like {'foo':{'bar':{'0':'baz'}}}
then return data['foo']['bar']['0']
'''
for each in key.split(delimiter):
if isinstance(data, list):
try:
idx = int(each)
except ValueError:
embed_match = False
# Index was not numeric, lets look at any embedded dicts
for embedded in (x for x in data if isinstance(x, dict)):
try:
data = embedded[each]
embed_match = True
break
except KeyError:
pass
if not embed_match:
# No embedded dicts matched, return the default
return default
else:
try:
data = data[idx]
except IndexError:
return default
else:
try:
data = data[each]
except (KeyError, TypeError):
return default
return data
|
Add traversal lib to utils
|
Add traversal lib to utils
|
Python
|
apache-2.0
|
thatch45/sorbic,s0undt3ch/sorbic
|
Add traversal lib to utils
|
# -*- coding: utf-8 -*-
'''
Traversal algorithms, used to traverse data structures such as can be found in
database documents
'''
DEFAULT_TARGET_DELIM = ':'
def traverse_dict_and_list(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM):
'''
Traverse a dict or list using a colon-delimited (or otherwise delimited,
using the 'delimiter' param) target string. The target 'foo:bar:0' will
return data['foo']['bar'][0] if this value exists, and will otherwise
return the dict in the default argument.
Function will automatically determine the target type.
The target 'foo:bar:0' will return data['foo']['bar'][0] if data like
{'foo':{'bar':['baz']}} , if data like {'foo':{'bar':{'0':'baz'}}}
then return data['foo']['bar']['0']
'''
for each in key.split(delimiter):
if isinstance(data, list):
try:
idx = int(each)
except ValueError:
embed_match = False
# Index was not numeric, lets look at any embedded dicts
for embedded in (x for x in data if isinstance(x, dict)):
try:
data = embedded[each]
embed_match = True
break
except KeyError:
pass
if not embed_match:
# No embedded dicts matched, return the default
return default
else:
try:
data = data[idx]
except IndexError:
return default
else:
try:
data = data[each]
except (KeyError, TypeError):
return default
return data
|
<commit_before><commit_msg>Add traversal lib to utils<commit_after>
|
# -*- coding: utf-8 -*-
'''
Traversal algorithms, used to traverse data structures such as can be found in
database documents
'''
DEFAULT_TARGET_DELIM = ':'
def traverse_dict_and_list(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM):
'''
Traverse a dict or list using a colon-delimited (or otherwise delimited,
using the 'delimiter' param) target string. The target 'foo:bar:0' will
return data['foo']['bar'][0] if this value exists, and will otherwise
return the dict in the default argument.
Function will automatically determine the target type.
The target 'foo:bar:0' will return data['foo']['bar'][0] if data like
{'foo':{'bar':['baz']}} , if data like {'foo':{'bar':{'0':'baz'}}}
then return data['foo']['bar']['0']
'''
for each in key.split(delimiter):
if isinstance(data, list):
try:
idx = int(each)
except ValueError:
embed_match = False
# Index was not numeric, lets look at any embedded dicts
for embedded in (x for x in data if isinstance(x, dict)):
try:
data = embedded[each]
embed_match = True
break
except KeyError:
pass
if not embed_match:
# No embedded dicts matched, return the default
return default
else:
try:
data = data[idx]
except IndexError:
return default
else:
try:
data = data[each]
except (KeyError, TypeError):
return default
return data
|
Add traversal lib to utils# -*- coding: utf-8 -*-
'''
Traversal algorithms, used to traverse data structures such as can be found in
database documents
'''
DEFAULT_TARGET_DELIM = ':'
def traverse_dict_and_list(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM):
'''
Traverse a dict or list using a colon-delimited (or otherwise delimited,
using the 'delimiter' param) target string. The target 'foo:bar:0' will
return data['foo']['bar'][0] if this value exists, and will otherwise
return the dict in the default argument.
Function will automatically determine the target type.
The target 'foo:bar:0' will return data['foo']['bar'][0] if data like
{'foo':{'bar':['baz']}} , if data like {'foo':{'bar':{'0':'baz'}}}
then return data['foo']['bar']['0']
'''
for each in key.split(delimiter):
if isinstance(data, list):
try:
idx = int(each)
except ValueError:
embed_match = False
# Index was not numeric, lets look at any embedded dicts
for embedded in (x for x in data if isinstance(x, dict)):
try:
data = embedded[each]
embed_match = True
break
except KeyError:
pass
if not embed_match:
# No embedded dicts matched, return the default
return default
else:
try:
data = data[idx]
except IndexError:
return default
else:
try:
data = data[each]
except (KeyError, TypeError):
return default
return data
|
<commit_before><commit_msg>Add traversal lib to utils<commit_after># -*- coding: utf-8 -*-
'''
Traversal algorithms, used to traverse data structures such as can be found in
database documents
'''
DEFAULT_TARGET_DELIM = ':'
def traverse_dict_and_list(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM):
'''
Traverse a dict or list using a colon-delimited (or otherwise delimited,
using the 'delimiter' param) target string. The target 'foo:bar:0' will
return data['foo']['bar'][0] if this value exists, and will otherwise
return the dict in the default argument.
Function will automatically determine the target type.
The target 'foo:bar:0' will return data['foo']['bar'][0] if data like
{'foo':{'bar':['baz']}} , if data like {'foo':{'bar':{'0':'baz'}}}
then return data['foo']['bar']['0']
'''
for each in key.split(delimiter):
if isinstance(data, list):
try:
idx = int(each)
except ValueError:
embed_match = False
# Index was not numeric, lets look at any embedded dicts
for embedded in (x for x in data if isinstance(x, dict)):
try:
data = embedded[each]
embed_match = True
break
except KeyError:
pass
if not embed_match:
# No embedded dicts matched, return the default
return default
else:
try:
data = data[idx]
except IndexError:
return default
else:
try:
data = data[each]
except (KeyError, TypeError):
return default
return data
|
|
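Example calls against the helper above, assuming the module is importable as sorbic.utils.traverse per the file path in this record:
from sorbic.utils.traverse import traverse_dict_and_list
data = {'foo': {'bar': ['baz', {'qux': 1}]}}
print(traverse_dict_and_list(data, 'foo:bar:0'))                   # 'baz' (numeric index into the list)
print(traverse_dict_and_list(data, 'foo:bar:qux'))                 # 1 (matched inside an embedded dict)
print(traverse_dict_and_list(data, 'foo:missing', default='n/a'))  # 'n/a' (falls back to the default)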
4b50f0c57a99fbe5bff4202c0a22ad55b923dd6c
|
simulations/replayLog.py
|
simulations/replayLog.py
|
import pickle
import logging
from parseMaildir import Email
logging.basicConfig(level=logging.INFO) # Set to .DEBUG for gory details
# Parse the pickles generated by parseMaildir.py
parsedLogsFolder = 'Enron/parsing/'
social = pickle.load(open(parsedLogsFolder + "social.pkl", "rb"))
log = pickle.load(open(parsedLogsFolder + "replay_log.pkl", "rb"))
def do_replay_log():
# Set of users we know the social graph for
enron_users = set([])
# Dictionary to keep track of the latest head known to people
chain_head_dict = {}
# Initialize the latest known head dictionary
for user in social:
enron_users.add(user['user'])
chain_head_dict[user['user'], user['user']] = 1
for friend in user['friends']:
chain_head_dict[(user['user'], friend)] = 0
for email in log:
if email.From not in enron_users:
continue
for recipient in email.To | email.Cc | email.Bcc - {email.From}:
# For all recipients, update the dict entry for the sender
chain_head_dict[(recipient, email.From)] = chain_head_dict[email.From, email.From]
logging.debug("User %s updated the head for user %s" % (recipient, email.From))
# For all recipients, update the dict entries for the public recipients
for other in email.To | email.Cc - {recipient}:
if (email.From, other) in chain_head_dict:
if not (recipient, other) in chain_head_dict:
chain_head_dict[(recipient, other)] = chain_head_dict[recipient, other]
logging.debug("User %s updated the head for user %s" % (recipient, other))
return chain_head_dict
def main():
known_head = do_replay_log()
updated = 0
not_updated = 0
for user, friend in known_head:
if user == friend:
continue
if known_head[(user, friend)] == known_head[(user, user)]:
updated += 1
else:
not_updated += 1
print "There were %s updates on the latest head dictionary, but %s entries are stale." % (updated, not_updated)
if __name__ == "__main__":
main()
|
Add simulation script for ClaimChain in Autocrypt mode
|
Add simulation script for ClaimChain in Autocrypt mode
|
Python
|
mit
|
gdanezis/claimchain-core
|
Add simulation script for ClaimChain in Autocrypt mode
|
import pickle
import logging
from parseMaildir import Email
logging.basicConfig(level=logging.INFO) # Set to .DEBUG for gory details
# Parse the pickles generated by parseMaildir.py
parsedLogsFolder = 'Enron/parsing/'
social = pickle.load(open(parsedLogsFolder + "social.pkl", "rb"))
log = pickle.load(open(parsedLogsFolder + "replay_log.pkl", "rb"))
def do_replay_log():
# Set of users we know the social graph for
enron_users = set([])
# Dictionary to keep track of the latest head known to people
chain_head_dict = {}
# Initialize the latest known head dictionary
for user in social:
enron_users.add(user['user'])
chain_head_dict[user['user'], user['user']] = 1
for friend in user['friends']:
chain_head_dict[(user['user'], friend)] = 0
for email in log:
if email.From not in enron_users:
continue
for recipient in email.To | email.Cc | email.Bcc - {email.From}:
# For all recipients, update the dict entry for the sender
chain_head_dict[(recipient, email.From)] = chain_head_dict[email.From, email.From]
logging.debug("User %s updated the head for user %s" % (recipient, email.From))
# For all recipients, update the dict entries for the public recipients
for other in email.To | email.Cc - {recipient}:
if (email.From, other) in chain_head_dict:
if not (recipient, other) in chain_head_dict:
chain_head_dict[(recipient, other)] = chain_head_dict[recipient, other]
logging.debug("User %s updated the head for user %s" % (recipient, other))
return chain_head_dict
def main():
known_head = do_replay_log()
updated = 0
not_updated = 0
for user, friend in known_head:
if user == friend:
continue
if known_head[(user, friend)] == known_head[(user, user)]:
updated += 1
else:
not_updated += 1
print "There were %s updates on the latest head dictionary, but %s entries are stale." % (updated, not_updated)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add simulation script for ClaimChain in Autocrypt mode<commit_after>
|
import pickle
import logging
from parseMaildir import Email
logging.basicConfig(level=logging.INFO) # Set to .DEBUG for gory details
# Parse the pickles generated by parseMaildir.py
parsedLogsFolder = 'Enron/parsing/'
social = pickle.load(open(parsedLogsFolder + "social.pkl", "rb"))
log = pickle.load(open(parsedLogsFolder + "replay_log.pkl", "rb"))
def do_replay_log():
# Set of users we know the social graph for
enron_users = set([])
# Dictionary to keep track of the latest head known to people
chain_head_dict = {}
# Initialize the latest known head dictionary
for user in social:
enron_users.add(user['user'])
chain_head_dict[user['user'], user['user']] = 1
for friend in user['friends']:
chain_head_dict[(user['user'], friend)] = 0
for email in log:
if email.From not in enron_users:
continue
for recipient in email.To | email.Cc | email.Bcc - {email.From}:
# For all recipients, update the dict entry for the sender
chain_head_dict[(recipient, email.From)] = chain_head_dict[email.From, email.From]
logging.debug("User %s updated the head for user %s" % (recipient, email.From))
# For all recipients, update the dict entries for the public recipients
for other in email.To | email.Cc - {recipient}:
if (email.From, other) in chain_head_dict:
if not (recipient, other) in chain_head_dict:
chain_head_dict[(recipient, other)] = chain_head_dict[recipient, other]
logging.debug("User %s updated the head for user %s" % (recipient, other))
return chain_head_dict
def main():
known_head = do_replay_log()
updated = 0
not_updated = 0
for user, friend in known_head:
if user == friend:
continue
if known_head[(user, friend)] == known_head[(user, user)]:
updated += 1
else:
not_updated += 1
print "There were %s updates on the latest head dictionary, but %s entries are stale." % (updated, not_updated)
if __name__ == "__main__":
main()
|
Add simulation script for ClaimChain in Autocrypt modeimport pickle
import logging
from parseMaildir import Email
logging.basicConfig(level=logging.INFO) # Set to .DEBUG for gory details
# Parse the pickles generated by parseMaildir.py
parsedLogsFolder = 'Enron/parsing/'
social = pickle.load(open(parsedLogsFolder + "social.pkl", "rb"))
log = pickle.load(open(parsedLogsFolder + "replay_log.pkl", "rb"))
def do_replay_log():
# Set of users we know the social graph for
enron_users = set([])
# Dictionary to keep track of the latest head known to people
chain_head_dict = {}
# Initialize the latest known head dictionary
for user in social:
enron_users.add(user['user'])
chain_head_dict[user['user'], user['user']] = 1
for friend in user['friends']:
chain_head_dict[(user['user'], friend)] = 0
for email in log:
if email.From not in enron_users:
continue
for recipient in email.To | email.Cc | email.Bcc - {email.From}:
# For all recipients, update the dict entry for the sender
chain_head_dict[(recipient, email.From)] = chain_head_dict[email.From, email.From]
logging.debug("User %s updated the head for user %s" % (recipient, email.From))
# For all recipients, update the dict entries for the public recipients
for other in email.To | email.Cc - {recipient}:
if (email.From, other) in chain_head_dict:
if not (recipient, other) in chain_head_dict:
chain_head_dict[(recipient, other)] = chain_head_dict[recipient, other]
logging.debug("User %s updated the head for user %s" % (recipient, other))
return chain_head_dict
def main():
known_head = do_replay_log()
updated = 0
not_updated = 0
for user, friend in known_head:
if user == friend:
continue
if known_head[(user, friend)] == known_head[(user, user)]:
updated += 1
else:
not_updated += 1
print "There were %s updates on the latest head dictionary, but %s entries are stale." % (updated, not_updated)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add simulation script for ClaimChain in Autocrypt mode<commit_after>import pickle
import logging
from parseMaildir import Email
logging.basicConfig(level=logging.INFO) # Set to .DEBUG for gory details
# Parse the pickles generated by parseMaildir.py
parsedLogsFolder = 'Enron/parsing/'
social = pickle.load(open(parsedLogsFolder + "social.pkl", "rb"))
log = pickle.load(open(parsedLogsFolder + "replay_log.pkl", "rb"))
def do_replay_log():
# Set of users we know the social graph for
enron_users = set([])
# Dictionary to keep track of the latest head known to people
chain_head_dict = {}
# Initialize the latest known head dictionary
for user in social:
enron_users.add(user['user'])
chain_head_dict[user['user'], user['user']] = 1
for friend in user['friends']:
chain_head_dict[(user['user'], friend)] = 0
for email in log:
if email.From not in enron_users:
continue
for recipient in email.To | email.Cc | email.Bcc - {email.From}:
# For all recipients, update the dict entry for the sender
chain_head_dict[(recipient, email.From)] = chain_head_dict[email.From, email.From]
logging.debug("User %s updated the head for user %s" % (recipient, email.From))
# For all recipients, update the dict entries for the public recipients
for other in email.To | email.Cc - {recipient}:
if (email.From, other) in chain_head_dict:
if not (recipient, other) in chain_head_dict:
chain_head_dict[(recipient, other)] = chain_head_dict[recipient, other]
logging.debug("User %s updated the head for user %s" % (recipient, other))
return chain_head_dict
def main():
known_head = do_replay_log()
updated = 0
not_updated = 0
for user, friend in known_head:
if user == friend:
continue
if known_head[(user, friend)] == known_head[(user, user)]:
updated += 1
else:
not_updated += 1
print "There were %s updates on the latest head dictionary, but %s entries are stale." % (updated, not_updated)
if __name__ == "__main__":
main()
|
|
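A toy illustration of the head-propagation bookkeeping performed by do_replay_log above, on a hand-made two-user example (the names and the single update step are illustrative only):
chain_head_dict = {("alice", "alice"): 1, ("bob", "bob"): 1, ("bob", "alice"): 0}
sender, recipient = "alice", "bob"
# bob receives an email from alice, so bob now learns alice's latest head
chain_head_dict[(recipient, sender)] = chain_head_dict[(sender, sender)]
print(chain_head_dict[("bob", "alice")])  # 1 -> bob's view of alice is now up to date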
6dd0130e64d04d66d5325c1e76995056ceacf453
|
PyGitUp/tests/test_faster_fastforwarded.py
|
PyGitUp/tests/test_faster_fastforwarded.py
|
# System imports
import os
from os.path import join
from git import *
from nose.tools import *
from PyGitUp.tests import basepath, init_master, update_file
test_name = 'faster-forwarded'
repo_path = join(basepath, test_name + os.sep)
def setup():
global master, repo
master_path, master = init_master(test_name)
# Prepare master repo
master.git.checkout(b=test_name)
# Clone to test repo
path = join(basepath, test_name)
master.clone(path, b=test_name)
repo = Repo(path, odbt=GitCmdObjectDB)
repo.git.checkout('origin/' + test_name, b=test_name + '.2')
assert repo.working_dir == path
# Modify file in master
update_file(master, test_name)
def test_faster_forwarded():
""" Run 'git up' with result: (fast) fast-forwarding """
os.chdir(repo_path)
assert_not_equal(master.branches[test_name].commit,
repo.branches[test_name].commit)
assert_not_equal(master.branches[test_name].commit,
repo.branches[test_name + '.2'].commit)
from PyGitUp.gitup import GitUp
gitup = GitUp(testing=True)
gitup.run()
assert_equal(len(gitup.states), 2)
assert_equal(gitup.states[0], 'fast-forwarding')
assert_equal(gitup.states[1], 'fast-forwarding')
assert_equal(master.branches[test_name].commit,
repo.branches[test_name].commit)
assert_equal(master.branches[test_name].commit,
repo.branches[test_name + '.2'].commit)
|
Add test for faster fastforwarded
|
Add test for faster fastforwarded
|
Python
|
mit
|
msiemens/PyGitUp
|
Add test for faster fastforwarded
|
# System imports
import os
from os.path import join
from git import *
from nose.tools import *
from PyGitUp.tests import basepath, init_master, update_file
test_name = 'faster-forwarded'
repo_path = join(basepath, test_name + os.sep)
def setup():
global master, repo
master_path, master = init_master(test_name)
# Prepare master repo
master.git.checkout(b=test_name)
# Clone to test repo
path = join(basepath, test_name)
master.clone(path, b=test_name)
repo = Repo(path, odbt=GitCmdObjectDB)
repo.git.checkout('origin/' + test_name, b=test_name + '.2')
assert repo.working_dir == path
# Modify file in master
update_file(master, test_name)
def test_faster_forwarded():
""" Run 'git up' with result: (fast) fast-forwarding """
os.chdir(repo_path)
assert_not_equal(master.branches[test_name].commit,
repo.branches[test_name].commit)
assert_not_equal(master.branches[test_name].commit,
repo.branches[test_name + '.2'].commit)
from PyGitUp.gitup import GitUp
gitup = GitUp(testing=True)
gitup.run()
assert_equal(len(gitup.states), 2)
assert_equal(gitup.states[0], 'fast-forwarding')
assert_equal(gitup.states[1], 'fast-forwarding')
assert_equal(master.branches[test_name].commit,
repo.branches[test_name].commit)
assert_equal(master.branches[test_name].commit,
repo.branches[test_name + '.2'].commit)
|
<commit_before><commit_msg>Add test for faster fastforwarded<commit_after>
|
# System imports
import os
from os.path import join
from git import *
from nose.tools import *
from PyGitUp.tests import basepath, init_master, update_file
test_name = 'faster-forwarded'
repo_path = join(basepath, test_name + os.sep)
def setup():
global master, repo
master_path, master = init_master(test_name)
# Prepare master repo
master.git.checkout(b=test_name)
# Clone to test repo
path = join(basepath, test_name)
master.clone(path, b=test_name)
repo = Repo(path, odbt=GitCmdObjectDB)
repo.git.checkout('origin/' + test_name, b=test_name + '.2')
assert repo.working_dir == path
# Modify file in master
update_file(master, test_name)
def test_faster_forwarded():
""" Run 'git up' with result: (fast) fast-forwarding """
os.chdir(repo_path)
assert_not_equal(master.branches[test_name].commit,
repo.branches[test_name].commit)
assert_not_equal(master.branches[test_name].commit,
repo.branches[test_name + '.2'].commit)
from PyGitUp.gitup import GitUp
gitup = GitUp(testing=True)
gitup.run()
assert_equal(len(gitup.states), 2)
assert_equal(gitup.states[0], 'fast-forwarding')
assert_equal(gitup.states[1], 'fast-forwarding')
assert_equal(master.branches[test_name].commit,
repo.branches[test_name].commit)
assert_equal(master.branches[test_name].commit,
repo.branches[test_name + '.2'].commit)
|
Add test for faster fastforwarded# System imports
import os
from os.path import join
from git import *
from nose.tools import *
from PyGitUp.tests import basepath, init_master, update_file
test_name = 'faster-forwarded'
repo_path = join(basepath, test_name + os.sep)
def setup():
global master, repo
master_path, master = init_master(test_name)
# Prepare master repo
master.git.checkout(b=test_name)
# Clone to test repo
path = join(basepath, test_name)
master.clone(path, b=test_name)
repo = Repo(path, odbt=GitCmdObjectDB)
repo.git.checkout('origin/' + test_name, b=test_name + '.2')
assert repo.working_dir == path
# Modify file in master
update_file(master, test_name)
def test_faster_forwarded():
""" Run 'git up' with result: (fast) fast-forwarding """
os.chdir(repo_path)
assert_not_equal(master.branches[test_name].commit,
repo.branches[test_name].commit)
assert_not_equal(master.branches[test_name].commit,
repo.branches[test_name + '.2'].commit)
from PyGitUp.gitup import GitUp
gitup = GitUp(testing=True)
gitup.run()
assert_equal(len(gitup.states), 2)
assert_equal(gitup.states[0], 'fast-forwarding')
assert_equal(gitup.states[1], 'fast-forwarding')
assert_equal(master.branches[test_name].commit,
repo.branches[test_name].commit)
assert_equal(master.branches[test_name].commit,
repo.branches[test_name + '.2'].commit)
|
<commit_before><commit_msg>Add test for faster fastforwarded<commit_after># System imports
import os
from os.path import join
from git import *
from nose.tools import *
from PyGitUp.tests import basepath, init_master, update_file
test_name = 'faster-forwarded'
repo_path = join(basepath, test_name + os.sep)
def setup():
global master, repo
master_path, master = init_master(test_name)
# Prepare master repo
master.git.checkout(b=test_name)
# Clone to test repo
path = join(basepath, test_name)
master.clone(path, b=test_name)
repo = Repo(path, odbt=GitCmdObjectDB)
repo.git.checkout('origin/' + test_name, b=test_name + '.2')
assert repo.working_dir == path
# Modify file in master
update_file(master, test_name)
def test_faster_forwarded():
""" Run 'git up' with result: (fast) fast-forwarding """
os.chdir(repo_path)
assert_not_equal(master.branches[test_name].commit,
repo.branches[test_name].commit)
assert_not_equal(master.branches[test_name].commit,
repo.branches[test_name + '.2'].commit)
from PyGitUp.gitup import GitUp
gitup = GitUp(testing=True)
gitup.run()
assert_equal(len(gitup.states), 2)
assert_equal(gitup.states[0], 'fast-forwarding')
assert_equal(gitup.states[1], 'fast-forwarding')
assert_equal(master.branches[test_name].commit,
repo.branches[test_name].commit)
assert_equal(master.branches[test_name].commit,
repo.branches[test_name + '.2'].commit)
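# Illustrative sketch (not from this commit): the same GitUp call outside the test
# harness. Assumes the current directory is a git clone with tracking branches set
# up; the printed states are examples, not guaranteed output.
from PyGitUp.gitup import GitUp
gitup = GitUp(testing=True)   # same constructor arguments as in the test above
gitup.run()
print(gitup.states)           # e.g. ['fast-forwarding', ...] depending on branch state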
|
|
e44ae7f701a75f2c61546493fab485194cabbf71
|
netadmin/networks/utils.py
|
netadmin/networks/utils.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Adriano Monteiro Marques
#
# Author: Amit Pal <amix.pal@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from IPy import IP
# IP() raises ValueError for malformed addresses; a version mismatch is treated the same way.
def IPv6_validation(value):
    try:
        if IP(value).version() != 6:
            raise ValueError
    except ValueError:
        raise ValidationError(u'%s is not a correct IPv6 address' % value)
def IPv4_validation(value):
    try:
        if IP(value).version() != 4:
            raise ValueError
    except ValueError:
        raise ValidationError(u'%s is not a correct IPv4 address' % value)
|
Define IPv4_validation and IPv6_validation method
|
Define IPv4_validation and IPv6_validation method
|
Python
|
agpl-3.0
|
umitproject/network-admin,umitproject/network-admin
|
Define IPv4_validation and IPv6_validation method
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Adriano Monteiro Marques
#
# Author: Amit Pal <amix.pal@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from IPy import IP
# IP() raises ValueError for malformed addresses; a version mismatch is treated the same way.
def IPv6_validation(value):
    try:
        if IP(value).version() != 6:
            raise ValueError
    except ValueError:
        raise ValidationError(u'%s is not a correct IPv6 address' % value)
def IPv4_validation(value):
    try:
        if IP(value).version() != 4:
            raise ValueError
    except ValueError:
        raise ValidationError(u'%s is not a correct IPv4 address' % value)
|
<commit_before><commit_msg>Define IPv4_validation and IPv6_validation method<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Adriano Monteiro Marques
#
# Author: Amit Pal <amix.pal@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from IPy import IP
# IP() raises ValueError for malformed addresses; a version mismatch is treated the same way.
def IPv6_validation(value):
    try:
        if IP(value).version() != 6:
            raise ValueError
    except ValueError:
        raise ValidationError(u'%s is not a correct IPv6 address' % value)
def IPv4_validation(value):
    try:
        if IP(value).version() != 4:
            raise ValueError
    except ValueError:
        raise ValidationError(u'%s is not a correct IPv4 address' % value)
|
Define IPv4_validation and IPv6_validation method#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Adriano Monteiro Marques
#
# Author: Amit Pal <amix.pal@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from IPy import IP
# IP() raises ValueError for malformed addresses; a version mismatch is treated the same way.
def IPv6_validation(value):
    try:
        if IP(value).version() != 6:
            raise ValueError
    except ValueError:
        raise ValidationError(u'%s is not a correct IPv6 address' % value)
def IPv4_validation(value):
    try:
        if IP(value).version() != 4:
            raise ValueError
    except ValueError:
        raise ValidationError(u'%s is not a correct IPv4 address' % value)
|
<commit_before><commit_msg>Define IPv4_validation and IPv6_validation method<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Adriano Monteiro Marques
#
# Author: Amit Pal <amix.pal@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from IPy import IP
# IP() raises ValueError for malformed addresses; a version mismatch is treated the same way.
def IPv6_validation(value):
    try:
        if IP(value).version() != 6:
            raise ValueError
    except ValueError:
        raise ValidationError(u'%s is not a correct IPv6 address' % value)
def IPv4_validation(value):
    try:
        if IP(value).version() != 4:
            raise ValueError
    except ValueError:
        raise ValidationError(u'%s is not a correct IPv4 address' % value)
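# Illustrative sketch (not from this commit): one hypothetical way to attach the
# validators to a model. The Host model and its field names are made up; only the
# standard Django `validators=[...]` field option is assumed.
from django.db import models
from netadmin.networks.utils import IPv4_validation, IPv6_validation
class Host(models.Model):
    ipv4_address = models.CharField(max_length=15, blank=True,
                                    validators=[IPv4_validation])
    ipv6_address = models.CharField(max_length=39, blank=True,
                                    validators=[IPv6_validation])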
|
|
518f9bff28585aa1eeb12b9b12d95e32fb257725
|
src/district_distance.py
|
src/district_distance.py
|
# coding: utf-8
# In[79]:
import math
import operator
import json
from geopy.distance import great_circle
# In[90]:
def get_district_info():
    # -- get district names and coordinates from coordinates.json
    with open('coordinates.json') as coord_file:
        district_dict = json.load(coord_file)
    return district_dict
# In[134]:
get_district_info(); # test for json reading
# In[128]:
def distance(lat0, lon0, lat, lon):
'''
Calculates distance on Earth's surface in meters
'''
return great_circle((lat0,lon0), (lat,lon)).meters
def e_distance(x,y,w,z):
'''
Euclidean distance calculation for simple sorting purposes
'''
a = math.pow(x - w,2)
b = math.pow(y - z,2)
return math.sqrt(a+b)
# In[131]:
def order_districts(lat0, lon0, district_dict):
'''
function that return a list of names of districts ordered by distance from the point (lat0,lon0) passed from map
Inputs: 'lat0' = latitude of point at center of map
'lon0' = longitude of point at center of map
'district_dict' = dict of district names and (lat,lon) from function get_district_info()
Outputs: df with district names ordered by distance, coordinates of district (lat,lon)
'''
distance_dict={}
# -- loop thru entries in coord/name dictionary
for key, value in district_dict.iteritems():
lat = float(value[0]); lon = float(value[1]);
# -- calculate coords in radians
#Delta_lat = math.radians(lat0-lat) # latitudinal distance
#Delta_lon = math.radians(lon0-lon) # longitudinal distance
#lat0 = math.radians(lat0) # convert to radians
#lat = math.radians(lat)
distance_dict[key] = distance(lat0, lon0, lat, lon)
sorted_districts = sorted(distance_dict.items(), key=operator.itemgetter(1))
return zip(*sorted_districts)
# In[136]:
#order_districts(27.67298,85.43005,get_district_info())[0] # test for distance
# In[121]:
# In[ ]:
|
Return the districts name by distance given coordinates
|
Return the districts name by distance given coordinates
|
Python
|
apache-2.0
|
ldolberg/the_port_ors_hdx,ldolberg/the_port_ors_hdx
|
Return the districts name by distance given coordinates
|
# coding: utf-8
# In[79]:
import math
import operator
import json
from geopy.distance import great_circle
# In[90]:
def get_district_info():
    # -- get district names and coordinates from coordinates.json
    with open('coordinates.json') as coord_file:
        district_dict = json.load(coord_file)
    return district_dict
# In[134]:
get_district_info(); # test for json reading
# In[128]:
def distance(lat0, lon0, lat, lon):
'''
Calculates distance on Earth's surface in meters
'''
return great_circle((lat0,lon0), (lat,lon)).meters
def e_distance(x,y,w,z):
'''
Euclidean distance calculation for simple sorting purposes
'''
a = math.pow(x - w,2)
b = math.pow(y - z,2)
return math.sqrt(a+b)
# In[131]:
def order_districts(lat0, lon0, district_dict):
'''
function that return a list of names of districts ordered by distance from the point (lat0,lon0) passed from map
Inputs: 'lat0' = latitude of point at center of map
'lon0' = longitude of point at center of map
'district_dict' = dict of district names and (lat,lon) from function get_district_info()
Outputs: df with district names ordered by distance, coordinates of district (lat,lon)
'''
distance_dict={}
# -- loop thru entries in coord/name dictionary
for key, value in district_dict.iteritems():
lat = float(value[0]); lon = float(value[1]);
# -- calculate coords in radians
#Delta_lat = math.radians(lat0-lat) # latitudinal distance
#Delta_lon = math.radians(lon0-lon) # longitudinal distance
#lat0 = math.radians(lat0) # convert to radians
#lat = math.radians(lat)
distance_dict[key] = distance(lat0, lon0, lat, lon)
sorted_districts = sorted(distance_dict.items(), key=operator.itemgetter(1))
return zip(*sorted_districts)
# In[136]:
#order_districts(27.67298,85.43005,get_district_info())[0] # test for distance
# In[121]:
# In[ ]:
|
<commit_before><commit_msg>Return the districts name by distance given coordinates<commit_after>
|
# coding: utf-8
# In[79]:
import math
import operator
import json
from geopy.distance import great_circle
# In[90]:
class Order_districts():
def get_district_info():
# -- get names and coordinates from csv file
with open('coordinates.json') as coord_file:
district_dict = json.load(coord_file)
return district_dict
# In[134]:
get_district_info(); # test for json reading
# In[128]:
def distance(lat0, lon0, lat, lon):
'''
Calculates distance on Earth's surface in meters
'''
return great_circle((lat0,lon0), (lat,lon)).meters
def e_distance(x,y,w,z):
'''
Euclidean distance calculation for simple sorting purposes
'''
a = math.pow(x - w,2)
b = math.pow(y - z,2)
return math.sqrt(a+b)
# In[131]:
def order_districts(lat0, lon0, district_dict):
'''
function that return a list of names of districts ordered by distance from the point (lat0,lon0) passed from map
Inputs: 'lat0' = latitude of point at center of map
'lon0' = longitude of point at center of map
'district_dict' = dict of district names and (lat,lon) from function get_district_info()
Outputs: df with district names ordered by distance, coordinates of district (lat,lon)
'''
distance_dict={}
# -- loop thru entries in coord/name dictionary
for key, value in district_dict.iteritems():
lat = float(value[0]); lon = float(value[1]);
# -- calculate coords in radians
#Delta_lat = math.radians(lat0-lat) # latitudinal distance
#Delta_lon = math.radians(lon0-lon) # longitudinal distance
#lat0 = math.radians(lat0) # convert to radians
#lat = math.radians(lat)
distance_dict[key] = distance(lat0, lon0, lat, lon)
sorted_districts = sorted(distance_dict.items(), key=operator.itemgetter(1))
return zip(*sorted_districts)
# In[136]:
#order_districts(27.67298,85.43005,get_district_info())[0] # test for distance
# In[121]:
# In[ ]:
|
Return the districts name by distance given coordinates
# coding: utf-8
# In[79]:
import math
import operator
import json
from geopy.distance import great_circle
# In[90]:
def get_district_info():
    # -- get district names and coordinates from coordinates.json
    with open('coordinates.json') as coord_file:
        district_dict = json.load(coord_file)
    return district_dict
# In[134]:
get_district_info(); # test for json reading
# In[128]:
def distance(lat0, lon0, lat, lon):
'''
Calculates distance on Earth's surface in meters
'''
return great_circle((lat0,lon0), (lat,lon)).meters
def e_distance(x,y,w,z):
'''
Euclidean distance calculation for simple sorting purposes
'''
a = math.pow(x - w,2)
b = math.pow(y - z,2)
return math.sqrt(a+b)
# In[131]:
def order_districts(lat0, lon0, district_dict):
'''
function that return a list of names of districts ordered by distance from the point (lat0,lon0) passed from map
Inputs: 'lat0' = latitude of point at center of map
'lon0' = longitude of point at center of map
'district_dict' = dict of district names and (lat,lon) from function get_district_info()
Outputs: df with district names ordered by distance, coordinates of district (lat,lon)
'''
distance_dict={}
# -- loop thru entries in coord/name dictionary
for key, value in district_dict.iteritems():
lat = float(value[0]); lon = float(value[1]);
# -- calculate coords in radians
#Delta_lat = math.radians(lat0-lat) # latitudinal distance
#Delta_lon = math.radians(lon0-lon) # longitudinal distance
#lat0 = math.radians(lat0) # convert to radians
#lat = math.radians(lat)
distance_dict[key] = distance(lat0, lon0, lat, lon)
sorted_districts = sorted(distance_dict.items(), key=operator.itemgetter(1))
return zip(*sorted_districts)
# In[136]:
#order_districts(27.67298,85.43005,get_district_info())[0] # test for distance
# In[121]:
# In[ ]:
|
<commit_before><commit_msg>Return the districts name by distance given coordinates<commit_after>
# coding: utf-8
# In[79]:
import math
import operator
import json
from geopy.distance import great_circle
# In[90]:
def get_district_info():
    # -- get district names and coordinates from coordinates.json
    with open('coordinates.json') as coord_file:
        district_dict = json.load(coord_file)
    return district_dict
# In[134]:
get_district_info(); # test for json reading
# In[128]:
def distance(lat0, lon0, lat, lon):
'''
Calculates distance on Earth's surface in meters
'''
return great_circle((lat0,lon0), (lat,lon)).meters
def e_distance(x,y,w,z):
'''
Euclidean distance calculation for simple sorting purposes
'''
a = math.pow(x - w,2)
b = math.pow(y - z,2)
return math.sqrt(a+b)
# In[131]:
def order_districts(lat0, lon0, district_dict):
'''
function that return a list of names of districts ordered by distance from the point (lat0,lon0) passed from map
Inputs: 'lat0' = latitude of point at center of map
'lon0' = longitude of point at center of map
'district_dict' = dict of district names and (lat,lon) from function get_district_info()
Outputs: df with district names ordered by distance, coordinates of district (lat,lon)
'''
distance_dict={}
# -- loop thru entries in coord/name dictionary
for key, value in district_dict.iteritems():
lat = float(value[0]); lon = float(value[1]);
# -- calculate coords in radians
#Delta_lat = math.radians(lat0-lat) # latitudinal distance
#Delta_lon = math.radians(lon0-lon) # longitudinal distance
#lat0 = math.radians(lat0) # convert to radians
#lat = math.radians(lat)
distance_dict[key] = distance(lat0, lon0, lat, lon)
sorted_districts = sorted(distance_dict.items(), key=operator.itemgetter(1))
return zip(*sorted_districts)
# In[136]:
#order_districts(27.67298,85.43005,get_district_info())[0] # test for distance
# In[121]:
# In[ ]:
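# Illustrative sketch (not from this commit): how the two helpers fit together.
# The map-centre coordinates are the ones from the commented test above; the
# contents of coordinates.json are an assumption.
districts = get_district_info()
names, distances = order_districts(27.67298, 85.43005, districts)
nearest_name, nearest_distance_m = names[0], distances[0]   # closest district first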
|
|
0d0d32feab4527f78ef4dbdb1cd890aa72851b18
|
bioagents/resources/trips_ont_manager.py
|
bioagents/resources/trips_ont_manager.py
|
import os
from indra.preassembler.hierarchy_manager import HierarchyManager
# Make a TRIPS ontology
_fname = os.path.join(os.path.dirname(__file__), 'trips_ontology.rdf')
trips_ontology = HierarchyManager(_fname, uri_as_name=False)
trips_ontology.relations_prefix = 'http://trips.ihmc.us/relations/'
trips_ontology.initialize()
def trips_isa(concept1, concept2):
# Preprocess to make this more general
concept1 = concept1.lower().replace('ont::', '')
concept2 = concept2.lower().replace('ont::', '')
isa = trips_ontology.isa('http://trips.ihmc.us/concepts/', concept1,
'http://trips.ihmc.us/concepts/', concept2)
return isa
|
Implement TRIPS ontology manager with trips_isa
|
Implement TRIPS ontology manager with trips_isa
|
Python
|
bsd-2-clause
|
sorgerlab/bioagents,bgyori/bioagents
|
Implement TRIPS ontology manager with trips_isa
|
import os
from indra.preassembler.hierarchy_manager import HierarchyManager
# Make a TRIPS ontology
_fname = os.path.join(os.path.dirname(__file__), 'trips_ontology.rdf')
trips_ontology = HierarchyManager(_fname, uri_as_name=False)
trips_ontology.relations_prefix = 'http://trips.ihmc.us/relations/'
trips_ontology.initialize()
def trips_isa(concept1, concept2):
# Preprocess to make this more general
concept1 = concept1.lower().replace('ont::', '')
concept2 = concept2.lower().replace('ont::', '')
isa = trips_ontology.isa('http://trips.ihmc.us/concepts/', concept1,
'http://trips.ihmc.us/concepts/', concept2)
return isa
|
<commit_before><commit_msg>Implement TRIPS ontology manager with trips_isa<commit_after>
|
import os
from indra.preassembler.hierarchy_manager import HierarchyManager
# Make a TRIPS ontology
_fname = os.path.join(os.path.dirname(__file__), 'trips_ontology.rdf')
trips_ontology = HierarchyManager(_fname, uri_as_name=False)
trips_ontology.relations_prefix = 'http://trips.ihmc.us/relations/'
trips_ontology.initialize()
def trips_isa(concept1, concept2):
# Preprocess to make this more general
concept1 = concept1.lower().replace('ont::', '')
concept2 = concept2.lower().replace('ont::', '')
isa = trips_ontology.isa('http://trips.ihmc.us/concepts/', concept1,
'http://trips.ihmc.us/concepts/', concept2)
return isa
|
Implement TRIPS ontology manager with trips_isaimport os
from indra.preassembler.hierarchy_manager import HierarchyManager
# Make a TRIPS ontology
_fname = os.path.join(os.path.dirname(__file__), 'trips_ontology.rdf')
trips_ontology = HierarchyManager(_fname, uri_as_name=False)
trips_ontology.relations_prefix = 'http://trips.ihmc.us/relations/'
trips_ontology.initialize()
def trips_isa(concept1, concept2):
# Preprocess to make this more general
concept1 = concept1.lower().replace('ont::', '')
concept2 = concept2.lower().replace('ont::', '')
isa = trips_ontology.isa('http://trips.ihmc.us/concepts/', concept1,
'http://trips.ihmc.us/concepts/', concept2)
return isa
|
<commit_before><commit_msg>Implement TRIPS ontology manager with trips_isa<commit_after>import os
from indra.preassembler.hierarchy_manager import HierarchyManager
# Make a TRIPS ontology
_fname = os.path.join(os.path.dirname(__file__), 'trips_ontology.rdf')
trips_ontology = HierarchyManager(_fname, uri_as_name=False)
trips_ontology.relations_prefix = 'http://trips.ihmc.us/relations/'
trips_ontology.initialize()
def trips_isa(concept1, concept2):
# Preprocess to make this more general
concept1 = concept1.lower().replace('ont::', '')
concept2 = concept2.lower().replace('ont::', '')
isa = trips_ontology.isa('http://trips.ihmc.us/concepts/', concept1,
'http://trips.ihmc.us/concepts/', concept2)
return isa
|
|
336f78f4a997051ea70100d291c2206475bd86de
|
pocean/tests/test_cf.py
|
pocean/tests/test_cf.py
|
#!python
# coding=utf-8
import unittest
from pocean.cf import CFDataset
from pocean.dsg import OrthogonalMultidimensionalTimeseries as omt
import logging
from pocean import logger as L
L.level = logging.INFO
L.handlers = [logging.StreamHandler()]
class TestCFDatasetLoad(unittest.TestCase):
def test_load_url(self):
ncd = CFDataset.load('http://geoport.whoi.edu/thredds/dodsC/usgs/data2/emontgomery/stellwagen/CF-1.6/ARGO_MERCHANT/1211-AA.cdf')
assert omt.is_mine(ncd) is True
ncd.close()
|
Add a test for loading file over a URL (dap)
|
Add a test for loading file over a URL (dap)
|
Python
|
mit
|
pyoceans/pocean-core,pyoceans/pocean-core
|
Add a test for loading file over a URL (dap)
|
#!python
# coding=utf-8
import unittest
from pocean.cf import CFDataset
from pocean.dsg import OrthogonalMultidimensionalTimeseries as omt
import logging
from pocean import logger as L
L.level = logging.INFO
L.handlers = [logging.StreamHandler()]
class TestCFDatasetLoad(unittest.TestCase):
def test_load_url(self):
ncd = CFDataset.load('http://geoport.whoi.edu/thredds/dodsC/usgs/data2/emontgomery/stellwagen/CF-1.6/ARGO_MERCHANT/1211-AA.cdf')
assert omt.is_mine(ncd) is True
ncd.close()
|
<commit_before><commit_msg>Add a test for loading file over a URL (dap)<commit_after>
|
#!python
# coding=utf-8
import unittest
from pocean.cf import CFDataset
from pocean.dsg import OrthogonalMultidimensionalTimeseries as omt
import logging
from pocean import logger as L
L.level = logging.INFO
L.handlers = [logging.StreamHandler()]
class TestCFDatasetLoad(unittest.TestCase):
def test_load_url(self):
ncd = CFDataset.load('http://geoport.whoi.edu/thredds/dodsC/usgs/data2/emontgomery/stellwagen/CF-1.6/ARGO_MERCHANT/1211-AA.cdf')
assert omt.is_mine(ncd) is True
ncd.close()
|
Add a test for loading file over a URL (dap)#!python
# coding=utf-8
import unittest
from pocean.cf import CFDataset
from pocean.dsg import OrthogonalMultidimensionalTimeseries as omt
import logging
from pocean import logger as L
L.level = logging.INFO
L.handlers = [logging.StreamHandler()]
class TestCFDatasetLoad(unittest.TestCase):
def test_load_url(self):
ncd = CFDataset.load('http://geoport.whoi.edu/thredds/dodsC/usgs/data2/emontgomery/stellwagen/CF-1.6/ARGO_MERCHANT/1211-AA.cdf')
assert omt.is_mine(ncd) is True
ncd.close()
|
<commit_before><commit_msg>Add a test for loading file over a URL (dap)<commit_after>#!python
# coding=utf-8
import unittest
from pocean.cf import CFDataset
from pocean.dsg import OrthogonalMultidimensionalTimeseries as omt
import logging
from pocean import logger as L
L.level = logging.INFO
L.handlers = [logging.StreamHandler()]
class TestCFDatasetLoad(unittest.TestCase):
def test_load_url(self):
ncd = CFDataset.load('http://geoport.whoi.edu/thredds/dodsC/usgs/data2/emontgomery/stellwagen/CF-1.6/ARGO_MERCHANT/1211-AA.cdf')
assert omt.is_mine(ncd) is True
ncd.close()
|
|
4ff82aaee04666a5ccc2b56602c03f3226220fcb
|
scripts/runner.py
|
scripts/runner.py
|
#!/usr/bin/env python3
import sys
import os
import logging
import subprocess
# get the root handler to update his behavior (No prefix, no endline)
logger = logging.getLogger()
def logged_call(command, verbose=False):
"""
A logged version of subprocess.call. Do not wait the end of the
process to start logging
"""
logger.debug('CMD: {0}'.format(command))
# if true, redirect stderr to stdout, otherwise, to NULL
if verbose:
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
else:
null = open(os.devnull, 'w')
stdout = null
stderr = null
#Update temporally the handler (No prefix, no endline)
old_handlers = {handler: handler.formatter for handler in logger.handlers}
for handler in logger.handlers:
handler.formatter = logging.Formatter('%(message)s')
handler.terminator = ''
with subprocess.Popen(command, stdout=stdout, stderr=stderr, shell=True, bufsize=0) as process:
if verbose:
while process.poll() is None:
logger.info(os.read(process.stdout.fileno(), 1024).decode())
# rehabilitate previous handler
for handler in logger.handlers:
handler.formatter = old_handlers[handler]
handler.terminator = '\n'
return process.wait()
def logged_check_call(command, verbose=False):
"""
A logged version of subprocess.check_call
"""
returncode = logged_call(command, verbose=verbose)
if returncode != 0:
logger.fatal('The last command returns a non-zero return code: %s' % returncode)
sys.exit('Non-zero return code')
|
Add some methods to log system calls
|
Add some methods to log system calls
|
Python
|
agpl-3.0
|
bonsai-team/matam,bonsai-team/matam,ppericard/matamog,ppericard/matamog,ppericard/matam,ppericard/matam,bonsai-team/matam,bonsai-team/matam,ppericard/matam,ppericard/matamog,ppericard/matamog
|
Add some methods to log system calls
|
#!/usr/bin/env python3
import sys
import os
import logging
import subprocess
# get the root handler to update his behavior (No prefix, no endline)
logger = logging.getLogger()
def logged_call(command, verbose=False):
"""
A logged version of subprocess.call. Do not wait the end of the
process to start logging
"""
logger.debug('CMD: {0}'.format(command))
# if true, redirect stderr to stdout, otherwise, to NULL
if verbose:
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
else:
null = open(os.devnull, 'w')
stdout = null
stderr = null
#Update temporally the handler (No prefix, no endline)
old_handlers = {handler: handler.formatter for handler in logger.handlers}
for handler in logger.handlers:
handler.formatter = logging.Formatter('%(message)s')
handler.terminator = ''
with subprocess.Popen(command, stdout=stdout, stderr=stderr, shell=True, bufsize=0) as process:
if verbose:
while process.poll() is None:
logger.info(os.read(process.stdout.fileno(), 1024).decode())
# rehabilitate previous handler
for handler in logger.handlers:
handler.formatter = old_handlers[handler]
handler.terminator = '\n'
return process.wait()
def logged_check_call(command, verbose=False):
"""
A logged version of subprocess.check_call
"""
returncode = logged_call(command, verbose=verbose)
if returncode != 0:
logger.fatal('The last command returns a non-zero return code: %s' % returncode)
sys.exit('Non-zero return code')
|
<commit_before><commit_msg>Add some methods to log system calls<commit_after>
|
#!/usr/bin/env python3
import sys
import os
import logging
import subprocess
# get the root handler to update his behavior (No prefix, no endline)
logger = logging.getLogger()
def logged_call(command, verbose=False):
"""
A logged version of subprocess.call. Do not wait the end of the
process to start logging
"""
logger.debug('CMD: {0}'.format(command))
# if true, redirect stderr to stdout, otherwise, to NULL
if verbose:
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
else:
null = open(os.devnull, 'w')
stdout = null
stderr = null
#Update temporally the handler (No prefix, no endline)
old_handlers = {handler: handler.formatter for handler in logger.handlers}
for handler in logger.handlers:
handler.formatter = logging.Formatter('%(message)s')
handler.terminator = ''
with subprocess.Popen(command, stdout=stdout, stderr=stderr, shell=True, bufsize=0) as process:
if verbose:
while process.poll() is None:
logger.info(os.read(process.stdout.fileno(), 1024).decode())
# rehabilitate previous handler
for handler in logger.handlers:
handler.formatter = old_handlers[handler]
handler.terminator = '\n'
return process.wait()
def logged_check_call(command, verbose=False):
"""
A logged version of subprocess.check_call
"""
returncode = logged_call(command, verbose=verbose)
if returncode != 0:
logger.fatal('The last command returns a non-zero return code: %s' % returncode)
sys.exit('Non-zero return code')
|
Add some methods to log system calls#!/usr/bin/env python3
import sys
import os
import logging
import subprocess
# get the root handler to update his behavior (No prefix, no endline)
logger = logging.getLogger()
def logged_call(command, verbose=False):
"""
A logged version of subprocess.call. Do not wait the end of the
process to start logging
"""
logger.debug('CMD: {0}'.format(command))
# if true, redirect stderr to stdout, otherwise, to NULL
if verbose:
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
else:
null = open(os.devnull, 'w')
stdout = null
stderr = null
#Update temporally the handler (No prefix, no endline)
old_handlers = {handler: handler.formatter for handler in logger.handlers}
for handler in logger.handlers:
handler.formatter = logging.Formatter('%(message)s')
handler.terminator = ''
with subprocess.Popen(command, stdout=stdout, stderr=stderr, shell=True, bufsize=0) as process:
if verbose:
while process.poll() is None:
logger.info(os.read(process.stdout.fileno(), 1024).decode())
# rehabilitate previous handler
for handler in logger.handlers:
handler.formatter = old_handlers[handler]
handler.terminator = '\n'
return process.wait()
def logged_check_call(command, verbose=False):
"""
A logged version of subprocess.check_call
"""
returncode = logged_call(command, verbose=verbose)
if returncode != 0:
logger.fatal('The last command returns a non-zero return code: %s' % returncode)
sys.exit('Non-zero return code')
|
<commit_before><commit_msg>Add some methods to log system calls<commit_after>#!/usr/bin/env python3
import sys
import os
import logging
import subprocess
# get the root handler to update his behavior (No prefix, no endline)
logger = logging.getLogger()
def logged_call(command, verbose=False):
"""
A logged version of subprocess.call. Do not wait the end of the
process to start logging
"""
logger.debug('CMD: {0}'.format(command))
# if true, redirect stderr to stdout, otherwise, to NULL
if verbose:
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
else:
null = open(os.devnull, 'w')
stdout = null
stderr = null
#Update temporally the handler (No prefix, no endline)
old_handlers = {handler: handler.formatter for handler in logger.handlers}
for handler in logger.handlers:
handler.formatter = logging.Formatter('%(message)s')
handler.terminator = ''
with subprocess.Popen(command, stdout=stdout, stderr=stderr, shell=True, bufsize=0) as process:
if verbose:
while process.poll() is None:
logger.info(os.read(process.stdout.fileno(), 1024).decode())
# rehabilitate previous handler
for handler in logger.handlers:
handler.formatter = old_handlers[handler]
handler.terminator = '\n'
return process.wait()
def logged_check_call(command, verbose=False):
"""
A logged version of subprocess.check_call
"""
returncode = logged_call(command, verbose=verbose)
if returncode != 0:
logger.fatal('The last command returns a non-zero return code: %s' % returncode)
sys.exit('Non-zero return code')
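# Illustrative sketch (not from this commit): driving the helpers. Assumes the root
# logger has at least one handler (e.g. via basicConfig) and that the commands
# exist on the host; both commands are arbitrary examples.
import logging
logging.basicConfig(level=logging.DEBUG)
logged_call('echo building components...', verbose=True)   # streams output through the logger
logged_check_call('make --version')                        # would sys.exit() on a non-zero return code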
|
|
80ab7462ca9379b2dce9a10519bb986f8725b268
|
tasks.py
|
tasks.py
|
import os
import subprocess
import currint
from invoke import task
VERSION_FILE = os.path.join(os.path.dirname(currint.__file__), "__init__.py")
def _write_to_version_file(version):
with open(VERSION_FILE, 'r') as version_read:
output = []
for line in version_read:
if line.startswith('__version__'):
output.append('__version__ = %r' % version)
else:
output.append(line.strip())
with open(VERSION_FILE, 'w') as version_write:
for line in output:
version_write.write(line + '\n')
def _commit_and_tag(version):
"""Commit changes to version file and tag the release"""
subprocess.check_call(
['git', 'add', VERSION_FILE],
)
subprocess.check_call(
['git', 'commit', '-m', "Releasing version %s" % version]
)
subprocess.check_call(
['git', 'tag', version]
)
def _push_release_changes(version):
push = raw_input('Push release changes to master? (y/n): ')
if push == 'y':
print subprocess.check_output(
['git', 'push', 'origin', 'master']
)
# push the release tag
print subprocess.check_output(
['git', 'push', 'origin', version]
)
else:
print 'Not pushing changes to master!'
print 'Make sure you remember to explictily push the tag!'
@task
def release():
# Prompt for version
print "Current version: %s" % currint.__version__
release_version = raw_input('Enter a new version (or "exit"): ')
if not release_version or release_version == 'exit':
print 'Cancelling release!'
return
_write_to_version_file(release_version)
_commit_and_tag(release_version)
_push_release_changes(release_version)
|
Add invoke file for releases
|
Add invoke file for releases
|
Python
|
apache-2.0
|
eventbrite/currint,ebmshenfield/currint
|
Add invoke file for releases
|
import os
import subprocess
import currint
from invoke import task
VERSION_FILE = os.path.join(os.path.dirname(currint.__file__), "__init__.py")
def _write_to_version_file(version):
with open(VERSION_FILE, 'r') as version_read:
output = []
for line in version_read:
if line.startswith('__version__'):
output.append('__version__ = %r' % version)
else:
output.append(line.strip())
with open(VERSION_FILE, 'w') as version_write:
for line in output:
version_write.write(line + '\n')
def _commit_and_tag(version):
"""Commit changes to version file and tag the release"""
subprocess.check_call(
['git', 'add', VERSION_FILE],
)
subprocess.check_call(
['git', 'commit', '-m', "Releasing version %s" % version]
)
subprocess.check_call(
['git', 'tag', version]
)
def _push_release_changes(version):
push = raw_input('Push release changes to master? (y/n): ')
if push == 'y':
print subprocess.check_output(
['git', 'push', 'origin', 'master']
)
# push the release tag
print subprocess.check_output(
['git', 'push', 'origin', version]
)
else:
print 'Not pushing changes to master!'
print 'Make sure you remember to explictily push the tag!'
@task
def release():
# Prompt for version
print "Current version: %s" % currint.__version__
release_version = raw_input('Enter a new version (or "exit"): ')
if not release_version or release_version == 'exit':
print 'Cancelling release!'
return
_write_to_version_file(release_version)
_commit_and_tag(release_version)
_push_release_changes(release_version)
|
<commit_before><commit_msg>Add invoke file for releases<commit_after>
|
import os
import subprocess
import currint
from invoke import task
VERSION_FILE = os.path.join(os.path.dirname(currint.__file__), "__init__.py")
def _write_to_version_file(version):
with open(VERSION_FILE, 'r') as version_read:
output = []
for line in version_read:
if line.startswith('__version__'):
output.append('__version__ = %r' % version)
else:
output.append(line.strip())
with open(VERSION_FILE, 'w') as version_write:
for line in output:
version_write.write(line + '\n')
def _commit_and_tag(version):
"""Commit changes to version file and tag the release"""
subprocess.check_call(
['git', 'add', VERSION_FILE],
)
subprocess.check_call(
['git', 'commit', '-m', "Releasing version %s" % version]
)
subprocess.check_call(
['git', 'tag', version]
)
def _push_release_changes(version):
push = raw_input('Push release changes to master? (y/n): ')
if push == 'y':
print subprocess.check_output(
['git', 'push', 'origin', 'master']
)
# push the release tag
print subprocess.check_output(
['git', 'push', 'origin', version]
)
else:
print 'Not pushing changes to master!'
print 'Make sure you remember to explictily push the tag!'
@task
def release():
# Prompt for version
print "Current version: %s" % currint.__version__
release_version = raw_input('Enter a new version (or "exit"): ')
if not release_version or release_version == 'exit':
print 'Cancelling release!'
return
_write_to_version_file(release_version)
_commit_and_tag(release_version)
_push_release_changes(release_version)
|
Add invoke file for releasesimport os
import subprocess
import currint
from invoke import task
VERSION_FILE = os.path.join(os.path.dirname(currint.__file__), "__init__.py")
def _write_to_version_file(version):
with open(VERSION_FILE, 'r') as version_read:
output = []
for line in version_read:
if line.startswith('__version__'):
output.append('__version__ = %r' % version)
else:
output.append(line.strip())
with open(VERSION_FILE, 'w') as version_write:
for line in output:
version_write.write(line + '\n')
def _commit_and_tag(version):
"""Commit changes to version file and tag the release"""
subprocess.check_call(
['git', 'add', VERSION_FILE],
)
subprocess.check_call(
['git', 'commit', '-m', "Releasing version %s" % version]
)
subprocess.check_call(
['git', 'tag', version]
)
def _push_release_changes(version):
push = raw_input('Push release changes to master? (y/n): ')
if push == 'y':
print subprocess.check_output(
['git', 'push', 'origin', 'master']
)
# push the release tag
print subprocess.check_output(
['git', 'push', 'origin', version]
)
else:
print 'Not pushing changes to master!'
print 'Make sure you remember to explictily push the tag!'
@task
def release():
# Prompt for version
print "Current version: %s" % currint.__version__
release_version = raw_input('Enter a new version (or "exit"): ')
if not release_version or release_version == 'exit':
print 'Cancelling release!'
return
_write_to_version_file(release_version)
_commit_and_tag(release_version)
_push_release_changes(release_version)
|
<commit_before><commit_msg>Add invoke file for releases<commit_after>import os
import subprocess
import currint
from invoke import task
VERSION_FILE = os.path.join(os.path.dirname(currint.__file__), "__init__.py")
def _write_to_version_file(version):
with open(VERSION_FILE, 'r') as version_read:
output = []
for line in version_read:
if line.startswith('__version__'):
output.append('__version__ = %r' % version)
else:
output.append(line.strip())
with open(VERSION_FILE, 'w') as version_write:
for line in output:
version_write.write(line + '\n')
def _commit_and_tag(version):
"""Commit changes to version file and tag the release"""
subprocess.check_call(
['git', 'add', VERSION_FILE],
)
subprocess.check_call(
['git', 'commit', '-m', "Releasing version %s" % version]
)
subprocess.check_call(
['git', 'tag', version]
)
def _push_release_changes(version):
push = raw_input('Push release changes to master? (y/n): ')
if push == 'y':
print subprocess.check_output(
['git', 'push', 'origin', 'master']
)
# push the release tag
print subprocess.check_output(
['git', 'push', 'origin', version]
)
else:
print 'Not pushing changes to master!'
print 'Make sure you remember to explictily push the tag!'
@task
def release():
# Prompt for version
print "Current version: %s" % currint.__version__
release_version = raw_input('Enter a new version (or "exit"): ')
if not release_version or release_version == 'exit':
print 'Cancelling release!'
return
_write_to_version_file(release_version)
_commit_and_tag(release_version)
_push_release_changes(release_version)
|
|
689dfc738c37358935a2c3882215c6bc225682c5
|
setup.py
|
setup.py
|
import os
import subprocess
def main():
root_path = os.path.dirname(__file__)
cmd = ['cmake',
'-G', 'Visual Studio 15 2017 Win64',
root_path]
subprocess.call(cmd)
if __name__ == '__main__':
main()
|
Add script to generate cmake files
|
Add script to generate cmake files
|
Python
|
unknown
|
fizixx/nucleus,tiaanl/nucleus
|
Add script to generate cmake files
|
import os
import subprocess
def main():
root_path = os.path.dirname(__file__)
cmd = ['cmake',
'-G', 'Visual Studio 15 2017 Win64',
root_path]
subprocess.call(cmd)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to generate cmake files<commit_after>
|
import os
import subprocess
def main():
root_path = os.path.dirname(__file__)
cmd = ['cmake',
'-G', 'Visual Studio 15 2017 Win64',
root_path]
subprocess.call(cmd)
if __name__ == '__main__':
main()
|
Add script to generate cmake filesimport os
import subprocess
def main():
root_path = os.path.dirname(__file__)
cmd = ['cmake',
'-G', 'Visual Studio 15 2017 Win64',
root_path]
subprocess.call(cmd)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to generate cmake files<commit_after>import os
import subprocess
def main():
root_path = os.path.dirname(__file__)
cmd = ['cmake',
'-G', 'Visual Studio 15 2017 Win64',
root_path]
subprocess.call(cmd)
if __name__ == '__main__':
main()
|
|
1d8beff18749d3eefe5c1df87650469982584159
|
tests.py
|
tests.py
|
from os import walk
import os
import unittest
class MdTestCase(unittest.TestCase):
def test_articles(self):
for (dirpath, dirnames, filenames) in walk("md"):
for x in filenames:
path = dirpath + os.path.sep + x
htmlpath = path.replace("md" + os.path.sep, "pages" + os.path.sep).replace(".md", ".html")
self.assertTrue(os.path.exists(htmlpath), msg="Article found with no matching page! Article = {}".format(path))
if __name__ == '__main__':
unittest.main()
|
Add test unit to check for matching md -> page
|
Add test unit to check for matching md -> page
|
Python
|
mit
|
PotteriesHackspace/knowledgebase,PotteriesHackspace/knowledgebase
|
Add test unit to check for matching md -> page
|
from os import walk
import os
import unittest
class MdTestCase(unittest.TestCase):
def test_articles(self):
for (dirpath, dirnames, filenames) in walk("md"):
for x in filenames:
path = dirpath + os.path.sep + x
htmlpath = path.replace("md" + os.path.sep, "pages" + os.path.sep).replace(".md", ".html")
self.assertTrue(os.path.exists(htmlpath), msg="Article found with no matching page! Article = {}".format(path))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test unit to check for matching md -> page<commit_after>
|
from os import walk
import os
import unittest
class MdTestCase(unittest.TestCase):
def test_articles(self):
for (dirpath, dirnames, filenames) in walk("md"):
for x in filenames:
path = dirpath + os.path.sep + x
htmlpath = path.replace("md" + os.path.sep, "pages" + os.path.sep).replace(".md", ".html")
self.assertTrue(os.path.exists(htmlpath), msg="Article found with no matching page! Article = {}".format(path))
if __name__ == '__main__':
unittest.main()
|
Add test unit to check for matching md -> pagefrom os import walk
import os
import unittest
class MdTestCase(unittest.TestCase):
def test_articles(self):
for (dirpath, dirnames, filenames) in walk("md"):
for x in filenames:
path = dirpath + os.path.sep + x
htmlpath = path.replace("md" + os.path.sep, "pages" + os.path.sep).replace(".md", ".html")
self.assertTrue(os.path.exists(htmlpath), msg="Article found with no matching page! Article = {}".format(path))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test unit to check for matching md -> page<commit_after>from os import walk
import os
import unittest
class MdTestCase(unittest.TestCase):
def test_articles(self):
for (dirpath, dirnames, filenames) in walk("md"):
for x in filenames:
path = dirpath + os.path.sep + x
htmlpath = path.replace("md" + os.path.sep, "pages" + os.path.sep).replace(".md", ".html")
self.assertTrue(os.path.exists(htmlpath), msg="Article found with no matching page! Article = {}".format(path))
if __name__ == '__main__':
unittest.main()
|
|
88c0b490bcbba1191e8878aa421c3d32002cea0e
|
cms/test_utils/project/placeholderapp/migrations_django/0004_auto_20150415_1913.py
|
cms/test_utils/project/placeholderapp/migrations_django/0004_auto_20150415_1913.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('placeholderapp', '0003_example1_publish'),
]
operations = [
migrations.AlterModelOptions(
name='multilingualexample1translation',
options={'managed': True},
),
]
|
Update translations (for internal test project, cms itself has none)
|
Update translations (for internal test project, cms itself has none)
|
Python
|
bsd-3-clause
|
SmithsonianEnterprises/django-cms,SmithsonianEnterprises/django-cms,dhorelik/django-cms,sznekol/django-cms,chmberl/django-cms,liuyisiyisi/django-cms,josjevv/django-cms,irudayarajisawa/django-cms,evildmp/django-cms,evildmp/django-cms,robmagee/django-cms,jeffreylu9/django-cms,stefanfoulis/django-cms,frnhr/django-cms,intip/django-cms,owers19856/django-cms,andyzsf/django-cms,dhorelik/django-cms,SofiaReis/django-cms,jeffreylu9/django-cms,SachaMPS/django-cms,memnonila/django-cms,jproffitt/django-cms,webu/django-cms,rscnt/django-cms,philippze/django-cms,divio/django-cms,timgraham/django-cms,SofiaReis/django-cms,vxsx/django-cms,vad/django-cms,intip/django-cms,liuyisiyisi/django-cms,dhorelik/django-cms,jsma/django-cms,DylannCordel/django-cms,benzkji/django-cms,mkoistinen/django-cms,leture/django-cms,youprofit/django-cms,rscnt/django-cms,rryan/django-cms,sznekol/django-cms,rryan/django-cms,Jaccorot/django-cms,memnonila/django-cms,datakortet/django-cms,intip/django-cms,evildmp/django-cms,owers19856/django-cms,iddqd1/django-cms,qnub/django-cms,sznekol/django-cms,kk9599/django-cms,datakortet/django-cms,andyzsf/django-cms,divio/django-cms,Livefyre/django-cms,czpython/django-cms,vad/django-cms,jsma/django-cms,jsma/django-cms,rsalmaso/django-cms,rsalmaso/django-cms,benzkji/django-cms,Vegasvikk/django-cms,mkoistinen/django-cms,czpython/django-cms,farhaadila/django-cms,irudayarajisawa/django-cms,robmagee/django-cms,netzkolchose/django-cms,owers19856/django-cms,qnub/django-cms,timgraham/django-cms,FinalAngel/django-cms,josjevv/django-cms,Vegasvikk/django-cms,bittner/django-cms,takeshineshiro/django-cms,robmagee/django-cms,SofiaReis/django-cms,cyberintruder/django-cms,chkir/django-cms,wyg3958/django-cms,rsalmaso/django-cms,keimlink/django-cms,FinalAngel/django-cms,saintbird/django-cms,philippze/django-cms,andyzsf/django-cms,farhaadila/django-cms,memnonila/django-cms,stefanfoulis/django-cms,webu/django-cms,Livefyre/django-cms,youprofit/django-cms,jsma/django-cms,czpython/django-cms,vxsx/django-cms,jproffitt/django-cms,wyg3958/django-cms,chkir/django-cms,FinalAngel/django-cms,youprofit/django-cms,vad/django-cms,petecummings/django-cms,divio/django-cms,iddqd1/django-cms,divio/django-cms,saintbird/django-cms,iddqd1/django-cms,chkir/django-cms,cyberintruder/django-cms,chmberl/django-cms,timgraham/django-cms,nimbis/django-cms,qnub/django-cms,AlexProfi/django-cms,keimlink/django-cms,rryan/django-cms,datakortet/django-cms,farhaadila/django-cms,vxsx/django-cms,keimlink/django-cms,nimbis/django-cms,kk9599/django-cms,leture/django-cms,kk9599/django-cms,takeshineshiro/django-cms,FinalAngel/django-cms,vxsx/django-cms,frnhr/django-cms,philippze/django-cms,netzkolchose/django-cms,stefanfoulis/django-cms,cyberintruder/django-cms,takeshineshiro/django-cms,yakky/django-cms,jeffreylu9/django-cms,bittner/django-cms,AlexProfi/django-cms,DylannCordel/django-cms,bittner/django-cms,intip/django-cms,josjevv/django-cms,leture/django-cms,frnhr/django-cms,jeffreylu9/django-cms,mkoistinen/django-cms,jproffitt/django-cms,DylannCordel/django-cms,Livefyre/django-cms,Vegasvikk/django-cms,SmithsonianEnterprises/django-cms,evildmp/django-cms,nimbis/django-cms,nimbis/django-cms,vad/django-cms,benzkji/django-cms,stefanfoulis/django-cms,irudayarajisawa/django-cms,petecummings/django-cms,Jaccorot/django-cms,jproffitt/django-cms,liuyisiyisi/django-cms,yakky/django-cms,netzkolchose/django-cms,Livefyre/django-cms,frnhr/django-cms,czpython/django-cms,rsalmaso/django-cms,rscnt/django-cms,yakky/django-cms,SachaMPS/django-cms,rryan/django-cms,saintbird/dj
ango-cms,wyg3958/django-cms,yakky/django-cms,datakortet/django-cms,andyzsf/django-cms,petecummings/django-cms,benzkji/django-cms,SachaMPS/django-cms,mkoistinen/django-cms,netzkolchose/django-cms,Jaccorot/django-cms,webu/django-cms,bittner/django-cms,chmberl/django-cms,AlexProfi/django-cms
|
Update translations (for internal test project, cms itself has none)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('placeholderapp', '0003_example1_publish'),
]
operations = [
migrations.AlterModelOptions(
name='multilingualexample1translation',
options={'managed': True},
),
]
|
<commit_before><commit_msg>Update translations (for internal test project, cms itself has none)<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('placeholderapp', '0003_example1_publish'),
]
operations = [
migrations.AlterModelOptions(
name='multilingualexample1translation',
options={'managed': True},
),
]
|
Update translations (for internal test project, cms itself has none)# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('placeholderapp', '0003_example1_publish'),
]
operations = [
migrations.AlterModelOptions(
name='multilingualexample1translation',
options={'managed': True},
),
]
|
<commit_before><commit_msg>Update translations (for internal test project, cms itself has none)<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('placeholderapp', '0003_example1_publish'),
]
operations = [
migrations.AlterModelOptions(
name='multilingualexample1translation',
options={'managed': True},
),
]
|
|
0401f8bfb47710f97ae9c665c2476e22e05b18d0
|
src/tests/test_connection.py
|
src/tests/test_connection.py
|
# -*- coding: utf-8
import json
import random
import string
import tornado.gen
import tornado.testing
import tornado.web
import tornado.websocket
from sidecar.connection import Connection
class AsyncSockJSClient(object):
def __init__(self, ws):
self.ws = ws
def send(self, data):
self.ws.write_message(json.dumps(data))
@tornado.gen.coroutine
def read(self):
response = yield self.ws.read_message()
assert response[0] == 'a'
messages = [json.loads(payload) for payload in json.loads(response[1:])]
raise tornado.gen.Return(messages)
class TestWebSocket(tornado.testing.AsyncHTTPTestCase):
def setup(cls):
random.seed(42)
def get_app(self):
return Connection.tornado_app({}, 'foo')
@tornado.gen.coroutine
def connect(self):
r1 = str(random.randint(0, 1000))
conn_id = ''.join(random.choice(string.ascii_letters) for _ in range(8))
port = self.get_http_port()
url = 'ws://localhost:{}/api/{}/{}/websocket'.format(port, r1, conn_id)
ws = yield tornado.websocket.websocket_connect(url)
response = yield ws.read_message()
assert response == 'o' # SockJS opening tag
raise tornado.gen.Return(AsyncSockJSClient(ws))
@tornado.testing.gen_test
def test_handshake(self):
client = yield self.connect()
response = yield client.read()
assert response == [{'kind': 'ready', 'data': {}}]
|
Implement async test framework for the server
|
Implement async test framework for the server
|
Python
|
apache-2.0
|
aldanor/sidecar,aldanor/sidecar,aldanor/sidecar
|
Implement async test framework for the server
|
# -*- coding: utf-8
import json
import random
import string
import tornado.gen
import tornado.testing
import tornado.web
import tornado.websocket
from sidecar.connection import Connection
class AsyncSockJSClient(object):
def __init__(self, ws):
self.ws = ws
def send(self, data):
self.ws.write_message(json.dumps(data))
@tornado.gen.coroutine
def read(self):
response = yield self.ws.read_message()
assert response[0] == 'a'
messages = [json.loads(payload) for payload in json.loads(response[1:])]
raise tornado.gen.Return(messages)
class TestWebSocket(tornado.testing.AsyncHTTPTestCase):
def setup(cls):
random.seed(42)
def get_app(self):
return Connection.tornado_app({}, 'foo')
@tornado.gen.coroutine
def connect(self):
r1 = str(random.randint(0, 1000))
conn_id = ''.join(random.choice(string.ascii_letters) for _ in range(8))
port = self.get_http_port()
url = 'ws://localhost:{}/api/{}/{}/websocket'.format(port, r1, conn_id)
ws = yield tornado.websocket.websocket_connect(url)
response = yield ws.read_message()
assert response == 'o' # SockJS opening tag
raise tornado.gen.Return(AsyncSockJSClient(ws))
@tornado.testing.gen_test
def test_handshake(self):
client = yield self.connect()
response = yield client.read()
assert response == [{'kind': 'ready', 'data': {}}]
|
<commit_before><commit_msg>Implement async test framework for the server<commit_after>
|
# -*- coding: utf-8
import json
import random
import string
import tornado.gen
import tornado.testing
import tornado.web
import tornado.websocket
from sidecar.connection import Connection
class AsyncSockJSClient(object):
def __init__(self, ws):
self.ws = ws
def send(self, data):
self.ws.write_message(json.dumps(data))
@tornado.gen.coroutine
def read(self):
response = yield self.ws.read_message()
assert response[0] == 'a'
messages = [json.loads(payload) for payload in json.loads(response[1:])]
raise tornado.gen.Return(messages)
class TestWebSocket(tornado.testing.AsyncHTTPTestCase):
def setup(cls):
random.seed(42)
def get_app(self):
return Connection.tornado_app({}, 'foo')
@tornado.gen.coroutine
def connect(self):
r1 = str(random.randint(0, 1000))
conn_id = ''.join(random.choice(string.ascii_letters) for _ in range(8))
port = self.get_http_port()
url = 'ws://localhost:{}/api/{}/{}/websocket'.format(port, r1, conn_id)
ws = yield tornado.websocket.websocket_connect(url)
response = yield ws.read_message()
assert response == 'o' # SockJS opening tag
raise tornado.gen.Return(AsyncSockJSClient(ws))
@tornado.testing.gen_test
def test_handshake(self):
client = yield self.connect()
response = yield client.read()
assert response == [{'kind': 'ready', 'data': {}}]
|
Implement async test framework for the server# -*- coding: utf-8
import json
import random
import string
import tornado.gen
import tornado.testing
import tornado.web
import tornado.websocket
from sidecar.connection import Connection
class AsyncSockJSClient(object):
def __init__(self, ws):
self.ws = ws
def send(self, data):
self.ws.write_message(json.dumps(data))
@tornado.gen.coroutine
def read(self):
response = yield self.ws.read_message()
assert response[0] == 'a'
messages = [json.loads(payload) for payload in json.loads(response[1:])]
raise tornado.gen.Return(messages)
class TestWebSocket(tornado.testing.AsyncHTTPTestCase):
def setup(cls):
random.seed(42)
def get_app(self):
return Connection.tornado_app({}, 'foo')
@tornado.gen.coroutine
def connect(self):
r1 = str(random.randint(0, 1000))
conn_id = ''.join(random.choice(string.ascii_letters) for _ in range(8))
port = self.get_http_port()
url = 'ws://localhost:{}/api/{}/{}/websocket'.format(port, r1, conn_id)
ws = yield tornado.websocket.websocket_connect(url)
response = yield ws.read_message()
assert response == 'o' # SockJS opening tag
raise tornado.gen.Return(AsyncSockJSClient(ws))
@tornado.testing.gen_test
def test_handshake(self):
client = yield self.connect()
response = yield client.read()
assert response == [{'kind': 'ready', 'data': {}}]
|
<commit_before><commit_msg>Implement async test framework for the server<commit_after># -*- coding: utf-8
import json
import random
import string
import tornado.gen
import tornado.testing
import tornado.web
import tornado.websocket
from sidecar.connection import Connection
class AsyncSockJSClient(object):
def __init__(self, ws):
self.ws = ws
def send(self, data):
self.ws.write_message(json.dumps(data))
@tornado.gen.coroutine
def read(self):
response = yield self.ws.read_message()
assert response[0] == 'a'
messages = [json.loads(payload) for payload in json.loads(response[1:])]
raise tornado.gen.Return(messages)
class TestWebSocket(tornado.testing.AsyncHTTPTestCase):
@classmethod
def setUpClass(cls):
random.seed(42)
def get_app(self):
return Connection.tornado_app({}, 'foo')
@tornado.gen.coroutine
def connect(self):
r1 = str(random.randint(0, 1000))
conn_id = ''.join(random.choice(string.ascii_letters) for _ in range(8))
port = self.get_http_port()
url = 'ws://localhost:{}/api/{}/{}/websocket'.format(port, r1, conn_id)
ws = yield tornado.websocket.websocket_connect(url)
response = yield ws.read_message()
assert response == 'o' # SockJS opening tag
raise tornado.gen.Return(AsyncSockJSClient(ws))
@tornado.testing.gen_test
def test_handshake(self):
client = yield self.connect()
response = yield client.read()
assert response == [{'kind': 'ready', 'data': {}}]
|
|
df556cb5f17dc05ca04c7f2bdd59637d39b06c52
|
setup.py
|
setup.py
|
from setuptools import setup
setup(name='fileup',
version=0.1,
description='Easily upload files to an FTP-server and get back the url.',
url='https://github.com/basnijholt/fileup',
author='Bas Nijholt',
license='BSD 3-clause',
py_modules=["fileup"],
entry_points={'console_scripts': ['fu=fileup:main']}
)
|
Make fileup installable and callable with 'fu fname_here.py'
|
Make fileup installable and callable with 'fu fname_here.py'
|
Python
|
bsd-3-clause
|
basnijholt/fileup
|
Make fileup installable and callable with 'fu fname_here.py'
|
from setuptools import setup
setup(name='fileup',
version=0.1,
description='Easily upload files to an FTP-server and get back the url.',
url='https://github.com/basnijholt/fileup',
author='Bas Nijholt',
license='BSD 3-clause',
py_modules=["fileup"],
entry_points={'console_scripts': ['fu=fileup:main']}
)
|
<commit_before><commit_msg>Make fileup installable and callable with 'fu fname_here.py'<commit_after>
|
from setuptools import setup
setup(name='fileup',
version=0.1,
description='Easily upload files to an FTP-server and get back the url.',
url='https://github.com/basnijholt/fileup',
author='Bas Nijholt',
license='BSD 3-clause',
py_modules=["fileup"],
entry_points={'console_scripts': ['fu=fileup:main']}
)
|
Make fileup installable and callable with 'fu fname_here.py'from setuptools import setup
setup(name='fileup',
version=0.1,
description='Easily upload files to an FTP-server and get back the url.',
url='https://github.com/basnijholt/fileup',
author='Bas Nijholt',
license='BSD 3-clause',
py_modules=["fileup"],
entry_points={'console_scripts': ['fu=fileup:main']}
)
|
<commit_before><commit_msg>Make fileup installable and callable with 'fu fname_here.py'<commit_after>from setuptools import setup
setup(name='fileup',
version=0.1,
description='Easily upload files to an FTP-server and get back the url.',
url='https://github.com/basnijholt/fileup',
author='Bas Nijholt',
license='BSD 3-clause',
py_modules=["fileup"],
entry_points={'console_scripts': ['fu=fileup:main']}
)
|
|
661c89f9342de4ec15137fed45e9be54864f8864
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='django-uuidfield',
version=".".join(map(str, __import__('uuidfield').__version__)),
author='David Cramer',
author_email='dcramer@gmail.com',
description='UUIDField in Django',
url='http://github.com/dcramer/django-uuidfield',
packages=find_packages(),
include_package_data=True,
classifiers=[
"Framework :: Django",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Operating System :: OS Independent",
"Topic :: Software Development"
],
)
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='django-uuidfield',
version=".".join(map(str, __import__('uuidfield').__version__)),
author='David Cramer',
author_email='dcramer@gmail.com',
description='UUIDField in Django',
url='http://github.com/dcramer/django-uuidfield',
zip_safe=False,
install_requires=[
'uuid',
],
packages=find_packages(),
include_package_data=True,
classifiers=[
"Framework :: Django",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Operating System :: OS Independent",
"Topic :: Software Development"
],
)
|
Include uuid module as req (for older python versions) and mark zip_safe as False
|
Include uuid module as req (for older python versions) and mark zip_safe as False
|
Python
|
bsd-3-clause
|
nebstrebor/django-shortuuidfield,mriveralee/django-shortuuidfield,kracekumar/django-uuidfield,dcramer/django-uuidfield
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='django-uuidfield',
version=".".join(map(str, __import__('uuidfield').__version__)),
author='David Cramer',
author_email='dcramer@gmail.com',
description='UUIDField in Django',
url='http://github.com/dcramer/django-uuidfield',
packages=find_packages(),
include_package_data=True,
classifiers=[
"Framework :: Django",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Operating System :: OS Independent",
"Topic :: Software Development"
],
)Include uuid module as req (for older python versions) and mark zip_safe as False
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='django-uuidfield',
version=".".join(map(str, __import__('uuidfield').__version__)),
author='David Cramer',
author_email='dcramer@gmail.com',
description='UUIDField in Django',
url='http://github.com/dcramer/django-uuidfield',
zip_safe=False,
install_requires=[
'uuid',
],
packages=find_packages(),
include_package_data=True,
classifiers=[
"Framework :: Django",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Operating System :: OS Independent",
"Topic :: Software Development"
],
)
|
<commit_before>#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='django-uuidfield',
version=".".join(map(str, __import__('uuidfield').__version__)),
author='David Cramer',
author_email='dcramer@gmail.com',
description='UUIDField in Django',
url='http://github.com/dcramer/django-uuidfield',
packages=find_packages(),
include_package_data=True,
classifiers=[
"Framework :: Django",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Operating System :: OS Independent",
"Topic :: Software Development"
],
)<commit_msg>Include uuid module as req (for older python versions) and mark zip_safe as False<commit_after>
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='django-uuidfield',
version=".".join(map(str, __import__('uuidfield').__version__)),
author='David Cramer',
author_email='dcramer@gmail.com',
description='UUIDField in Django',
url='http://github.com/dcramer/django-uuidfield',
zip_safe=False,
install_requires=[
'uuid',
],
packages=find_packages(),
include_package_data=True,
classifiers=[
"Framework :: Django",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Operating System :: OS Independent",
"Topic :: Software Development"
],
)
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='django-uuidfield',
version=".".join(map(str, __import__('uuidfield').__version__)),
author='David Cramer',
author_email='dcramer@gmail.com',
description='UUIDField in Django',
url='http://github.com/dcramer/django-uuidfield',
packages=find_packages(),
include_package_data=True,
classifiers=[
"Framework :: Django",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Operating System :: OS Independent",
"Topic :: Software Development"
],
)Include uuid module as req (for older python versions) and mark zip_safe as False#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='django-uuidfield',
version=".".join(map(str, __import__('uuidfield').__version__)),
author='David Cramer',
author_email='dcramer@gmail.com',
description='UUIDField in Django',
url='http://github.com/dcramer/django-uuidfield',
zip_safe=False,
install_requires=[
'uuid',
],
packages=find_packages(),
include_package_data=True,
classifiers=[
"Framework :: Django",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Operating System :: OS Independent",
"Topic :: Software Development"
],
)
|
<commit_before>#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='django-uuidfield',
version=".".join(map(str, __import__('uuidfield').__version__)),
author='David Cramer',
author_email='dcramer@gmail.com',
description='UUIDField in Django',
url='http://github.com/dcramer/django-uuidfield',
packages=find_packages(),
include_package_data=True,
classifiers=[
"Framework :: Django",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Operating System :: OS Independent",
"Topic :: Software Development"
],
)<commit_msg>Include uuid module as req (for older python versions) and mark zip_safe as False<commit_after>#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='django-uuidfield',
version=".".join(map(str, __import__('uuidfield').__version__)),
author='David Cramer',
author_email='dcramer@gmail.com',
description='UUIDField in Django',
url='http://github.com/dcramer/django-uuidfield',
zip_safe=False,
install_requires=[
'uuid',
],
packages=find_packages(),
include_package_data=True,
classifiers=[
"Framework :: Django",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Operating System :: OS Independent",
"Topic :: Software Development"
],
)
|
21d97ca9417142400d4ca89757ed312bf1066922
|
setup.py
|
setup.py
|
#!/usr/bin/env python
"""
jinja2-cli
==========
.. code:: shell
$ jinja2 helloworld.tmpl data.json --format=json
$ cat data.json | jinja2 helloworld.tmpl
$ curl -s http://httpbin.org/ip | jinja2 helloip.tmpl
$ curl -s http://httpbin.org/ip | jinja2 helloip.tmpl > helloip.html
"""
from setuptools import setup, find_packages
setup(
name='jinja2-cli',
version='0.4.3',
author='Matt Robenolt',
author_email='matt@ydekproductions.com',
url='https://github.com/mattrobenolt/jinja2-cli',
description='A CLI interface to Jinja2',
long_description=__doc__,
packages=find_packages(),
zip_safe=False,
license='BSD',
install_requires=[
'jinja2',
],
tests_require=[
'nose',
],
test_suite='nose.collector',
include_package_data=True,
entry_points={
'console_scripts': [
'jinja2 = jinja2cli:main',
]
},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
|
#!/usr/bin/env python
"""
jinja2-cli
==========
.. code:: shell
$ jinja2 helloworld.tmpl data.json --format=json
$ cat data.json | jinja2 helloworld.tmpl
$ curl -s http://httpbin.org/ip | jinja2 helloip.tmpl
$ curl -s http://httpbin.org/ip | jinja2 helloip.tmpl > helloip.html
"""
from setuptools import setup, find_packages
install_requires = ['jinja2']
setup(
name='jinja2-cli',
version='0.4.3',
author='Matt Robenolt',
author_email='matt@ydekproductions.com',
url='https://github.com/mattrobenolt/jinja2-cli',
description='A CLI interface to Jinja2',
long_description=__doc__,
packages=find_packages(),
zip_safe=False,
license='BSD',
install_requires=install_requires,
extras_require={
'yaml': install_requires + ['pyyaml'],
},
tests_require=[
'nose',
],
test_suite='nose.collector',
include_package_data=True,
entry_points={
'console_scripts': [
'jinja2 = jinja2cli:main',
]
},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
|
Add a [yaml] extra installer
|
Add a [yaml] extra installer
|
Python
|
bsd-2-clause
|
mattrobenolt/jinja2-cli,ralexander-phi/jinja2-cli
|
#!/usr/bin/env python
"""
jinja2-cli
==========
.. code:: shell
$ jinja2 helloworld.tmpl data.json --format=json
$ cat data.json | jinja2 helloworld.tmpl
$ curl -s http://httpbin.org/ip | jinja2 helloip.tmpl
$ curl -s http://httpbin.org/ip | jinja2 helloip.tmpl > helloip.html
"""
from setuptools import setup, find_packages
setup(
name='jinja2-cli',
version='0.4.3',
author='Matt Robenolt',
author_email='matt@ydekproductions.com',
url='https://github.com/mattrobenolt/jinja2-cli',
description='A CLI interface to Jinja2',
long_description=__doc__,
packages=find_packages(),
zip_safe=False,
license='BSD',
install_requires=[
'jinja2',
],
tests_require=[
'nose',
],
test_suite='nose.collector',
include_package_data=True,
entry_points={
'console_scripts': [
'jinja2 = jinja2cli:main',
]
},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
Add a [yaml] extra installer
|
#!/usr/bin/env python
"""
jinja2-cli
==========
.. code:: shell
$ jinja2 helloworld.tmpl data.json --format=json
$ cat data.json | jinja2 helloworld.tmpl
$ curl -s http://httpbin.org/ip | jinja2 helloip.tmpl
$ curl -s http://httpbin.org/ip | jinja2 helloip.tmpl > helloip.html
"""
from setuptools import setup, find_packages
install_requires = ['jinja2']
setup(
name='jinja2-cli',
version='0.4.3',
author='Matt Robenolt',
author_email='matt@ydekproductions.com',
url='https://github.com/mattrobenolt/jinja2-cli',
description='A CLI interface to Jinja2',
long_description=__doc__,
packages=find_packages(),
zip_safe=False,
license='BSD',
install_requires=install_requires,
extras_require={
'yaml': install_requires + ['pyyaml'],
},
tests_require=[
'nose',
],
test_suite='nose.collector',
include_package_data=True,
entry_points={
'console_scripts': [
'jinja2 = jinja2cli:main',
]
},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
|
<commit_before>#!/usr/bin/env python
"""
jinja2-cli
==========
.. code:: shell
$ jinja2 helloworld.tmpl data.json --format=json
$ cat data.json | jinja2 helloworld.tmpl
$ curl -s http://httpbin.org/ip | jinja2 helloip.tmpl
$ curl -s http://httpbin.org/ip | jinja2 helloip.tmpl > helloip.html
"""
from setuptools import setup, find_packages
setup(
name='jinja2-cli',
version='0.4.3',
author='Matt Robenolt',
author_email='matt@ydekproductions.com',
url='https://github.com/mattrobenolt/jinja2-cli',
description='A CLI interface to Jinja2',
long_description=__doc__,
packages=find_packages(),
zip_safe=False,
license='BSD',
install_requires=[
'jinja2',
],
tests_require=[
'nose',
],
test_suite='nose.collector',
include_package_data=True,
entry_points={
'console_scripts': [
'jinja2 = jinja2cli:main',
]
},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
<commit_msg>Add a [yaml] extra installer<commit_after>
|
#!/usr/bin/env python
"""
jinja2-cli
==========
.. code:: shell
$ jinja2 helloworld.tmpl data.json --format=json
$ cat data.json | jinja2 helloworld.tmpl
$ curl -s http://httpbin.org/ip | jinja2 helloip.tmpl
$ curl -s http://httpbin.org/ip | jinja2 helloip.tmpl > helloip.html
"""
from setuptools import setup, find_packages
install_requires = ['jinja2']
setup(
name='jinja2-cli',
version='0.4.3',
author='Matt Robenolt',
author_email='matt@ydekproductions.com',
url='https://github.com/mattrobenolt/jinja2-cli',
description='A CLI interface to Jinja2',
long_description=__doc__,
packages=find_packages(),
zip_safe=False,
license='BSD',
install_requires=install_requires,
extras_require={
'yaml': install_requires + ['pyyaml'],
},
tests_require=[
'nose',
],
test_suite='nose.collector',
include_package_data=True,
entry_points={
'console_scripts': [
'jinja2 = jinja2cli:main',
]
},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
|
#!/usr/bin/env python
"""
jinja2-cli
==========
.. code:: shell
$ jinja2 helloworld.tmpl data.json --format=json
$ cat data.json | jinja2 helloworld.tmpl
$ curl -s http://httpbin.org/ip | jinja2 helloip.tmpl
$ curl -s http://httpbin.org/ip | jinja2 helloip.tmpl > helloip.html
"""
from setuptools import setup, find_packages
setup(
name='jinja2-cli',
version='0.4.3',
author='Matt Robenolt',
author_email='matt@ydekproductions.com',
url='https://github.com/mattrobenolt/jinja2-cli',
description='A CLI interface to Jinja2',
long_description=__doc__,
packages=find_packages(),
zip_safe=False,
license='BSD',
install_requires=[
'jinja2',
],
tests_require=[
'nose',
],
test_suite='nose.collector',
include_package_data=True,
entry_points={
'console_scripts': [
'jinja2 = jinja2cli:main',
]
},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
Add a [yaml] extra installer#!/usr/bin/env python
"""
jinja2-cli
==========
.. code:: shell
$ jinja2 helloworld.tmpl data.json --format=json
$ cat data.json | jinja2 helloworld.tmpl
$ curl -s http://httpbin.org/ip | jinja2 helloip.tmpl
$ curl -s http://httpbin.org/ip | jinja2 helloip.tmpl > helloip.html
"""
from setuptools import setup, find_packages
install_requires = ['jinja2']
setup(
name='jinja2-cli',
version='0.4.3',
author='Matt Robenolt',
author_email='matt@ydekproductions.com',
url='https://github.com/mattrobenolt/jinja2-cli',
description='A CLI interface to Jinja2',
long_description=__doc__,
packages=find_packages(),
zip_safe=False,
license='BSD',
install_requires=install_requires,
extras_require={
'yaml': install_requires + ['pyyaml'],
},
tests_require=[
'nose',
],
test_suite='nose.collector',
include_package_data=True,
entry_points={
'console_scripts': [
'jinja2 = jinja2cli:main',
]
},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
|
<commit_before>#!/usr/bin/env python
"""
jinja2-cli
==========
.. code:: shell
$ jinja2 helloworld.tmpl data.json --format=json
$ cat data.json | jinja2 helloworld.tmpl
$ curl -s http://httpbin.org/ip | jinja2 helloip.tmpl
$ curl -s http://httpbin.org/ip | jinja2 helloip.tmpl > helloip.html
"""
from setuptools import setup, find_packages
setup(
name='jinja2-cli',
version='0.4.3',
author='Matt Robenolt',
author_email='matt@ydekproductions.com',
url='https://github.com/mattrobenolt/jinja2-cli',
description='A CLI interface to Jinja2',
long_description=__doc__,
packages=find_packages(),
zip_safe=False,
license='BSD',
install_requires=[
'jinja2',
],
tests_require=[
'nose',
],
test_suite='nose.collector',
include_package_data=True,
entry_points={
'console_scripts': [
'jinja2 = jinja2cli:main',
]
},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
<commit_msg>Add a [yaml] extra installer<commit_after>#!/usr/bin/env python
"""
jinja2-cli
==========
.. code:: shell
$ jinja2 helloworld.tmpl data.json --format=json
$ cat data.json | jinja2 helloworld.tmpl
$ curl -s http://httpbin.org/ip | jinja2 helloip.tmpl
$ curl -s http://httpbin.org/ip | jinja2 helloip.tmpl > helloip.html
"""
from setuptools import setup, find_packages
install_requires = ['jinja2']
setup(
name='jinja2-cli',
version='0.4.3',
author='Matt Robenolt',
author_email='matt@ydekproductions.com',
url='https://github.com/mattrobenolt/jinja2-cli',
description='A CLI interface to Jinja2',
long_description=__doc__,
packages=find_packages(),
zip_safe=False,
license='BSD',
install_requires=install_requires,
extras_require={
'yaml': install_requires + ['pyyaml'],
},
tests_require=[
'nose',
],
test_suite='nose.collector',
include_package_data=True,
entry_points={
'console_scripts': [
'jinja2 = jinja2cli:main',
]
},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
|
fc21078f0e9327800637a74d4127c666f88c9a88
|
test/global_variables/TestGlobalVariables.py
|
test/global_variables/TestGlobalVariables.py
|
"""Show global variables and check that they do indeed have global scopes."""
import os, time
import lldb
import unittest
main = False
class TestClassTypes(unittest.TestCase):
def setUp(self):
global main
# Save old working directory.
self.oldcwd = os.getcwd()
# Change current working directory if ${LLDB_TEST} is defined.
if ("LLDB_TEST" in os.environ):
os.chdir(os.path.join(os.environ["LLDB_TEST"], "global_variables"));
self.dbg = lldb.SBDebugger.Create() if main else lldb.DBG
if not self.dbg.IsValid():
raise Exception('Invalid debugger instance')
self.dbg.SetAsync(False)
self.ci = self.dbg.GetCommandInterpreter()
if not self.ci:
raise Exception('Could not get the command interpreter')
def tearDown(self):
# Restore old working directory.
os.chdir(self.oldcwd)
del self.dbg
def test_global_variables(self):
"""Test 'variable list -s -a' which omits args and shows scopes."""
res = lldb.SBCommandReturnObject()
exe = os.path.join(os.getcwd(), "a.out")
self.ci.HandleCommand("file " + exe, res)
self.assertTrue(res.Succeeded())
# Break inside the main.
self.ci.HandleCommand("breakpoint set -f main.c -l 20", res)
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().startswith(
"Breakpoint created: 1: file ='main.c', line = 20, locations = 1"))
self.ci.HandleCommand("run", res)
time.sleep(0.1)
self.assertTrue(res.Succeeded())
# The stop reason of the thread should be breakpoint.
self.ci.HandleCommand("thread list", res)
print "thread list ->", res.GetOutput()
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().find('state is Stopped') and
res.GetOutput().find('stop reason = breakpoint'))
# The breakpoint should have a hit count of 1.
self.ci.HandleCommand("breakpoint list", res)
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().find(' resolved, hit count = 1'))
# Check that GLOBAL scopes are indicated for the variables.
self.ci.HandleCommand("variable list -s -a", res);
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().find('GLOBAL: g_file_static_cstr') and
res.GetOutput().find('GLOBAL: g_file_global_int') and
res.GetOutput().find('GLOBAL: g_file_global_cstr'))
self.ci.HandleCommand("continue", res)
self.assertTrue(res.Succeeded())
if __name__ == '__main__':
lldb.SBDebugger.Initialize()
main = True
unittest.main()
lldb.SBDebugger.Terminate()
|
Add a test to show global variables and to verify that they do display as having global scopes.
|
Add a test to show global variables and to verify that they do display as having
global scopes.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@107522 91177308-0d34-0410-b5e6-96231b3b80d8
|
Python
|
apache-2.0
|
llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb
|
Add a test to show global variables and to verify that they do display as having
global scopes.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@107522 91177308-0d34-0410-b5e6-96231b3b80d8
|
"""Show global variables and check that they do indeed have global scopes."""
import os, time
import lldb
import unittest
main = False
class TestClassTypes(unittest.TestCase):
def setUp(self):
global main
# Save old working directory.
self.oldcwd = os.getcwd()
# Change current working directory if ${LLDB_TEST} is defined.
if ("LLDB_TEST" in os.environ):
os.chdir(os.path.join(os.environ["LLDB_TEST"], "global_variables"));
self.dbg = lldb.SBDebugger.Create() if main else lldb.DBG
if not self.dbg.IsValid():
raise Exception('Invalid debugger instance')
self.dbg.SetAsync(False)
self.ci = self.dbg.GetCommandInterpreter()
if not self.ci:
raise Exception('Could not get the command interpreter')
def tearDown(self):
# Restore old working directory.
os.chdir(self.oldcwd)
del self.dbg
def test_global_variables(self):
"""Test 'variable list -s -a' which omits args and shows scopes."""
res = lldb.SBCommandReturnObject()
exe = os.path.join(os.getcwd(), "a.out")
self.ci.HandleCommand("file " + exe, res)
self.assertTrue(res.Succeeded())
# Break inside the main.
self.ci.HandleCommand("breakpoint set -f main.c -l 20", res)
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().startswith(
"Breakpoint created: 1: file ='main.c', line = 20, locations = 1"))
self.ci.HandleCommand("run", res)
time.sleep(0.1)
self.assertTrue(res.Succeeded())
# The stop reason of the thread should be breakpoint.
self.ci.HandleCommand("thread list", res)
print "thread list ->", res.GetOutput()
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().find('state is Stopped') and
res.GetOutput().find('stop reason = breakpoint'))
# The breakpoint should have a hit count of 1.
self.ci.HandleCommand("breakpoint list", res)
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().find(' resolved, hit count = 1'))
# Check that GLOBAL scopes are indicated for the variables.
self.ci.HandleCommand("variable list -s -a", res);
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().find('GLOBAL: g_file_static_cstr') and
res.GetOutput().find('GLOBAL: g_file_global_int') and
res.GetOutput().find('GLOBAL: g_file_global_cstr'))
self.ci.HandleCommand("continue", res)
self.assertTrue(res.Succeeded())
if __name__ == '__main__':
lldb.SBDebugger.Initialize()
main = True
unittest.main()
lldb.SBDebugger.Terminate()
|
<commit_before><commit_msg>Add a test to show global variables and to verify that they do display as having
global scopes.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@107522 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>
|
"""Show global variables and check that they do indeed have global scopes."""
import os, time
import lldb
import unittest
main = False
class TestClassTypes(unittest.TestCase):
def setUp(self):
global main
# Save old working directory.
self.oldcwd = os.getcwd()
# Change current working directory if ${LLDB_TEST} is defined.
if ("LLDB_TEST" in os.environ):
os.chdir(os.path.join(os.environ["LLDB_TEST"], "global_variables"));
self.dbg = lldb.SBDebugger.Create() if main else lldb.DBG
if not self.dbg.IsValid():
raise Exception('Invalid debugger instance')
self.dbg.SetAsync(False)
self.ci = self.dbg.GetCommandInterpreter()
if not self.ci:
raise Exception('Could not get the command interpreter')
def tearDown(self):
# Restore old working directory.
os.chdir(self.oldcwd)
del self.dbg
def test_global_variables(self):
"""Test 'variable list -s -a' which omits args and shows scopes."""
res = lldb.SBCommandReturnObject()
exe = os.path.join(os.getcwd(), "a.out")
self.ci.HandleCommand("file " + exe, res)
self.assertTrue(res.Succeeded())
# Break inside the main.
self.ci.HandleCommand("breakpoint set -f main.c -l 20", res)
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().startswith(
"Breakpoint created: 1: file ='main.c', line = 20, locations = 1"))
self.ci.HandleCommand("run", res)
time.sleep(0.1)
self.assertTrue(res.Succeeded())
# The stop reason of the thread should be breakpoint.
self.ci.HandleCommand("thread list", res)
print "thread list ->", res.GetOutput()
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().find('state is Stopped') and
res.GetOutput().find('stop reason = breakpoint'))
# The breakpoint should have a hit count of 1.
self.ci.HandleCommand("breakpoint list", res)
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().find(' resolved, hit count = 1'))
# Check that GLOBAL scopes are indicated for the variables.
self.ci.HandleCommand("variable list -s -a", res);
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().find('GLOBAL: g_file_static_cstr') and
res.GetOutput().find('GLOBAL: g_file_global_int') and
res.GetOutput().find('GLOBAL: g_file_global_cstr'))
self.ci.HandleCommand("continue", res)
self.assertTrue(res.Succeeded())
if __name__ == '__main__':
lldb.SBDebugger.Initialize()
main = True
unittest.main()
lldb.SBDebugger.Terminate()
|
Add a test to show global variables and to verify that they do display as having
global scopes.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@107522 91177308-0d34-0410-b5e6-96231b3b80d8"""Show global variables and check that they do indeed have global scopes."""
import os, time
import lldb
import unittest
main = False
class TestClassTypes(unittest.TestCase):
def setUp(self):
global main
# Save old working directory.
self.oldcwd = os.getcwd()
# Change current working directory if ${LLDB_TEST} is defined.
if ("LLDB_TEST" in os.environ):
os.chdir(os.path.join(os.environ["LLDB_TEST"], "global_variables"));
self.dbg = lldb.SBDebugger.Create() if main else lldb.DBG
if not self.dbg.IsValid():
raise Exception('Invalid debugger instance')
self.dbg.SetAsync(False)
self.ci = self.dbg.GetCommandInterpreter()
if not self.ci:
raise Exception('Could not get the command interpreter')
def tearDown(self):
# Restore old working directory.
os.chdir(self.oldcwd)
del self.dbg
def test_global_variables(self):
"""Test 'variable list -s -a' which omits args and shows scopes."""
res = lldb.SBCommandReturnObject()
exe = os.path.join(os.getcwd(), "a.out")
self.ci.HandleCommand("file " + exe, res)
self.assertTrue(res.Succeeded())
# Break inside the main.
self.ci.HandleCommand("breakpoint set -f main.c -l 20", res)
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().startswith(
"Breakpoint created: 1: file ='main.c', line = 20, locations = 1"))
self.ci.HandleCommand("run", res)
time.sleep(0.1)
self.assertTrue(res.Succeeded())
# The stop reason of the thread should be breakpoint.
self.ci.HandleCommand("thread list", res)
print "thread list ->", res.GetOutput()
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().find('state is Stopped') and
res.GetOutput().find('stop reason = breakpoint'))
# The breakpoint should have a hit count of 1.
self.ci.HandleCommand("breakpoint list", res)
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().find(' resolved, hit count = 1'))
# Check that GLOBAL scopes are indicated for the variables.
self.ci.HandleCommand("variable list -s -a", res);
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().find('GLOBAL: g_file_static_cstr') and
res.GetOutput().find('GLOBAL: g_file_global_int') and
res.GetOutput().find('GLOBAL: g_file_global_cstr'))
self.ci.HandleCommand("continue", res)
self.assertTrue(res.Succeeded())
if __name__ == '__main__':
lldb.SBDebugger.Initialize()
main = True
unittest.main()
lldb.SBDebugger.Terminate()
|
<commit_before><commit_msg>Add a test to show global variables and to verify that they do display as having
global scopes.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@107522 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>"""Show global variables and check that they do indeed have global scopes."""
import os, time
import lldb
import unittest
main = False
class TestClassTypes(unittest.TestCase):
def setUp(self):
global main
# Save old working directory.
self.oldcwd = os.getcwd()
# Change current working directory if ${LLDB_TEST} is defined.
if ("LLDB_TEST" in os.environ):
os.chdir(os.path.join(os.environ["LLDB_TEST"], "global_variables"));
self.dbg = lldb.SBDebugger.Create() if main else lldb.DBG
if not self.dbg.IsValid():
raise Exception('Invalid debugger instance')
self.dbg.SetAsync(False)
self.ci = self.dbg.GetCommandInterpreter()
if not self.ci:
raise Exception('Could not get the command interpreter')
def tearDown(self):
# Restore old working directory.
os.chdir(self.oldcwd)
del self.dbg
def test_global_variables(self):
"""Test 'variable list -s -a' which omits args and shows scopes."""
res = lldb.SBCommandReturnObject()
exe = os.path.join(os.getcwd(), "a.out")
self.ci.HandleCommand("file " + exe, res)
self.assertTrue(res.Succeeded())
# Break inside the main.
self.ci.HandleCommand("breakpoint set -f main.c -l 20", res)
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().startswith(
"Breakpoint created: 1: file ='main.c', line = 20, locations = 1"))
self.ci.HandleCommand("run", res)
time.sleep(0.1)
self.assertTrue(res.Succeeded())
# The stop reason of the thread should be breakpoint.
self.ci.HandleCommand("thread list", res)
print "thread list ->", res.GetOutput()
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().find('state is Stopped') and
res.GetOutput().find('stop reason = breakpoint'))
# The breakpoint should have a hit count of 1.
self.ci.HandleCommand("breakpoint list", res)
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().find(' resolved, hit count = 1'))
# Check that GLOBAL scopes are indicated for the variables.
self.ci.HandleCommand("variable list -s -a", res);
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().find('GLOBAL: g_file_static_cstr') and
res.GetOutput().find('GLOBAL: g_file_global_int') and
res.GetOutput().find('GLOBAL: g_file_global_cstr'))
self.ci.HandleCommand("continue", res)
self.assertTrue(res.Succeeded())
if __name__ == '__main__':
lldb.SBDebugger.Initialize()
main = True
unittest.main()
lldb.SBDebugger.Terminate()
|
|
af6a1dd34fe9323a30da2c1d998aee687c2b46e1
|
people/migrations/0005_auto_20160507_1207.py
|
people/migrations/0005_auto_20160507_1207.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-07 10:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0004_auto_20150402_1740'),
]
operations = [
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(max_length=254, unique=True, verbose_name='email address'),
),
migrations.AlterField(
model_name='user',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AlterField(
model_name='user',
name='last_login',
field=models.DateTimeField(blank=True, null=True, verbose_name='last login'),
),
]
|
Add a migration for the `people` app
|
Add a migration for the `people` app
|
Python
|
bsd-3-clause
|
WebCampZg/conference-web,WebCampZg/conference-web,WebCampZg/conference-web
|
Add a migration for the `people` app
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-07 10:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0004_auto_20150402_1740'),
]
operations = [
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(max_length=254, unique=True, verbose_name='email address'),
),
migrations.AlterField(
model_name='user',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AlterField(
model_name='user',
name='last_login',
field=models.DateTimeField(blank=True, null=True, verbose_name='last login'),
),
]
|
<commit_before><commit_msg>Add a migration for the `people` app<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-07 10:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0004_auto_20150402_1740'),
]
operations = [
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(max_length=254, unique=True, verbose_name='email address'),
),
migrations.AlterField(
model_name='user',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AlterField(
model_name='user',
name='last_login',
field=models.DateTimeField(blank=True, null=True, verbose_name='last login'),
),
]
|
Add a migration for the `people` app# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-07 10:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0004_auto_20150402_1740'),
]
operations = [
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(max_length=254, unique=True, verbose_name='email address'),
),
migrations.AlterField(
model_name='user',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AlterField(
model_name='user',
name='last_login',
field=models.DateTimeField(blank=True, null=True, verbose_name='last login'),
),
]
|
<commit_before><commit_msg>Add a migration for the `people` app<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-07 10:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0004_auto_20150402_1740'),
]
operations = [
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(max_length=254, unique=True, verbose_name='email address'),
),
migrations.AlterField(
model_name='user',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AlterField(
model_name='user',
name='last_login',
field=models.DateTimeField(blank=True, null=True, verbose_name='last login'),
),
]
|
|
af80e03af16a36a393d2bf060ae6aac3622370dc
|
sbgnpdschema/src/doc/generate_documentation.py
|
sbgnpdschema/src/doc/generate_documentation.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import optparse
import os
import libxml2
import libxslt
def generate_documentation(src_file, destination_file, stylesheet_file):
stylesheet_args = dict()
style = libxslt.parseStylesheetFile(stylesheet_file)
document = libxml2.parseFile(src_file)
result = style.applyStylesheet(document, stylesheet_args)
fh = open(destination_file, "w")
style.saveResultToFile(fh, result)
fh.close()
def main():
parser = optparse.OptionParser()
parser.add_option("-o", "--output-file", metavar="FILE",
dest="output", help="Save HTML doc to FILE")
parser.add_option("-s", "--stylesheet-file", metavar="FILE",
dest="stylesheet", help="Use FILE as stylesheet")
options, arguments = parser.parse_args()
if not options.output:
parser.error("Output file is required.")
if not options.stylesheet or not os.path.isfile(options.stylesheet):
parser.error("Stylesheet file is required")
if not arguments or not os.path.isfile(arguments[0]):
parser.error("Source XSD file required")
source = arguments[0]
generate_documentation(source, options.output, options.stylesheet)
if __name__ == '__main__':
main()
|
Add a Python based documentation generator, which requires libxml2 and libxslt.
|
Add a Python based documentation generator, which requires libxml2 and libxslt.
|
Python
|
lgpl-2.1
|
dc-atlas/bcml,dc-atlas/bcml,dc-atlas/bcml,dc-atlas/bcml
|
Add a Python based documentation generator, which requires libxml2 and libxslt.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import optparse
import os
import libxml2
import libxslt
def generate_documentation(src_file, destination_file, stylesheet_file):
stylesheet_args = dict()
style = libxslt.parseStylesheetFile(stylesheet_file)
document = libxml2.parseFile(src_file)
result = style.applyStylesheet(document, stylesheet_args)
fh = open(destination_file, "w")
style.saveResultToFile(fh, result)
fh.close()
def main():
parser = optparse.OptionParser()
parser.add_option("-o", "--output-file", metavar="FILE",
dest="output", help="Save HTML doc to FILE")
parser.add_option("-s", "--stylesheet-file", metavar="FILE",
dest="stylesheet", help="Use FILE as stylesheet")
options, arguments = parser.parse_args()
if not options.output:
parser.error("Output file is required.")
if not options.stylesheet or not os.path.isfile(options.stylesheet):
parser.error("Stylesheet file is required")
if not arguments or not os.path.isfile(arguments[0]):
parser.error("Source XSD file required")
source = arguments[0]
generate_documentation(source, options.output, options.stylesheet)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a Python based documentation generator, which requires libxml2 and libxslt.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import optparse
import os
import libxml2
import libxslt
def generate_documentation(src_file, destination_file, stylesheet_file):
stylesheet_args = dict()
style = libxslt.parseStylesheetFile(stylesheet_file)
document = libxml2.parseFile(src_file)
result = style.applyStylesheet(document, stylesheet_args)
fh = open(destination_file, "w")
style.saveResultToFile(fh, result)
fh.close()
def main():
parser = optparse.OptionParser()
parser.add_option("-o", "--output-file", metavar="FILE",
dest="output", help="Save HTML doc to FILE")
parser.add_option("-s", "--stylesheet-file", metavar="FILE",
dest="stylesheet", help="Use FILE as stylesheet")
options, arguments = parser.parse_args()
if not options.output:
parser.error("Output file is required.")
if not options.stylesheet or not os.path.isfile(options.stylesheet):
parser.error("Stylesheet file is required")
if not arguments or not os.path.isfile(arguments[0]):
parser.error("Source XSD file required")
source = arguments[0]
generate_documentation(source, options.output, options.stylesheet)
if __name__ == '__main__':
main()
|
Add a Python based documentation generator, which requires libxml2 and libxslt.#!/usr/bin/env python
# -*- coding: utf-8 -*-
import optparse
import os
import libxml2
import libxslt
def generate_documentation(src_file, destination_file, stylesheet_file):
stylesheet_args = dict()
style = libxslt.parseStylesheetFile(stylesheet_file)
document = libxml2.parseFile(src_file)
result = style.applyStylesheet(document, stylesheet_args)
fh = open(destination_file, "w")
style.saveResultToFile(fh, result)
fh.close()
def main():
parser = optparse.OptionParser()
parser.add_option("-o", "--output-file", metavar="FILE",
dest="output", help="Save HTML doc to FILE")
parser.add_option("-s", "--stylesheet-file", metavar="FILE",
dest="stylesheet", help="Use FILE as stylesheet")
options, arguments = parser.parse_args()
if not options.output:
parser.error("Output file is required.")
if not options.stylesheet or not os.path.isfile(options.stylesheet):
parser.error("Stylesheet file is required")
if not arguments or not os.path.isfile(arguments[0]):
parser.error("Source XSD file required")
source = arguments[0]
generate_documentation(source, options.output, options.stylesheet)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a Python based documentation generator, which requires libxml2 and libxslt.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import optparse
import os
import libxml2
import libxslt
def generate_documentation(src_file, destination_file, stylesheet_file):
stylesheet_args = dict()
style = libxslt.parseStylesheetFile(stylesheet_file)
document = libxml2.parseFile(src_file)
result = style.applyStylesheet(document, stylesheet_args)
fh = open(destination_file, "w")
style.saveResultToFile(fh, result)
fh.close()
def main():
parser = optparse.OptionParser()
parser.add_option("-o", "--output-file", metavar="FILE",
dest="output", help="Save HTML doc to FILE")
parser.add_option("-s", "--stylesheet-file", metavar="FILE",
dest="stylesheet", help="Use FILE as stylesheet")
options, arguments = parser.parse_args()
if not options.output:
parser.error("Output file is required.")
if not options.stylesheet or not os.path.isfile(options.stylesheet):
parser.error("Stylesheet file is required")
if not arguments or not os.path.isfile(arguments[0]):
parser.error("Source XSD file required")
source = arguments[0]
generate_documentation(source, options.output, options.stylesheet)
if __name__ == '__main__':
main()
|
|
1b5342f23a8f7d994d82fbf9971c0515ae9c14fe
|
events/management/commands/event_send_reminder.py
|
events/management/commands/event_send_reminder.py
|
import json
import urllib2
import time
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import translation
from events.models import Event, Session, Registration
from post_office import mail
class Command(BaseCommand):
help = "Send confirmation emails."
def add_arguments(self, parser):
parser.add_argument('--event-pk', dest='event_pk',
default=None, type=int)
parser.add_argument('--sleep-time', dest='sleep_time',
default=None, type=int)
parser.add_argument('--workshop-event-pk', dest='workshop_event_pk',
default=None, type=int)
def handle(self, *args, **options):
translation.activate(settings.LANGUAGE_CODE)
users = {}
event = Event.objects.get(pk=options['event_pk'])
list_url = "http://medicalacademy.org/portal/list/registration/get/data?event_id="
for session in Session.objects.filter(event=event, vma_id__isnull=False):
self.stdout.write(u"Getting {}...".format(session))
url = list_url + str(session.vma_id)
data = urllib2.urlopen(url)
users[session.vma_id] = json.load(data)
count = len(users[session.vma_id])
self.stdout.write(u"We got {}.".format(count))
for registration in Registration.objects.filter(is_deleted=False,
reminder_sent=False,
moved_sessions__isnull=False).distinct():
enjaz_sessions = registration.first_priority_sessions.all() | \
registration.second_priority_sessions.all()
if not enjaz_sessions.exists():
continue
session_count = registration.moved_sessions.count()
programs = []
workshops = []
others = []
for session in registration.moved_sessions.all():
if session.vma_time_code:
workshop_event_pk = options['workshop_event_pk']
for user in users[workshop_event_pk]:
if user['email'].lower() == registration.get_email().lower():
self.stdout.write("Workshop: Found {}!".format(user['email']))
workshops.append((session, user['confirmation_link']))
break
elif session.vma_id:
for user in users[session.vma_id]:
if user['email'].lower() == registration.get_email().lower():
self.stdout.write("Program: Found {}!".format(user['email']))
programs.append((session, user['confirmation_link']))
break
else:
others.append(session)
email_context = {'registration': registration,
'session_count': session_count,
'event': event,
'programs': programs,
'workshops': workshops,
'others': others}
mail.send([registration.get_email()],
template="event_registration_reminder",
context=email_context)
registration.reminder_sent = True
registration.save()
self.stdout.write("Sent to {}!".format(registration.get_email()))
if options['sleep_time']:
time.sleep(options['sleep_time'])
|
Add script for sending reminders
|
Add script for sending reminders
|
Python
|
agpl-3.0
|
enjaz/enjaz,enjaz/enjaz,enjaz/enjaz,enjaz/enjaz,osamak/student-portal,osamak/student-portal,osamak/student-portal,osamak/student-portal,osamak/student-portal,enjaz/enjaz
|
Add script for sending reminders
|
import json
import urllib2
import time
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import translation
from events.models import Event, Session, Registration
from post_office import mail
class Command(BaseCommand):
help = "Send confirmation emails."
def add_arguments(self, parser):
parser.add_argument('--event-pk', dest='event_pk',
default=None, type=int)
parser.add_argument('--sleep-time', dest='sleep_time',
default=None, type=int)
parser.add_argument('--workshop-event-pk', dest='workshop_event_pk',
default=None, type=int)
def handle(self, *args, **options):
translation.activate(settings.LANGUAGE_CODE)
users = {}
event = Event.objects.get(pk=options['event_pk'])
list_url = "http://medicalacademy.org/portal/list/registration/get/data?event_id="
for session in Session.objects.filter(event=event, vma_id__isnull=False):
self.stdout.write(u"Getting {}...".format(session))
url = list_url + str(session.vma_id)
data = urllib2.urlopen(url)
users[session.vma_id] = json.load(data)
count = len(users[session.vma_id])
self.stdout.write(u"We got {}.".format(count))
for registration in Registration.objects.filter(is_deleted=False,
reminder_sent=False,
moved_sessions__isnull=False).distinct():
enjaz_sessions = registration.first_priority_sessions.all() | \
registration.second_priority_sessions.all()
if not enjaz_sessions.exists():
continue
session_count = registration.moved_sessions.count()
programs = []
workshops = []
others = []
for session in registration.moved_sessions.all():
if session.vma_time_code:
workshop_event_pk = options['workshop_event_pk']
for user in users[workshop_event_pk]:
if user['email'].lower() == registration.get_email().lower():
self.stdout.write("Workshop: Found {}!".format(user['email']))
workshops.append((session, user['confirmation_link']))
break
elif session.vma_id:
for user in users[session.vma_id]:
if user['email'].lower() == registration.get_email().lower():
self.stdout.write("Program: Found {}!".format(user['email']))
programs.append((session, user['confirmation_link']))
break
else:
others.append(session)
email_context = {'registration': registration,
'session_count': session_count,
'event': event,
'programs': programs,
'workshops': workshops,
'others': others}
mail.send([registration.get_email()],
template="event_registration_reminder",
context=email_context)
registration.reminder_sent = True
registration.save()
self.stdout.write("Sent to {}!".format(registration.get_email()))
if options['sleep_time']:
time.sleep(options['sleep_time'])
|
<commit_before><commit_msg>Add script for sending reminders<commit_after>
|
import json
import urllib2
import time
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import translation
from events.models import Event, Session, Registration
from post_office import mail
class Command(BaseCommand):
help = "Send confirmation emails."
def add_arguments(self, parser):
parser.add_argument('--event-pk', dest='event_pk',
default=None, type=int)
parser.add_argument('--sleep-time', dest='sleep_time',
default=None, type=int)
parser.add_argument('--workshop-event-pk', dest='workshop_event_pk',
default=None, type=int)
def handle(self, *args, **options):
translation.activate(settings.LANGUAGE_CODE)
users = {}
event = Event.objects.get(pk=options['event_pk'])
list_url = "http://medicalacademy.org/portal/list/registration/get/data?event_id="
for session in Session.objects.filter(event=event, vma_id__isnull=False):
self.stdout.write(u"Getting {}...".format(session))
url = list_url + str(session.vma_id)
data = urllib2.urlopen(url)
users[session.vma_id] = json.load(data)
count = len(users[session.vma_id])
self.stdout.write(u"We got {}.".format(count))
for registration in Registration.objects.filter(is_deleted=False,
reminder_sent=False,
moved_sessions__isnull=False).distinct():
enjaz_sessions = registration.first_priority_sessions.all() | \
registration.second_priority_sessions.all()
if not enjaz_sessions.exists():
continue
session_count = registration.moved_sessions.count()
programs = []
workshops = []
others = []
for session in registration.moved_sessions.all():
if session.vma_time_code:
workshop_event_pk = options['workshop_event_pk']
for user in users[workshop_event_pk]:
if user['email'].lower() == registration.get_email().lower():
self.stdout.write("Workshop: Found {}!".format(user['email']))
workshops.append((session, user['confirmation_link']))
break
elif session.vma_id:
for user in users[session.vma_id]:
if user['email'].lower() == registration.get_email().lower():
self.stdout.write("Program: Found {}!".format(user['email']))
programs.append((session, user['confirmation_link']))
break
else:
others.append(session)
email_context = {'registration': registration,
'session_count': session_count,
'event': event,
'programs': programs,
'workshops': workshops,
'others': others}
mail.send([registration.get_email()],
template="event_registration_reminder",
context=email_context)
registration.reminder_sent = True
registration.save()
self.stdout.write("Sent to {}!".format(registration.get_email()))
if options['sleep_time']:
time.sleep(options['sleep_time'])
|
Add script for sending remindersimport json
import urllib2
import time
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import translation
from events.models import Event, Session, Registration
from post_office import mail
class Command(BaseCommand):
help = "Send confirmation emails."
def add_arguments(self, parser):
parser.add_argument('--event-pk', dest='event_pk',
default=None, type=int)
parser.add_argument('--sleep-time', dest='sleep_time',
default=None, type=int)
parser.add_argument('--workshop-event-pk', dest='workshop_event_pk',
default=None, type=int)
def handle(self, *args, **options):
translation.activate(settings.LANGUAGE_CODE)
users = {}
event = Event.objects.get(pk=options['event_pk'])
list_url = "http://medicalacademy.org/portal/list/registration/get/data?event_id="
for session in Session.objects.filter(event=event, vma_id__isnull=False):
self.stdout.write(u"Getting {}...".format(session))
url = list_url + str(session.vma_id)
data = urllib2.urlopen(url)
users[session.vma_id] = json.load(data)
count = len(users[session.vma_id])
self.stdout.write(u"We got {}.".format(count))
for registration in Registration.objects.filter(is_deleted=False,
reminder_sent=False,
moved_sessions__isnull=False).distinct():
enjaz_sessions = registration.first_priority_sessions.all() | \
registration.second_priority_sessions.all()
if not enjaz_sessions.exists():
continue
session_count = registration.moved_sessions.count()
programs = []
workshops = []
others = []
for session in registration.moved_sessions.all():
if session.vma_time_code:
workshop_event_pk = options['workshop_event_pk']
for user in users[workshop_event_pk]:
if user['email'].lower() == registration.get_email().lower():
self.stdout.write("Workshop: Found {}!".format(user['email']))
workshops.append((session, user['confirmation_link']))
break
elif session.vma_id:
for user in users[session.vma_id]:
if user['email'].lower() == registration.get_email().lower():
self.stdout.write("Program: Found {}!".format(user['email']))
programs.append((session, user['confirmation_link']))
break
else:
others.append(session)
email_context = {'registration': registration,
'session_count': session_count,
'event': event,
'programs': programs,
'workshops': workshops,
'others': others}
mail.send([registration.get_email()],
template="event_registration_reminder",
context=email_context)
registration.reminder_sent = True
registration.save()
self.stdout.write("Sent to {}!".format(registration.get_email()))
if options['sleep_time']:
time.sleep(options['sleep_time'])
|
<commit_before><commit_msg>Add script for sending reminders<commit_after>import json
import urllib2
import time
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import translation
from events.models import Event, Session, Registration
from post_office import mail
class Command(BaseCommand):
help = "Send confirmation emails."
def add_arguments(self, parser):
parser.add_argument('--event-pk', dest='event_pk',
default=None, type=int)
parser.add_argument('--sleep-time', dest='sleep_time',
default=None, type=int)
parser.add_argument('--workshop-event-pk', dest='workshop_event_pk',
default=None, type=int)
def handle(self, *args, **options):
translation.activate(settings.LANGUAGE_CODE)
users = {}
event = Event.objects.get(pk=options['event_pk'])
list_url = "http://medicalacademy.org/portal/list/registration/get/data?event_id="
for session in Session.objects.filter(event=event, vma_id__isnull=False):
self.stdout.write(u"Getting {}...".format(session))
url = list_url + str(session.vma_id)
data = urllib2.urlopen(url)
users[session.vma_id] = json.load(data)
count = len(users[session.vma_id])
self.stdout.write(u"We got {}.".format(count))
for registration in Registration.objects.filter(is_deleted=False,
reminder_sent=False,
moved_sessions__isnull=False).distinct():
enjaz_sessions = registration.first_priority_sessions.all() | \
registration.second_priority_sessions.all()
if not enjaz_sessions.exists():
continue
session_count = registration.moved_sessions.count()
programs = []
workshops = []
others = []
for session in registration.moved_sessions.all():
if session.vma_time_code:
workshop_event_pk = options['workshop_event_pk']
for user in users[workshop_event_pk]:
if user['email'].lower() == registration.get_email().lower():
self.stdout.write("Workshop: Found {}!".format(user['email']))
workshops.append((session, user['confirmation_link']))
break
elif session.vma_id:
for user in users[session.vma_id]:
if user['email'].lower() == registration.get_email().lower():
self.stdout.write("Program: Found {}!".format(user['email']))
programs.append((session, user['confirmation_link']))
break
else:
others.append(session)
email_context = {'registration': registration,
'session_count': session_count,
'event': event,
'programs': programs,
'workshops': workshops,
'others': others}
mail.send([registration.get_email()],
template="event_registration_reminder",
context=email_context)
registration.reminder_sent = True
registration.save()
self.stdout.write("Sent to {}!".format(registration.get_email()))
if options['sleep_time']:
time.sleep(options['sleep_time'])
|
|
90f02b8fd7c62be0fbda1e917ee2dea64ec1a47d
|
test/command_line/test_reciprocal_lattice_viewer.py
|
test/command_line/test_reciprocal_lattice_viewer.py
|
def test_gltbx_is_available():
"""
This is not a real test for dials.rlv, which is slightly difficult to write.
However, one common error mode is that the gltbx libraries are not available
because they were not built earlier. This will reliably cause dials.rlv to
fail even though the build setup was apparently fine.
"""
import gltbx.gl
assert gltbx.gl.ext
|
Add a "test" for dials.reciprocal_lattice_viewer
|
Add a "test" for dials.reciprocal_lattice_viewer
|
Python
|
bsd-3-clause
|
dials/dials,dials/dials,dials/dials,dials/dials,dials/dials
|
Add a "test" for dials.reciprocal_lattice_viewer
|
def test_gltbx_is_available():
"""
This is not a real test for dials.rlv, which is slightly difficult to write.
However, one common error mode is that the gltbx libraries are not available
because they were not built earlier. This will reliably cause dials.rlv to
fail even though the build setup was apparently fine.
"""
import gltbx.gl
assert gltbx.gl.ext
|
<commit_before><commit_msg>Add a "test" for dials.reciprocal_lattice_viewer<commit_after>
|
def test_gltbx_is_available():
"""
This is not a real test for dials.rlv, which is slightly difficult to write.
However, one common error mode is that the gltbx libraries are not available
because they were not built earlier. This will reliably cause dials.rlv to
fail even though the build setup was apparently fine.
"""
import gltbx.gl
assert gltbx.gl.ext
|
Add a "test" for dials.reciprocal_lattice_viewerdef test_gltbx_is_available():
"""
This is not a real test for dials.rlv, which is slightly difficult to write.
However, one common error mode is that the gltbx libraries are not available
because they were not built earlier. This will reliably cause dials.rlv to
fail even though the build setup was apparently fine.
"""
import gltbx.gl
assert gltbx.gl.ext
|
<commit_before><commit_msg>Add a "test" for dials.reciprocal_lattice_viewer<commit_after>def test_gltbx_is_available():
"""
This is not a real test for dials.rlv, which is slightly difficult to write.
However, one common error mode is that the gltbx libraries are not available
because they were not built earlier. This will reliably cause dials.rlv to
fail even though the build setup was apparently fine.
"""
import gltbx.gl
assert gltbx.gl.ext
|
|
46c60ace0c48254c67b121d3dda705a49b9da542
|
apps/core/migrations/0006_auto_20171017_1257.py
|
apps/core/migrations/0006_auto_20171017_1257.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-17 12:57
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('core', '0005_strain_reference'),
]
operations = [
migrations.AlterField(
model_name='experiment',
name='omics_area',
field=mptt.fields.TreeForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experiments', related_query_name='experiment', to='core.OmicsArea'),
),
]
|
Add migration for Strain.reference TreeForeignKey
|
Add migration for Strain.reference TreeForeignKey
|
Python
|
bsd-3-clause
|
Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel
|
Add migration for Strain.reference TreeForeignKey
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-17 12:57
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('core', '0005_strain_reference'),
]
operations = [
migrations.AlterField(
model_name='experiment',
name='omics_area',
field=mptt.fields.TreeForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experiments', related_query_name='experiment', to='core.OmicsArea'),
),
]
|
<commit_before><commit_msg>Add migration for Strain.reference TreeForeignKey<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-17 12:57
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('core', '0005_strain_reference'),
]
operations = [
migrations.AlterField(
model_name='experiment',
name='omics_area',
field=mptt.fields.TreeForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experiments', related_query_name='experiment', to='core.OmicsArea'),
),
]
|
Add migration for Strain.reference TreeForeignKey
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-17 12:57
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('core', '0005_strain_reference'),
]
operations = [
migrations.AlterField(
model_name='experiment',
name='omics_area',
field=mptt.fields.TreeForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experiments', related_query_name='experiment', to='core.OmicsArea'),
),
]
|
<commit_before><commit_msg>Add migration for Strain.reference TreeForeignKey<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-17 12:57
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('core', '0005_strain_reference'),
]
operations = [
migrations.AlterField(
model_name='experiment',
name='omics_area',
field=mptt.fields.TreeForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experiments', related_query_name='experiment', to='core.OmicsArea'),
),
]
|
|
17950b2dc3d652e5be5c9be90fcb6512edabe480
|
sandpit/find-outliers.py
|
sandpit/find-outliers.py
|
#!python3
import statistics
#
# Explanation from: http://www.wikihow.com/Calculate-Outliers
#
numbers = [1.0, 2.0, 2.3, 3.0, 3.2, 4.0, 100.0, 4.5, 5.11, 6.0, 8.0]
#~ numbers = [71.0, 70.0, 73.0, 70.0, 70.0, 69.0, 70.0, 72.0, 71.0, 300.0, 71.0, 69.0]
numbers_in_order = sorted(numbers)
print("Numbers:", numbers_in_order)
q2 = statistics.median(numbers)
print("Q2:", q2)
lower_half = [n for n in numbers_in_order if n < q2]
q1 = statistics.median(lower_half)
print("Q1:", q1)
upper_half = [n for n in numbers_in_order if n > q2]
q3 = statistics.median(upper_half)
print("Q3:", q3)
interquartile = q3 - q1
print("Interquartile:", interquartile)
inner_offset = interquartile * 1.5
inner_fences = q1 - inner_offset, q3 + inner_offset
print("Inner fences:", inner_fences)
outer_offset = interquartile * 3.0
lower_fence, higher_fence = outer_fences = q1 - outer_offset, q3 + outer_offset
print("Outer fences:", outer_fences)
data_without_outliers = [n for n in numbers if lower_fence <= n <= higher_fence]
print("Data without outliers:", data_without_outliers)
|
Add a demo of finding outliers, to be used in the sensor code (since we sometimes get out-of-the-way returns)
|
Add a demo of finding outliers, to be used in the sensor code (since we sometimes get out-of-the-way returns)
|
Python
|
mit
|
westpark/robotics
|
Add a demo of finding outliers, to be used in the sensor code (since we sometimes get out-of-the-way returns)
|
#!python3
import statistics
#
# Explanation from: http://www.wikihow.com/Calculate-Outliers
#
numbers = [1.0, 2.0, 2.3, 3.0, 3.2, 4.0, 100.0, 4.5, 5.11, 6.0, 8.0]
#~ numbers = [71.0, 70.0, 73.0, 70.0, 70.0, 69.0, 70.0, 72.0, 71.0, 300.0, 71.0, 69.0]
numbers_in_order = sorted(numbers)
print("Numbers:", numbers_in_order)
q2 = statistics.median(numbers)
print("Q2:", q2)
lower_half = [n for n in numbers_in_order if n < q2]
q1 = statistics.median(lower_half)
print("Q1:", q1)
upper_half = [n for n in numbers_in_order if n > q2]
q3 = statistics.median(upper_half)
print("Q3:", q3)
interquartile = q3 - q1
print("Interquartile:", interquartile)
inner_offset = interquartile * 1.5
inner_fences = q1 - inner_offset, q3 + inner_offset
print("Inner fences:", inner_fences)
outer_offset = interquartile * 3.0
lower_fence, higher_fence = outer_fences = q1 - outer_offset, q3 + outer_offset
print("Outer fences:", outer_fences)
data_without_outliers = [n for n in numbers if lower_fence <= n <= higher_fence]
print("Data without outliers:", data_without_outliers)
|
<commit_before><commit_msg>Add a demo of finding outliers, to be used in the sensor code (since we sometimes get out-of-the-way returns)<commit_after>
|
#!python3
import statistics
#
# Explanation from: http://www.wikihow.com/Calculate-Outliers
#
numbers = [1.0, 2.0, 2.3, 3.0, 3.2, 4.0, 100.0, 4.5, 5.11, 6.0, 8.0]
#~ numbers = [71.0, 70.0, 73.0, 70.0, 70.0, 69.0, 70.0, 72.0, 71.0, 300.0, 71.0, 69.0]
numbers_in_order = sorted(numbers)
print("Numbers:", numbers_in_order)
q2 = statistics.median(numbers)
print("Q2:", q2)
lower_half = [n for n in numbers_in_order if n < q2]
q1 = statistics.median(lower_half)
print("Q1:", q1)
upper_half = [n for n in numbers_in_order if n > q2]
q3 = statistics.median(upper_half)
print("Q3:", q3)
interquartile = q3 - q1
print("Interquartile:", interquartile)
inner_offset = interquartile * 1.5
inner_fences = q1 - inner_offset, q3 + inner_offset
print("Inner fences:", inner_fences)
outer_offset = interquartile * 3.0
lower_fence, higher_fence = outer_fences = q1 - outer_offset, q3 + outer_offset
print("Outer fences:", outer_fences)
data_without_outliers = [n for n in numbers if lower_fence <= n <= higher_fence]
print("Data without outliers:", data_without_outliers)
|
Add a demo of finding outliers, to be used in the sensor code (since we sometimes get out-of-the-way returns)
#!python3
import statistics
#
# Explanation from: http://www.wikihow.com/Calculate-Outliers
#
numbers = [1.0, 2.0, 2.3, 3.0, 3.2, 4.0, 100.0, 4.5, 5.11, 6.0, 8.0]
#~ numbers = [71.0, 70.0, 73.0, 70.0, 70.0, 69.0, 70.0, 72.0, 71.0, 300.0, 71.0, 69.0]
numbers_in_order = sorted(numbers)
print("Numbers:", numbers_in_order)
q2 = statistics.median(numbers)
print("Q2:", q2)
lower_half = [n for n in numbers_in_order if n < q2]
q1 = statistics.median(lower_half)
print("Q1:", q1)
upper_half = [n for n in numbers_in_order if n > q2]
q3 = statistics.median(upper_half)
print("Q3:", q3)
interquartile = q3 - q1
print("Interquartile:", interquartile)
inner_offset = interquartile * 1.5
inner_fences = q1 - inner_offset, q3 + inner_offset
print("Inner fences:", inner_fences)
outer_offset = interquartile * 3.0
lower_fence, higher_fence = outer_fences = q1 - outer_offset, q3 + outer_offset
print("Outer fences:", outer_fences)
data_without_outliers = [n for n in numbers if lower_fence <= n <= higher_fence]
print("Data without outliers:", data_without_outliers)
|
<commit_before><commit_msg>Add a demo of finding outliers, to be used in the sensor code (since we sometimes get out-of-the-way returns)<commit_after>#!python3
import statistics
#
# Explanation from: http://www.wikihow.com/Calculate-Outliers
#
numbers = [1.0, 2.0, 2.3, 3.0, 3.2, 4.0, 100.0, 4.5, 5.11, 6.0, 8.0]
#~ numbers = [71.0, 70.0, 73.0, 70.0, 70.0, 69.0, 70.0, 72.0, 71.0, 300.0, 71.0, 69.0]
numbers_in_order = sorted(numbers)
print("Numbers:", numbers_in_order)
q2 = statistics.median(numbers)
print("Q2:", q2)
lower_half = [n for n in numbers_in_order if n < q2]
q1 = statistics.median(lower_half)
print("Q1:", q1)
upper_half = [n for n in numbers_in_order if n > q2]
q3 = statistics.median(upper_half)
print("Q3:", q3)
interquartile = q3 - q1
print("Interquartile:", interquartile)
inner_offset = interquartile * 1.5
inner_fences = q1 - inner_offset, q3 + inner_offset
print("Inner fences:", inner_fences)
outer_offset = interquartile * 3.0
lower_fence, higher_fence = outer_fences = q1 - outer_offset, q3 + outer_offset
print("Outer fences:", outer_fences)
data_without_outliers = [n for n in numbers if lower_fence <= n <= higher_fence]
print("Data without outliers:", data_without_outliers)
|
|
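The IQR-fence recipe demonstrated in the script above folds naturally into a single reusable helper for the sensor-reading use case mentioned in the commit message. The sketch below is illustrative only (the function name, the k parameter, and the sample call are not part of the original script); it assumes Python 3's statistics module, just like the demo:
import statistics
def filter_outliers(values, k=3.0):
    # Quartiles computed the same way as the demo: median of the lower/upper halves.
    ordered = sorted(values)
    q2 = statistics.median(ordered)
    q1 = statistics.median([v for v in ordered if v < q2])
    q3 = statistics.median([v for v in ordered if v > q2])
    offset = (q3 - q1) * k  # k=1.5 reproduces the inner fences, k=3.0 the outer fences
    lower, upper = q1 - offset, q3 + offset
    return [v for v in values if lower <= v <= upper]
# The commented-out data set from the demo, with the 300.0 spike filtered away:
print(filter_outliers([71.0, 70.0, 73.0, 70.0, 70.0, 69.0, 70.0, 72.0, 71.0, 300.0, 71.0, 69.0]))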
316e3f29dcb38e18273cd123924a36e4302e7740
|
stdnum/us/ptin.py
|
stdnum/us/ptin.py
|
# ptin.py - functions for handling PTINs
#
# Copyright (C) 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""PTIN (U.S. Preparer Tax Identification Number).
A Preparer Tax Identification Number (PTIN) is a United States
identification number for tax return preparers. It is an eight-digit
number prefixed with a capital P.
>>> validate('P-00634642')
'P00634642'
>>> validate('P01594846')
'P01594846'
>>> validate('00634642') # missing P
Traceback (most recent call last):
...
InvalidFormat: ...
"""
import re
from stdnum.exceptions import *
from stdnum.util import clean
# regular expression for matching PTINs
_ptin_re = re.compile('^P[0-9]{8}$')
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
return clean(number, '-').strip()
def validate(number):
"""Checks to see if the number provided is a valid PTIN. This checks
the length, groups and formatting if it is present."""
number = compact(number).upper()
if not _ptin_re.search(number):
raise InvalidFormat()
# sadly, no more information on PTIN number validation was found
return number
def is_valid(number):
"""Checks to see if the number provided is a valid ATIN. This checks
the length, groups and formatting if it is present."""
try:
return bool(validate(number))
except ValidationError:
return False
|
Add a United States PTIN module
|
Add a United States PTIN module
A Preparer Tax Identification Number (PTIN) is a United States
identification number for tax return preparers. It is an eight-digit
number prefixed with a capital P.
|
Python
|
lgpl-2.1
|
t0mk/python-stdnum,holvi/python-stdnum,arthurdejong/python-stdnum,arthurdejong/python-stdnum,dchoruzy/python-stdnum,tonyseek/python-stdnum,holvi/python-stdnum,holvi/python-stdnum,arthurdejong/python-stdnum
|
Add a United States PTIN module
A Preparer Tax Identification Number (PTIN) is a United States
identification number for tax return preparers. It is an eight-digit
number prefixed with a capital P.
|
# ptin.py - functions for handling PTINs
#
# Copyright (C) 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""PTIN (U.S. Preparer Tax Identification Number).
A Preparer Tax Identification Number (PTIN) is a United States
identification number for tax return preparers. It is an eight-digit
number prefixed with a capital P.
>>> validate('P-00634642')
'P00634642'
>>> validate('P01594846')
'P01594846'
>>> validate('00634642') # missing P
Traceback (most recent call last):
...
InvalidFormat: ...
"""
import re
from stdnum.exceptions import *
from stdnum.util import clean
# regular expression for matching PTINs
_ptin_re = re.compile('^P[0-9]{8}$')
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
return clean(number, '-').strip()
def validate(number):
"""Checks to see if the number provided is a valid PTIN. This checks
the length, groups and formatting if it is present."""
number = compact(number).upper()
if not _ptin_re.search(number):
raise InvalidFormat()
# sadly, no more information on PTIN number validation was found
return number
def is_valid(number):
"""Checks to see if the number provided is a valid ATIN. This checks
the length, groups and formatting if it is present."""
try:
return bool(validate(number))
except ValidationError:
return False
|
<commit_before><commit_msg>Add a United States PTIN module
A Preparer Tax Identification Number (PTIN) is a United States
identification number for tax return preparers. It is an eight-digit
number prefixed with a capital P.<commit_after>
|
# ptin.py - functions for handling PTINs
#
# Copyright (C) 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""PTIN (U.S. Preparer Tax Identification Number).
A Preparer Tax Identification Number (PTIN) is a United States
identification number for tax return preparers. It is an eight-digit
number prefixed with a capital P.
>>> validate('P-00634642')
'P00634642'
>>> validate('P01594846')
'P01594846'
>>> validate('00634642') # missing P
Traceback (most recent call last):
...
InvalidFormat: ...
"""
import re
from stdnum.exceptions import *
from stdnum.util import clean
# regular expression for matching PTINs
_ptin_re = re.compile('^P[0-9]{8}$')
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
return clean(number, '-').strip()
def validate(number):
"""Checks to see if the number provided is a valid PTIN. This checks
the length, groups and formatting if it is present."""
number = compact(number).upper()
if not _ptin_re.search(number):
raise InvalidFormat()
# sadly, no more information on PTIN number validation was found
return number
def is_valid(number):
"""Checks to see if the number provided is a valid ATIN. This checks
the length, groups and formatting if it is present."""
try:
return bool(validate(number))
except ValidationError:
return False
|
Add a United States PTIN module
A Preparer Tax Identification Number (PTIN) is a United States
identification number for tax return preparers. It is an eight-digit
number prefixed with a capital P.
# ptin.py - functions for handling PTINs
#
# Copyright (C) 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""PTIN (U.S. Preparer Tax Identification Number).
A Preparer Tax Identification Number (PTIN) is a United States
identification number for tax return preparers. It is an eight-digit
number prefixed with a capital P.
>>> validate('P-00634642')
'P00634642'
>>> validate('P01594846')
'P01594846'
>>> validate('00634642') # missing P
Traceback (most recent call last):
...
InvalidFormat: ...
"""
import re
from stdnum.exceptions import *
from stdnum.util import clean
# regular expression for matching PTINs
_ptin_re = re.compile('^P[0-9]{8}$')
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
return clean(number, '-').strip()
def validate(number):
"""Checks to see if the number provided is a valid PTIN. This checks
the length, groups and formatting if it is present."""
number = compact(number).upper()
if not _ptin_re.search(number):
raise InvalidFormat()
# sadly, no more information on PTIN number validation was found
return number
def is_valid(number):
"""Checks to see if the number provided is a valid ATIN. This checks
the length, groups and formatting if it is present."""
try:
return bool(validate(number))
except ValidationError:
return False
|
<commit_before><commit_msg>Add a United States PTIN module
A Preparer Tax Identification Number (PTIN) is a United States
identification number for tax return preparers. It is an eight-digit
number prefixed with a capital P.<commit_after># ptin.py - functions for handling PTINs
#
# Copyright (C) 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""PTIN (U.S. Preparer Tax Identification Number).
A Preparer Tax Identification Number (PTIN) is a United States
identification number for tax return preparers. It is an eight-digit
number prefixed with a capital P.
>>> validate('P-00634642')
'P00634642'
>>> validate('P01594846')
'P01594846'
>>> validate('00634642') # missing P
Traceback (most recent call last):
...
InvalidFormat: ...
"""
import re
from stdnum.exceptions import *
from stdnum.util import clean
# regular expression for matching PTINs
_ptin_re = re.compile('^P[0-9]{8}$')
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
return clean(number, '-').strip()
def validate(number):
"""Checks to see if the number provided is a valid PTIN. This checks
the length, groups and formatting if it is present."""
number = compact(number).upper()
if not _ptin_re.search(number):
raise InvalidFormat()
# sadly, no more information on PTIN number validation was found
return number
def is_valid(number):
"""Checks to see if the number provided is a valid ATIN. This checks
the length, groups and formatting if it is present."""
try:
return bool(validate(number))
except ValidationError:
return False
|
|
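For reference, the new module is exercised like any other stdnum country module; assuming it is importable as stdnum.us.ptin (matching the file path above), the calls below follow directly from the doctests in the module docstring:
from stdnum.us import ptin
print(ptin.validate('P-00634642'))  # 'P00634642' -- separator stripped, format accepted
print(ptin.is_valid('P01594846'))   # True
print(ptin.is_valid('00634642'))    # False -- the leading capital P is required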
6d69afd55b3dbff580c570044d58101ddf4ea04a
|
talkoohakemisto/migrations/versions/63875fc6ebe_create_voluntary_work_table.py
|
talkoohakemisto/migrations/versions/63875fc6ebe_create_voluntary_work_table.py
|
"""Create `voluntary_work` table
Revision ID: 63875fc6ebe
Revises: 7d1ccd9c523
Create Date: 2014-02-09 13:49:24.946138
"""
# revision identifiers, used by Alembic.
revision = '63875fc6ebe'
down_revision = '7d1ccd9c523'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'voluntary_work',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Unicode(length=100), nullable=False),
sa.Column('organizer', sa.Unicode(length=100), nullable=False),
sa.Column('description', sa.UnicodeText(), nullable=False),
sa.Column('street_address', sa.Unicode(length=100), nullable=False),
sa.Column('contact_email', sa.Unicode(length=100), nullable=False),
sa.Column('type_id', sa.Integer(), nullable=False),
sa.Column('municipality_code', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['municipality_code'], ['municipality.code']),
sa.ForeignKeyConstraint(['type_id'], ['voluntary_work_type.id']),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('voluntary_work')
|
Add migration to create voluntary_work table
|
Add migration to create voluntary_work table
|
Python
|
mit
|
talkoopaiva/talkoohakemisto-api
|
Add migration to create voluntary_work table
|
"""Create `voluntary_work` table
Revision ID: 63875fc6ebe
Revises: 7d1ccd9c523
Create Date: 2014-02-09 13:49:24.946138
"""
# revision identifiers, used by Alembic.
revision = '63875fc6ebe'
down_revision = '7d1ccd9c523'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'voluntary_work',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Unicode(length=100), nullable=False),
sa.Column('organizer', sa.Unicode(length=100), nullable=False),
sa.Column('description', sa.UnicodeText(), nullable=False),
sa.Column('street_address', sa.Unicode(length=100), nullable=False),
sa.Column('contact_email', sa.Unicode(length=100), nullable=False),
sa.Column('type_id', sa.Integer(), nullable=False),
sa.Column('municipality_code', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['municipality_code'], ['municipality.code']),
sa.ForeignKeyConstraint(['type_id'], ['voluntary_work_type.id']),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('voluntary_work')
|
<commit_before><commit_msg>Add migration to create voluntary_work table<commit_after>
|
"""Create `voluntary_work` table
Revision ID: 63875fc6ebe
Revises: 7d1ccd9c523
Create Date: 2014-02-09 13:49:24.946138
"""
# revision identifiers, used by Alembic.
revision = '63875fc6ebe'
down_revision = '7d1ccd9c523'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'voluntary_work',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Unicode(length=100), nullable=False),
sa.Column('organizer', sa.Unicode(length=100), nullable=False),
sa.Column('description', sa.UnicodeText(), nullable=False),
sa.Column('street_address', sa.Unicode(length=100), nullable=False),
sa.Column('contact_email', sa.Unicode(length=100), nullable=False),
sa.Column('type_id', sa.Integer(), nullable=False),
sa.Column('municipality_code', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['municipality_code'], ['municipality.code']),
sa.ForeignKeyConstraint(['type_id'], ['voluntary_work_type.id']),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('voluntary_work')
|
Add migration to create voluntary_work table"""Create `voluntary_work` table
Revision ID: 63875fc6ebe
Revises: 7d1ccd9c523
Create Date: 2014-02-09 13:49:24.946138
"""
# revision identifiers, used by Alembic.
revision = '63875fc6ebe'
down_revision = '7d1ccd9c523'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'voluntary_work',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Unicode(length=100), nullable=False),
sa.Column('organizer', sa.Unicode(length=100), nullable=False),
sa.Column('description', sa.UnicodeText(), nullable=False),
sa.Column('street_address', sa.Unicode(length=100), nullable=False),
sa.Column('contact_email', sa.Unicode(length=100), nullable=False),
sa.Column('type_id', sa.Integer(), nullable=False),
sa.Column('municipality_code', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['municipality_code'], ['municipality.code']),
sa.ForeignKeyConstraint(['type_id'], ['voluntary_work_type.id']),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('voluntary_work')
|
<commit_before><commit_msg>Add migration to create voluntary_work table<commit_after>"""Create `voluntary_work` table
Revision ID: 63875fc6ebe
Revises: 7d1ccd9c523
Create Date: 2014-02-09 13:49:24.946138
"""
# revision identifiers, used by Alembic.
revision = '63875fc6ebe'
down_revision = '7d1ccd9c523'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'voluntary_work',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Unicode(length=100), nullable=False),
sa.Column('organizer', sa.Unicode(length=100), nullable=False),
sa.Column('description', sa.UnicodeText(), nullable=False),
sa.Column('street_address', sa.Unicode(length=100), nullable=False),
sa.Column('contact_email', sa.Unicode(length=100), nullable=False),
sa.Column('type_id', sa.Integer(), nullable=False),
sa.Column('municipality_code', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['municipality_code'], ['municipality.code']),
sa.ForeignKeyConstraint(['type_id'], ['voluntary_work_type.id']),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('voluntary_work')
|
|
92842c10ca9782047435bb1710c2943aa0a362f2
|
cloud-builder/generate_dependency_health_svg.py
|
cloud-builder/generate_dependency_health_svg.py
|
"""Generates a SVG by parsing json report.
Task dependencyUpdates generates a json report. This script parses that file and
generates a SVG showing percentage of up-to-date dependencies.
Usage:
> python generate_dependency_health_svg.py <path_to_json_report> <output_svg_path>
"""
import json
from sys import argv
TEMPLATES_DIR = 'templates/'
SVG_TEMPLATE_BAD = TEMPLATES_DIR + 'dependency_health_bad.svg'
SVG_TEMPLATE_AVG = TEMPLATES_DIR + 'dependency_health_average.svg'
SVG_TEMPLATE_GOOD = TEMPLATES_DIR + 'dependency_health_good.svg'
def get_template(health):
"""Returns a SVG template based on overall healthy dependency percentage."""
if health >= 67:
return SVG_TEMPLATE_GOOD
elif 33 < health < 67:
return SVG_TEMPLATE_AVG
else:
return SVG_TEMPLATE_BAD
def create_svg(svg_template, output_path, health):
"""Create a new svg from template and replace placeholder _%_ with health."""
with open(svg_template, 'r') as template:
with open(output_path, 'w') as dest:
data = template.read().replace('_%_', '{}%'.format(health))
dest.write(data)
def print_stats(data):
# Total detected gradle dependencies
total = data['count']
del data['count']
# Gradle version info
running_gradle = data['gradle']['running']['version']
latest_gradle = data['gradle']['current']['version']
del data['gradle']
if running_gradle != latest_gradle:
print('-------------------------')
print('Gradle: {} -> {}'.format(running_gradle, latest_gradle))
# Dependencies status
print('-------------------------')
for k in data:
print('{:10} : {:10}'.format(k, data[k].get('count')))
print('-------------------------')
print('Total = {}'.format(total))
print('-------------------------')
def calculate_dependency_health(json_report):
"""Parses json report and calculates percentage of up-to-date dependencies."""
with open(json_report) as f:
data = json.load(f)
print_stats(data.copy())
health = 100 * data.get('current').get('count') / data['count']
print('Healthy percentage : {}%'.format(health))
return health
def main():
if len(argv) != 3:
raise Exception('Need exactly 2 arguments: <json_report> <output_path>')
input = argv[1]
output = argv[2]
health = calculate_dependency_health(input)
template = get_template(health)
create_svg(template, output, health)
if __name__ == "__main__":
main()
|
Create python script to generate svg from json reports
|
Create python script to generate svg from json reports
|
Python
|
apache-2.0
|
google/ground-android,google/ground-android,google/ground-android
|
Create python script to generate svg from json reports
|
"""Generates a SVG by parsing json report.
Task dependencyUpdates generates a json report. This script parses that file and
generates a SVG showing percentage of up-to-date dependencies.
Usage:
> python generate_dependency_health_svg.py <path_to_json_report> <output_svg_path>
"""
import json
from sys import argv
TEMPLATES_DIR = 'templates/'
SVG_TEMPLATE_BAD = TEMPLATES_DIR + 'dependency_health_bad.svg'
SVG_TEMPLATE_AVG = TEMPLATES_DIR + 'dependency_health_average.svg'
SVG_TEMPLATE_GOOD = TEMPLATES_DIR + 'dependency_health_good.svg'
def get_template(health):
"""Returns a SVG template based on overall healthy dependency percentage."""
if health >= 67:
return SVG_TEMPLATE_GOOD
elif 33 < health < 67:
return SVG_TEMPLATE_AVG
else:
return SVG_TEMPLATE_BAD
def create_svg(svg_template, output_path, health):
"""Create a new svg from template and replace placeholder _%_ with health."""
with open(svg_template, 'r') as template:
with open(output_path, 'w') as dest:
data = template.read().replace('_%_', '{}%'.format(health))
dest.write(data)
def print_stats(data):
# Total detected gradle dependencies
total = data['count']
del data['count']
# Gradle version info
running_gradle = data['gradle']['running']['version']
latest_gradle = data['gradle']['current']['version']
del data['gradle']
if running_gradle != latest_gradle:
print('-------------------------')
print('Gradle: {} -> {}'.format(running_gradle, latest_gradle))
# Dependencies status
print('-------------------------')
for k in data:
print('{:10} : {:10}'.format(k, data[k].get('count')))
print('-------------------------')
print('Total = {}'.format(total))
print('-------------------------')
def calculate_dependency_health(json_report):
"""Parses json report and calculates percentage of up-to-date dependencies."""
with open(json_report) as f:
data = json.load(f)
print_stats(data.copy())
health = 100 * data.get('current').get('count') / data['count']
print('Healthy percentage : {}%'.format(health))
return health
def main():
if len(argv) != 3:
raise Exception('Need exactly 2 arguments: <json_report> <output_path>')
input = argv[1]
output = argv[2]
health = calculate_dependency_health(input)
template = get_template(health)
create_svg(template, output, health)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Create python script to generate svg from json reports<commit_after>
|
"""Generates a SVG by parsing json report.
Task dependencyUpdates generates a json report. This script parses that file and
generates a SVG showing percentage of up-to-date dependencies.
Usage:
> python generate_dependency_health_svg.py <path_to_json_report> <output_svg_path>
"""
import json
from sys import argv
TEMPLATES_DIR = 'templates/'
SVG_TEMPLATE_BAD = TEMPLATES_DIR + 'dependency_health_bad.svg'
SVG_TEMPLATE_AVG = TEMPLATES_DIR + 'dependency_health_average.svg'
SVG_TEMPLATE_GOOD = TEMPLATES_DIR + 'dependency_health_good.svg'
def get_template(health):
"""Returns a SVG template based on overall healthy dependency percentage."""
if health >= 67:
return SVG_TEMPLATE_GOOD
elif 33 < health < 67:
return SVG_TEMPLATE_AVG
else:
return SVG_TEMPLATE_BAD
def create_svg(svg_template, output_path, health):
"""Create a new svg from template and replace placeholder _%_ with health."""
with open(svg_template, 'r') as template:
with open(output_path, 'w') as dest:
data = template.read().replace('_%_', '{}%'.format(health))
dest.write(data)
def print_stats(data):
# Total detected gradle dependencies
total = data['count']
del data['count']
# Gradle version info
running_gradle = data['gradle']['running']['version']
latest_gradle = data['gradle']['current']['version']
del data['gradle']
if running_gradle != latest_gradle:
print('-------------------------')
print('Gradle: {} -> {}'.format(running_gradle, latest_gradle))
# Dependencies status
print('-------------------------')
for k in data:
print('{:10} : {:10}'.format(k, data[k].get('count')))
print('-------------------------')
print('Total = {}'.format(total))
print('-------------------------')
def calculate_dependency_health(json_report):
"""Parses json report and calculates percentage of up-to-date dependencies."""
with open(json_report) as f:
data = json.load(f)
print_stats(data.copy())
health = 100 * data.get('current').get('count') / data['count']
print('Healthy percentage : {}%'.format(health))
return health
def main():
if len(argv) != 3:
raise Exception('Need exactly 2 arguments: <json_report> <output_path>')
input = argv[1]
output = argv[2]
health = calculate_dependency_health(input)
template = get_template(health)
create_svg(template, output, health)
if __name__ == "__main__":
main()
|
Create python script to generate svg from json reports"""Generates a SVG by parsing json report.
Task dependencyUpdates generates a json report. This script parses that file and
generates a SVG showing percentage of up-to-date dependencies.
Usage:
> python generate_dependency_health_svg.py <path_to_json_report> <output_svg_path>
"""
import json
from sys import argv
TEMPLATES_DIR = 'templates/'
SVG_TEMPLATE_BAD = TEMPLATES_DIR + 'dependency_health_bad.svg'
SVG_TEMPLATE_AVG = TEMPLATES_DIR + 'dependency_health_average.svg'
SVG_TEMPLATE_GOOD = TEMPLATES_DIR + 'dependency_health_good.svg'
def get_template(health):
"""Returns a SVG template based on overall healthy dependency percentage."""
if health >= 67:
return SVG_TEMPLATE_GOOD
elif 33 < health < 67:
return SVG_TEMPLATE_AVG
else:
return SVG_TEMPLATE_BAD
def create_svg(svg_template, output_path, health):
"""Create a new svg from template and replace placeholder _%_ with health."""
with open(svg_template, 'r') as template:
with open(output_path, 'w') as dest:
data = template.read().replace('_%_', '{}%'.format(health))
dest.write(data)
def print_stats(data):
# Total detected gradle dependencies
total = data['count']
del data['count']
# Gradle version info
running_gradle = data['gradle']['running']['version']
latest_gradle = data['gradle']['current']['version']
del data['gradle']
if running_gradle != latest_gradle:
print('-------------------------')
print('Gradle: {} -> {}'.format(running_gradle, latest_gradle))
# Dependencies status
print('-------------------------')
for k in data:
print('{:10} : {:10}'.format(k, data[k].get('count')))
print('-------------------------')
print('Total = {}'.format(total))
print('-------------------------')
def calculate_dependency_health(json_report):
"""Parses json report and calculates percentage of up-to-date dependencies."""
with open(json_report) as f:
data = json.load(f)
print_stats(data.copy())
health = 100 * data.get('current').get('count') / data['count']
print('Healthy percentage : {}%'.format(health))
return health
def main():
if len(argv) != 3:
raise Exception('Need exactly 2 arguments: <json_report> <output_path>')
input = argv[1]
output = argv[2]
health = calculate_dependency_health(input)
template = get_template(health)
create_svg(template, output, health)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Create python script to generate svg from json reports<commit_after>"""Generates a SVG by parsing json report.
Task dependencyUpdates generates a json report. This script parses that file and
generates a SVG showing percentage of up-to-date dependencies.
Usage:
> python generate_dependency_health_svg.py <path_to_json_report> <output_svg_path>
"""
import json
from sys import argv
TEMPLATES_DIR = 'templates/'
SVG_TEMPLATE_BAD = TEMPLATES_DIR + 'dependency_health_bad.svg'
SVG_TEMPLATE_AVG = TEMPLATES_DIR + 'dependency_health_average.svg'
SVG_TEMPLATE_GOOD = TEMPLATES_DIR + 'dependency_health_good.svg'
def get_template(health):
"""Returns a SVG template based on overall healthy dependency percentage."""
if health >= 67:
return SVG_TEMPLATE_GOOD
elif 33 < health < 67:
return SVG_TEMPLATE_AVG
else:
return SVG_TEMPLATE_BAD
def create_svg(svg_template, output_path, health):
"""Create a new svg from template and replace placeholder _%_ with health."""
with open(svg_template, 'r') as template:
with open(output_path, 'w') as dest:
data = template.read().replace('_%_', '{}%'.format(health))
dest.write(data)
def print_stats(data):
# Total detected gradle dependencies
total = data['count']
del data['count']
# Gradle version info
running_gradle = data['gradle']['running']['version']
latest_gradle = data['gradle']['current']['version']
del data['gradle']
if running_gradle != latest_gradle:
print('-------------------------')
print('Gradle: {} -> {}'.format(running_gradle, latest_gradle))
# Dependencies status
print('-------------------------')
for k in data:
print('{:10} : {:10}'.format(k, data[k].get('count')))
print('-------------------------')
print('Total = {}'.format(total))
print('-------------------------')
def calculate_dependency_health(json_report):
"""Parses json report and calculates percentage of up-to-date dependencies."""
with open(json_report) as f:
data = json.load(f)
print_stats(data.copy())
health = 100 * data.get('current').get('count') / data['count']
print('Healthy percentage : {}%'.format(health))
return health
def main():
if len(argv) != 3:
raise Exception('Need exactly 2 arguments: <json_report> <output_path>')
input = argv[1]
output = argv[2]
health = calculate_dependency_health(input)
template = get_template(health)
create_svg(template, output, health)
if __name__ == "__main__":
main()
|
|
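A minimal JSON report that satisfies every key the script above reads is handy for testing it locally. The values below are made up for illustration; the real report produced by the Gradle dependencyUpdates plugin carries more detail:
import json
report = {
    "count": 10,                                   # total dependencies detected
    "gradle": {"running": {"version": "6.8"},      # gradle version info
               "current": {"version": "7.0"}},
    "current": {"count": 7},                       # up-to-date dependencies
    "outdated": {"count": 2},
    "exceeded": {"count": 1},
}
with open("report.json", "w") as f:
    json.dump(report, f)
# calculate_dependency_health("report.json") then reports 70% healthy,
# which get_template() maps to the "good" SVG template.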
94bbfda63c7734c43e5771a92a69e6ce15d29c24
|
src/hades/common/exc.py
|
src/hades/common/exc.py
|
import functools
import logging
import os
import sys
import typing as t
from contextlib import contextmanager
from logging import Logger
RESTART_PREVENTING_EXCEPTIONS = frozenset(
(os.EX_CONFIG, os.EX_USAGE, os.EX_UNAVAILABLE)
)
class HadesSetupError(Exception):
preferred_exit_code = os.EX_UNAVAILABLE
def __init__(self, *args, logger: t.Optional[Logger] = None):
super().__init__(*args)
self.logger = logger
def __init_subclass__(cls, **kwargs: dict[str, t.Any]) -> None:
super().__init_subclass__(**kwargs)
if "preferred_exit_code" not in cls.__dict__:
return
if cls.__dict__["preferred_exit_code"] not in RESTART_PREVENTING_EXCEPTIONS:
raise ValueError(
"Subclasses of HadesSetupException can only provide exit codes"
" known to prevent a restart (see `RestartPreventExitStatus=` in systemd.service(5))"
)
def report_error(self, fallback_logger: Logger) -> None:
"""Emit helpful log messages about this error."""
logger = self.logger or fallback_logger
logger.critical("Error in setup: %s", str(self), exc_info=self)
class HadesUsageError(HadesSetupError):
preferred_exit_code = os.EX_USAGE
def __init__(self, *a, **kw):
super().__init__(*a, **kw)
@contextmanager
def handle_setup_errors(logger: logging.Logger) -> t.Generator[None, None, None]:
"""If a :class:`HadesSetupError` occurs, report it and call :func:`sys.exit` accordingly."""
try:
yield
except HadesSetupError as e:
e.report_error(fallback_logger=logger)
sys.exit(e.preferred_exit_code)
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
def handles_setup_errors(logger: logging.Logger) -> t.Callable[[F], F]:
def decorator(f: F) -> F:
@functools.wraps(f)
def wrapped(*a, **kw):
with handle_setup_errors(logger):
f(*a, **kw)
return t.cast(F, wrapped)
return decorator
|
Introduce HadesSetupError and HadesUsageError for restart prevention
|
Introduce HadesSetupError and HadesUsageError for restart prevention
|
Python
|
mit
|
agdsn/hades,agdsn/hades,agdsn/hades,agdsn/hades,agdsn/hades
|
Introduce HadesSetupError and HadesUsageError for restart prevention
|
import functools
import logging
import os
import sys
import typing as t
from contextlib import contextmanager
from logging import Logger
RESTART_PREVENTING_EXCEPTIONS = frozenset(
(os.EX_CONFIG, os.EX_USAGE, os.EX_UNAVAILABLE)
)
class HadesSetupError(Exception):
preferred_exit_code = os.EX_UNAVAILABLE
def __init__(self, *args, logger: t.Optional[Logger] = None):
super().__init__(*args)
self.logger = logger
def __init_subclass__(cls, **kwargs: dict[str, t.Any]) -> None:
super().__init_subclass__(**kwargs)
if "preferred_exit_code" not in cls.__dict__:
return
if cls.__dict__["preferred_exit_code"] not in RESTART_PREVENTING_EXCEPTIONS:
raise ValueError(
"Subclasses of HadesSetupException can only provide exit codes"
" known to prevent a restart (see `RestartPreventExitStatus=` in systemd.service(5))"
)
def report_error(self, fallback_logger: Logger) -> None:
"""Emit helpful log messages about this error."""
logger = self.logger or fallback_logger
logger.critical("Error in setup: %s", str(self), exc_info=self)
class HadesUsageError(HadesSetupError):
preferred_exit_code = os.EX_USAGE
def __init__(self, *a, **kw):
super().__init__(*a, **kw)
@contextmanager
def handle_setup_errors(logger: logging.Logger) -> t.Generator[None, None, None]:
"""If a :class:`HadesSetupError` occurs, report it and call :func:`sys.exit` accordingly."""
try:
yield
except HadesSetupError as e:
e.report_error(fallback_logger=logger)
sys.exit(e.preferred_exit_code)
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
def handles_setup_errors(logger: logging.Logger) -> t.Callable[[F], F]:
def decorator(f: F) -> F:
@functools.wraps(f)
def wrapped(*a, **kw):
with handle_setup_errors(logger):
f(*a, **kw)
return t.cast(F, wrapped)
return decorator
|
<commit_before><commit_msg>Introduce HadesSetupError and HadesUsageError for restart prevention<commit_after>
|
import functools
import logging
import os
import sys
import typing as t
from contextlib import contextmanager
from logging import Logger
RESTART_PREVENTING_EXCEPTIONS = frozenset(
(os.EX_CONFIG, os.EX_USAGE, os.EX_UNAVAILABLE)
)
class HadesSetupError(Exception):
preferred_exit_code = os.EX_UNAVAILABLE
def __init__(self, *args, logger: t.Optional[Logger] = None):
super().__init__(*args)
self.logger = logger
def __init_subclass__(cls, **kwargs: dict[str, t.Any]) -> None:
super().__init_subclass__(**kwargs)
if "preferred_exit_code" not in cls.__dict__:
return
if cls.__dict__["preferred_exit_code"] not in RESTART_PREVENTING_EXCEPTIONS:
raise ValueError(
"Subclasses of HadesSetupException can only provide exit codes"
" known to prevent a restart (see `RestartPreventExitStatus=` in systemd.service(5))"
)
def report_error(self, fallback_logger: Logger) -> None:
"""Emit helpful log messages about this error."""
logger = self.logger or fallback_logger
logger.critical("Error in setup: %s", str(self), exc_info=self)
class HadesUsageError(HadesSetupError):
preferred_exit_code = os.EX_USAGE
def __init__(self, *a, **kw):
super().__init__(*a, **kw)
@contextmanager
def handle_setup_errors(logger: logging.Logger) -> t.Generator[None, None, None]:
"""If a :class:`HadesSetupError` occurs, report it and call :func:`sys.exit` accordingly."""
try:
yield
except HadesSetupError as e:
e.report_error(fallback_logger=logger)
sys.exit(e.preferred_exit_code)
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
def handles_setup_errors(logger: logging.Logger) -> t.Callable[[F], F]:
def decorator(f: F) -> F:
@functools.wraps(f)
def wrapped(*a, **kw):
with handle_setup_errors(logger):
f(*a, **kw)
return t.cast(F, wrapped)
return decorator
|
Introduce HadesSetupError and HadesUsageError for restart prevention
import functools
import logging
import os
import sys
import typing as t
from contextlib import contextmanager
from logging import Logger
RESTART_PREVENTING_EXCEPTIONS = frozenset(
(os.EX_CONFIG, os.EX_USAGE, os.EX_UNAVAILABLE)
)
class HadesSetupError(Exception):
preferred_exit_code = os.EX_UNAVAILABLE
def __init__(self, *args, logger: t.Optional[Logger] = None):
super().__init__(*args)
self.logger = logger
def __init_subclass__(cls, **kwargs: dict[str, t.Any]) -> None:
super().__init_subclass__(**kwargs)
if "preferred_exit_code" not in cls.__dict__:
return
if cls.__dict__["preferred_exit_code"] not in RESTART_PREVENTING_EXCEPTIONS:
raise ValueError(
"Subclasses of HadesSetupException can only provide exit codes"
" known to prevent a restart (see `RestartPreventExitStatus=` in systemd.service(5))"
)
def report_error(self, fallback_logger: Logger) -> None:
"""Emit helpful log messages about this error."""
logger = self.logger or fallback_logger
logger.critical("Error in setup: %s", str(self), exc_info=self)
class HadesUsageError(HadesSetupError):
preferred_exit_code = os.EX_USAGE
def __init__(self, *a, **kw):
super().__init__(*a, **kw)
@contextmanager
def handle_setup_errors(logger: logging.Logger) -> t.Generator[None, None, None]:
"""If a :class:`HadesSetupError` occurs, report it and call :func:`sys.exit` accordingly."""
try:
yield
except HadesSetupError as e:
e.report_error(fallback_logger=logger)
sys.exit(e.preferred_exit_code)
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
def handles_setup_errors(logger: logging.Logger) -> t.Callable[[F], F]:
def decorator(f: F) -> F:
@functools.wraps(f)
def wrapped(*a, **kw):
with handle_setup_errors(logger):
f(*a, **kw)
return t.cast(F, wrapped)
return decorator
|
<commit_before><commit_msg>Introduce HadesSetupError and HadesUsageError for restart prevention<commit_after>import functools
import logging
import os
import sys
import typing as t
from contextlib import contextmanager
from logging import Logger
RESTART_PREVENTING_EXCEPTIONS = frozenset(
(os.EX_CONFIG, os.EX_USAGE, os.EX_UNAVAILABLE)
)
class HadesSetupError(Exception):
preferred_exit_code = os.EX_UNAVAILABLE
def __init__(self, *args, logger: t.Optional[Logger] = None):
super().__init__(*args)
self.logger = logger
def __init_subclass__(cls, **kwargs: dict[str, t.Any]) -> None:
super().__init_subclass__(**kwargs)
if "preferred_exit_code" not in cls.__dict__:
return
if cls.__dict__["preferred_exit_code"] not in RESTART_PREVENTING_EXCEPTIONS:
raise ValueError(
"Subclasses of HadesSetupException can only provide exit codes"
" known to prevent a restart (see `RestartPreventExitStatus=` in systemd.service(5))"
)
def report_error(self, fallback_logger: Logger) -> None:
"""Emit helpful log messages about this error."""
logger = self.logger or fallback_logger
logger.critical("Error in setup: %s", str(self), exc_info=self)
class HadesUsageError(HadesSetupError):
preferred_exit_code = os.EX_USAGE
def __init__(self, *a, **kw):
super().__init__(*a, **kw)
@contextmanager
def handle_setup_errors(logger: logging.Logger) -> t.Generator[None, None, None]:
"""If a :class:`HadesSetupError` occurs, report it and call :func:`sys.exit` accordingly."""
try:
yield
except HadesSetupError as e:
e.report_error(fallback_logger=logger)
sys.exit(e.preferred_exit_code)
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
def handles_setup_errors(logger: logging.Logger) -> t.Callable[[F], F]:
def decorator(f: F) -> F:
@functools.wraps(f)
def wrapped(*a, **kw):
with handle_setup_errors(logger):
f(*a, **kw)
return t.cast(F, wrapped)
return decorator
|
|
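A short usage sketch of the new helpers; the entry point, logger name, and import path (hades.common.exc, matching the file location) are assumptions made for illustration:
import logging
import sys
from hades.common.exc import HadesUsageError, handles_setup_errors
logger = logging.getLogger("hades.example")
@handles_setup_errors(logger)
def main() -> None:
    if len(sys.argv) < 2:
        # report_error() logs via logger.critical(), then sys.exit(os.EX_USAGE);
        # EX_USAGE is a restart-preventing code (see RestartPreventExitStatus= in systemd.service(5)).
        raise HadesUsageError("expected a config file argument")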
a4f1a827847ec8bc129bb885660a9182886237c2
|
src/scripts/get_arxiv.py
|
src/scripts/get_arxiv.py
|
import sys
import os
import bs4
import urllib2
import urllib
BASE_URL = "http://arxiv.org"
HEP_URL = 'http://arxiv.org/abs/hep-th/%d'
# TODO: Change prints to logs
def get_pdf(paper_id, save_dir):
try:
paper_page = urllib2.urlopen(HEP_URL % paper_id)
soup = bs4.BeautifulSoup(paper_page.read().decode('utf8'))
except:
print "Error"
else:
# TODO: Check if this pattern holds for all papers
file = soup.find("a", {"accesskey" : "f"})
if file:
file_url = file["href"]
print os.path.join(save_dir, str(paper_id) + ".pdf")
urllib.urlretrieve(BASE_URL + file_url, os.path.join(save_dir, str(paper_id) + ".pdf"))
else:
print "Unable to find PDF: %d" % paper_id
def main():
if len(sys.argv) > 2 and sys.argv[1].isdigit() and os.path.isdir(sys.argv[2]):
get_pdf(int(sys.argv[1]), sys.argv[2])
else:
print "Usage <paper id> <destination directory>"
if __name__ == "__main__": main()
|
Add script to download paper from arxiv
|
Add script to download paper from arxiv
|
Python
|
mit
|
PinPinIre/Final-Year-Project,PinPinIre/Final-Year-Project,PinPinIre/Final-Year-Project
|
Add script to download paper from arxiv
|
import sys
import os
import bs4
import urllib2
import urllib
BASE_URL = "http://arxiv.org"
HEP_URL = 'http://arxiv.org/abs/hep-th/%d'
# TODO: Change prints to logs
def get_pdf(paper_id, save_dir):
try:
paper_page = urllib2.urlopen(HEP_URL % paper_id)
soup = bs4.BeautifulSoup(paper_page.read().decode('utf8'))
except:
print "Error"
else:
# TODO: Check if this pattern holds for all papers
file = soup.find("a", {"accesskey" : "f"})
if file:
file_url = file["href"]
print os.path.join(save_dir, str(paper_id) + ".pdf")
urllib.urlretrieve(BASE_URL + file_url, os.path.join(save_dir, str(paper_id) + ".pdf"))
else:
print "Unable to find PDF: %d" % paper_id
def main():
if len(sys.argv) > 2 and sys.argv[1].isdigit() and os.path.isdir(sys.argv[2]):
get_pdf(int(sys.argv[1]), sys.argv[2])
else:
print "Usage <paper id> <destination directory>"
if __name__ == "__main__": main()
|
<commit_before><commit_msg>Add script to download paper from arxiv<commit_after>
|
import sys
import os
import bs4
import urllib2
import urllib
BASE_URL = "http://arxiv.org"
HEP_URL = 'http://arxiv.org/abs/hep-th/%d'
# TODO: Change prints to logs
def get_pdf(paper_id, save_dir):
try:
paper_page = urllib2.urlopen(HEP_URL % paper_id)
soup = bs4.BeautifulSoup(paper_page.read().decode('utf8'))
except:
print "Error"
else:
# TODO: Check if this pattern holds for all papers
file = soup.find("a", {"accesskey" : "f"})
if file:
file_url = file["href"]
print os.path.join(save_dir, str(paper_id) + ".pdf")
urllib.urlretrieve(BASE_URL + file_url, os.path.join(save_dir, str(paper_id) + ".pdf"))
else:
print "Unable to find PDF: %d" % paper_id
def main():
if len(sys.argv) > 2 and sys.argv[1].isdigit() and os.path.isdir(sys.argv[2]):
get_pdf(int(sys.argv[1]), sys.argv[2])
else:
print "Usage <paper id> <destination directory>"
if __name__ == "__main__": main()
|
Add script to download paper from arxiv
import sys
import os
import bs4
import urllib2
import urllib
BASE_URL = "http://arxiv.org"
HEP_URL = 'http://arxiv.org/abs/hep-th/%d'
# TODO: Change prints to logs
def get_pdf(paper_id, save_dir):
try:
paper_page = urllib2.urlopen(HEP_URL % paper_id)
soup = bs4.BeautifulSoup(paper_page.read().decode('utf8'))
except:
print "Error"
else:
# TODO: Check if this pattern holds for all papers
file = soup.find("a", {"accesskey" : "f"})
if file:
file_url = file["href"]
print os.path.join(save_dir, str(paper_id) + ".pdf")
urllib.urlretrieve(BASE_URL + file_url, os.path.join(save_dir, str(paper_id) + ".pdf"))
else:
print "Unable to find PDF: %d" % paper_id
def main():
if len(sys.argv) > 2 and sys.argv[1].isdigit() and os.path.isdir(sys.argv[2]):
get_pdf(int(sys.argv[1]), sys.argv[2])
else:
print "Usage <paper id> <destination directory>"
if __name__ == "__main__": main()
|
<commit_before><commit_msg>Add script to download paper from arxiv<commit_after>import sys
import os
import bs4
import urllib2
import urllib
BASE_URL = "http://arxiv.org"
HEP_URL = 'http://arxiv.org/abs/hep-th/%d'
# TODO: Change prints to logs
def get_pdf(paper_id, save_dir):
try:
paper_page = urllib2.urlopen(HEP_URL % paper_id)
soup = bs4.BeautifulSoup(paper_page.read().decode('utf8'))
except:
print "Error"
else:
# TODO: Check if this pattern holds for all papers
file = soup.find("a", {"accesskey" : "f"})
if file:
file_url = file["href"]
print os.path.join(save_dir, str(paper_id) + ".pdf")
urllib.urlretrieve(BASE_URL + file_url, os.path.join(save_dir, str(paper_id) + ".pdf"))
else:
print "Unable to find PDF: %d" % paper_id
def main():
if len(sys.argv) > 2 and sys.argv[1].isdigit() and os.path.isdir(sys.argv[2]):
get_pdf(int(sys.argv[1]), sys.argv[2])
else:
print "Usage <paper id> <destination directory>"
if __name__ == "__main__": main()
|
|
99f408bcc62958310400a20e1074b51361ed43ca
|
tests/test_migrations.py
|
tests/test_migrations.py
|
"""
Tests that migrations are not missing
"""
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
import pytest
from django.core.management import call_command
def test_no_missing_migrations():
"""Check no model changes have been made since the last `./manage.py makemigrations`.
Pulled from mozilla/treeherder #dd53914, subject to MPL
"""
with pytest.raises(SystemExit) as e:
# Replace with `check_changes=True` once we're using a Django version that includes:
# https://code.djangoproject.com/ticket/25604
# https://github.com/django/django/pull/5453
call_command('makemigrations', interactive=False, dry_run=True, exit_code=True)
assert str(e.value) == '1'
|
Add test for missing migrations
|
Add test for missing migrations
gh-122
|
Python
|
bsd-2-clause
|
bennylope/django-organizations,st8st8/django-organizations,bennylope/django-organizations,st8st8/django-organizations
|
Add test for missing migrations
gh-122
|
"""
Tests that migrations are not missing
"""
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
import pytest
from django.core.management import call_command
def test_no_missing_migrations():
"""Check no model changes have been made since the last `./manage.py makemigrations`.
Pulled from mozilla/treeherder #dd53914, subject to MPL
"""
with pytest.raises(SystemExit) as e:
# Replace with `check_changes=True` once we're using a Django version that includes:
# https://code.djangoproject.com/ticket/25604
# https://github.com/django/django/pull/5453
call_command('makemigrations', interactive=False, dry_run=True, exit_code=True)
assert str(e.value) == '1'
|
<commit_before><commit_msg>Add test for missing migrations
gh-122<commit_after>
|
"""
Tests that migrations are not missing
"""
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
import pytest
from django.core.management import call_command
def test_no_missing_migrations():
"""Check no model changes have been made since the last `./manage.py makemigrations`.
Pulled from mozilla/treeherder #dd53914, subject to MPL
"""
with pytest.raises(SystemExit) as e:
# Replace with `check_changes=True` once we're using a Django version that includes:
# https://code.djangoproject.com/ticket/25604
# https://github.com/django/django/pull/5453
call_command('makemigrations', interactive=False, dry_run=True, exit_code=True)
assert str(e.value) == '1'
|
Add test for missing migrations
gh-122"""
Tests that migrations are not missing
"""
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
import pytest
from django.core.management import call_command
def test_no_missing_migrations():
"""Check no model changes have been made since the last `./manage.py makemigrations`.
Pulled from mozilla/treeherder #dd53914, subject to MPL
"""
with pytest.raises(SystemExit) as e:
# Replace with `check_changes=True` once we're using a Django version that includes:
# https://code.djangoproject.com/ticket/25604
# https://github.com/django/django/pull/5453
call_command('makemigrations', interactive=False, dry_run=True, exit_code=True)
assert str(e.value) == '1'
|
<commit_before><commit_msg>Add test for missing migrations
gh-122<commit_after>"""
Tests that migrations are not missing
"""
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
import pytest
from django.core.management import call_command
def test_no_missing_migrations():
"""Check no model changes have been made since the last `./manage.py makemigrations`.
Pulled from mozilla/treeherder #dd53914, subject to MPL
"""
with pytest.raises(SystemExit) as e:
# Replace with `check_changes=True` once we're using a Django version that includes:
# https://code.djangoproject.com/ticket/25604
# https://github.com/django/django/pull/5453
call_command('makemigrations', interactive=False, dry_run=True, exit_code=True)
assert str(e.value) == '1'
|
|
920b4c2cee3ec37b115a190f5dbae0d2e56ec26a
|
array_split/split_plot.py
|
array_split/split_plot.py
|
"""
========================================
The :mod:`array_split.split_plot` Module
========================================
Uses :mod:`matplotlib` to plot a split.
Classes and Functions
=====================
.. autosummary::
:toctree: generated/
SplitPlotter - Plots a split.
plot - Plots split shapes.
"""
from __future__ import absolute_import
from .license import license as _license, copyright as _copyright, version as _version
__author__ = "Shane J. Latham"
__license__ = _license()
__copyright__ = _copyright()
__version__ = _version()
class SplitPlotter(object):
"""
Plots a split.
"""
def __init__(self):
"""
"""
pass
def plot(split):
"""
Plots a split.
"""
|
Add skeleton for plotting split.
|
Add skeleton for plotting split.
|
Python
|
mit
|
array-split/array_split
|
Add skeleton for plotting split.
|
"""
========================================
The :mod:`array_split.split_plot` Module
========================================
Uses :mod:`matplotlib` to plot a split.
Classes and Functions
=====================
.. autosummary::
:toctree: generated/
SplitPlotter - Plots a split.
plot - Plots split shapes.
"""
from __future__ import absolute_import
from .license import license as _license, copyright as _copyright, version as _version
__author__ = "Shane J. Latham"
__license__ = _license()
__copyright__ = _copyright()
__version__ = _version()
class SplitPlotter(object):
"""
Plots a split.
"""
def __init__(self):
"""
"""
pass
def plot(split):
"""
Plots a split.
"""
|
<commit_before><commit_msg>Add skeleton for plotting split.<commit_after>
|
"""
========================================
The :mod:`array_split.split_plot` Module
========================================
Uses :mod:`matplotlib` to plot a split.
Classes and Functions
=====================
.. autosummary::
:toctree: generated/
SplitPlotter - Plots a split.
plot - Plots split shapes.
"""
from __future__ import absolute_import
from .license import license as _license, copyright as _copyright, version as _version
__author__ = "Shane J. Latham"
__license__ = _license()
__copyright__ = _copyright()
__version__ = _version()
class SplitPlotter(object):
"""
Plots a split.
"""
def __init__(self):
"""
"""
pass
def plot(split):
"""
Plots a split.
"""
|
Add skeleton for plotting split."""
========================================
The :mod:`array_split.split_plot` Module
========================================
Uses :mod:`matplotlib` to plot a split.
Classes and Functions
=====================
.. autosummary::
:toctree: generated/
SplitPlotter - Plots a split.
plot - Plots split shapes.
"""
from __future__ import absolute_import
from .license import license as _license, copyright as _copyright, version as _version
__author__ = "Shane J. Latham"
__license__ = _license()
__copyright__ = _copyright()
__version__ = _version()
class SplitPlotter(object):
"""
Plots a split.
"""
def __init__(self):
"""
"""
pass
def plot(split):
"""
Plots a split.
"""
|
<commit_before><commit_msg>Add skeleton for plotting split.<commit_after>"""
========================================
The :mod:`array_split.split_plot` Module
========================================
Uses :mod:`matplotlib` to plot a split.
Classes and Functions
=====================
.. autosummary::
:toctree: generated/
SplitPlotter - Plots a split.
plot - Plots split shapes.
"""
from __future__ import absolute_import
from .license import license as _license, copyright as _copyright, version as _version
__author__ = "Shane J. Latham"
__license__ = _license()
__copyright__ = _copyright()
__version__ = _version()
class SplitPlotter(object):
"""
Plots a split.
"""
def __init__(self):
"""
"""
pass
def plot(split):
"""
Plots a split.
"""
|
|
a80527aa7883bfff0e7b36acdf9025fe7b1a423d
|
test/tvla_test.py
|
test/tvla_test.py
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from .cmd import Args
from .repo import RepoCmd
class TvlaCmd(RepoCmd):
def __init__(self, args: Args):
# Insert (relative) path to TVLA before the given arguments.
args = Args('cw/cw305/tvla.py') + args
super().__init__(args)
def test_help():
tvla = TvlaCmd(Args('--help')).run()
# Assert that a message is printed on stdout or stderr.
assert(len(tvla.stdout()) != 0 or len(tvla.stderr()) != 0)
|
Add first, simple TVLA command test
|
Add first, simple TVLA command test
Signed-off-by: Andreas Kurth <62c4a7f75976b7a2fb8d79afddaccc3e9236aad5@lowrisc.org>
|
Python
|
apache-2.0
|
lowRISC/ot-sca,lowRISC/ot-sca
|
Add first, simple TVLA command test
Signed-off-by: Andreas Kurth <62c4a7f75976b7a2fb8d79afddaccc3e9236aad5@lowrisc.org>
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from .cmd import Args
from .repo import RepoCmd
class TvlaCmd(RepoCmd):
def __init__(self, args: Args):
# Insert (relative) path to TVLA before the given arguments.
args = Args('cw/cw305/tvla.py') + args
super().__init__(args)
def test_help():
tvla = TvlaCmd(Args('--help')).run()
# Assert that a message is printed on stdout or stderr.
assert(len(tvla.stdout()) != 0 or len(tvla.stderr()) != 0)
|
<commit_before><commit_msg>Add first, simple TVLA command test
Signed-off-by: Andreas Kurth <62c4a7f75976b7a2fb8d79afddaccc3e9236aad5@lowrisc.org><commit_after>
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from .cmd import Args
from .repo import RepoCmd
class TvlaCmd(RepoCmd):
def __init__(self, args: Args):
# Insert (relative) path to TVLA before the given arguments.
args = Args('cw/cw305/tvla.py') + args
super().__init__(args)
def test_help():
tvla = TvlaCmd(Args('--help')).run()
# Assert that a message is printed on stdout or stderr.
assert(len(tvla.stdout()) != 0 or len(tvla.stderr()) != 0)
|
Add first, simple TVLA command test
Signed-off-by: Andreas Kurth <62c4a7f75976b7a2fb8d79afddaccc3e9236aad5@lowrisc.org># Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from .cmd import Args
from .repo import RepoCmd
class TvlaCmd(RepoCmd):
def __init__(self, args: Args):
# Insert (relative) path to TVLA before the given arguments.
args = Args('cw/cw305/tvla.py') + args
super().__init__(args)
def test_help():
tvla = TvlaCmd(Args('--help')).run()
# Assert that a message is printed on stdout or stderr.
assert(len(tvla.stdout()) != 0 or len(tvla.stderr()) != 0)
|
<commit_before><commit_msg>Add first, simple TVLA command test
Signed-off-by: Andreas Kurth <62c4a7f75976b7a2fb8d79afddaccc3e9236aad5@lowrisc.org><commit_after># Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from .cmd import Args
from .repo import RepoCmd
class TvlaCmd(RepoCmd):
def __init__(self, args: Args):
# Insert (relative) path to TVLA before the given arguments.
args = Args('cw/cw305/tvla.py') + args
super().__init__(args)
def test_help():
tvla = TvlaCmd(Args('--help')).run()
# Assert that a message is printed on stdout or stderr.
assert(len(tvla.stdout()) != 0 or len(tvla.stderr()) != 0)
|
|
9a845da08d897f4a6b5a3105f6d9001a4f09b45e
|
salt/modules/bluez.py
|
salt/modules/bluez.py
|
'''
Support for Bluetooth (using Bluez in Linux)
'''
import salt.utils
import salt.modules.service
def __virtual__():
'''
Only load the module if bluetooth is installed
'''
if salt.utils.which('bluetoothd'):
return 'bluetooth'
return False
def version():
'''
Return Bluez version from bluetoothd -v
CLI Example::
salt '*' bluetoothd.version
'''
cmd = 'bluetoothd -v'
out = __salt__['cmd.run'](cmd).split('\n')
return out[0]
def address():
'''
Get the many addresses of the Bluetooth adapter
CLI Example::
salt '*' bluetooth.address
'''
cmd = "dbus-send --system --print-reply --dest=org.bluez / org.bluez.Manager.DefaultAdapter|awk '/object path/ {print $3}' | sed 's/\"//g'"
path = __salt__['cmd.run'](cmd).split('\n')
devname = path[0].split('/')
syspath = '/sys/class/bluetooth/%s/address' % devname[-1]
sysfile = open(syspath, 'r')
address = sysfile.read().strip()
sysfile.close()
return {
'path': path[0],
'devname': devname[-1],
'address': address,
}
def scan():
'''
Scan for bluetooth devices in the area
CLI Example::
salt '*' bluetooth.scan
'''
cmd = 'hcitool scan'
ret = {}
out = __salt__['cmd.run'](cmd).split('\n')
for line in out:
if not line:
continue
if 'Scanning' in line:
continue
comps = line.strip().split()
devname = ' '.join(comps[1:])
ret[comps[0]] = devname
return ret
def pair(address, key):
'''
Pair the bluetooth adapter with a device
CLI Example::
salt '*' bluetooth.pair DE:AD:BE:EF:CA:FE 1234
Where DE:AD:BE:EF:CA:FE is the address of the device
to pair with, and 1234 is the passphrase.
'''
address = address()
cmd = 'echo "%s" | bluez-simple-agent %s %s' % (address['devname'], address, key)
out = __salt__['cmd.run'](cmd).split('\n')
return out
def unpair(address):
'''
Unpair the bluetooth adapter from a device
CLI Example::
salt '*' bluetooth.unpair DE:AD:BE:EF:CA:FE
Where DE:AD:BE:EF:CA:FE is the address of the device
to unpair.
'''
address = address()
cmd = 'bluez-test-device remove %s' % address
out = __salt__['cmd.run'](cmd).split('\n')
return out
|
Add basic bluetooth support for salt
|
Add basic bluetooth support for salt
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add basic bluetooth support for salt
|
'''
Support for Bluetooth (using Bluez in Linux)
'''
import salt.utils
import salt.modules.service
def __virtual__():
'''
Only load the module if bluetooth is installed
'''
if salt.utils.which('bluetoothd'):
return 'bluetooth'
return False
def version():
'''
Return Bluez version from bluetoothd -v
CLI Example::
salt '*' bluetoothd.version
'''
cmd = 'bluetoothd -v'
out = __salt__['cmd.run'](cmd).split('\n')
return out[0]
def address():
'''
Get the many addresses of the Bluetooth adapter
CLI Example::
salt '*' bluetooth.address
'''
cmd = "dbus-send --system --print-reply --dest=org.bluez / org.bluez.Manager.DefaultAdapter|awk '/object path/ {print $3}' | sed 's/\"//g'"
path = __salt__['cmd.run'](cmd).split('\n')
devname = path[0].split('/')
syspath = '/sys/class/bluetooth/%s/address' % devname[-1]
sysfile = open(syspath, 'r')
address = sysfile.read().strip()
sysfile.close()
return {
'path': path[0],
'devname': devname[-1],
'address': address,
}
def scan():
'''
Scan for bluetooth devices in the area
CLI Example::
salt '*' bluetooth.scan
'''
cmd = 'hcitool scan'
ret = {}
out = __salt__['cmd.run'](cmd).split('\n')
for line in out:
if not line:
continue
if 'Scanning' in line:
continue
comps = line.strip().split()
devname = ' '.join(comps[1:])
ret[comps[0]] = devname
return ret
def pair(address, key):
'''
Pair the bluetooth adapter with a device
CLI Example::
salt '*' bluetooth.pair DE:AD:BE:EF:CA:FE 1234
Where DE:AD:BE:EF:CA:FE is the address of the device
to pair with, and 1234 is the passphrase.
'''
address = address()
cmd = 'echo "%s" | bluez-simple-agent %s %s' % (address['devname'], address, key)
out = __salt__['cmd.run'](cmd).split('\n')
return out
def unpair(address):
'''
Unpair the bluetooth adapter from a device
CLI Example::
salt '*' bluetooth.unpair DE:AD:BE:EF:CA:FE
Where DE:AD:BE:EF:CA:FE is the address of the device
to unpair.
'''
address = address()
cmd = 'bluez-test-device remove %s' % address
out = __salt__['cmd.run'](cmd).split('\n')
return out
|
<commit_before><commit_msg>Add basic bluetooth support for salt<commit_after>
|
'''
Support for Bluetooth (using Bluez in Linux)
'''
import salt.utils
import salt.modules.service
def __virtual__():
'''
Only load the module if bluetooth is installed
'''
if salt.utils.which('bluetoothd'):
return 'bluetooth'
return False
def version():
'''
Return Bluez version from bluetoothd -v
CLI Example::
salt '*' bluetoothd.version
'''
cmd = 'bluetoothd -v'
out = __salt__['cmd.run'](cmd).split('\n')
return out[0]
def address():
'''
Get the many addresses of the Bluetooth adapter
CLI Example::
salt '*' bluetooth.address
'''
cmd = "dbus-send --system --print-reply --dest=org.bluez / org.bluez.Manager.DefaultAdapter|awk '/object path/ {print $3}' | sed 's/\"//g'"
path = __salt__['cmd.run'](cmd).split('\n')
devname = path[0].split('/')
syspath = '/sys/class/bluetooth/%s/address' % devname[-1]
sysfile = open(syspath, 'r')
address = sysfile.read().strip()
sysfile.close()
return {
'path': path[0],
'devname': devname[-1],
'address': address,
}
def scan():
'''
Scan for bluetooth devices in the area
CLI Example::
salt '*' bluetooth.scan
'''
cmd = 'hcitool scan'
ret = {}
out = __salt__['cmd.run'](cmd).split('\n')
for line in out:
if not line:
continue
if 'Scanning' in line:
continue
comps = line.strip().split()
devname = ' '.join(comps[1:])
ret[comps[0]] = devname
return ret
def pair(address, key):
'''
Pair the bluetooth adapter with a device
CLI Example::
salt '*' bluetooth.pair DE:AD:BE:EF:CA:FE 1234
Where DE:AD:BE:EF:CA:FE is the address of the device
to pair with, and 1234 is the passphrase.
'''
address = address()
cmd = 'echo "%s" | bluez-simple-agent %s %s' % (address['devname'], address, key)
out = __salt__['cmd.run'](cmd).split('\n')
return out
def unpair(address):
'''
Unpair the bluetooth adapter from a device
CLI Example::
salt '*' bluetooth.unpair DE:AD:BE:EF:CA:FE
Where DE:AD:BE:EF:CA:FE is the address of the device
to unpair.
'''
address = address()
cmd = 'bluez-test-device remove %s' % address
out = __salt__['cmd.run'](cmd).split('\n')
return out
|
Add basic bluetooth support for salt'''
Support for Bluetooth (using Bluez in Linux)
'''
import salt.utils
import salt.modules.service
def __virtual__():
'''
Only load the module if bluetooth is installed
'''
if salt.utils.which('bluetoothd'):
return 'bluetooth'
return False
def version():
'''
Return Bluez version from bluetoothd -v
CLI Example::
salt '*' bluetoothd.version
'''
cmd = 'bluetoothd -v'
out = __salt__['cmd.run'](cmd).split('\n')
return out[0]
def address():
'''
Get the many addresses of the Bluetooth adapter
CLI Example::
salt '*' bluetooth.address
'''
cmd = "dbus-send --system --print-reply --dest=org.bluez / org.bluez.Manager.DefaultAdapter|awk '/object path/ {print $3}' | sed 's/\"//g'"
path = __salt__['cmd.run'](cmd).split('\n')
devname = path[0].split('/')
syspath = '/sys/class/bluetooth/%s/address' % devname[-1]
sysfile = open(syspath, 'r')
address = sysfile.read().strip()
sysfile.close()
return {
'path': path[0],
'devname': devname[-1],
'address': address,
}
def scan():
'''
Scan for bluetooth devices in the area
CLI Example::
salt '*' bluetooth.scan
'''
cmd = 'hcitool scan'
ret = {}
out = __salt__['cmd.run'](cmd).split('\n')
for line in out:
if not line:
continue
if 'Scanning' in line:
continue
comps = line.strip().split()
devname = ' '.join(comps[1:])
ret[comps[0]] = devname
return ret
def pair(address, key):
'''
Pair the bluetooth adapter with a device
CLI Example::
salt '*' bluetooth.pair DE:AD:BE:EF:CA:FE 1234
Where DE:AD:BE:EF:CA:FE is the address of the device
to pair with, and 1234 is the passphrase.
'''
address = address()
cmd = 'echo "%s" | bluez-simple-agent %s %s' % (address['devname'], address, key)
out = __salt__['cmd.run'](cmd).split('\n')
return out
def unpair(address):
'''
Unpair the bluetooth adapter from a device
CLI Example::
salt '*' bluetooth.unpair DE:AD:BE:EF:CA:FE
Where DE:AD:BE:EF:CA:FE is the address of the device
to unpair.
'''
address = address()
cmd = 'bluez-test-device remove %s' % address
out = __salt__['cmd.run'](cmd).split('\n')
return out
|
<commit_before><commit_msg>Add basic bluetooth support for salt<commit_after>'''
Support for Bluetooth (using Bluez in Linux)
'''
import salt.utils
import salt.modules.service
def __virtual__():
'''
Only load the module if bluetooth is installed
'''
if salt.utils.which('bluetoothd'):
return 'bluetooth'
return False
def version():
'''
Return Bluez version from bluetoothd -v
CLI Example::
salt '*' bluetoothd.version
'''
cmd = 'bluetoothd -v'
out = __salt__['cmd.run'](cmd).split('\n')
return out[0]
def address():
'''
Get the many addresses of the Bluetooth adapter
CLI Example::
salt '*' bluetooth.address
'''
cmd = "dbus-send --system --print-reply --dest=org.bluez / org.bluez.Manager.DefaultAdapter|awk '/object path/ {print $3}' | sed 's/\"//g'"
path = __salt__['cmd.run'](cmd).split('\n')
devname = path[0].split('/')
syspath = '/sys/class/bluetooth/%s/address' % devname[-1]
sysfile = open(syspath, 'r')
address = sysfile.read().strip()
sysfile.close()
return {
'path': path[0],
'devname': devname[-1],
'address': address,
}
def scan():
'''
Scan for bluetooth devices in the area
CLI Example::
salt '*' bluetooth.scan
'''
cmd = 'hcitool scan'
ret = {}
out = __salt__['cmd.run'](cmd).split('\n')
for line in out:
if not line:
continue
if 'Scanning' in line:
continue
comps = line.strip().split()
devname = ' '.join(comps[1:])
ret[comps[0]] = devname
return ret
def pair(address, key):
'''
Pair the bluetooth adapter with a device
CLI Example::
salt '*' bluetooth.pair DE:AD:BE:EF:CA:FE 1234
Where DE:AD:BE:EF:CA:FE is the address of the device
to pair with, and 1234 is the passphrase.
'''
address = address()
cmd = 'echo "%s" | bluez-simple-agent %s %s' % (address['devname'], address, key)
out = __salt__['cmd.run'](cmd).split('\n')
return out
def unpair(address):
'''
Unpair the bluetooth adapter from a device
CLI Example::
salt '*' bluetooth.unpair DE:AD:BE:EF:CA:FE
Where DE:AD:BE:EF:CA:FE is the address of the device
to unpair.
'''
address = address()
cmd = 'bluez-test-device remove %s' % address
out = __salt__['cmd.run'](cmd).split('\n')
return out
|
|
b58f1ae369ae47f12494f816f0eeb69973e6baf8
|
test/test_utils.py
|
test/test_utils.py
|
# coding=utf-8
from u2flib_host.utils import (
u2str,
websafe_encode,
websafe_decode,
H,
)
def test_u2str():
data1 = {
u'greeting_en': u'Hello world',
u'greeting_se': u'Hallå världen',
u'recursive': {
'plaintext': [u'foo', 'bar', u'BΛZ'],
},
}
assert u2str(data1) == {
'greeting_en': 'Hello world',
'greeting_se': 'Hall\xc3\xa5 v\xc3\xa4rlden', # utf-8 encoded
'recursive': {
'plaintext': ['foo', 'bar', 'B\xce\x9bZ'],
},
}
def test_websafe_decode():
# Base64 vectors adapted from https://tools.ietf.org/html/rfc4648#section-10
assert websafe_decode('') == ''
assert websafe_decode('Zg') == 'f'
assert websafe_decode('Zm8') == 'fo'
assert websafe_decode('Zm9v') == 'foo'
assert websafe_decode('Zm9vYg') == 'foob'
assert websafe_decode('Zm9vYmE') == 'fooba'
assert websafe_decode('Zm9vYmFy') == 'foobar'
def test_websafe_encode():
# Base64 vectors adapted from https://tools.ietf.org/html/rfc4648#section-10
assert websafe_encode('') == ''
assert websafe_encode('f') == 'Zg'
assert websafe_encode('fo') == 'Zm8'
assert websafe_encode('foo') == 'Zm9v'
assert websafe_encode('foob') == 'Zm9vYg'
assert websafe_encode('fooba') == 'Zm9vYmE'
assert websafe_encode('foobar') == 'Zm9vYmFy'
def test_H():
# SHA-256 vectors adapted from http://www.nsrl.nist.gov/testdata/
assert H('abc') == '\xbax\x16\xbf\x8f\x01\xcf\xeaAA@\xde]\xae"#\xb0' \
'\x03a\xa3\x96\x17z\x9c\xb4\x10\xffa\xf2\x00\x15\xad'
|
Add unit tests for u2flib_host.utils
|
Add unit tests for u2flib_host.utils
|
Python
|
bsd-2-clause
|
Yubico/python-u2flib-host,moreati/python-u2flib-host
|
Add unit tests for u2flib_host.utils
|
# coding=utf-8
from u2flib_host.utils import (
u2str,
websafe_encode,
websafe_decode,
H,
)
def test_u2str():
data1 = {
u'greeting_en': u'Hello world',
u'greeting_se': u'Hallå världen',
u'recursive': {
'plaintext': [u'foo', 'bar', u'BΛZ'],
},
}
assert u2str(data1) == {
'greeting_en': 'Hello world',
'greeting_se': 'Hall\xc3\xa5 v\xc3\xa4rlden', # utf-8 encoded
'recursive': {
'plaintext': ['foo', 'bar', 'B\xce\x9bZ'],
},
}
def test_websafe_decode():
# Base64 vectors adapted from https://tools.ietf.org/html/rfc4648#section-10
assert websafe_decode('') == ''
assert websafe_decode('Zg') == 'f'
assert websafe_decode('Zm8') == 'fo'
assert websafe_decode('Zm9v') == 'foo'
assert websafe_decode('Zm9vYg') == 'foob'
assert websafe_decode('Zm9vYmE') == 'fooba'
assert websafe_decode('Zm9vYmFy') == 'foobar'
def test_websafe_encode():
# Base64 vectors adapted from https://tools.ietf.org/html/rfc4648#section-10
assert websafe_encode('') == ''
assert websafe_encode('f') == 'Zg'
assert websafe_encode('fo') == 'Zm8'
assert websafe_encode('foo') == 'Zm9v'
assert websafe_encode('foob') == 'Zm9vYg'
assert websafe_encode('fooba') == 'Zm9vYmE'
assert websafe_encode('foobar') == 'Zm9vYmFy'
def test_H():
# SHA-256 vectors adapted from http://www.nsrl.nist.gov/testdata/
assert H('abc') == '\xbax\x16\xbf\x8f\x01\xcf\xeaAA@\xde]\xae"#\xb0' \
'\x03a\xa3\x96\x17z\x9c\xb4\x10\xffa\xf2\x00\x15\xad'
|
<commit_before><commit_msg>Add unit tests for u2flib_host.utils<commit_after>
|
# coding=utf-8
from u2flib_host.utils import (
u2str,
websafe_encode,
websafe_decode,
H,
)
def test_u2str():
data1 = {
u'greeting_en': u'Hello world',
u'greeting_se': u'Hallå världen',
u'recursive': {
'plaintext': [u'foo', 'bar', u'BΛZ'],
},
}
assert u2str(data1) == {
'greeting_en': 'Hello world',
'greeting_se': 'Hall\xc3\xa5 v\xc3\xa4rlden', # utf-8 encoded
'recursive': {
'plaintext': ['foo', 'bar', 'B\xce\x9bZ'],
},
}
def test_websafe_decode():
# Base64 vectors adapted from https://tools.ietf.org/html/rfc4648#section-10
assert websafe_decode('') == ''
assert websafe_decode('Zg') == 'f'
assert websafe_decode('Zm8') == 'fo'
assert websafe_decode('Zm9v') == 'foo'
assert websafe_decode('Zm9vYg') == 'foob'
assert websafe_decode('Zm9vYmE') == 'fooba'
assert websafe_decode('Zm9vYmFy') == 'foobar'
def test_websafe_encode():
# Base64 vectors adapted from https://tools.ietf.org/html/rfc4648#section-10
assert websafe_encode('') == ''
assert websafe_encode('f') == 'Zg'
assert websafe_encode('fo') == 'Zm8'
assert websafe_encode('foo') == 'Zm9v'
assert websafe_encode('foob') == 'Zm9vYg'
assert websafe_encode('fooba') == 'Zm9vYmE'
assert websafe_encode('foobar') == 'Zm9vYmFy'
def test_H():
# SHA-256 vectors adapted from http://www.nsrl.nist.gov/testdata/
assert H('abc') == '\xbax\x16\xbf\x8f\x01\xcf\xeaAA@\xde]\xae"#\xb0' \
'\x03a\xa3\x96\x17z\x9c\xb4\x10\xffa\xf2\x00\x15\xad'
|
Add unit tests for u2flib_host.utils# coding=utf-8
from u2flib_host.utils import (
u2str,
websafe_encode,
websafe_decode,
H,
)
def test_u2str():
data1 = {
u'greeting_en': u'Hello world',
u'greeting_se': u'Hallå världen',
u'recursive': {
'plaintext': [u'foo', 'bar', u'BΛZ'],
},
}
assert u2str(data1) == {
'greeting_en': 'Hello world',
'greeting_se': 'Hall\xc3\xa5 v\xc3\xa4rlden', # utf-8 encoded
'recursive': {
'plaintext': ['foo', 'bar', 'B\xce\x9bZ'],
},
}
def test_websafe_decode():
# Base64 vectors adapted from https://tools.ietf.org/html/rfc4648#section-10
assert websafe_decode('') == ''
assert websafe_decode('Zg') == 'f'
assert websafe_decode('Zm8') == 'fo'
assert websafe_decode('Zm9v') == 'foo'
assert websafe_decode('Zm9vYg') == 'foob'
assert websafe_decode('Zm9vYmE') == 'fooba'
assert websafe_decode('Zm9vYmFy') == 'foobar'
def test_websafe_encode():
# Base64 vectors adapted from https://tools.ietf.org/html/rfc4648#section-10
assert websafe_encode('') == ''
assert websafe_encode('f') == 'Zg'
assert websafe_encode('fo') == 'Zm8'
assert websafe_encode('foo') == 'Zm9v'
assert websafe_encode('foob') == 'Zm9vYg'
assert websafe_encode('fooba') == 'Zm9vYmE'
assert websafe_encode('foobar') == 'Zm9vYmFy'
def test_H():
# SHA-256 vectors adapted from http://www.nsrl.nist.gov/testdata/
assert H('abc') == '\xbax\x16\xbf\x8f\x01\xcf\xeaAA@\xde]\xae"#\xb0' \
'\x03a\xa3\x96\x17z\x9c\xb4\x10\xffa\xf2\x00\x15\xad'
|
<commit_before><commit_msg>Add unit tests for u2flib_host.utils<commit_after># coding=utf-8
from u2flib_host.utils import (
u2str,
websafe_encode,
websafe_decode,
H,
)
def test_u2str():
data1 = {
u'greeting_en': u'Hello world',
u'greeting_se': u'Hallå världen',
u'recursive': {
'plaintext': [u'foo', 'bar', u'BΛZ'],
},
}
assert u2str(data1) == {
'greeting_en': 'Hello world',
'greeting_se': 'Hall\xc3\xa5 v\xc3\xa4rlden', # utf-8 encoded
'recursive': {
'plaintext': ['foo', 'bar', 'B\xce\x9bZ'],
},
}
def test_websafe_decode():
# Base64 vectors adapted from https://tools.ietf.org/html/rfc4648#section-10
assert websafe_decode('') == ''
assert websafe_decode('Zg') == 'f'
assert websafe_decode('Zm8') == 'fo'
assert websafe_decode('Zm9v') == 'foo'
assert websafe_decode('Zm9vYg') == 'foob'
assert websafe_decode('Zm9vYmE') == 'fooba'
assert websafe_decode('Zm9vYmFy') == 'foobar'
def test_websafe_encode():
# Base64 vectors adapted from https://tools.ietf.org/html/rfc4648#section-10
assert websafe_encode('') == ''
assert websafe_encode('f') == 'Zg'
assert websafe_encode('fo') == 'Zm8'
assert websafe_encode('foo') == 'Zm9v'
assert websafe_encode('foob') == 'Zm9vYg'
assert websafe_encode('fooba') == 'Zm9vYmE'
assert websafe_encode('foobar') == 'Zm9vYmFy'
def test_H():
# SHA-256 vectors adapted from http://www.nsrl.nist.gov/testdata/
assert H('abc') == '\xbax\x16\xbf\x8f\x01\xcf\xeaAA@\xde]\xae"#\xb0' \
'\x03a\xa3\x96\x17z\x9c\xb4\x10\xffa\xf2\x00\x15\xad'
|
|
50e1e8d6fa24b0cbf7330411ce4d87b9cb351f54
|
benchexec/tools/deagle.py
|
benchexec/tools/deagle.py
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.result as result
import benchexec.util as util
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool):
def executable(self):
return util.find_executable("deagle")
def name(self):
return "Deagle"
def version(self, executable):
return self._version_from_tool(executable)
def cmdline(self, executable, options, tasks, propertyfile, rlimits):
options = options + ["--32", "--no-unwinding-assertions", "--closure"]
return [executable] + options + tasks
def determine_result(self, returncode, returnsignal, output, isTimeout):
status = result.RESULT_UNKNOWN
stroutput = str(output)
if isTimeout:
status = "TIMEOUT"
elif "SUCCESSFUL" in stroutput:
status = result.RESULT_TRUE_PROP
elif "FAILED" in stroutput:
status = result.RESULT_FALSE_REACH
elif "UNKNOWN" in stroutput:
status = result.RESULT_UNKNOWN
else:
status = result.RESULT_UNKNOWN
return status
|
Add a tool info module for Deagle
|
Add a tool info module for Deagle
|
Python
|
apache-2.0
|
ultimate-pa/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,sosy-lab/benchexec
|
Add a tool info module for Deagle
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.result as result
import benchexec.util as util
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool):
def executable(self):
return util.find_executable("deagle")
def name(self):
return "Deagle"
def version(self, executable):
return self._version_from_tool(executable)
def cmdline(self, executable, options, tasks, propertyfile, rlimits):
options = options + ["--32", "--no-unwinding-assertions", "--closure"]
return [executable] + options + tasks
def determine_result(self, returncode, returnsignal, output, isTimeout):
status = result.RESULT_UNKNOWN
stroutput = str(output)
if isTimeout:
status = "TIMEOUT"
elif "SUCCESSFUL" in stroutput:
status = result.RESULT_TRUE_PROP
elif "FAILED" in stroutput:
status = result.RESULT_FALSE_REACH
elif "UNKNOWN" in stroutput:
status = result.RESULT_UNKNOWN
else:
status = result.RESULT_UNKNOWN
return status
|
<commit_before><commit_msg>Add a tool info module for Deagle<commit_after>
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.result as result
import benchexec.util as util
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool):
def executable(self):
return util.find_executable("deagle")
def name(self):
return "Deagle"
def version(self, executable):
return self._version_from_tool(executable)
def cmdline(self, executable, options, tasks, propertyfile, rlimits):
options = options + ["--32", "--no-unwinding-assertions", "--closure"]
return [executable] + options + tasks
def determine_result(self, returncode, returnsignal, output, isTimeout):
status = result.RESULT_UNKNOWN
stroutput = str(output)
if isTimeout:
status = "TIMEOUT"
elif "SUCCESSFUL" in stroutput:
status = result.RESULT_TRUE_PROP
elif "FAILED" in stroutput:
status = result.RESULT_FALSE_REACH
elif "UNKNOWN" in stroutput:
status = result.RESULT_UNKNOWN
else:
status = result.RESULT_UNKNOWN
return status
|
Add a tool info module for Deagle# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.result as result
import benchexec.util as util
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool):
def executable(self):
return util.find_executable("deagle")
def name(self):
return "Deagle"
def version(self, executable):
return self._version_from_tool(executable)
def cmdline(self, executable, options, tasks, propertyfile, rlimits):
options = options + ["--32", "--no-unwinding-assertions", "--closure"]
return [executable] + options + tasks
def determine_result(self, returncode, returnsignal, output, isTimeout):
status = result.RESULT_UNKNOWN
stroutput = str(output)
if isTimeout:
status = "TIMEOUT"
elif "SUCCESSFUL" in stroutput:
status = result.RESULT_TRUE_PROP
elif "FAILED" in stroutput:
status = result.RESULT_FALSE_REACH
elif "UNKNOWN" in stroutput:
status = result.RESULT_UNKNOWN
else:
status = result.RESULT_UNKNOWN
return status
|
<commit_before><commit_msg>Add a tool info module for Deagle<commit_after># This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.result as result
import benchexec.util as util
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool):
def executable(self):
return util.find_executable("deagle")
def name(self):
return "Deagle"
def version(self, executable):
return self._version_from_tool(executable)
def cmdline(self, executable, options, tasks, propertyfile, rlimits):
options = options + ["--32", "--no-unwinding-assertions", "--closure"]
return [executable] + options + tasks
def determine_result(self, returncode, returnsignal, output, isTimeout):
status = result.RESULT_UNKNOWN
stroutput = str(output)
if isTimeout:
status = "TIMEOUT"
elif "SUCCESSFUL" in stroutput:
status = result.RESULT_TRUE_PROP
elif "FAILED" in stroutput:
status = result.RESULT_FALSE_REACH
elif "UNKNOWN" in stroutput:
status = result.RESULT_UNKNOWN
else:
status = result.RESULT_UNKNOWN
return status
|
|
f6c287e5c75cd255952a70f5445ab9ece2c80612
|
merge.py
|
merge.py
|
# Merge addresses into buildings they intersect with
from fiona import collection
from rtree import index
from shapely.geometry import asShape, Point, LineString
from shapely import speedups
def merge(buildingIn, addressIn):
addresses = []
with collection(addressIn, "r") as input:
for address in input:
shape = asShape(address['geometry'])
shape.original = address
addresses.append(shape)
# Load and index all buildings.
buildingIdx = index.Index()
buildings = []
with collection(buildingIn, "r") as input:
for building in input:
building['shape'] = asShape(building['geometry'])
building['properties']['addresses'] = []
buildings.append(building)
buildingIdx.add(len(buildings) - 1, building['shape'].bounds)
# Map addresses to buildings.
for address in addresses:
for i in buildingIdx.intersection(address.bounds):
if buildings[i]['shape'].contains(address):
buildings[i]['properties']['addresses'].append(
address.original)
return {
'data': buildings,
'index': buildingIdx
}
|
Break out merging addresses and buildings.
|
Break out merging addresses and buildings.
|
Python
|
bsd-3-clause
|
osmlab/nycbuildings,osmlab/nycbuildings,osmlab/nycbuildings
|
Break out merging addresses and buildings.
|
# Merge addresses into buildings they intersect with
from fiona import collection
from rtree import index
from shapely.geometry import asShape, Point, LineString
from shapely import speedups
def merge(buildingIn, addressIn):
addresses = []
with collection(addressIn, "r") as input:
for address in input:
shape = asShape(address['geometry'])
shape.original = address
addresses.append(shape)
# Load and index all buildings.
buildingIdx = index.Index()
buildings = []
with collection(buildingIn, "r") as input:
for building in input:
building['shape'] = asShape(building['geometry'])
building['properties']['addresses'] = []
buildings.append(building)
buildingIdx.add(len(buildings) - 1, building['shape'].bounds)
# Map addresses to buildings.
for address in addresses:
for i in buildingIdx.intersection(address.bounds):
if buildings[i]['shape'].contains(address):
buildings[i]['properties']['addresses'].append(
address.original)
return {
'data': buildings,
'index': buildingIdx
}
|
<commit_before><commit_msg>Break out merging addresses and buildings.<commit_after>
|
# Merge addresses into buildings they intersect with
from fiona import collection
from rtree import index
from shapely.geometry import asShape, Point, LineString
from shapely import speedups
def merge(buildingIn, addressIn):
addresses = []
with collection(addressIn, "r") as input:
for address in input:
shape = asShape(address['geometry'])
shape.original = address
addresses.append(shape)
# Load and index all buildings.
buildingIdx = index.Index()
buildings = []
with collection(buildingIn, "r") as input:
for building in input:
building['shape'] = asShape(building['geometry'])
building['properties']['addresses'] = []
buildings.append(building)
buildingIdx.add(len(buildings) - 1, building['shape'].bounds)
# Map addresses to buildings.
for address in addresses:
for i in buildingIdx.intersection(address.bounds):
if buildings[i]['shape'].contains(address):
buildings[i]['properties']['addresses'].append(
address.original)
return {
'data': buildings,
'index': buildingIdx
}
|
Break out merging addresses and buildings.# Merge addresses into buildings they intersect with
from fiona import collection
from rtree import index
from shapely.geometry import asShape, Point, LineString
from shapely import speedups
def merge(buildingIn, addressIn):
addresses = []
with collection(addressIn, "r") as input:
for address in input:
shape = asShape(address['geometry'])
shape.original = address
addresses.append(shape)
# Load and index all buildings.
buildingIdx = index.Index()
buildings = []
with collection(buildingIn, "r") as input:
for building in input:
building['shape'] = asShape(building['geometry'])
building['properties']['addresses'] = []
buildings.append(building)
buildingIdx.add(len(buildings) - 1, building['shape'].bounds)
# Map addresses to buildings.
for address in addresses:
for i in buildingIdx.intersection(address.bounds):
if buildings[i]['shape'].contains(address):
buildings[i]['properties']['addresses'].append(
address.original)
return {
'data': buildings,
'index': buildingIdx
}
|
<commit_before><commit_msg>Break out merging addresses and buildings.<commit_after># Merge addresses into buildings they intersect with
from fiona import collection
from rtree import index
from shapely.geometry import asShape, Point, LineString
from shapely import speedups
def merge(buildingIn, addressIn):
addresses = []
with collection(addressIn, "r") as input:
for address in input:
shape = asShape(address['geometry'])
shape.original = address
addresses.append(shape)
# Load and index all buildings.
buildingIdx = index.Index()
buildings = []
with collection(buildingIn, "r") as input:
for building in input:
building['shape'] = asShape(building['geometry'])
building['properties']['addresses'] = []
buildings.append(building)
buildingIdx.add(len(buildings) - 1, building['shape'].bounds)
# Map addresses to buildings.
for address in addresses:
for i in buildingIdx.intersection(address.bounds):
if buildings[i]['shape'].contains(address):
buildings[i]['properties']['addresses'].append(
address.original)
return {
'data': buildings,
'index': buildingIdx
}
|
|
72cbe12890173e41db1ff01c241cb7f1fba58858
|
astrobin/management/commands/export_emails.py
|
astrobin/management/commands/export_emails.py
|
import csv
from django.core.management.base import BaseCommand
from astrobin.models import UserProfile
class Command(BaseCommand):
help = "Export all user emails to a CSV file"
def handle(self, *args, **options):
profiles = UserProfile.objects.exclude(user__email = None)
header = [['username', 'realname', 'email']]
values = list(
profiles.values_list('user__username', 'real_name', 'user__email'))
data = header + values
encoded = [(
x[0],
x[1].encode('utf-8') if x[1] is not None else '',
x[2].encode('utf-8')) for x in data]
with open('emails.csv', 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(encoded)
|
Add management command to export all user emails
|
Add management command to export all user emails
|
Python
|
agpl-3.0
|
astrobin/astrobin,astrobin/astrobin,astrobin/astrobin,astrobin/astrobin
|
Add management command to export all user emails
|
import csv
from django.core.management.base import BaseCommand
from astrobin.models import UserProfile
class Command(BaseCommand):
help = "Export all user emails to a CSV file"
def handle(self, *args, **options):
profiles = UserProfile.objects.exclude(user__email = None)
header = [['username', 'realname', 'email']]
values = list(
profiles.values_list('user__username', 'real_name', 'user__email'))
data = header + values
encoded = [(
x[0],
x[1].encode('utf-8') if x[1] is not None else '',
x[2].encode('utf-8')) for x in data]
with open('emails.csv', 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(encoded)
|
<commit_before><commit_msg>Add management command to export all user emails<commit_after>
|
import csv
from django.core.management.base import BaseCommand
from astrobin.models import UserProfile
class Command(BaseCommand):
help = "Export all user emails to a CSV file"
def handle(self, *args, **options):
profiles = UserProfile.objects.exclude(user__email = None)
header = [['username', 'realname', 'email']]
values = list(
profiles.values_list('user__username', 'real_name', 'user__email'))
data = header + values
encoded = [(
x[0],
x[1].encode('utf-8') if x[1] is not None else '',
x[2].encode('utf-8')) for x in data]
with open('emails.csv', 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(encoded)
|
Add management command to export all user emailsimport csv
from django.core.management.base import BaseCommand
from astrobin.models import UserProfile
class Command(BaseCommand):
help = "Export all user emails to a CSV file"
def handle(self, *args, **options):
profiles = UserProfile.objects.exclude(user__email = None)
header = [['username', 'realname', 'email']]
values = list(
profiles.values_list('user__username', 'real_name', 'user__email'))
data = header + values
encoded = [(
x[0],
x[1].encode('utf-8') if x[1] is not None else '',
x[2].encode('utf-8')) for x in data]
with open('emails.csv', 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(encoded)
|
<commit_before><commit_msg>Add management command to export all user emails<commit_after>import csv
from django.core.management.base import BaseCommand
from astrobin.models import UserProfile
class Command(BaseCommand):
help = "Export all user emails to a CSV file"
def handle(self, *args, **options):
profiles = UserProfile.objects.exclude(user__email = None)
header = [['username', 'realname', 'email']]
values = list(
profiles.values_list('user__username', 'real_name', 'user__email'))
data = header + values
encoded = [(
x[0],
x[1].encode('utf-8') if x[1] is not None else '',
x[2].encode('utf-8')) for x in data]
with open('emails.csv', 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerows(encoded)
|
|
e7de5db51760e7874d2462c85449b869720da935
|
tests/run_tests.py
|
tests/run_tests.py
|
"""Run all unit tests."""
import glob
import os
import sys
import unittest
def main():
test_dir = os.path.dirname(os.path.abspath(__file__))
test_files = glob.glob(os.path.join(test_dir, 'test_*.py'))
test_names = [os.path.basename(f)[:-3] for f in test_files]
sys.path.insert(0, os.path.join(test_dir, '..'))
suite = unittest.defaultTestLoader.loadTestsFromNames(test_names)
result = unittest.TextTestRunner().run(suite)
sys.exit(1 if (result.errors or result.failures) else 0)
if __name__ == '__main__':
main()
|
Add script to run all tests
|
Add script to run all tests
|
Python
|
bsd-3-clause
|
benhoyt/symplate
|
Add script to run all tests
|
"""Run all unit tests."""
import glob
import os
import sys
import unittest
def main():
test_dir = os.path.dirname(os.path.abspath(__file__))
test_files = glob.glob(os.path.join(test_dir, 'test_*.py'))
test_names = [os.path.basename(f)[:-3] for f in test_files]
sys.path.insert(0, os.path.join(test_dir, '..'))
suite = unittest.defaultTestLoader.loadTestsFromNames(test_names)
result = unittest.TextTestRunner().run(suite)
sys.exit(1 if (result.errors or result.failures) else 0)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to run all tests<commit_after>
|
"""Run all unit tests."""
import glob
import os
import sys
import unittest
def main():
test_dir = os.path.dirname(os.path.abspath(__file__))
test_files = glob.glob(os.path.join(test_dir, 'test_*.py'))
test_names = [os.path.basename(f)[:-3] for f in test_files]
sys.path.insert(0, os.path.join(test_dir, '..'))
suite = unittest.defaultTestLoader.loadTestsFromNames(test_names)
result = unittest.TextTestRunner().run(suite)
sys.exit(1 if (result.errors or result.failures) else 0)
if __name__ == '__main__':
main()
|
Add script to run all tests"""Run all unit tests."""
import glob
import os
import sys
import unittest
def main():
test_dir = os.path.dirname(os.path.abspath(__file__))
test_files = glob.glob(os.path.join(test_dir, 'test_*.py'))
test_names = [os.path.basename(f)[:-3] for f in test_files]
sys.path.insert(0, os.path.join(test_dir, '..'))
suite = unittest.defaultTestLoader.loadTestsFromNames(test_names)
result = unittest.TextTestRunner().run(suite)
sys.exit(1 if (result.errors or result.failures) else 0)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to run all tests<commit_after>"""Run all unit tests."""
import glob
import os
import sys
import unittest
def main():
test_dir = os.path.dirname(os.path.abspath(__file__))
test_files = glob.glob(os.path.join(test_dir, 'test_*.py'))
test_names = [os.path.basename(f)[:-3] for f in test_files]
sys.path.insert(0, os.path.join(test_dir, '..'))
suite = unittest.defaultTestLoader.loadTestsFromNames(test_names)
result = unittest.TextTestRunner().run(suite)
sys.exit(1 if (result.errors or result.failures) else 0)
if __name__ == '__main__':
main()
|
|
668aa9aa7adf10f19290c198cf892323309bb389
|
tests/test_init.py
|
tests/test_init.py
|
from mock import Mock
import ubersmith
def it_sets_default_request_handler(monkeypatch):
set_handler_mock = Mock()
monkeypatch.setattr(ubersmith, 'set_default_request_handler',
set_handler_mock)
ubersmith.init('X-base_url', 'X-username', 'X-password', 'X-verify')
handler = set_handler_mock.call_args[0][0]
assert handler.base_url == 'X-base_url'
assert handler.username == 'X-username'
assert handler.password == 'X-password'
assert handler.verify == 'X-verify'
|
Add coverage for init function.
|
Add coverage for init function.
|
Python
|
mit
|
hivelocity/python-ubersmith,jasonkeene/python-ubersmith,jasonkeene/python-ubersmith,hivelocity/python-ubersmith
|
Add coverage for init function.
|
from mock import Mock
import ubersmith
def it_sets_default_request_handler(monkeypatch):
set_handler_mock = Mock()
monkeypatch.setattr(ubersmith, 'set_default_request_handler',
set_handler_mock)
ubersmith.init('X-base_url', 'X-username', 'X-password', 'X-verify')
handler = set_handler_mock.call_args[0][0]
assert handler.base_url == 'X-base_url'
assert handler.username == 'X-username'
assert handler.password == 'X-password'
assert handler.verify == 'X-verify'
|
<commit_before><commit_msg>Add coverage for init function.<commit_after>
|
from mock import Mock
import ubersmith
def it_sets_default_request_handler(monkeypatch):
set_handler_mock = Mock()
monkeypatch.setattr(ubersmith, 'set_default_request_handler',
set_handler_mock)
ubersmith.init('X-base_url', 'X-username', 'X-password', 'X-verify')
handler = set_handler_mock.call_args[0][0]
assert handler.base_url == 'X-base_url'
assert handler.username == 'X-username'
assert handler.password == 'X-password'
assert handler.verify == 'X-verify'
|
Add coverage for init function.from mock import Mock
import ubersmith
def it_sets_default_request_handler(monkeypatch):
set_handler_mock = Mock()
monkeypatch.setattr(ubersmith, 'set_default_request_handler',
set_handler_mock)
ubersmith.init('X-base_url', 'X-username', 'X-password', 'X-verify')
handler = set_handler_mock.call_args[0][0]
assert handler.base_url == 'X-base_url'
assert handler.username == 'X-username'
assert handler.password == 'X-password'
assert handler.verify == 'X-verify'
|
<commit_before><commit_msg>Add coverage for init function.<commit_after>from mock import Mock
import ubersmith
def it_sets_default_request_handler(monkeypatch):
set_handler_mock = Mock()
monkeypatch.setattr(ubersmith, 'set_default_request_handler',
set_handler_mock)
ubersmith.init('X-base_url', 'X-username', 'X-password', 'X-verify')
handler = set_handler_mock.call_args[0][0]
assert handler.base_url == 'X-base_url'
assert handler.username == 'X-username'
assert handler.password == 'X-password'
assert handler.verify == 'X-verify'
|
|
205a7f2c44bf831c58bc82bbff8f332d375acc46
|
tests/unit/test_inode.py
|
tests/unit/test_inode.py
|
from __future__ import absolute_import
import pytest
import os
import sys
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
import time
prefix = '.'
for i in range(0, 3):
if os.path.isdir(os.path.join(prefix, 'pycdlib')):
sys.path.insert(0, prefix)
break
else:
prefix = '../' + prefix
import pycdlib.inode
import pycdlib.pycdlibexception
def test_inode_new_initialized_twice():
ino = pycdlib.inode.Inode()
ino.new(0, '', False, 0)
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.new(0, '', False, 0)
assert(str(excinfo.value) == 'Inode is already initialized')
def test_inode_parse_initialized_twice():
ino = pycdlib.inode.Inode()
ino.parse(0, 0, None, 0)
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.parse(0, 0, None, 0)
assert(str(excinfo.value) == 'Inode is already initialized')
def test_inode_extent_location_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.extent_location()
assert(str(excinfo.value) == 'Inode is not initialized')
def test_inode_set_extent_location_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.set_extent_location(0)
assert(str(excinfo.value) == 'Inode is not initialized')
def test_inode_get_data_length_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.get_data_length()
assert(str(excinfo.value) == 'Inode is not initialized')
def test_inode_add_boot_info_table_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.add_boot_info_table(None)
assert(str(excinfo.value) == 'Inode is not initialized')
def test_inode_update_fp_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.update_fp(None, 0)
assert(str(excinfo.value) == 'Inode is not initialized')
|
Add unit tests for inode.
|
Add unit tests for inode.
Signed-off-by: Chris Lalancette <281cd07d7578d97c83271fbbf2faddb83ab3791c@openrobotics.org>
|
Python
|
lgpl-2.1
|
clalancette/pycdlib,clalancette/pyiso
|
Add unit tests for inode.
Signed-off-by: Chris Lalancette <281cd07d7578d97c83271fbbf2faddb83ab3791c@openrobotics.org>
|
from __future__ import absolute_import
import pytest
import os
import sys
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
import time
prefix = '.'
for i in range(0, 3):
if os.path.isdir(os.path.join(prefix, 'pycdlib')):
sys.path.insert(0, prefix)
break
else:
prefix = '../' + prefix
import pycdlib.inode
import pycdlib.pycdlibexception
def test_inode_new_initialized_twice():
ino = pycdlib.inode.Inode()
ino.new(0, '', False, 0)
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.new(0, '', False, 0)
assert(str(excinfo.value) == 'Inode is already initialized')
def test_inode_parse_initialized_twice():
ino = pycdlib.inode.Inode()
ino.parse(0, 0, None, 0)
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.parse(0, 0, None, 0)
assert(str(excinfo.value) == 'Inode is already initialized')
def test_inode_extent_location_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.extent_location()
assert(str(excinfo.value) == 'Inode is not initialized')
def test_inode_set_extent_location_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.set_extent_location(0)
assert(str(excinfo.value) == 'Inode is not initialized')
def test_inode_get_data_length_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.get_data_length()
assert(str(excinfo.value) == 'Inode is not initialized')
def test_inode_add_boot_info_table_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.add_boot_info_table(None)
assert(str(excinfo.value) == 'Inode is not initialized')
def test_inode_update_fp_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.update_fp(None, 0)
assert(str(excinfo.value) == 'Inode is not initialized')
|
<commit_before><commit_msg>Add unit tests for inode.
Signed-off-by: Chris Lalancette <281cd07d7578d97c83271fbbf2faddb83ab3791c@openrobotics.org><commit_after>
|
from __future__ import absolute_import
import pytest
import os
import sys
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
import time
prefix = '.'
for i in range(0, 3):
if os.path.isdir(os.path.join(prefix, 'pycdlib')):
sys.path.insert(0, prefix)
break
else:
prefix = '../' + prefix
import pycdlib.inode
import pycdlib.pycdlibexception
def test_inode_new_initialized_twice():
ino = pycdlib.inode.Inode()
ino.new(0, '', False, 0)
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.new(0, '', False, 0)
assert(str(excinfo.value) == 'Inode is already initialized')
def test_inode_parse_initialized_twice():
ino = pycdlib.inode.Inode()
ino.parse(0, 0, None, 0)
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.parse(0, 0, None, 0)
assert(str(excinfo.value) == 'Inode is already initialized')
def test_inode_extent_location_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.extent_location()
assert(str(excinfo.value) == 'Inode is not initialized')
def test_inode_set_extent_location_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.set_extent_location(0)
assert(str(excinfo.value) == 'Inode is not initialized')
def test_inode_get_data_length_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.get_data_length()
assert(str(excinfo.value) == 'Inode is not initialized')
def test_inode_add_boot_info_table_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.add_boot_info_table(None)
assert(str(excinfo.value) == 'Inode is not initialized')
def test_inode_update_fp_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.update_fp(None, 0)
assert(str(excinfo.value) == 'Inode is not initialized')
|
Add unit tests for inode.
Signed-off-by: Chris Lalancette <281cd07d7578d97c83271fbbf2faddb83ab3791c@openrobotics.org>
from __future__ import absolute_import
import pytest
import os
import sys
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
import time
prefix = '.'
for i in range(0, 3):
if os.path.isdir(os.path.join(prefix, 'pycdlib')):
sys.path.insert(0, prefix)
break
else:
prefix = '../' + prefix
import pycdlib.inode
import pycdlib.pycdlibexception
def test_inode_new_initialized_twice():
ino = pycdlib.inode.Inode()
ino.new(0, '', False, 0)
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.new(0, '', False, 0)
assert(str(excinfo.value) == 'Inode is already initialized')
def test_inode_parse_initialized_twice():
ino = pycdlib.inode.Inode()
ino.parse(0, 0, None, 0)
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.parse(0, 0, None, 0)
assert(str(excinfo.value) == 'Inode is already initialized')
def test_inode_extent_location_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.extent_location()
assert(str(excinfo.value) == 'Inode is not initialized')
def test_inode_set_extent_location_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.set_extent_location(0)
assert(str(excinfo.value) == 'Inode is not initialized')
def test_inode_get_data_length_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.get_data_length()
assert(str(excinfo.value) == 'Inode is not initialized')
def test_inode_add_boot_info_table_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.add_boot_info_table(None)
assert(str(excinfo.value) == 'Inode is not initialized')
def test_inode_update_fp_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.update_fp(None, 0)
assert(str(excinfo.value) == 'Inode is not initialized')
|
<commit_before><commit_msg>Add unit tests for inode.
Signed-off-by: Chris Lalancette <281cd07d7578d97c83271fbbf2faddb83ab3791c@openrobotics.org><commit_after>from __future__ import absolute_import
import pytest
import os
import sys
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
import time
prefix = '.'
for i in range(0, 3):
if os.path.isdir(os.path.join(prefix, 'pycdlib')):
sys.path.insert(0, prefix)
break
else:
prefix = '../' + prefix
import pycdlib.inode
import pycdlib.pycdlibexception
def test_inode_new_initialized_twice():
ino = pycdlib.inode.Inode()
ino.new(0, '', False, 0)
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.new(0, '', False, 0)
assert(str(excinfo.value) == 'Inode is already initialized')
def test_inode_parse_initialized_twice():
ino = pycdlib.inode.Inode()
ino.parse(0, 0, None, 0)
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.parse(0, 0, None, 0)
assert(str(excinfo.value) == 'Inode is already initialized')
def test_inode_extent_location_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.extent_location()
assert(str(excinfo.value) == 'Inode is not initialized')
def test_inode_set_extent_location_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.set_extent_location(0)
assert(str(excinfo.value) == 'Inode is not initialized')
def test_inode_get_data_length_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.get_data_length()
assert(str(excinfo.value) == 'Inode is not initialized')
def test_inode_add_boot_info_table_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.add_boot_info_table(None)
assert(str(excinfo.value) == 'Inode is not initialized')
def test_inode_update_fp_not_initialized():
ino = pycdlib.inode.Inode()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
ino.update_fp(None, 0)
assert(str(excinfo.value) == 'Inode is not initialized')
|
|
ccc4e66aad6ca02ecb85d048f343b299d077ccea
|
plots/plot-jacobian-matrix.py
|
plots/plot-jacobian-matrix.py
|
import climate
import lmj.cubes
import lmj.plot
import numpy as np
@lmj.cubes.utils.pickled
def jacobian(root, pattern, frames):
trial = list(lmj.cubes.Experiment(root).trials_matching(pattern))[0]
trial.load()
return trial.jacobian(frames)
def main(root, pattern='68/*block03/*trial00', frames=10, frame=29.7): # 34.5
_, jac = jacobian(root, pattern, frames)
cols = jac.columns
n = int(np.sqrt(len(cols) / 9))
def find(g, b):
cs = [c for c in cols if '{}/'.format(g) in c and c.endswith(b)]
return jac[cs].loc[frame, :].values.reshape((n, n))
def plot(where, g, b):
ax = lmj.plot.create_axes(where, spines=False)
ax.colorbar(ax.imshow(find(g, b), vmin=-1, vmax=1, cmap='coolwarm'))
#ax.set_xlabel('{}{}'.format(g, b))
plot(331, 'x', 'x')
plot(332, 'x', 'y')
plot(333, 'x', 'z')
plot(334, 'y', 'x')
plot(335, 'y', 'y')
plot(336, 'y', 'z')
plot(337, 'z', 'x')
plot(338, 'z', 'y')
plot(339, 'z', 'z')
lmj.plot.gcf().subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0)
lmj.plot.show()
if __name__ == '__main__':
climate.call(main)
|
Add script for displaying jacobian as a matrix.
|
Add script for displaying jacobian as a matrix.
|
Python
|
mit
|
lmjohns3/cube-experiment,lmjohns3/cube-experiment,lmjohns3/cube-experiment
|
Add script for displaying jacobian as a matrix.
|
import climate
import lmj.cubes
import lmj.plot
import numpy as np
@lmj.cubes.utils.pickled
def jacobian(root, pattern, frames):
trial = list(lmj.cubes.Experiment(root).trials_matching(pattern))[0]
trial.load()
return trial.jacobian(frames)
def main(root, pattern='68/*block03/*trial00', frames=10, frame=29.7): # 34.5
_, jac = jacobian(root, pattern, frames)
cols = jac.columns
n = int(np.sqrt(len(cols) / 9))
def find(g, b):
cs = [c for c in cols if '{}/'.format(g) in c and c.endswith(b)]
return jac[cs].loc[frame, :].values.reshape((n, n))
def plot(where, g, b):
ax = lmj.plot.create_axes(where, spines=False)
ax.colorbar(ax.imshow(find(g, b), vmin=-1, vmax=1, cmap='coolwarm'))
#ax.set_xlabel('{}{}'.format(g, b))
plot(331, 'x', 'x')
plot(332, 'x', 'y')
plot(333, 'x', 'z')
plot(334, 'y', 'x')
plot(335, 'y', 'y')
plot(336, 'y', 'z')
plot(337, 'z', 'x')
plot(338, 'z', 'y')
plot(339, 'z', 'z')
lmj.plot.gcf().subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0)
lmj.plot.show()
if __name__ == '__main__':
climate.call(main)
|
<commit_before><commit_msg>Add script for displaying jacobian as a matrix.<commit_after>
|
import climate
import lmj.cubes
import lmj.plot
import numpy as np
@lmj.cubes.utils.pickled
def jacobian(root, pattern, frames):
trial = list(lmj.cubes.Experiment(root).trials_matching(pattern))[0]
trial.load()
return trial.jacobian(frames)
def main(root, pattern='68/*block03/*trial00', frames=10, frame=29.7): # 34.5
_, jac = jacobian(root, pattern, frames)
cols = jac.columns
n = int(np.sqrt(len(cols) / 9))
def find(g, b):
cs = [c for c in cols if '{}/'.format(g) in c and c.endswith(b)]
return jac[cs].loc[frame, :].values.reshape((n, n))
def plot(where, g, b):
ax = lmj.plot.create_axes(where, spines=False)
ax.colorbar(ax.imshow(find(g, b), vmin=-1, vmax=1, cmap='coolwarm'))
#ax.set_xlabel('{}{}'.format(g, b))
plot(331, 'x', 'x')
plot(332, 'x', 'y')
plot(333, 'x', 'z')
plot(334, 'y', 'x')
plot(335, 'y', 'y')
plot(336, 'y', 'z')
plot(337, 'z', 'x')
plot(338, 'z', 'y')
plot(339, 'z', 'z')
lmj.plot.gcf().subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0)
lmj.plot.show()
if __name__ == '__main__':
climate.call(main)
|
Add script for displaying jacobian as a matrix.
import climate
import lmj.cubes
import lmj.plot
import numpy as np
@lmj.cubes.utils.pickled
def jacobian(root, pattern, frames):
trial = list(lmj.cubes.Experiment(root).trials_matching(pattern))[0]
trial.load()
return trial.jacobian(frames)
def main(root, pattern='68/*block03/*trial00', frames=10, frame=29.7): # 34.5
_, jac = jacobian(root, pattern, frames)
cols = jac.columns
n = int(np.sqrt(len(cols) / 9))
def find(g, b):
cs = [c for c in cols if '{}/'.format(g) in c and c.endswith(b)]
return jac[cs].loc[frame, :].values.reshape((n, n))
def plot(where, g, b):
ax = lmj.plot.create_axes(where, spines=False)
ax.colorbar(ax.imshow(find(g, b), vmin=-1, vmax=1, cmap='coolwarm'))
#ax.set_xlabel('{}{}'.format(g, b))
plot(331, 'x', 'x')
plot(332, 'x', 'y')
plot(333, 'x', 'z')
plot(334, 'y', 'x')
plot(335, 'y', 'y')
plot(336, 'y', 'z')
plot(337, 'z', 'x')
plot(338, 'z', 'y')
plot(339, 'z', 'z')
lmj.plot.gcf().subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0)
lmj.plot.show()
if __name__ == '__main__':
climate.call(main)
|
<commit_before><commit_msg>Add script for displaying jacobian as a matrix.<commit_after>import climate
import lmj.cubes
import lmj.plot
import numpy as np
@lmj.cubes.utils.pickled
def jacobian(root, pattern, frames):
trial = list(lmj.cubes.Experiment(root).trials_matching(pattern))[0]
trial.load()
return trial.jacobian(frames)
def main(root, pattern='68/*block03/*trial00', frames=10, frame=29.7): # 34.5
_, jac = jacobian(root, pattern, frames)
cols = jac.columns
n = int(np.sqrt(len(cols) / 9))
def find(g, b):
cs = [c for c in cols if '{}/'.format(g) in c and c.endswith(b)]
return jac[cs].loc[frame, :].values.reshape((n, n))
def plot(where, g, b):
ax = lmj.plot.create_axes(where, spines=False)
ax.colorbar(ax.imshow(find(g, b), vmin=-1, vmax=1, cmap='coolwarm'))
#ax.set_xlabel('{}{}'.format(g, b))
plot(331, 'x', 'x')
plot(332, 'x', 'y')
plot(333, 'x', 'z')
plot(334, 'y', 'x')
plot(335, 'y', 'y')
plot(336, 'y', 'z')
plot(337, 'z', 'x')
plot(338, 'z', 'y')
plot(339, 'z', 'z')
lmj.plot.gcf().subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0)
lmj.plot.show()
if __name__ == '__main__':
climate.call(main)
|
|
4129e3325fddb10c4edc0ab70c25bff4de75ed32
|
setup.py
|
setup.py
|
try:
from setuptools import setup
kw = {'entry_points':
"""[console_scripts]\nglue = glue:main\n""",
'zip_safe': False}
except ImportError:
from distutils.core import setup
kw = {'scripts': ['glue.py']}
setup(
name='glue',
version='0.2.6.1',
url='http://github.com/jorgebastida/glue',
license='BSD',
author='Jorge Bastida',
author_email='me@jorgebastida.com',
description='Glue is a simple command line tool to generate CSS sprites.',
long_description=('Glue is a simple command line tool to generate CSS '
'sprites using any kind of source images like '
'PNG, JPEG or GIF. Glue will generate a unique PNG '
'file containing every source image and a CSS file '
'including the necessary CSS classes to use the '
'sprite.'),
py_modules=['glue'],
platforms='any',
install_requires=[
'Pillow==1.7.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
**kw
)
|
try:
from setuptools import setup
kw = {'entry_points':
"""[console_scripts]\nglue = glue:main\n""",
'zip_safe': False}
except ImportError:
from distutils.core import setup
kw = {'scripts': ['glue.py']}
setup(
name='glue',
version='0.2.6.1',
url='http://github.com/jorgebastida/glue',
license='BSD',
author='Jorge Bastida',
author_email='me@jorgebastida.com',
description='Glue is a simple command line tool to generate CSS sprites.',
long_description=('Glue is a simple command line tool to generate CSS '
'sprites using any kind of source images like '
'PNG, JPEG or GIF. Glue will generate a unique PNG '
'file containing every source image and a CSS file '
'including the necessary CSS classes to use the '
'sprite.'),
py_modules=['glue'],
platforms='any',
install_requires=[
'Pillow==1.7.7'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
**kw
)
|
Move Pillow to the last version
|
Move Pillow to the last version
|
Python
|
bsd-3-clause
|
WillsB3/glue,beni55/glue,dext0r/glue,jorgebastida/glue,dext0r/glue,zhiqinyigu/glue,WillsB3/glue,zhiqinyigu/glue,beni55/glue,jorgebastida/glue
|
try:
from setuptools import setup
kw = {'entry_points':
"""[console_scripts]\nglue = glue:main\n""",
'zip_safe': False}
except ImportError:
from distutils.core import setup
kw = {'scripts': ['glue.py']}
setup(
name='glue',
version='0.2.6.1',
url='http://github.com/jorgebastida/glue',
license='BSD',
author='Jorge Bastida',
author_email='me@jorgebastida.com',
description='Glue is a simple command line tool to generate CSS sprites.',
long_description=('Glue is a simple command line tool to generate CSS '
'sprites using any kind of source images like '
'PNG, JPEG or GIF. Glue will generate a unique PNG '
'file containing every source image and a CSS file '
'including the necessary CSS classes to use the '
'sprite.'),
py_modules=['glue'],
platforms='any',
install_requires=[
'Pillow==1.7.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
**kw
)
Move Pillow to the last version
|
try:
from setuptools import setup
kw = {'entry_points':
"""[console_scripts]\nglue = glue:main\n""",
'zip_safe': False}
except ImportError:
from distutils.core import setup
kw = {'scripts': ['glue.py']}
setup(
name='glue',
version='0.2.6.1',
url='http://github.com/jorgebastida/glue',
license='BSD',
author='Jorge Bastida',
author_email='me@jorgebastida.com',
description='Glue is a simple command line tool to generate CSS sprites.',
long_description=('Glue is a simple command line tool to generate CSS '
'sprites using any kind of source images like '
'PNG, JPEG or GIF. Glue will generate a unique PNG '
'file containing every source image and a CSS file '
'including the necessary CSS classes to use the '
'sprite.'),
py_modules=['glue'],
platforms='any',
install_requires=[
'Pillow==1.7.7'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
**kw
)
|
<commit_before>try:
from setuptools import setup
kw = {'entry_points':
"""[console_scripts]\nglue = glue:main\n""",
'zip_safe': False}
except ImportError:
from distutils.core import setup
kw = {'scripts': ['glue.py']}
setup(
name='glue',
version='0.2.6.1',
url='http://github.com/jorgebastida/glue',
license='BSD',
author='Jorge Bastida',
author_email='me@jorgebastida.com',
description='Glue is a simple command line tool to generate CSS sprites.',
long_description=('Glue is a simple command line tool to generate CSS '
'sprites using any kind of source images like '
'PNG, JPEG or GIF. Glue will generate a unique PNG '
'file containing every source image and a CSS file '
'including the necessary CSS classes to use the '
'sprite.'),
py_modules=['glue'],
platforms='any',
install_requires=[
'Pillow==1.7.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
**kw
)
<commit_msg>Move Pillow to the last version<commit_after>
|
try:
from setuptools import setup
kw = {'entry_points':
"""[console_scripts]\nglue = glue:main\n""",
'zip_safe': False}
except ImportError:
from distutils.core import setup
kw = {'scripts': ['glue.py']}
setup(
name='glue',
version='0.2.6.1',
url='http://github.com/jorgebastida/glue',
license='BSD',
author='Jorge Bastida',
author_email='me@jorgebastida.com',
description='Glue is a simple command line tool to generate CSS sprites.',
long_description=('Glue is a simple command line tool to generate CSS '
'sprites using any kind of source images like '
'PNG, JPEG or GIF. Glue will generate a unique PNG '
'file containing every source image and a CSS file '
'including the necessary CSS classes to use the '
'sprite.'),
py_modules=['glue'],
platforms='any',
install_requires=[
'Pillow==1.7.7'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
**kw
)
|
try:
from setuptools import setup
kw = {'entry_points':
"""[console_scripts]\nglue = glue:main\n""",
'zip_safe': False}
except ImportError:
from distutils.core import setup
kw = {'scripts': ['glue.py']}
setup(
name='glue',
version='0.2.6.1',
url='http://github.com/jorgebastida/glue',
license='BSD',
author='Jorge Bastida',
author_email='me@jorgebastida.com',
description='Glue is a simple command line tool to generate CSS sprites.',
long_description=('Glue is a simple command line tool to generate CSS '
'sprites using any kind of source images like '
'PNG, JPEG or GIF. Glue will generate a unique PNG '
'file containing every source image and a CSS file '
'including the necessary CSS classes to use the '
'sprite.'),
py_modules=['glue'],
platforms='any',
install_requires=[
'Pillow==1.7.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
**kw
)
Move Pillow to the last version
try:
from setuptools import setup
kw = {'entry_points':
"""[console_scripts]\nglue = glue:main\n""",
'zip_safe': False}
except ImportError:
from distutils.core import setup
kw = {'scripts': ['glue.py']}
setup(
name='glue',
version='0.2.6.1',
url='http://github.com/jorgebastida/glue',
license='BSD',
author='Jorge Bastida',
author_email='me@jorgebastida.com',
description='Glue is a simple command line tool to generate CSS sprites.',
long_description=('Glue is a simple command line tool to generate CSS '
'sprites using any kind of source images like '
'PNG, JPEG or GIF. Glue will generate a unique PNG '
'file containing every source image and a CSS file '
'including the necessary CSS classes to use the '
'sprite.'),
py_modules=['glue'],
platforms='any',
install_requires=[
'Pillow==1.7.7'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
**kw
)
|
<commit_before>try:
from setuptools import setup
kw = {'entry_points':
"""[console_scripts]\nglue = glue:main\n""",
'zip_safe': False}
except ImportError:
from distutils.core import setup
kw = {'scripts': ['glue.py']}
setup(
name='glue',
version='0.2.6.1',
url='http://github.com/jorgebastida/glue',
license='BSD',
author='Jorge Bastida',
author_email='me@jorgebastida.com',
description='Glue is a simple command line tool to generate CSS sprites.',
long_description=('Glue is a simple command line tool to generate CSS '
'sprites using any kind of source images like '
'PNG, JPEG or GIF. Glue will generate a unique PNG '
'file containing every source image and a CSS file '
'including the necessary CSS classes to use the '
'sprite.'),
py_modules=['glue'],
platforms='any',
install_requires=[
'Pillow==1.7.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
**kw
)
<commit_msg>Move Pillow to the last version<commit_after>try:
from setuptools import setup
kw = {'entry_points':
"""[console_scripts]\nglue = glue:main\n""",
'zip_safe': False}
except ImportError:
from distutils.core import setup
kw = {'scripts': ['glue.py']}
setup(
name='glue',
version='0.2.6.1',
url='http://github.com/jorgebastida/glue',
license='BSD',
author='Jorge Bastida',
author_email='me@jorgebastida.com',
description='Glue is a simple command line tool to generate CSS sprites.',
long_description=('Glue is a simple command line tool to generate CSS '
'sprites using any kind of source images like '
'PNG, JPEG or GIF. Glue will generate a unique PNG '
'file containing every source image and a CSS file '
'including the necessary CSS classes to use the '
'sprite.'),
py_modules=['glue'],
platforms='any',
install_requires=[
'Pillow==1.7.7'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
**kw
)
|
b2f94ebfb2c2549322b2ffb1da91cdba361461f1
|
mezzanine/core/sitemaps.py
|
mezzanine/core/sitemaps.py
|
from django.contrib.sitemaps import Sitemap
from django.db.models import get_models
from mezzanine.conf import settings
from mezzanine.core.models import Displayable
blog_installed = "mezzanine.blog" in settings.INSTALLED_APPS
if blog_installed:
from mezzanine.blog.models import BlogPost
class DisplayableSitemap(Sitemap):
"""
Sitemap class for Django's sitemaps framework that returns
all published items for models that subclass ``Displayable``.
"""
def items(self):
"""
Return all published items for models that subclass
``Displayable``, excluding those that point to external sites.
"""
items = {}
for model in get_models():
if issubclass(model, Displayable):
for item in (model.objects.published()
.exclude(slug__startswith="http://")
.exclude(slug__startswith="https://")):
items[item.get_absolute_url()] = item
return items.values()
def lastmod(self, obj):
if blog_installed and isinstance(obj, BlogPost):
return obj.publish_date
|
from django.contrib.sitemaps import Sitemap
from django.db.models import get_models
from mezzanine.conf import settings
from mezzanine.core.models import Displayable
from mezzanine.utils.urls import home_slug
blog_installed = "mezzanine.blog" in settings.INSTALLED_APPS
if blog_installed:
from mezzanine.blog.models import BlogPost
class DisplayableSitemap(Sitemap):
"""
Sitemap class for Django's sitemaps framework that returns
all published items for models that subclass ``Displayable``.
"""
def items(self):
"""
Return all published items for models that subclass
``Displayable``, excluding those that point to external sites.
"""
# Fake homepage object.
home = Displayable()
setattr(home, "get_absolute_url", home_slug)
items = {home.get_absolute_url(): home}
for model in get_models():
if issubclass(model, Displayable):
for item in (model.objects.published()
.exclude(slug__startswith="http://")
.exclude(slug__startswith="https://")):
items[item.get_absolute_url()] = item
return items.values()
def lastmod(self, obj):
if blog_installed and isinstance(obj, BlogPost):
return obj.publish_date
|
Add homepage url to sitemap.xml
|
Add homepage url to sitemap.xml
|
Python
|
bsd-2-clause
|
frankier/mezzanine,tuxinhang1989/mezzanine,Cajoline/mezzanine,sjuxax/mezzanine,orlenko/plei,SoLoHiC/mezzanine,jerivas/mezzanine,promil23/mezzanine,PegasusWang/mezzanine,dsanders11/mezzanine,readevalprint/mezzanine,joshcartme/mezzanine,sjdines/mezzanine,SoLoHiC/mezzanine,Skytorn86/mezzanine,jerivas/mezzanine,dsanders11/mezzanine,nikolas/mezzanine,sjdines/mezzanine,ryneeverett/mezzanine,saintbird/mezzanine,sjdines/mezzanine,eino-makitalo/mezzanine,Cicero-Zhao/mezzanine,damnfine/mezzanine,spookylukey/mezzanine,sjuxax/mezzanine,biomassives/mezzanine,theclanks/mezzanine,scarcry/snm-mezzanine,christianwgd/mezzanine,dsanders11/mezzanine,dekomote/mezzanine-modeltranslation-backport,molokov/mezzanine,webounty/mezzanine,jjz/mezzanine,damnfine/mezzanine,stephenmcd/mezzanine,Cajoline/mezzanine,scarcry/snm-mezzanine,frankier/mezzanine,ZeroXn/mezzanine,orlenko/sfpirg,wbtuomela/mezzanine,SoLoHiC/mezzanine,cccs-web/mezzanine,viaregio/mezzanine,orlenko/plei,gradel/mezzanine,readevalprint/mezzanine,nikolas/mezzanine,wbtuomela/mezzanine,PegasusWang/mezzanine,dovydas/mezzanine,geodesign/mezzanine,Kniyl/mezzanine,agepoly/mezzanine,spookylukey/mezzanine,AlexHill/mezzanine,ryneeverett/mezzanine,frankchin/mezzanine,wrwrwr/mezzanine,wyzex/mezzanine,geodesign/mezzanine,stbarnabas/mezzanine,agepoly/mezzanine,gradel/mezzanine,gradel/mezzanine,viaregio/mezzanine,emile2016/mezzanine,ZeroXn/mezzanine,dovydas/mezzanine,biomassives/mezzanine,Cajoline/mezzanine,geodesign/mezzanine,dustinrb/mezzanine,wyzex/mezzanine,fusionbox/mezzanine,sjuxax/mezzanine,Skytorn86/mezzanine,promil23/mezzanine,mush42/mezzanine,theclanks/mezzanine,cccs-web/mezzanine,spookylukey/mezzanine,douglaskastle/mezzanine,jjz/mezzanine,saintbird/mezzanine,Kniyl/mezzanine,webounty/mezzanine,adrian-the-git/mezzanine,jerivas/mezzanine,molokov/mezzanine,biomassives/mezzanine,frankier/mezzanine,christianwgd/mezzanine,emile2016/mezzanine,Kniyl/mezzanine,molokov/mezzanine,batpad/mezzanine,wbtuomela/mezzanine,joshcartme/mezzanine,theclanks/mezzanine,promil23/mezzanine,industrydive/mezzanine,douglaskastle/mezzanine,dovydas/mezzanine,PegasusWang/mezzanine,tuxinhang1989/mezzanine,christianwgd/mezzanine,AlexHill/mezzanine,saintbird/mezzanine,frankchin/mezzanine,viaregio/mezzanine,damnfine/mezzanine,dekomote/mezzanine-modeltranslation-backport,orlenko/sfpirg,adrian-the-git/mezzanine,industrydive/mezzanine,adrian-the-git/mezzanine,ryneeverett/mezzanine,scarcry/snm-mezzanine,dustinrb/mezzanine,dustinrb/mezzanine,tuxinhang1989/mezzanine,industrydive/mezzanine,stephenmcd/mezzanine,Skytorn86/mezzanine,frankchin/mezzanine,emile2016/mezzanine,readevalprint/mezzanine,vladir/mezzanine,fusionbox/mezzanine,joshcartme/mezzanine,Cicero-Zhao/mezzanine,stbarnabas/mezzanine,wyzex/mezzanine,jjz/mezzanine,ZeroXn/mezzanine,vladir/mezzanine,mush42/mezzanine,webounty/mezzanine,orlenko/plei,orlenko/sfpirg,douglaskastle/mezzanine,stephenmcd/mezzanine,dekomote/mezzanine-modeltranslation-backport,wrwrwr/mezzanine,eino-makitalo/mezzanine,mush42/mezzanine,eino-makitalo/mezzanine,agepoly/mezzanine,batpad/mezzanine,nikolas/mezzanine,vladir/mezzanine
|
from django.contrib.sitemaps import Sitemap
from django.db.models import get_models
from mezzanine.conf import settings
from mezzanine.core.models import Displayable
blog_installed = "mezzanine.blog" in settings.INSTALLED_APPS
if blog_installed:
from mezzanine.blog.models import BlogPost
class DisplayableSitemap(Sitemap):
"""
Sitemap class for Django's sitemaps framework that returns
all published items for models that subclass ``Displayable``.
"""
def items(self):
"""
Return all published items for models that subclass
``Displayable``, excluding those that point to external sites.
"""
items = {}
for model in get_models():
if issubclass(model, Displayable):
for item in (model.objects.published()
.exclude(slug__startswith="http://")
.exclude(slug__startswith="https://")):
items[item.get_absolute_url()] = item
return items.values()
def lastmod(self, obj):
if blog_installed and isinstance(obj, BlogPost):
return obj.publish_date
Add homepage url to sitemap.xml
|
from django.contrib.sitemaps import Sitemap
from django.db.models import get_models
from mezzanine.conf import settings
from mezzanine.core.models import Displayable
from mezzanine.utils.urls import home_slug
blog_installed = "mezzanine.blog" in settings.INSTALLED_APPS
if blog_installed:
from mezzanine.blog.models import BlogPost
class DisplayableSitemap(Sitemap):
"""
Sitemap class for Django's sitemaps framework that returns
all published items for models that subclass ``Displayable``.
"""
def items(self):
"""
Return all published items for models that subclass
``Displayable``, excluding those that point to external sites.
"""
# Fake homepage object.
home = Displayable()
setattr(home, "get_absolute_url", home_slug)
items = {home.get_absolute_url(): home}
for model in get_models():
if issubclass(model, Displayable):
for item in (model.objects.published()
.exclude(slug__startswith="http://")
.exclude(slug__startswith="https://")):
items[item.get_absolute_url()] = item
return items.values()
def lastmod(self, obj):
if blog_installed and isinstance(obj, BlogPost):
return obj.publish_date
|
<commit_before>
from django.contrib.sitemaps import Sitemap
from django.db.models import get_models
from mezzanine.conf import settings
from mezzanine.core.models import Displayable
blog_installed = "mezzanine.blog" in settings.INSTALLED_APPS
if blog_installed:
from mezzanine.blog.models import BlogPost
class DisplayableSitemap(Sitemap):
"""
Sitemap class for Django's sitemaps framework that returns
all published items for models that subclass ``Displayable``.
"""
def items(self):
"""
Return all published items for models that subclass
``Displayable``, excluding those that point to external sites.
"""
items = {}
for model in get_models():
if issubclass(model, Displayable):
for item in (model.objects.published()
.exclude(slug__startswith="http://")
.exclude(slug__startswith="https://")):
items[item.get_absolute_url()] = item
return items.values()
def lastmod(self, obj):
if blog_installed and isinstance(obj, BlogPost):
return obj.publish_date
<commit_msg>Add homepage url to sitemap.xml<commit_after>
|
from django.contrib.sitemaps import Sitemap
from django.db.models import get_models
from mezzanine.conf import settings
from mezzanine.core.models import Displayable
from mezzanine.utils.urls import home_slug
blog_installed = "mezzanine.blog" in settings.INSTALLED_APPS
if blog_installed:
from mezzanine.blog.models import BlogPost
class DisplayableSitemap(Sitemap):
"""
Sitemap class for Django's sitemaps framework that returns
all published items for models that subclass ``Displayable``.
"""
def items(self):
"""
Return all published items for models that subclass
``Displayable``, excluding those that point to external sites.
"""
# Fake homepage object.
home = Displayable()
setattr(home, "get_absolute_url", home_slug)
items = {home.get_absolute_url(): home}
for model in get_models():
if issubclass(model, Displayable):
for item in (model.objects.published()
.exclude(slug__startswith="http://")
.exclude(slug__startswith="https://")):
items[item.get_absolute_url()] = item
return items.values()
def lastmod(self, obj):
if blog_installed and isinstance(obj, BlogPost):
return obj.publish_date
|
from django.contrib.sitemaps import Sitemap
from django.db.models import get_models
from mezzanine.conf import settings
from mezzanine.core.models import Displayable
blog_installed = "mezzanine.blog" in settings.INSTALLED_APPS
if blog_installed:
from mezzanine.blog.models import BlogPost
class DisplayableSitemap(Sitemap):
"""
Sitemap class for Django's sitemaps framework that returns
all published items for models that subclass ``Displayable``.
"""
def items(self):
"""
Return all published items for models that subclass
``Displayable``, excluding those that point to external sites.
"""
items = {}
for model in get_models():
if issubclass(model, Displayable):
for item in (model.objects.published()
.exclude(slug__startswith="http://")
.exclude(slug__startswith="https://")):
items[item.get_absolute_url()] = item
return items.values()
def lastmod(self, obj):
if blog_installed and isinstance(obj, BlogPost):
return obj.publish_date
Add homepage url to sitemap.xml
from django.contrib.sitemaps import Sitemap
from django.db.models import get_models
from mezzanine.conf import settings
from mezzanine.core.models import Displayable
from mezzanine.utils.urls import home_slug
blog_installed = "mezzanine.blog" in settings.INSTALLED_APPS
if blog_installed:
from mezzanine.blog.models import BlogPost
class DisplayableSitemap(Sitemap):
"""
Sitemap class for Django's sitemaps framework that returns
all published items for models that subclass ``Displayable``.
"""
def items(self):
"""
Return all published items for models that subclass
``Displayable``, excluding those that point to external sites.
"""
# Fake homepage object.
home = Displayable()
setattr(home, "get_absolute_url", home_slug)
items = {home.get_absolute_url(): home}
for model in get_models():
if issubclass(model, Displayable):
for item in (model.objects.published()
.exclude(slug__startswith="http://")
.exclude(slug__startswith="https://")):
items[item.get_absolute_url()] = item
return items.values()
def lastmod(self, obj):
if blog_installed and isinstance(obj, BlogPost):
return obj.publish_date
|
<commit_before>
from django.contrib.sitemaps import Sitemap
from django.db.models import get_models
from mezzanine.conf import settings
from mezzanine.core.models import Displayable
blog_installed = "mezzanine.blog" in settings.INSTALLED_APPS
if blog_installed:
from mezzanine.blog.models import BlogPost
class DisplayableSitemap(Sitemap):
"""
Sitemap class for Django's sitemaps framework that returns
all published items for models that subclass ``Displayable``.
"""
def items(self):
"""
Return all published items for models that subclass
``Displayable``, excluding those that point to external sites.
"""
items = {}
for model in get_models():
if issubclass(model, Displayable):
for item in (model.objects.published()
.exclude(slug__startswith="http://")
.exclude(slug__startswith="https://")):
items[item.get_absolute_url()] = item
return items.values()
def lastmod(self, obj):
if blog_installed and isinstance(obj, BlogPost):
return obj.publish_date
<commit_msg>Add homepage url to sitemap.xml<commit_after>
from django.contrib.sitemaps import Sitemap
from django.db.models import get_models
from mezzanine.conf import settings
from mezzanine.core.models import Displayable
from mezzanine.utils.urls import home_slug
blog_installed = "mezzanine.blog" in settings.INSTALLED_APPS
if blog_installed:
from mezzanine.blog.models import BlogPost
class DisplayableSitemap(Sitemap):
"""
Sitemap class for Django's sitemaps framework that returns
all published items for models that subclass ``Displayable``.
"""
def items(self):
"""
Return all published items for models that subclass
``Displayable``, excluding those that point to external sites.
"""
# Fake homepage object.
home = Displayable()
setattr(home, "get_absolute_url", home_slug)
items = {home.get_absolute_url(): home}
for model in get_models():
if issubclass(model, Displayable):
for item in (model.objects.published()
.exclude(slug__startswith="http://")
.exclude(slug__startswith="https://")):
items[item.get_absolute_url()] = item
return items.values()
def lastmod(self, obj):
if blog_installed and isinstance(obj, BlogPost):
return obj.publish_date
|
f75a0938f34912d47d844da467931e9a18a47b9f
|
setup.py
|
setup.py
|
from setuptools import setup
setup(
name="funsize",
version="0.42",
description="Funsize Scheduler",
author="Mozilla Release Engineering",
packages=["funsize"],
include_package_data=True,
# Not zip safe because we have data files in the package
zip_safe=False,
entry_points={
"console_scripts": [
"funsize-scheduler = funsize.scheduler:main",
],
},
install_requires=[
"amqp",
"anyjson",
"argparse",
"cffi",
# PGPy depends on this _specific_ version of cryptography
"cryptography==0.6",
"enum34",
"kombu",
"more_itertools",
"PGPy",
"pycparser",
"PyHawk-with-a-single-extra-commit",
"Jinja2",
"PyYAML",
"redo",
# Because taskcluster hard pins this version...
"requests==2.4.3",
"singledispatch",
"six",
"taskcluster>=0.0.24",
"wsgiref",
],
tests_require=[
'hypothesis',
'pytest',
'mock',
],
)
|
from setuptools import setup
setup(
name="funsize",
version="0.42",
description="Funsize Scheduler",
author="Mozilla Release Engineering",
packages=["funsize"],
include_package_data=True,
# Not zip safe because we have data files in the package
zip_safe=False,
entry_points={
"console_scripts": [
"funsize-scheduler = funsize.scheduler:main",
],
},
install_requires=[
"amqp",
"anyjson",
"argparse",
"cffi",
# PGPy depends on this _specific_ version of cryptography
"cryptography==0.6",
"enum34",
"kombu",
"more_itertools",
"PGPy",
"pycparser",
"PyHawk-with-a-single-extra-commit",
"Jinja2",
"PyYAML",
"redo",
# Because taskcluster hard pins this version...
"requests==2.4.3",
"singledispatch",
"six",
"taskcluster>=0.0.26",
"wsgiref",
],
tests_require=[
'hypothesis',
'pytest',
'mock',
],
)
|
Upgrade taskcluster client for nice slugids
|
Upgrade taskcluster client for nice slugids
This fixes e.g. this failure:
* https://tools.taskcluster.net/task-inspector/#2AAxnGTzSeGLTX_Hwp_PVg/
due to the TaskGroupId starting with a '-', by upgrading taskcluster
client. From version 0.0.26 of python taskcluster client onwards, 'nice'
slugs are returned that start with [A-Za-f].
|
Python
|
mpl-2.0
|
mozilla/funsize,rail/funsize,rail/funsize,mozilla/funsize
|
from setuptools import setup
setup(
name="funsize",
version="0.42",
description="Funsize Scheduler",
author="Mozilla Release Engineering",
packages=["funsize"],
include_package_data=True,
# Not zip safe because we have data files in the package
zip_safe=False,
entry_points={
"console_scripts": [
"funsize-scheduler = funsize.scheduler:main",
],
},
install_requires=[
"amqp",
"anyjson",
"argparse",
"cffi",
# PGPy depends on this _specific_ version of cryptography
"cryptography==0.6",
"enum34",
"kombu",
"more_itertools",
"PGPy",
"pycparser",
"PyHawk-with-a-single-extra-commit",
"Jinja2",
"PyYAML",
"redo",
# Because taskcluster hard pins this version...
"requests==2.4.3",
"singledispatch",
"six",
"taskcluster>=0.0.24",
"wsgiref",
],
tests_require=[
'hypothesis',
'pytest',
'mock',
],
)
Upgrade taskcluster client for nice slugids
This fixes e.g. this failure:
* https://tools.taskcluster.net/task-inspector/#2AAxnGTzSeGLTX_Hwp_PVg/
due to the TaskGroupId starting with a '-', by upgrading taskcluster
client. From version 0.0.26 of python taskcluster client onwards, 'nice'
slugs are returned that start with [A-Za-f].
|
from setuptools import setup
setup(
name="funsize",
version="0.42",
description="Funsize Scheduler",
author="Mozilla Release Engineering",
packages=["funsize"],
include_package_data=True,
# Not zip safe because we have data files in the package
zip_safe=False,
entry_points={
"console_scripts": [
"funsize-scheduler = funsize.scheduler:main",
],
},
install_requires=[
"amqp",
"anyjson",
"argparse",
"cffi",
# PGPy depends on this _specific_ version of cryptography
"cryptography==0.6",
"enum34",
"kombu",
"more_itertools",
"PGPy",
"pycparser",
"PyHawk-with-a-single-extra-commit",
"Jinja2",
"PyYAML",
"redo",
# Because taskcluster hard pins this version...
"requests==2.4.3",
"singledispatch",
"six",
"taskcluster>=0.0.26",
"wsgiref",
],
tests_require=[
'hypothesis',
'pytest',
'mock',
],
)
|
<commit_before>from setuptools import setup
setup(
name="funsize",
version="0.42",
description="Funsize Scheduler",
author="Mozilla Release Engineering",
packages=["funsize"],
include_package_data=True,
# Not zip safe because we have data files in the package
zip_safe=False,
entry_points={
"console_scripts": [
"funsize-scheduler = funsize.scheduler:main",
],
},
install_requires=[
"amqp",
"anyjson",
"argparse",
"cffi",
# PGPy depends on this _specific_ version of cryptography
"cryptography==0.6",
"enum34",
"kombu",
"more_itertools",
"PGPy",
"pycparser",
"PyHawk-with-a-single-extra-commit",
"Jinja2",
"PyYAML",
"redo",
# Because taskcluster hard pins this version...
"requests==2.4.3",
"singledispatch",
"six",
"taskcluster>=0.0.24",
"wsgiref",
],
tests_require=[
'hypothesis',
'pytest',
'mock',
],
)
<commit_msg>Upgrade taskcluster client for nice slugids
This fixes e.g. this failure:
* https://tools.taskcluster.net/task-inspector/#2AAxnGTzSeGLTX_Hwp_PVg/
due to the TaskGroupId starting with a '-', by upgrading taskcluster
client. From version 0.0.26 of python taskcluster client onwards, 'nice'
slugs are returned that start with [A-Za-f].<commit_after>
|
from setuptools import setup
setup(
name="funsize",
version="0.42",
description="Funsize Scheduler",
author="Mozilla Release Engineering",
packages=["funsize"],
include_package_data=True,
# Not zip safe because we have data files in the package
zip_safe=False,
entry_points={
"console_scripts": [
"funsize-scheduler = funsize.scheduler:main",
],
},
install_requires=[
"amqp",
"anyjson",
"argparse",
"cffi",
# PGPy depends on this _specific_ version of cryptography
"cryptography==0.6",
"enum34",
"kombu",
"more_itertools",
"PGPy",
"pycparser",
"PyHawk-with-a-single-extra-commit",
"Jinja2",
"PyYAML",
"redo",
# Because taskcluster hard pins this version...
"requests==2.4.3",
"singledispatch",
"six",
"taskcluster>=0.0.26",
"wsgiref",
],
tests_require=[
'hypothesis',
'pytest',
'mock',
],
)
|
from setuptools import setup
setup(
name="funsize",
version="0.42",
description="Funsize Scheduler",
author="Mozilla Release Engineering",
packages=["funsize"],
include_package_data=True,
# Not zip safe because we have data files in the package
zip_safe=False,
entry_points={
"console_scripts": [
"funsize-scheduler = funsize.scheduler:main",
],
},
install_requires=[
"amqp",
"anyjson",
"argparse",
"cffi",
# PGPy depends on this _specific_ version of cryptography
"cryptography==0.6",
"enum34",
"kombu",
"more_itertools",
"PGPy",
"pycparser",
"PyHawk-with-a-single-extra-commit",
"Jinja2",
"PyYAML",
"redo",
# Because taskcluster hard pins this version...
"requests==2.4.3",
"singledispatch",
"six",
"taskcluster>=0.0.24",
"wsgiref",
],
tests_require=[
'hypothesis',
'pytest',
'mock',
],
)
Upgrade taskcluster client for nice slugids
This fixes e.g. this failure:
* https://tools.taskcluster.net/task-inspector/#2AAxnGTzSeGLTX_Hwp_PVg/
due to the TaskGroupId starting with a '-', by upgrading taskcluster
client. From version 0.0.26 of python taskcluster client onwards, 'nice'
slugs are returned that start with [A-Za-f].
from setuptools import setup
setup(
name="funsize",
version="0.42",
description="Funsize Scheduler",
author="Mozilla Release Engineering",
packages=["funsize"],
include_package_data=True,
# Not zip safe because we have data files in the package
zip_safe=False,
entry_points={
"console_scripts": [
"funsize-scheduler = funsize.scheduler:main",
],
},
install_requires=[
"amqp",
"anyjson",
"argparse",
"cffi",
# PGPy depends on this _specific_ version of cryptography
"cryptography==0.6",
"enum34",
"kombu",
"more_itertools",
"PGPy",
"pycparser",
"PyHawk-with-a-single-extra-commit",
"Jinja2",
"PyYAML",
"redo",
# Because taskcluster hard pins this version...
"requests==2.4.3",
"singledispatch",
"six",
"taskcluster>=0.0.26",
"wsgiref",
],
tests_require=[
'hypothesis',
'pytest',
'mock',
],
)
|
<commit_before>from setuptools import setup
setup(
name="funsize",
version="0.42",
description="Funsize Scheduler",
author="Mozilla Release Engineering",
packages=["funsize"],
include_package_data=True,
# Not zip safe because we have data files in the package
zip_safe=False,
entry_points={
"console_scripts": [
"funsize-scheduler = funsize.scheduler:main",
],
},
install_requires=[
"amqp",
"anyjson",
"argparse",
"cffi",
# PGPy depends on this _specific_ version of cryptography
"cryptography==0.6",
"enum34",
"kombu",
"more_itertools",
"PGPy",
"pycparser",
"PyHawk-with-a-single-extra-commit",
"Jinja2",
"PyYAML",
"redo",
# Because taskcluster hard pins this version...
"requests==2.4.3",
"singledispatch",
"six",
"taskcluster>=0.0.24",
"wsgiref",
],
tests_require=[
'hypothesis',
'pytest',
'mock',
],
)
<commit_msg>Upgrade taskcluster client for nice slugids
This fixes e.g. this failure:
* https://tools.taskcluster.net/task-inspector/#2AAxnGTzSeGLTX_Hwp_PVg/
due to the TaskGroupId starting with a '-', by upgrading taskcluster
client. From version 0.0.26 of python taskcluster client onwards, 'nice'
slugs are returned that start with [A-Za-f].<commit_after>from setuptools import setup
setup(
name="funsize",
version="0.42",
description="Funsize Scheduler",
author="Mozilla Release Engineering",
packages=["funsize"],
include_package_data=True,
# Not zip safe because we have data files in the package
zip_safe=False,
entry_points={
"console_scripts": [
"funsize-scheduler = funsize.scheduler:main",
],
},
install_requires=[
"amqp",
"anyjson",
"argparse",
"cffi",
# PGPy depends on this _specific_ version of cryptography
"cryptography==0.6",
"enum34",
"kombu",
"more_itertools",
"PGPy",
"pycparser",
"PyHawk-with-a-single-extra-commit",
"Jinja2",
"PyYAML",
"redo",
# Because taskcluster hard pins this version...
"requests==2.4.3",
"singledispatch",
"six",
"taskcluster>=0.0.26",
"wsgiref",
],
tests_require=[
'hypothesis',
'pytest',
'mock',
],
)
|
4dbf364177da4f06b366a68b1458ec55c8c1895f
|
docs/usage_example.py
|
docs/usage_example.py
|
from pandarus import *
import geopandas as gpd
import rasterio
import os
import json
from pprint import pprint
# Get filepaths of data used in tests
grid_fp = os.path.join('..', 'tests', 'data', 'grid.geojson')
points_fp = os.path.join('..', 'tests', 'data', 'points.geojson')
square_fp = os.path.join('..', 'tests', 'data', 'square.geojson')
lines_fp = os.path.join('..', 'tests', 'data', 'lines.geojson')
range_fp = os.path.join('..', 'tests', 'data', 'range.tif')
# Load test fixtures
grid = gpd.read_file(grid_fp)
square = gpd.read_file(square_fp)
points = gpd.read_file(points_fp)
lines = gpd.read_file(lines_fp)
# Intersecting polygons
spatial_result, json_data = intersect(
square_fp,
'name',
grid_fp,
'name',
compress=False
)
# Load resulting output data
print("Output from intersecting polygons")
pprint(json.load(open(json_data)))
# Intersecting lines
spatial_result, json_data = intersect(
lines_fp,
'name',
grid_fp,
'name',
compress=False
)
# Load resulting output data
print("Output from intersecting lines")
pprint(json.load(open(json_data)))
print("Vector file with calculated intersections written to:", spatial_result)
# Intersecting points
spatial_result, json_data = intersect(
points_fp,
'name',
grid_fp,
'name',
compress=False
)
# Load resulting output data
print("Output from intersecting points")
pprint(json.load(open(json_data)))
print("Vector file with calculated intersections written to:", spatial_result)
# Getting raster statistics for polygons
json_data = raster_statistics(
grid_fp,
'name',
range_fp,
compress=False
)
# Load resulting output data
print("Output from raster statistics for polygons")
pprint(json.load(open(json_data)))
print("Vector file with calculated intersections written to:", spatial_result)
# Getting raster statistics for lines
json_data = raster_statistics(
lines_fp,
'name',
range_fp,
compress=False
)
# Load resulting output data
print("Output from raster statistics for lines")
pprint(json.load(open(json_data)))
# Getting raster statistics for points
json_data = raster_statistics(
points_fp,
'name',
range_fp,
compress=False
)
# Load resulting output data
print("Output from raster statistics for points")
pprint(json.load(open(json_data)))
|
Add usage example as a python file
|
Add usage example as a python file
|
Python
|
bsd-3-clause
|
cmutel/pandarus
|
Add usage example as a python file
|
from pandarus import *
import geopandas as gpd
import rasterio
import os
import json
from pprint import pprint
# Get filepaths of data used in tests
grid_fp = os.path.join('..', 'tests', 'data', 'grid.geojson')
points_fp = os.path.join('..', 'tests', 'data', 'points.geojson')
square_fp = os.path.join('..', 'tests', 'data', 'square.geojson')
lines_fp = os.path.join('..', 'tests', 'data', 'lines.geojson')
range_fp = os.path.join('..', 'tests', 'data', 'range.tif')
# Load test fixtures
grid = gpd.read_file(grid_fp)
square = gpd.read_file(square_fp)
points = gpd.read_file(points_fp)
lines = gpd.read_file(lines_fp)
# Intersecting polygons
spatial_result, json_data = intersect(
square_fp,
'name',
grid_fp,
'name',
compress=False
)
# Load resulting output data
print("Output from intersecting polygons")
pprint(json.load(open(json_data)))
# Intersecting lines
spatial_result, json_data = intersect(
lines_fp,
'name',
grid_fp,
'name',
compress=False
)
# Load resulting output data
print("Output from intersecting lines")
pprint(json.load(open(json_data)))
print("Vector file with calculated intersections written to:", spatial_result)
# Intersecting points
spatial_result, json_data = intersect(
points_fp,
'name',
grid_fp,
'name',
compress=False
)
# Load resulting output data
print("Output from intersecting points")
pprint(json.load(open(json_data)))
print("Vector file with calculated intersections written to:", spatial_result)
# Getting raster statistics for polygons
json_data = raster_statistics(
grid_fp,
'name',
range_fp,
compress=False
)
# Load resulting output data
print("Output from raster statistics for polygons")
pprint(json.load(open(json_data)))
print("Vector file with calculated intersections written to:", spatial_result)
# Getting raster statistics for lines
json_data = raster_statistics(
lines_fp,
'name',
range_fp,
compress=False
)
# Load resulting output data
print("Output from raster statistics for lines")
pprint(json.load(open(json_data)))
# Getting raster statistics for points
json_data = raster_statistics(
points_fp,
'name',
range_fp,
compress=False
)
# Load resulting output data
print("Output from raster statistics for points")
pprint(json.load(open(json_data)))
|
<commit_before><commit_msg>Add usage example as a python file<commit_after>
|
from pandarus import *
import geopandas as gpd
import rasterio
import os
import json
from pprint import pprint
# Get filepaths of data used in tests
grid_fp = os.path.join('..', 'tests', 'data', 'grid.geojson')
points_fp = os.path.join('..', 'tests', 'data', 'points.geojson')
square_fp = os.path.join('..', 'tests', 'data', 'square.geojson')
lines_fp = os.path.join('..', 'tests', 'data', 'lines.geojson')
range_fp = os.path.join('..', 'tests', 'data', 'range.tif')
# Load test fixtures
grid = gpd.read_file(grid_fp)
square = gpd.read_file(square_fp)
points = gpd.read_file(points_fp)
lines = gpd.read_file(lines_fp)
# Intersecting polygons
spatial_result, json_data = intersect(
square_fp,
'name',
grid_fp,
'name',
compress=False
)
# Load resulting output data
print("Output from intersecting polygons")
pprint(json.load(open(json_data)))
# Intersecting lines
spatial_result, json_data = intersect(
lines_fp,
'name',
grid_fp,
'name',
compress=False
)
# Load resulting output data
print("Output from intersecting lines")
pprint(json.load(open(json_data)))
print("Vector file with calculated intersections written to:", spatial_result)
# Intersecting points
spatial_result, json_data = intersect(
points_fp,
'name',
grid_fp,
'name',
compress=False
)
# Load resulting output data
print("Output from intersecting points")
pprint(json.load(open(json_data)))
print("Vector file with calculated intersections written to:", spatial_result)
# Getting raster statistics for polygons
json_data = raster_statistics(
grid_fp,
'name',
range_fp,
compress=False
)
# Load resulting output data
print("Output from raster statistics for polygons")
pprint(json.load(open(json_data)))
print("Vector file with calculated intersections written to:", spatial_result)
# Getting raster statistics for lines
json_data = raster_statistics(
lines_fp,
'name',
range_fp,
compress=False
)
# Load resulting output data
print("Output from raster statistics for lines")
pprint(json.load(open(json_data)))
# Getting raster statistics for points
json_data = raster_statistics(
points_fp,
'name',
range_fp,
compress=False
)
# Load resulting output data
print("Output from raster statistics for points")
pprint(json.load(open(json_data)))
|
Add usage example as a python file
from pandarus import *
import geopandas as gpd
import rasterio
import os
import json
from pprint import pprint
# Get filepaths of data used in tests
grid_fp = os.path.join('..', 'tests', 'data', 'grid.geojson')
points_fp = os.path.join('..', 'tests', 'data', 'points.geojson')
square_fp = os.path.join('..', 'tests', 'data', 'square.geojson')
lines_fp = os.path.join('..', 'tests', 'data', 'lines.geojson')
range_fp = os.path.join('..', 'tests', 'data', 'range.tif')
# Load test fixtures
grid = gpd.read_file(grid_fp)
square = gpd.read_file(square_fp)
points = gpd.read_file(points_fp)
lines = gpd.read_file(lines_fp)
# Intersecting polygons
spatial_result, json_data = intersect(
square_fp,
'name',
grid_fp,
'name',
compress=False
)
# Load resulting output data
print("Output from intersecting polygons")
pprint(json.load(open(json_data)))
# Intersecting lines
spatial_result, json_data = intersect(
lines_fp,
'name',
grid_fp,
'name',
compress=False
)
# Load resulting output data
print("Output from intersecting lines")
pprint(json.load(open(json_data)))
print("Vector file with calculated intersections written to:", spatial_result)
# Intersecting points
spatial_result, json_data = intersect(
points_fp,
'name',
grid_fp,
'name',
compress=False
)
# Load resulting output data
print("Output from intersecting points")
pprint(json.load(open(json_data)))
print("Vector file with calculated intersections written to:", spatial_result)
# Getting raster statistics for polygons
json_data = raster_statistics(
grid_fp,
'name',
range_fp,
compress=False
)
# Load resulting output data
print("Output from raster statistics for polygons")
pprint(json.load(open(json_data)))
print("Vector file with calculated intersections written to:", spatial_result)
# Getting raster statistics for lines
json_data = raster_statistics(
lines_fp,
'name',
range_fp,
compress=False
)
# Load resulting output data
print("Output from raster statistics for lines")
pprint(json.load(open(json_data)))
# Getting raster statistics for points
json_data = raster_statistics(
points_fp,
'name',
range_fp,
compress=False
)
# Load resulting output data
print("Output from raster statistics for points")
pprint(json.load(open(json_data)))
|
<commit_before><commit_msg>Add usage example as a python file<commit_after>from pandarus import *
import geopandas as gpd
import rasterio
import os
import json
from pprint import pprint
# Get filepaths of data used in tests
grid_fp = os.path.join('..', 'tests', 'data', 'grid.geojson')
points_fp = os.path.join('..', 'tests', 'data', 'points.geojson')
square_fp = os.path.join('..', 'tests', 'data', 'square.geojson')
lines_fp = os.path.join('..', 'tests', 'data', 'lines.geojson')
range_fp = os.path.join('..', 'tests', 'data', 'range.tif')
# Load test fixtures
grid = gpd.read_file(grid_fp)
square = gpd.read_file(square_fp)
points = gpd.read_file(points_fp)
lines = gpd.read_file(lines_fp)
# Intersecting polygons
spatial_result, json_data = intersect(
square_fp,
'name',
grid_fp,
'name',
compress=False
)
# Load resulting output data
print("Output from intersecting polygons")
pprint(json.load(open(json_data)))
# Intersecting lines
spatial_result, json_data = intersect(
lines_fp,
'name',
grid_fp,
'name',
compress=False
)
# Load resulting output data
print("Output from intersecting lines")
pprint(json.load(open(json_data)))
print("Vector file with calculated intersections written to:", spatial_result)
# Intersecting points
spatial_result, json_data = intersect(
points_fp,
'name',
grid_fp,
'name',
compress=False
)
# Load resulting output data
print("Output from intersecting points")
pprint(json.load(open(json_data)))
print("Vector file with calculated intersections written to:", spatial_result)
# Getting raster statistics for polygons
json_data = raster_statistics(
grid_fp,
'name',
range_fp,
compress=False
)
# Load resulting output data
print("Output from raster statistics for polygons")
pprint(json.load(open(json_data)))
print("Vector file with calculated intersections written to:", spatial_result)
# Getting raster statistics for lines
json_data = raster_statistics(
lines_fp,
'name',
range_fp,
compress=False
)
# Load resulting output data
print("Output from raster statistics for lines")
pprint(json.load(open(json_data)))
# Getting raster statistics for points
json_data = raster_statistics(
points_fp,
'name',
range_fp,
compress=False
)
# Load resulting output data
print("Output from raster statistics for points")
pprint(json.load(open(json_data)))
|
|
7d090b22eb1f4aa841207a3940ce485b8539af5c
|
tests/test_provider_mbta.py
|
tests/test_provider_mbta.py
|
import busbus
from busbus.provider.mbta import MBTAProvider
from .conftest import mock_gtfs_zip
import arrow
import pytest
import responses
@pytest.fixture(scope='module')
@responses.activate
def mbta_provider(engine):
responses.add(responses.GET, MBTAProvider.gtfs_url,
body=mock_gtfs_zip('mbta'), status=200,
content_type='application/zip')
return MBTAProvider('fake API key', engine)
# test that we are indeed using our local abridged copy of the GTFS feed
def test_len_routes(mbta_provider):
assert len(list(mbta_provider.routes)) == 1
def test_agency_phoneword_e164(mbta_provider):
agency = mbta_provider.get(busbus.Agency, id=u'2')
assert agency.phone_e164 == '+18002356426'
def test_bu_central_children(mbta_provider):
children = set(s.id for s in mbta_provider.get(
busbus.Stop, u'place-bucen').children)
assert children == set((u'70144', u'70145'))
def test_green_to_bu(mbta_provider):
stop = mbta_provider.get(busbus.Stop, u'70144')
route = mbta_provider.get(busbus.Route, u'Green-B')
assert len(list(mbta_provider.arrivals.where(
stop=stop, route=route,
start_time=arrow.get('2015-03-10T14:00:00-04:00'),
end_time=arrow.get('2015-03-10T16:00:00-04:00')))) == 17
|
Add MBTA provider test cases
|
Add MBTA provider test cases
|
Python
|
mit
|
spaceboats/busbus
|
Add MBTA provider test cases
|
import busbus
from busbus.provider.mbta import MBTAProvider
from .conftest import mock_gtfs_zip
import arrow
import pytest
import responses
@pytest.fixture(scope='module')
@responses.activate
def mbta_provider(engine):
responses.add(responses.GET, MBTAProvider.gtfs_url,
body=mock_gtfs_zip('mbta'), status=200,
content_type='application/zip')
return MBTAProvider('fake API key', engine)
# test that we are indeed using our local abridged copy of the GTFS feed
def test_len_routes(mbta_provider):
assert len(list(mbta_provider.routes)) == 1
def test_agency_phoneword_e164(mbta_provider):
agency = mbta_provider.get(busbus.Agency, id=u'2')
assert agency.phone_e164 == '+18002356426'
def test_bu_central_children(mbta_provider):
children = set(s.id for s in mbta_provider.get(
busbus.Stop, u'place-bucen').children)
assert children == set((u'70144', u'70145'))
def test_green_to_bu(mbta_provider):
stop = mbta_provider.get(busbus.Stop, u'70144')
route = mbta_provider.get(busbus.Route, u'Green-B')
assert len(list(mbta_provider.arrivals.where(
stop=stop, route=route,
start_time=arrow.get('2015-03-10T14:00:00-04:00'),
end_time=arrow.get('2015-03-10T16:00:00-04:00')))) == 17
|
<commit_before><commit_msg>Add MBTA provider test cases<commit_after>
|
import busbus
from busbus.provider.mbta import MBTAProvider
from .conftest import mock_gtfs_zip
import arrow
import pytest
import responses
@pytest.fixture(scope='module')
@responses.activate
def mbta_provider(engine):
responses.add(responses.GET, MBTAProvider.gtfs_url,
body=mock_gtfs_zip('mbta'), status=200,
content_type='application/zip')
return MBTAProvider('fake API key', engine)
# test that we are indeed using our local abridged copy of the GTFS feed
def test_len_routes(mbta_provider):
assert len(list(mbta_provider.routes)) == 1
def test_agency_phoneword_e164(mbta_provider):
agency = mbta_provider.get(busbus.Agency, id=u'2')
assert agency.phone_e164 == '+18002356426'
def test_bu_central_children(mbta_provider):
children = set(s.id for s in mbta_provider.get(
busbus.Stop, u'place-bucen').children)
assert children == set((u'70144', u'70145'))
def test_green_to_bu(mbta_provider):
stop = mbta_provider.get(busbus.Stop, u'70144')
route = mbta_provider.get(busbus.Route, u'Green-B')
assert len(list(mbta_provider.arrivals.where(
stop=stop, route=route,
start_time=arrow.get('2015-03-10T14:00:00-04:00'),
end_time=arrow.get('2015-03-10T16:00:00-04:00')))) == 17
|
Add MBTA provider test casesimport busbus
from busbus.provider.mbta import MBTAProvider
from .conftest import mock_gtfs_zip
import arrow
import pytest
import responses
@pytest.fixture(scope='module')
@responses.activate
def mbta_provider(engine):
responses.add(responses.GET, MBTAProvider.gtfs_url,
body=mock_gtfs_zip('mbta'), status=200,
content_type='application/zip')
return MBTAProvider('fake API key', engine)
# test that we are indeed using our local abridged copy of the GTFS feed
def test_len_routes(mbta_provider):
assert len(list(mbta_provider.routes)) == 1
def test_agency_phoneword_e164(mbta_provider):
agency = mbta_provider.get(busbus.Agency, id=u'2')
assert agency.phone_e164 == '+18002356426'
def test_bu_central_children(mbta_provider):
children = set(s.id for s in mbta_provider.get(
busbus.Stop, u'place-bucen').children)
assert children == set((u'70144', u'70145'))
def test_green_to_bu(mbta_provider):
stop = mbta_provider.get(busbus.Stop, u'70144')
route = mbta_provider.get(busbus.Route, u'Green-B')
assert len(list(mbta_provider.arrivals.where(
stop=stop, route=route,
start_time=arrow.get('2015-03-10T14:00:00-04:00'),
end_time=arrow.get('2015-03-10T16:00:00-04:00')))) == 17
|
<commit_before><commit_msg>Add MBTA provider test cases<commit_after>import busbus
from busbus.provider.mbta import MBTAProvider
from .conftest import mock_gtfs_zip
import arrow
import pytest
import responses
@pytest.fixture(scope='module')
@responses.activate
def mbta_provider(engine):
responses.add(responses.GET, MBTAProvider.gtfs_url,
body=mock_gtfs_zip('mbta'), status=200,
content_type='application/zip')
return MBTAProvider('fake API key', engine)
# test that we are indeed using our local abridged copy of the GTFS feed
def test_len_routes(mbta_provider):
assert len(list(mbta_provider.routes)) == 1
def test_agency_phoneword_e164(mbta_provider):
agency = mbta_provider.get(busbus.Agency, id=u'2')
assert agency.phone_e164 == '+18002356426'
def test_bu_central_children(mbta_provider):
children = set(s.id for s in mbta_provider.get(
busbus.Stop, u'place-bucen').children)
assert children == set((u'70144', u'70145'))
def test_green_to_bu(mbta_provider):
stop = mbta_provider.get(busbus.Stop, u'70144')
route = mbta_provider.get(busbus.Route, u'Green-B')
assert len(list(mbta_provider.arrivals.where(
stop=stop, route=route,
start_time=arrow.get('2015-03-10T14:00:00-04:00'),
end_time=arrow.get('2015-03-10T16:00:00-04:00')))) == 17
|
|
c36a088ad0d56f2a4dbff85bc33922ab95fbc184
|
test_board_pytest.py
|
test_board_pytest.py
|
from board import Board
def test_addPiece():
print("Testing adding a piece.")
board = Board(5,5)
board.addPiece(0, 1)
assert board.boardMatrix.item(0,4) == 1
|
Add test for adding piece to board.
|
Add test for adding piece to board.
|
Python
|
mit
|
isaacarvestad/four-in-a-row
|
Add test for adding piece to board.
|
from board import Board
def test_addPiece():
print("Testing adding a piece.")
board = Board(5,5)
board.addPiece(0, 1)
assert board.boardMatrix.item(0,4) == 1
|
<commit_before><commit_msg>Add test for adding piece to board.<commit_after>
|
from board import Board
def test_addPiece():
print("Testing adding a piece.")
board = Board(5,5)
board.addPiece(0, 1)
assert board.boardMatrix.item(0,4) == 1
|
Add test for adding piece to board.from board import Board
def test_addPiece():
print("Testing adding a piece.")
board = Board(5,5)
board.addPiece(0, 1)
assert board.boardMatrix.item(0,4) == 1
|
<commit_before><commit_msg>Add test for adding piece to board.<commit_after>from board import Board
def test_addPiece():
print("Testing adding a piece.")
board = Board(5,5)
board.addPiece(0, 1)
assert board.boardMatrix.item(0,4) == 1
|
|
71c8debf81eb85f4ae1de8e58f7fb2bdd0b8b6e4
|
coex/direct.py
|
coex/direct.py
|
"""Analyze direct (grand canonical) simulations."""
from __future__ import division
import os.path
import numpy as np
from scipy.optimize import fsolve
from coex.read import read_lnpi
def transform(distribution, amount):
"""Perform linear transformation on a probability distribution.
Args:
distribution: A dict with keys 'param' and 'logp', as
returned by read_lnpi.
amount: The amount to shift the distribution using the
formula ln(P) + N * amount.
Returns:
A numpy array with the shifted logarithmic probabilities.
"""
return distribution['param'] * amount + distribution['logp']
def get_coexistence(directory, activities, species=1):
"""Find the coexistence point of a direct simulation.
Args:
directory: The location of the simulation.
activities: A list of the activities of each species in the
simulation.
species: An int denoting which species to use for histogram
reweighting.
Returns:
A dict with the keys 'distribution' and 'activities'
containing the logarithmic probability distribution and
activities at the coexistence point.
"""
dist = read_lnpi(os.path.join(directory, 'lnpi_op.dat'))
split = int(0.5 * len(dist['param']))
def objective(x):
transformed = np.exp(transform(dist, x))
vapor = sum(transformed[:split])
liquid = sum(transformed[split:])
return np.abs(vapor - liquid)
solution = fsolve(objective, x0=1.0, maxfev=1000)
dist['logp'] = transform(dist, solution)
result = {'distribution': dist, 'activities': np.copy(activities)}
result['activities'][species] *= np.exp(solution)
return result
|
Add analysis code for GC simulations.
|
Add analysis code for GC simulations.
|
Python
|
bsd-2-clause
|
adamrall/coex
|
Add analysis code for GC simulations.
|
"""Analyze direct (grand canonical) simulations."""
from __future__ import division
import os.path
import numpy as np
from scipy.optimize import fsolve
from coex.read import read_lnpi
def transform(distribution, amount):
"""Perform linear transformation on a probability distribution.
Args:
distribution: A dict with keys 'param' and 'logp', as
returned by read_lnpi.
amount: The amount to shift the distribution using the
formula ln(P) + N * amount.
Returns:
A numpy array with the shifted logarithmic probabilities.
"""
return distribution['param'] * amount + distribution['logp']
def get_coexistence(directory, activities, species=1):
"""Find the coexistence point of a direct simulation.
Args:
directory: The location of the simulation.
activities: A list of the activities of each species in the
simulation.
species: An int denoting which species to use for histogram
reweighting.
Returns:
A dict with the keys 'distribution' and 'activities'
containing the logarithmic probability distribution and
activities at the coexistence point.
"""
dist = read_lnpi(os.path.join(directory, 'lnpi_op.dat'))
split = int(0.5 * len(dist['param']))
def objective(x):
transformed = np.exp(transform(dist, x))
vapor = sum(transformed[:split])
liquid = sum(transformed[split:])
return np.abs(vapor - liquid)
solution = fsolve(objective, x0=1.0, maxfev=1000)
dist['logp'] = transform(dist, solution)
result = {'distribution': dist, 'activities': np.copy(activities)}
result['activities'][species] *= np.exp(solution)
return result
|
<commit_before><commit_msg>Add analysis code for GC simulations.<commit_after>
|
"""Analyze direct (grand canonical) simulations."""
from __future__ import division
import os.path
import numpy as np
from scipy.optimize import fsolve
from coex.read import read_lnpi
def transform(distribution, amount):
"""Perform linear transformation on a probability distribution.
Args:
distribution: A dict with keys 'param' and 'logp', as
returned by read_lnpi.
amount: The amount to shift the distribution using the
formula ln(P) + N * amount.
Returns:
A numpy array with the shifted logarithmic probabilities.
"""
return distribution['param'] * amount + distribution['logp']
def get_coexistence(directory, activities, species=1):
"""Find the coexistence point of a direct simulation.
Args:
directory: The location of the simulation.
activities: A list of the activities of each species in the
simulation.
species: An int denoting which species to use for histogram
reweighting.
Returns:
A dict with the keys 'distribution' and 'activities'
containing the logarithmic probability distribution and
activities at the coexistence point.
"""
dist = read_lnpi(os.path.join(directory, 'lnpi_op.dat'))
split = int(0.5 * len(dist['param']))
def objective(x):
transformed = np.exp(transform(dist, x))
vapor = sum(transformed[:split])
liquid = sum(transformed[split:])
return np.abs(vapor - liquid)
solution = fsolve(objective, x0=1.0, maxfev=1000)
dist['logp'] = transform(dist, solution)
result = {'distribution': dist, 'activities': np.copy(activities)}
result['activities'][species] *= np.exp(solution)
return result
|
Add analysis code for GC simulations."""Analyze direct (grand canonical) simulations."""
from __future__ import division
import os.path
import numpy as np
from scipy.optimize import fsolve
from coex.read import read_lnpi
def transform(distribution, amount):
"""Perform linear transformation on a probability distribution.
Args:
distribution: A dict with keys 'param' and 'logp', as
returned by read_lnpi.
amount: The amount to shift the distribution using the
formula ln(P) + N * amount.
Returns:
A numpy array with the shifted logarithmic probabilities.
"""
return distribution['param'] * amount + distribution['logp']
def get_coexistence(directory, activities, species=1):
"""Find the coexistence point of a direct simulation.
Args:
directory: The location of the simulation.
activities: A list of the activities of each species in the
simulation.
species: An int denoting which species to use for histogram
reweighting.
Returns:
A dict with the keys 'distribution' and 'activities'
containing the logarithmic probability distribution and
activities at the coexistence point.
"""
dist = read_lnpi(os.path.join(directory, 'lnpi_op.dat'))
split = int(0.5 * len(dist['param']))
def objective(x):
transformed = np.exp(transform(dist, x))
vapor = sum(transformed[:split])
liquid = sum(transformed[split:])
return np.abs(vapor - liquid)
solution = fsolve(objective, x0=1.0, maxfev=1000)
dist['logp'] = transform(dist, solution)
result = {'distribution': dist, 'activities': np.copy(activities)}
result['activities'][species] *= np.exp(solution)
return result
|
<commit_before><commit_msg>Add analysis code for GC simulations.<commit_after>"""Analyze direct (grand canonical) simulations."""
from __future__ import division
import os.path
import numpy as np
from scipy.optimize import fsolve
from coex.read import read_lnpi
def transform(distribution, amount):
"""Perform linear transformation on a probability distribution.
Args:
distribution: A dict with keys 'param' and 'logp', as
returned by read_lnpi.
amount: The amount to shift the distribution using the
formula ln(P) + N * amount.
Returns:
A numpy array with the shifted logarithmic probabilities.
"""
return distribution['param'] * amount + distribution['logp']
def get_coexistence(directory, activities, species=1):
"""Find the coexistence point of a direct simulation.
Args:
directory: The location of the simulation.
activities: A list of the activities of each species in the
simulation.
species: An int denoting which species to use for histogram
reweighting.
Returns:
A dict with the keys 'distribution' and 'activities'
containing the logarithmic probability distribution and
activities at the coexistence point.
"""
dist = read_lnpi(os.path.join(directory, 'lnpi_op.dat'))
split = int(0.5 * len(dist['param']))
def objective(x):
transformed = np.exp(transform(dist, x))
vapor = sum(transformed[:split])
liquid = sum(transformed[split:])
return np.abs(vapor - liquid)
solution = fsolve(objective, x0=1.0, maxfev=1000)
dist['logp'] = transform(dist, solution)
result = {'distribution': dist, 'activities': np.copy(activities)}
result['activities'][species] *= np.exp(solution)
return result
|
|
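The coexistence search in coex/direct.py above boils down to the reweighting identity ln P'(N) = ln P(N) + N * delta, implemented by transform(), plus a root find that balances the weight of the two phases. A rough stand-alone sketch of that loop, using a fabricated two-peaked distribution instead of the lnpi_op.dat file the module actually reads (the reweight() name and every number below are invented for illustration):
import numpy as np
from scipy.optimize import fsolve
# Fabricated two-peaked ln(probability) over particle number N; the real module
# reads this distribution from lnpi_op.dat instead.
N = np.arange(200)
logp = np.logaddexp(-0.01 * (N - 40.0) ** 2 - 2.0, -0.01 * (N - 160.0) ** 2)
def reweight(amount):
    # ln P(N) + N * delta: the same linear shift coex.direct applies.
    return N * amount + logp
split = len(N) // 2
def objective(x):
    shifted = np.exp(reweight(x))
    # Coexistence: equal weight under the low-N (vapor) and high-N (liquid) peaks.
    return shifted[:split].sum() - shifted[split:].sum()
delta = fsolve(objective, x0=0.0, maxfev=1000)[0]
print("activity rescaling factor: exp(%.4f) = %.4f" % (delta, np.exp(delta)))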
eaf87158eab3e4ebceafca3569b2828593040882
|
tests/Levitt1971-Fig5.py
|
tests/Levitt1971-Fig5.py
|
from UpDownMethods import CORRECT, INCORRECT
import UpDownMethods as ud
import numpy as np
import matplotlib.pyplot as plt
import unittest
#
# Simulation parameters
#
responses = [CORRECT, CORRECT, CORRECT, CORRECT, INCORRECT, CORRECT, INCORRECT,
INCORRECT, CORRECT, INCORRECT, CORRECT, CORRECT, CORRECT, CORRECT,
CORRECT, INCORRECT, INCORRECT, INCORRECT, CORRECT, CORRECT,
CORRECT, CORRECT, CORRECT, CORRECT]
initalValue = 0.0
stepSize = 1.0
down = 2
up = 1
#
# Test code
#
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.results = ud.initiate_procedure()
nextValue, self.results = ud.append_result(self.results, responses[0],
down, up, stepSize,
initalValue)
for resp in responses[1:]:
nextValue, self.results = ud.append_result(self.results, resp,
down, up, stepSize,
nextValue)
def test_initiateResults(self):
self.results = ud.initiate_procedure()
self.assertIs(len(self.results), 0)
def test_calculateMidpoints(self):
mids = ud.midpoints(self.results)
mids = mids["Midpoint"]
mids = mids[1:4].values
self.assertIsNone(np.testing.assert_array_equal(mids, [0.0, 1.0, 1.5]))
def test_plotResults(self):
ud.plot_results(self.results)
plt.savefig('test.png', bbox_inches='tight')
def test_runs(self):
runs = ud.runs(self.results)
self.assertIsNone(np.testing.assert_array_equal(
runs["Start"].values, [1, 5, 11, 15, 19]))
self.assertIsNone(np.testing.assert_array_equal(
runs["Finish"].values, [5, 12, 16, 20, 24]))
if __name__ == '__main__':
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestSequenceFunctions)
unittest.TextTestRunner(verbosity=2).run(suite)
|
Test case from Levitt figure 4
|
Test case from Levitt figure 4
|
Python
|
mit
|
codles/UpDownMethods
|
Test case from Levitt figure 4
|
from UpDownMethods import CORRECT, INCORRECT
import UpDownMethods as ud
import numpy as np
import matplotlib.pyplot as plt
import unittest
#
# Simulation parameters
#
responses = [CORRECT, CORRECT, CORRECT, CORRECT, INCORRECT, CORRECT, INCORRECT,
INCORRECT, CORRECT, INCORRECT, CORRECT, CORRECT, CORRECT, CORRECT,
CORRECT, INCORRECT, INCORRECT, INCORRECT, CORRECT, CORRECT,
CORRECT, CORRECT, CORRECT, CORRECT]
initalValue = 0.0
stepSize = 1.0
down = 2
up = 1
#
# Test code
#
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.results = ud.initiate_procedure()
nextValue, self.results = ud.append_result(self.results, responses[0],
down, up, stepSize,
initalValue)
for resp in responses[1:]:
nextValue, self.results = ud.append_result(self.results, resp,
down, up, stepSize,
nextValue)
def test_initiateResults(self):
self.results = ud.initiate_procedure()
self.assertIs(len(self.results), 0)
def test_calculateMidpoints(self):
mids = ud.midpoints(self.results)
mids = mids["Midpoint"]
mids = mids[1:4].values
self.assertIsNone(np.testing.assert_array_equal(mids, [0.0, 1.0, 1.5]))
def test_plotResults(self):
ud.plot_results(self.results)
plt.savefig('test.png', bbox_inches='tight')
def test_runs(self):
runs = ud.runs(self.results)
self.assertIsNone(np.testing.assert_array_equal(
runs["Start"].values, [1, 5, 11, 15, 19]))
self.assertIsNone(np.testing.assert_array_equal(
runs["Finish"].values, [5, 12, 16, 20, 24]))
if __name__ == '__main__':
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestSequenceFunctions)
unittest.TextTestRunner(verbosity=2).run(suite)
|
<commit_before><commit_msg>Test case from Levitt figure 4<commit_after>
|
from UpDownMethods import CORRECT, INCORRECT
import UpDownMethods as ud
import numpy as np
import matplotlib.pyplot as plt
import unittest
#
# Simulation parameters
#
responses = [CORRECT, CORRECT, CORRECT, CORRECT, INCORRECT, CORRECT, INCORRECT,
INCORRECT, CORRECT, INCORRECT, CORRECT, CORRECT, CORRECT, CORRECT,
CORRECT, INCORRECT, INCORRECT, INCORRECT, CORRECT, CORRECT,
CORRECT, CORRECT, CORRECT, CORRECT]
initalValue = 0.0
stepSize = 1.0
down = 2
up = 1
#
# Test code
#
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.results = ud.initiate_procedure()
nextValue, self.results = ud.append_result(self.results, responses[0],
down, up, stepSize,
initalValue)
for resp in responses[1:]:
nextValue, self.results = ud.append_result(self.results, resp,
down, up, stepSize,
nextValue)
def test_initiateResults(self):
self.results = ud.initiate_procedure()
self.assertIs(len(self.results), 0)
def test_calculateMidpoints(self):
mids = ud.midpoints(self.results)
mids = mids["Midpoint"]
mids = mids[1:4].values
self.assertIsNone(np.testing.assert_array_equal(mids, [0.0, 1.0, 1.5]))
def test_plotResults(self):
ud.plot_results(self.results)
plt.savefig('test.png', bbox_inches='tight')
def test_runs(self):
runs = ud.runs(self.results)
self.assertIsNone(np.testing.assert_array_equal(
runs["Start"].values, [1, 5, 11, 15, 19]))
self.assertIsNone(np.testing.assert_array_equal(
runs["Finish"].values, [5, 12, 16, 20, 24]))
if __name__ == '__main__':
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestSequenceFunctions)
unittest.TextTestRunner(verbosity=2).run(suite)
|
Test case from Levitt figure 4from UpDownMethods import CORRECT, INCORRECT
import UpDownMethods as ud
import numpy as np
import matplotlib.pyplot as plt
import unittest
#
# Simulation parameters
#
responses = [CORRECT, CORRECT, CORRECT, CORRECT, INCORRECT, CORRECT, INCORRECT,
INCORRECT, CORRECT, INCORRECT, CORRECT, CORRECT, CORRECT, CORRECT,
CORRECT, INCORRECT, INCORRECT, INCORRECT, CORRECT, CORRECT,
CORRECT, CORRECT, CORRECT, CORRECT]
initalValue = 0.0
stepSize = 1.0
down = 2
up = 1
#
# Test code
#
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.results = ud.initiate_procedure()
nextValue, self.results = ud.append_result(self.results, responses[0],
down, up, stepSize,
initalValue)
for resp in responses[1:]:
nextValue, self.results = ud.append_result(self.results, resp,
down, up, stepSize,
nextValue)
def test_initiateResults(self):
self.results = ud.initiate_procedure()
self.assertIs(len(self.results), 0)
def test_calculateMidpoints(self):
mids = ud.midpoints(self.results)
mids = mids["Midpoint"]
mids = mids[1:4].values
self.assertIsNone(np.testing.assert_array_equal(mids, [0.0, 1.0, 1.5]))
def test_plotResults(self):
ud.plot_results(self.results)
plt.savefig('test.png', bbox_inches='tight')
def test_runs(self):
runs = ud.runs(self.results)
self.assertIsNone(np.testing.assert_array_equal(
runs["Start"].values, [1, 5, 11, 15, 19]))
self.assertIsNone(np.testing.assert_array_equal(
runs["Finish"].values, [5, 12, 16, 20, 24]))
if __name__ == '__main__':
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestSequenceFunctions)
unittest.TextTestRunner(verbosity=2).run(suite)
|
<commit_before><commit_msg>Test case from Levitt figure 4<commit_after>from UpDownMethods import CORRECT, INCORRECT
import UpDownMethods as ud
import numpy as np
import matplotlib.pyplot as plt
import unittest
#
# Simulation parameters
#
responses = [CORRECT, CORRECT, CORRECT, CORRECT, INCORRECT, CORRECT, INCORRECT,
INCORRECT, CORRECT, INCORRECT, CORRECT, CORRECT, CORRECT, CORRECT,
CORRECT, INCORRECT, INCORRECT, INCORRECT, CORRECT, CORRECT,
CORRECT, CORRECT, CORRECT, CORRECT]
initalValue = 0.0
stepSize = 1.0
down = 2
up = 1
#
# Test code
#
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.results = ud.initiate_procedure()
nextValue, self.results = ud.append_result(self.results, responses[0],
down, up, stepSize,
initalValue)
for resp in responses[1:]:
nextValue, self.results = ud.append_result(self.results, resp,
down, up, stepSize,
nextValue)
def test_initiateResults(self):
self.results = ud.initiate_procedure()
self.assertIs(len(self.results), 0)
def test_calculateMidpoints(self):
mids = ud.midpoints(self.results)
mids = mids["Midpoint"]
mids = mids[1:4].values
self.assertIsNone(np.testing.assert_array_equal(mids, [0.0, 1.0, 1.5]))
def test_plotResults(self):
ud.plot_results(self.results)
plt.savefig('test.png', bbox_inches='tight')
def test_runs(self):
runs = ud.runs(self.results)
self.assertIsNone(np.testing.assert_array_equal(
runs["Start"].values, [1, 5, 11, 15, 19]))
self.assertIsNone(np.testing.assert_array_equal(
runs["Finish"].values, [5, 12, 16, 20, 24]))
if __name__ == '__main__':
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestSequenceFunctions)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
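The test above only drives the UpDownMethods API; the package itself is not shown in this record. As a hedged guess at the transformed up-down rule it exercises (step down after two consecutive correct responses, step up after any incorrect one), a stand-alone sketch is given below; run_staircase() is an invented name, not part of UpDownMethods:
# Minimal 1-up / 2-down staircase, independent of the UpDownMethods package.
CORRECT, INCORRECT = 1, 0
def run_staircase(responses, step=1.0, start=0.0, down=2):
    """Return the stimulus level presented on each trial."""
    levels, level, streak = [], start, 0
    for resp in responses:
        levels.append(level)
        if resp == CORRECT:
            streak += 1
            if streak == down:   # 'down' consecutive correct -> step down
                level -= step
                streak = 0
        else:                    # any incorrect response -> step up
            level += step
            streak = 0
    return levels
print(run_staircase([CORRECT, CORRECT, CORRECT, CORRECT, INCORRECT, CORRECT, INCORRECT]))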
a58e618712bdd0ef29df39a2104c9d24e3d0dfcb
|
packager/core/repo_tools.py
|
packager/core/repo_tools.py
|
import os, shutil
import urllib
import zipfile
import tempfile
def download(repo, dest="."):
'''
Downloads a zip archive of the given repository to the current
directory.
'''
url = "https://github.com/{0}/archive/master.zip".format(repo)
print(url)
local_file = os.path.join(dest, os.path.basename(url))
print(local_file)
urllib.urlretrieve(url, local_file)
return local_file
def unpack(file, dest="."):
'''
Unpacks a zip archive containing the contents of the repo to the
current directory.
'''
print(file)
zip = zipfile.ZipFile(file, mode='r')
zip.extractall(dest)
files = zip.namelist()
prefix = os.path.commonprefix(files)
return os.path.join(dest, prefix)
def main():
repo = "csdms/rpm_models"
tmp_dir = tempfile.mkdtemp(prefix=main.__module__)
try:
zip_file = download(repo, dest=tmp_dir)
unpack_dir = unpack(zip_file, dest=tmp_dir)
print(unpack_dir)
except Exception:
raise
finally:
shutil.rmtree(tmp_dir)
if __name__ == '__main__':
main()
|
Add tools for getting models and tools repos
|
Add tools for getting models and tools repos
These are patterned on @mcflugen's "components.py" --
https://github.com/csdms/wmt/blob/master/server/wmt/installer/components.py.
|
Python
|
mit
|
csdms/packagebuilder
|
Add tools for getting models and tools repos
These are patterned on @mcflugen's "components.py" --
https://github.com/csdms/wmt/blob/master/server/wmt/installer/components.py.
|
import os, shutil
import urllib
import zipfile
import tempfile
def download(repo, dest="."):
'''
Downloads a zip archive of the given repository to the current
directory.
'''
url = "https://github.com/{0}/archive/master.zip".format(repo)
print(url)
local_file = os.path.join(dest, os.path.basename(url))
print(local_file)
urllib.urlretrieve(url, local_file)
return local_file
def unpack(file, dest="."):
'''
Unpacks a zip archive containing the contents of the repo to the
current directory.
'''
print(file)
zip = zipfile.ZipFile(file, mode='r')
zip.extractall(dest)
files = zip.namelist()
prefix = os.path.commonprefix(files)
return os.path.join(dest, prefix)
def main():
repo = "csdms/rpm_models"
tmp_dir = tempfile.mkdtemp(prefix=main.__module__)
try:
zip_file = download(repo, dest=tmp_dir)
unpack_dir = unpack(zip_file, dest=tmp_dir)
print(unpack_dir)
except Exception:
raise
finally:
shutil.rmtree(tmp_dir)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add tools for getting models and tools repos
These are patterned on @mcflugen's "components.py" --
https://github.com/csdms/wmt/blob/master/server/wmt/installer/components.py.<commit_after>
|
import os, shutil
import urllib
import zipfile
import tempfile
def download(repo, dest="."):
'''
Downloads a zip archive of the given repository to the current
directory.
'''
url = "https://github.com/{0}/archive/master.zip".format(repo)
print(url)
local_file = os.path.join(dest, os.path.basename(url))
print(local_file)
urllib.urlretrieve(url, local_file)
return local_file
def unpack(file, dest="."):
'''
Unpacks a zip archive containing the contents of the repo to the
current directory.
'''
print(file)
zip = zipfile.ZipFile(file, mode='r')
zip.extractall(dest)
files = zip.namelist()
prefix = os.path.commonprefix(files)
return os.path.join(dest, prefix)
def main():
repo = "csdms/rpm_models"
tmp_dir = tempfile.mkdtemp(prefix=main.__module__)
try:
zip_file = download(repo, dest=tmp_dir)
unpack_dir = unpack(zip_file, dest=tmp_dir)
print(unpack_dir)
except Exception:
raise
finally:
shutil.rmtree(tmp_dir)
if __name__ == '__main__':
main()
|
Add tools for getting models and tools repos
These are patterned on @mcflugen's "components.py" --
https://github.com/csdms/wmt/blob/master/server/wmt/installer/components.py.import os, shutil
import urllib
import zipfile
import tempfile
def download(repo, dest="."):
'''
Downloads a zip archive of the given repository to the current
directory.
'''
url = "https://github.com/{0}/archive/master.zip".format(repo)
print(url)
local_file = os.path.join(dest, os.path.basename(url))
print(local_file)
urllib.urlretrieve(url, local_file)
return local_file
def unpack(file, dest="."):
'''
Unpacks a zip archive containing the contents of the repo to the
current directory.
'''
print(file)
zip = zipfile.ZipFile(file, mode='r')
zip.extractall(dest)
files = zip.namelist()
prefix = os.path.commonprefix(files)
return os.path.join(dest, prefix)
def main():
repo = "csdms/rpm_models"
tmp_dir = tempfile.mkdtemp(prefix=main.__module__)
try:
zip_file = download(repo, dest=tmp_dir)
unpack_dir = unpack(zip_file, dest=tmp_dir)
print(unpack_dir)
except Exception:
raise
finally:
shutil.rmtree(tmp_dir)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add tools for getting models and tools repos
These are patterned on @mcflugen's "components.py" --
https://github.com/csdms/wmt/blob/master/server/wmt/installer/components.py.<commit_after>import os, shutil
import urllib
import zipfile
import tempfile
def download(repo, dest="."):
'''
Downloads a zip archive of the given repository to the current
directory.
'''
url = "https://github.com/{0}/archive/master.zip".format(repo)
print(url)
local_file = os.path.join(dest, os.path.basename(url))
print(local_file)
urllib.urlretrieve(url, local_file)
return local_file
def unpack(file, dest="."):
'''
Unpacks a zip archive containing the contents of the repo to the
current directory.
'''
print(file)
zip = zipfile.ZipFile(file, mode='r')
zip.extractall(dest)
files = zip.namelist()
prefix = os.path.commonprefix(files)
return os.path.join(dest, prefix)
def main():
repo = "csdms/rpm_models"
tmp_dir = tempfile.mkdtemp(prefix=main.__module__)
try:
zip_file = download(repo, dest=tmp_dir)
unpack_dir = unpack(zip_file, dest=tmp_dir)
print(unpack_dir)
except Exception:
raise
finally:
shutil.rmtree(tmp_dir)
if __name__ == '__main__':
main()
|
|
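One caveat on the repo_tools module above: urllib.urlretrieve is the Python 2 spelling. Under Python 3 the download helper would presumably look roughly like the sketch below (same GitHub archive URL scheme, otherwise untested):
import os
import urllib.request
def download(repo, dest="."):
    """Python 3 variant of the download() helper above."""
    url = "https://github.com/{0}/archive/master.zip".format(repo)
    local_file = os.path.join(dest, os.path.basename(url))
    urllib.request.urlretrieve(url, local_file)  # urllib.urlretrieve in Python 2
    return local_file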
cbbc1f163335cd1572509bcacaff691be90ba5be
|
tests/flights_to_test.py
|
tests/flights_to_test.py
|
import unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicInstaflights(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_basic_request(self):
city = 'YTO'
instaf = self.sds.flights_to(city)
self.assertIsNotNone(instaf)
def test_no_authorization(self):
sds = sabre_dev_studio.SabreDevStudio()
with self.assertRaises(sabre_exceptions.NotAuthorizedError):
resp = sds.flights_to('YTO')
if __name__ == '__main__':
unittest.main()
|
Add tests for Flights To
|
Add tests for Flights To
|
Python
|
mit
|
Jamil/sabre_dev_studio
|
Add tests for Flights To
|
import unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicInstaflights(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_basic_request(self):
city = 'YTO'
instaf = self.sds.flights_to(city)
self.assertIsNotNone(instaf)
def test_no_authorization(self):
sds = sabre_dev_studio.SabreDevStudio()
with self.assertRaises(sabre_exceptions.NotAuthorizedError):
resp = sds.flights_to('YTO')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for Flights To<commit_after>
|
import unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicInstaflights(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_basic_request(self):
city = 'YTO'
instaf = self.sds.flights_to(city)
self.assertIsNotNone(instaf)
def test_no_authorization(self):
sds = sabre_dev_studio.SabreDevStudio()
with self.assertRaises(sabre_exceptions.NotAuthorizedError):
resp = sds.flights_to('YTO')
if __name__ == '__main__':
unittest.main()
|
Add tests for Flights Toimport unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicInstaflights(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_basic_request(self):
city = 'YTO'
instaf = self.sds.flights_to(city)
self.assertIsNotNone(instaf)
def test_no_authorization(self):
sds = sabre_dev_studio.SabreDevStudio()
with self.assertRaises(sabre_exceptions.NotAuthorizedError):
resp = sds.flights_to('YTO')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for Flights To<commit_after>import unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicInstaflights(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_basic_request(self):
city = 'YTO'
instaf = self.sds.flights_to(city)
self.assertIsNotNone(instaf)
def test_no_authorization(self):
sds = sabre_dev_studio.SabreDevStudio()
with self.assertRaises(sabre_exceptions.NotAuthorizedError):
resp = sds.flights_to('YTO')
if __name__ == '__main__':
unittest.main()
|
|
d93d3988da51f94a4979d8a4879d54bc89b0ba01
|
sympy/printing/tests/test_numpy.py
|
sympy/printing/tests/test_numpy.py
|
from sympy import Piecewise
from sympy.abc import x
from sympy.printing.lambdarepr import NumPyPrinter
def test_numpy_piecewise_regression():
"""
NumPyPrinter needs to print Piecewise()'s choicelist as a list to avoid
breaking compatibility with numpy 1.8. This is not necessary in numpy 1.9+.
See gh-9747 and gh-9749 for details.
"""
p = Piecewise((1, x < 0), (0, True))
assert NumPyPrinter().doprint(p) == 'select([x < 0,True], [1,0], default=nan)'
|
Add test for NumPyPrinter regression
|
Add test for NumPyPrinter regression
|
Python
|
bsd-3-clause
|
skirpichev/omg,diofant/diofant
|
Add test for NumPyPrinter regression
|
from sympy import Piecewise
from sympy.abc import x
from sympy.printing.lambdarepr import NumPyPrinter
def test_numpy_piecewise_regression():
"""
NumPyPrinter needs to print Piecewise()'s choicelist as a list to avoid
breaking compatibility with numpy 1.8. This is not necessary in numpy 1.9+.
See gh-9747 and gh-9749 for details.
"""
p = Piecewise((1, x < 0), (0, True))
assert NumPyPrinter().doprint(p) == 'select([x < 0,True], [1,0], default=nan)'
|
<commit_before><commit_msg>Add test for NumPyPrinter regression<commit_after>
|
from sympy import Piecewise
from sympy.abc import x
from sympy.printing.lambdarepr import NumPyPrinter
def test_numpy_piecewise_regression():
"""
NumPyPrinter needs to print Piecewise()'s choicelist as a list to avoid
breaking compatibility with numpy 1.8. This is not necessary in numpy 1.9+.
See gh-9747 and gh-9749 for details.
"""
p = Piecewise((1, x < 0), (0, True))
assert NumPyPrinter().doprint(p) == 'select([x < 0,True], [1,0], default=nan)'
|
Add test for NumPyPrinter regressionfrom sympy import Piecewise
from sympy.abc import x
from sympy.printing.lambdarepr import NumPyPrinter
def test_numpy_piecewise_regression():
"""
NumPyPrinter needs to print Piecewise()'s choicelist as a list to avoid
breaking compatibility with numpy 1.8. This is not necessary in numpy 1.9+.
See gh-9747 and gh-9749 for details.
"""
p = Piecewise((1, x < 0), (0, True))
assert NumPyPrinter().doprint(p) == 'select([x < 0,True], [1,0], default=nan)'
|
<commit_before><commit_msg>Add test for NumPyPrinter regression<commit_after>from sympy import Piecewise
from sympy.abc import x
from sympy.printing.lambdarepr import NumPyPrinter
def test_numpy_piecewise_regression():
"""
NumPyPrinter needs to print Piecewise()'s choicelist as a list to avoid
breaking compatibility with numpy 1.8. This is not necessary in numpy 1.9+.
See gh-9747 and gh-9749 for details.
"""
p = Piecewise((1, x < 0), (0, True))
assert NumPyPrinter().doprint(p) == 'select([x < 0,True], [1,0], default=nan)'
|
|
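The string the test above asserts on is a call to numpy's select. As a stand-alone illustration of that call shape, with the trailing True condition swapped for an explicit x >= 0 mask purely so the snippet runs on its own:
import numpy as np
x = np.linspace(-1.0, 1.0, 5)
# Conditions and choices passed as plain lists, matching the printed form the test expects.
result = np.select([x < 0, x >= 0], [1, 0], default=np.nan)
print(result)  # 1.0 where x < 0, 0.0 elsewhere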
47925d50b209373ff37666d703943f3252735fac
|
tools/wrap_natives.py
|
tools/wrap_natives.py
|
#!/usr/bin/env python
#
# Copyright (c) 2014 Zeex
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
import sys
def main(argv):
natives = []
for filename in argv[1:]:
with open(filename, 'r') as f:
for line in f.readlines():
match = re.match(r'native\s+([a-zA-Z_@][a-zA-Z0-9_@]*\(.*?\))\s*;',
line, re.MULTILINE)
if match is not None:
native = match.group(1)
natives.append(native)
for native in natives:
name = re.sub(r'(.*)\(.*\)', r'\1', native)
params = re.sub(r'.*\((.*)\)', r'\1', native)
param_names = [m.group(1)
for m in re.finditer(
r'(?:const\s+)?'
r'(?:(?:{.*?}|\S+):\s*)?'
r'(\S+?)'
r'(?:\s*\[.*?\])?'
r'(?:\s*=\s*.+)?'
r'\s*(?:,|$)', params)]
if '...' not in param_names:
print('stock _%s(%s) {' % (name, params))
print('\treturn %s(%s);' % (name, ', '.join(param_names)))
print('}')
print('#define %s _%s\n' % (name, name))
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Add script for generating native wrappers
|
Add script for generating native wrappers
The script helps you see what arguments are passed to native functions
in [stack] trace. It takes an include file as input and outputs code
similar to:
stock _print(const string[]) {
return print(string);
}
for each native function. Variable arguments ("...") are not supported.
|
Python
|
bsd-2-clause
|
Zeex/samp-plugin-crashdetect,Zeex/samp-plugin-crashdetect,Zeex/samp-plugin-crashdetect
|
Add script for generating native wrappers
The script helps you see what arguments are passed to native functions
in [stack] trace. It takes an include file as input and outputs code
similar to:
stock _print(const string[]) {
return print(string);
}
for each native function. Variable arguments ("...") are not supported.
|
#!/usr/bin/env python
#
# Copyright (c) 2014 Zeex
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
import sys
def main(argv):
natives = []
for filename in argv[1:]:
with open(filename, 'r') as f:
for line in f.readlines():
match = re.match(r'native\s+([a-zA-Z_@][a-zA-Z0-9_@]*\(.*?\))\s*;',
line, re.MULTILINE)
if match is not None:
native = match.group(1)
natives.append(native)
for native in natives:
name = re.sub(r'(.*)\(.*\)', r'\1', native)
params = re.sub(r'.*\((.*)\)', r'\1', native)
param_names = [m.group(1)
for m in re.finditer(
r'(?:const\s+)?'
r'(?:(?:{.*?}|\S+):\s*)?'
r'(\S+?)'
r'(?:\s*\[.*?\])?'
r'(?:\s*=\s*.+)?'
r'\s*(?:,|$)', params)]
if '...' not in param_names:
print('stock _%s(%s) {' % (name, params))
print('\treturn %s(%s);' % (name, ', '.join(param_names)))
print('}')
print('#define %s _%s\n' % (name, name))
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add script for generating native wrappers
The script helps you see what arguments are passed to native functions
in [stack] trace. It takes an include file as input and outputs code
similar to:
stock _print(const string[]) {
return print(string);
}
for each native function. Variable arguments ("...") are not supported.<commit_after>
|
#!/usr/bin/env python
#
# Copyright (c) 2014 Zeex
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
import sys
def main(argv):
natives = []
for filename in argv[1:]:
with open(filename, 'r') as f:
for line in f.readlines():
match = re.match(r'native\s+([a-zA-Z_@][a-zA-Z0-9_@]*\(.*?\))\s*;',
line, re.MULTILINE)
if match is not None:
native = match.group(1)
natives.append(native)
for native in natives:
name = re.sub(r'(.*)\(.*\)', r'\1', native)
params = re.sub(r'.*\((.*)\)', r'\1', native)
param_names = [m.group(1)
for m in re.finditer(
r'(?:const\s+)?'
r'(?:(?:{.*?}|\S+):\s*)?'
r'(\S+?)'
r'(?:\s*\[.*?\])?'
r'(?:\s*=\s*.+)?'
r'\s*(?:,|$)', params)]
if '...' not in param_names:
print('stock _%s(%s) {' % (name, params))
print('\treturn %s(%s);' % (name, ', '.join(param_names)))
print('}')
print('#define %s _%s\n' % (name, name))
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Add script for generating native wrappers
The script helps you see what arguments are passed to native functions
in [stack] trace. It takes an include file as input and outputs code
similar to:
stock _print(const string[]) {
return print(string);
}
for each native function. Variable arguments ("...") are not supported.#!/usr/bin/env python
#
# Copyright (c) 2014 Zeex
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
import sys
def main(argv):
natives = []
for filename in argv[1:]:
with open(filename, 'r') as f:
for line in f.readlines():
match = re.match(r'native\s+([a-zA-Z_@][a-zA-Z0-9_@]*\(.*?\))\s*;',
line, re.MULTILINE)
if match is not None:
native = match.group(1)
natives.append(native)
for native in natives:
name = re.sub(r'(.*)\(.*\)', r'\1', native)
params = re.sub(r'.*\((.*)\)', r'\1', native)
param_names = [m.group(1)
for m in re.finditer(
r'(?:const\s+)?'
r'(?:(?:{.*?}|\S+):\s*)?'
r'(\S+?)'
r'(?:\s*\[.*?\])?'
r'(?:\s*=\s*.+)?'
r'\s*(?:,|$)', params)]
if '...' not in param_names:
print('stock _%s(%s) {' % (name, params))
print('\treturn %s(%s);' % (name, ', '.join(param_names)))
print('}')
print('#define %s _%s\n' % (name, name))
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add script for generating native wrappers
The script helps you see what arguments are passed to native functions
in [stack] trace. It takes an include file as input and outputs code
similar to:
stock _print(const string[]) {
return print(string);
}
for each native function. Variable arguments ("...") are not supported.<commit_after>#!/usr/bin/env python
#
# Copyright (c) 2014 Zeex
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
import sys
def main(argv):
natives = []
for filename in argv[1:]:
with open(filename, 'r') as f:
for line in f.readlines():
match = re.match(r'native\s+([a-zA-Z_@][a-zA-Z0-9_@]*\(.*?\))\s*;',
line, re.MULTILINE)
if match is not None:
native = match.group(1)
natives.append(native)
for native in natives:
name = re.sub(r'(.*)\(.*\)', r'\1', native)
params = re.sub(r'.*\((.*)\)', r'\1', native)
param_names = [m.group(1)
for m in re.finditer(
r'(?:const\s+)?'
r'(?:(?:{.*?}|\S+):\s*)?'
r'(\S+?)'
r'(?:\s*\[.*?\])?'
r'(?:\s*=\s*.+)?'
r'\s*(?:,|$)', params)]
if '...' not in param_names:
print('stock _%s(%s) {' % (name, params))
print('\treturn %s(%s);' % (name, ', '.join(param_names)))
print('}')
print('#define %s _%s\n' % (name, name))
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
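The delicate part of wrap_natives.py is the parameter-name regex. Applied to a hypothetical Pawn declaration (not taken from any real include file), it strips const qualifiers, tags, array brackets and default values, leaving only the bare names:
import re
params = 'playerid, const name[], Float:health = 100.0'
param_names = [m.group(1)
               for m in re.finditer(
                   r'(?:const\s+)?'           # optional const qualifier
                   r'(?:(?:{.*?}|\S+):\s*)?'  # optional tag such as Float:
                   r'(\S+?)'                  # the parameter name itself
                   r'(?:\s*\[.*?\])?'         # optional array brackets
                   r'(?:\s*=\s*.+)?'          # optional default value
                   r'\s*(?:,|$)', params)]
print(param_names)  # ['playerid', 'name', 'health']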
728580099f0538a13e85a8cdb9c09af2915c9fc4
|
examples/nyc_boros.py
|
examples/nyc_boros.py
|
"""
Generate example images for GeoPandas documentation.
TODO: autogenerate these from docs themselves
Kelsey Jordahl
Time-stamp: <Sun Jul 7 17:31:12 IST 2013>
"""
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import Point
from geopandas import GeoSeries, GeoDataFrame
np.random.seed(1)
DPI = 100
# http://www.nyc.gov/html/dcp/download/bytes/nybb_13a.zip
boros = GeoDataFrame.from_file('nybb.shp')
boros.set_index('BoroCode', inplace=True)
boros.sort()
boros.plot()
plt.xticks(rotation=90)
plt.savefig('nyc.png', dpi=DPI, bbox_inches='tight')
#plt.show()
boros['geometry'].convex_hull.plot()
plt.xticks(rotation=90)
plt.savefig('nyc_hull.png', dpi=DPI, bbox_inches='tight')
#plt.show()
N = 2000 # number of random points
R = 2000 # radius of buffer in feet
xmin, xmax = plt.gca().get_xlim()
ymin, ymax = plt.gca().get_ylim()
#xmin, xmax, ymin, ymax = 900000, 1080000, 120000, 280000
xc = (xmax - xmin) * np.random.random(N) + xmin
yc = (ymax - ymin) * np.random.random(N) + ymin
pts = GeoSeries([Point(x, y) for x, y in zip(xc, yc)])
mp = pts.buffer(R).unary_union
boros_with_holes = boros.geometry - mp
boros_with_holes.plot()
plt.xticks(rotation=90)
plt.savefig('boros_with_holes.png', dpi=DPI, bbox_inches='tight')
plt.show()
holes = boros.geometry & mp
holes.plot()
plt.xticks(rotation=90)
plt.savefig('holes.png', dpi=DPI, bbox_inches='tight')
plt.show()
|
Add script to generate NYC examples
|
Add script to generate NYC examples
|
Python
|
bsd-3-clause
|
jwass/geopandas,maxalbert/geopandas,jdmcbr/geopandas,snario/geopandas,perrygeo/geopandas,jdmcbr/geopandas,geopandas/geopandas,fonnesbeck/geopandas,jorisvandenbossche/geopandas,geopandas/geopandas,koldunovn/geopandas,micahcochran/geopandas,jorisvandenbossche/geopandas,jwass/geopandas,scw/geopandas,jorisvandenbossche/geopandas,micahcochran/geopandas,IamJeffG/geopandas,kwinkunks/geopandas,ozak/geopandas,ozak/geopandas,geopandas/geopandas,urschrei/geopandas
|
Add script to generate NYC examples
|
"""
Generate example images for GeoPandas documentation.
TODO: autogenerate these from docs themselves
Kelsey Jordahl
Time-stamp: <Sun Jul 7 17:31:12 IST 2013>
"""
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import Point
from geopandas import GeoSeries, GeoDataFrame
np.random.seed(1)
DPI = 100
# http://www.nyc.gov/html/dcp/download/bytes/nybb_13a.zip
boros = GeoDataFrame.from_file('nybb.shp')
boros.set_index('BoroCode', inplace=True)
boros.sort()
boros.plot()
plt.xticks(rotation=90)
plt.savefig('nyc.png', dpi=DPI, bbox_inches='tight')
#plt.show()
boros['geometry'].convex_hull.plot()
plt.xticks(rotation=90)
plt.savefig('nyc_hull.png', dpi=DPI, bbox_inches='tight')
#plt.show()
N = 2000 # number of random points
R = 2000 # radius of buffer in feet
xmin, xmax = plt.gca().get_xlim()
ymin, ymax = plt.gca().get_ylim()
#xmin, xmax, ymin, ymax = 900000, 1080000, 120000, 280000
xc = (xmax - xmin) * np.random.random(N) + xmin
yc = (ymax - ymin) * np.random.random(N) + ymin
pts = GeoSeries([Point(x, y) for x, y in zip(xc, yc)])
mp = pts.buffer(R).unary_union
boros_with_holes = boros.geometry - mp
boros_with_holes.plot()
plt.xticks(rotation=90)
plt.savefig('boros_with_holes.png', dpi=DPI, bbox_inches='tight')
plt.show()
holes = boros.geometry & mp
holes.plot()
plt.xticks(rotation=90)
plt.savefig('holes.png', dpi=DPI, bbox_inches='tight')
plt.show()
|
<commit_before><commit_msg>Add script to generate NYC examples<commit_after>
|
"""
Generate example images for GeoPandas documentation.
TODO: autogenerate these from docs themselves
Kelsey Jordahl
Time-stamp: <Sun Jul 7 17:31:12 IST 2013>
"""
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import Point
from geopandas import GeoSeries, GeoDataFrame
np.random.seed(1)
DPI = 100
# http://www.nyc.gov/html/dcp/download/bytes/nybb_13a.zip
boros = GeoDataFrame.from_file('nybb.shp')
boros.set_index('BoroCode', inplace=True)
boros.sort()
boros.plot()
plt.xticks(rotation=90)
plt.savefig('nyc.png', dpi=DPI, bbox_inches='tight')
#plt.show()
boros['geometry'].convex_hull.plot()
plt.xticks(rotation=90)
plt.savefig('nyc_hull.png', dpi=DPI, bbox_inches='tight')
#plt.show()
N = 2000 # number of random points
R = 2000 # radius of buffer in feet
xmin, xmax = plt.gca().get_xlim()
ymin, ymax = plt.gca().get_ylim()
#xmin, xmax, ymin, ymax = 900000, 1080000, 120000, 280000
xc = (xmax - xmin) * np.random.random(N) + xmin
yc = (ymax - ymin) * np.random.random(N) + ymin
pts = GeoSeries([Point(x, y) for x, y in zip(xc, yc)])
mp = pts.buffer(R).unary_union
boros_with_holes = boros.geometry - mp
boros_with_holes.plot()
plt.xticks(rotation=90)
plt.savefig('boros_with_holes.png', dpi=DPI, bbox_inches='tight')
plt.show()
holes = boros.geometry & mp
holes.plot()
plt.xticks(rotation=90)
plt.savefig('holes.png', dpi=DPI, bbox_inches='tight')
plt.show()
|
Add script to generate NYC examples"""
Generate example images for GeoPandas documentation.
TODO: autogenerate these from docs themselves
Kelsey Jordahl
Time-stamp: <Sun Jul 7 17:31:12 IST 2013>
"""
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import Point
from geopandas import GeoSeries, GeoDataFrame
np.random.seed(1)
DPI = 100
# http://www.nyc.gov/html/dcp/download/bytes/nybb_13a.zip
boros = GeoDataFrame.from_file('nybb.shp')
boros.set_index('BoroCode', inplace=True)
boros.sort()
boros.plot()
plt.xticks(rotation=90)
plt.savefig('nyc.png', dpi=DPI, bbox_inches='tight')
#plt.show()
boros['geometry'].convex_hull.plot()
plt.xticks(rotation=90)
plt.savefig('nyc_hull.png', dpi=DPI, bbox_inches='tight')
#plt.show()
N = 2000 # number of random points
R = 2000 # radius of buffer in feet
xmin, xmax = plt.gca().get_xlim()
ymin, ymax = plt.gca().get_ylim()
#xmin, xmax, ymin, ymax = 900000, 1080000, 120000, 280000
xc = (xmax - xmin) * np.random.random(N) + xmin
yc = (ymax - ymin) * np.random.random(N) + ymin
pts = GeoSeries([Point(x, y) for x, y in zip(xc, yc)])
mp = pts.buffer(R).unary_union
boros_with_holes = boros.geometry - mp
boros_with_holes.plot()
plt.xticks(rotation=90)
plt.savefig('boros_with_holes.png', dpi=DPI, bbox_inches='tight')
plt.show()
holes = boros.geometry & mp
holes.plot()
plt.xticks(rotation=90)
plt.savefig('holes.png', dpi=DPI, bbox_inches='tight')
plt.show()
|
<commit_before><commit_msg>Add script to generate NYC examples<commit_after>"""
Generate example images for GeoPandas documentation.
TODO: autogenerate these from docs themselves
Kelsey Jordahl
Time-stamp: <Sun Jul 7 17:31:12 IST 2013>
"""
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import Point
from geopandas import GeoSeries, GeoDataFrame
np.random.seed(1)
DPI = 100
# http://www.nyc.gov/html/dcp/download/bytes/nybb_13a.zip
boros = GeoDataFrame.from_file('nybb.shp')
boros.set_index('BoroCode', inplace=True)
boros.sort()
boros.plot()
plt.xticks(rotation=90)
plt.savefig('nyc.png', dpi=DPI, bbox_inches='tight')
#plt.show()
boros['geometry'].convex_hull.plot()
plt.xticks(rotation=90)
plt.savefig('nyc_hull.png', dpi=DPI, bbox_inches='tight')
#plt.show()
N = 2000 # number of random points
R = 2000 # radius of buffer in feet
xmin, xmax = plt.gca().get_xlim()
ymin, ymax = plt.gca().get_ylim()
#xmin, xmax, ymin, ymax = 900000, 1080000, 120000, 280000
xc = (xmax - xmin) * np.random.random(N) + xmin
yc = (ymax - ymin) * np.random.random(N) + ymin
pts = GeoSeries([Point(x, y) for x, y in zip(xc, yc)])
mp = pts.buffer(R).unary_union
boros_with_holes = boros.geometry - mp
boros_with_holes.plot()
plt.xticks(rotation=90)
plt.savefig('boros_with_holes.png', dpi=DPI, bbox_inches='tight')
plt.show()
holes = boros.geometry & mp
holes.plot()
plt.xticks(rotation=90)
plt.savefig('holes.png', dpi=DPI, bbox_inches='tight')
plt.show()
|
|
87cca84b6750a3176b86df2786a9b78f7647c062
|
plugins/hello/hello_test.py
|
plugins/hello/hello_test.py
|
from p1tr.test import *
class HelloTest(PluginTestCase):
@test
def hello_test(self):
for data in self.dummy_data:
self.assertEqual(self.plugin.hello(data.server, data.channel,
data.nick, data.params),
'Hello, %s!' % data.nick.split('!')[0])
|
Add test case for hello plugin
|
Add test case for hello plugin
|
Python
|
mit
|
howard/p1tr-tng,howard/p1tr-tng
|
Add test case for hello plugin
|
from p1tr.test import *
class HelloTest(PluginTestCase):
@test
def hello_test(self):
for data in self.dummy_data:
self.assertEqual(self.plugin.hello(data.server, data.channel,
data.nick, data.params),
'Hello, %s!' % data.nick.split('!')[0])
|
<commit_before><commit_msg>Add test case for hello plugin<commit_after>
|
from p1tr.test import *
class HelloTest(PluginTestCase):
@test
def hello_test(self):
for data in self.dummy_data:
self.assertEqual(self.plugin.hello(data.server, data.channel,
data.nick, data.params),
'Hello, %s!' % data.nick.split('!')[0])
|
Add test case for hello pluginfrom p1tr.test import *
class HelloTest(PluginTestCase):
@test
def hello_test(self):
for data in self.dummy_data:
self.assertEqual(self.plugin.hello(data.server, data.channel,
data.nick, data.params),
'Hello, %s!' % data.nick.split('!')[0])
|
<commit_before><commit_msg>Add test case for hello plugin<commit_after>from p1tr.test import *
class HelloTest(PluginTestCase):
@test
def hello_test(self):
for data in self.dummy_data:
self.assertEqual(self.plugin.hello(data.server, data.channel,
data.nick, data.params),
'Hello, %s!' % data.nick.split('!')[0])
|
|
caea65165c5443252763b1efaf60210b6f59f1cd
|
migrations/versions/0117_international_sms_notify.py
|
migrations/versions/0117_international_sms_notify.py
|
"""empty message
Revision ID: 0117_international_sms_notify
Revises: 0116_another_letter_org
Create Date: 2017-08-29 14:09:41.042061
"""
# revision identifiers, used by Alembic.
revision = '0117_international_sms_notify'
down_revision = '0116_another_letter_org'
from alembic import op
from datetime import datetime
NOTIFY_SERVICE_ID = 'd6aa2c68-a2d9-4437-ab19-3ae8eb202553'
def upgrade():
op.execute("""
INSERT INTO service_permissions VALUES
('{}', 'international_sms', '{}')
""".format(NOTIFY_SERVICE_ID, datetime.utcnow()))
def downgrade():
op.execute("""
DELETE FROM service_permissions
WHERE
service_id = '{}' AND
permission = 'international_sms'
""".format(NOTIFY_SERVICE_ID))
|
Allow Notify service to send international sms
|
Allow Notify service to send international sms
Right now Notify restricts you to registering with a UK mobile number.
This is because when we built the user registration stuff we couldn’t
send to international mobiles.
However we can send to international mobile numbers, and it’s totally
reasonable to expect employees of the UK government to be working
abroad, and have a foreign mobile phone – we’ve heard from one such
user.
In order for users of Notify to register with an international phone
number, the Notify service needs to have the `international_sms`
permission set. Which this service does, as a data migration.
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Allow Notify service to send international sms
Right now Notify restricts you to registering with a UK mobile number.
This is because when we built the user registration stuff we couldn’t
send to international mobiles.
However we can send to international mobile numbers, and it’s totally
reasonable to expect employees of the UK government to be working
abroad, and have a foreign mobile phone – we’ve heard from one such
user.
In order for users of Notify to register with an international phone
number, the Notify service needs to have the `international_sms`
permission set. Which this service does, as a data migration.
|
"""empty message
Revision ID: 0117_international_sms_notify
Revises: 0116_another_letter_org
Create Date: 2017-08-29 14:09:41.042061
"""
# revision identifiers, used by Alembic.
revision = '0117_international_sms_notify'
down_revision = '0116_another_letter_org'
from alembic import op
from datetime import datetime
NOTIFY_SERVICE_ID = 'd6aa2c68-a2d9-4437-ab19-3ae8eb202553'
def upgrade():
op.execute("""
INSERT INTO service_permissions VALUES
('{}', 'international_sms', '{}')
""".format(NOTIFY_SERVICE_ID, datetime.utcnow()))
def downgrade():
op.execute("""
DELETE FROM service_permissions
WHERE
service_id = '{}' AND
permission = 'international_sms'
""".format(NOTIFY_SERVICE_ID))
|
<commit_before><commit_msg>Allow Notify service to send international sms
Right now Notify restricts you to registering with a UK mobile number.
This is because when we built the user registration stuff we couldn’t
send to international mobiles.
However we can send to international mobile numbers, and it’s totally
reasonable to expect employees of the UK government to be working
abroad, and have a foreign mobile phone – we’ve heard from one such
user.
In order for users of Notify to register with an international phone
number, the Notify service needs to have the `international_sms`
permission set. Which this service does, as a data migration.<commit_after>
|
"""empty message
Revision ID: 0117_international_sms_notify
Revises: 0116_another_letter_org
Create Date: 2017-08-29 14:09:41.042061
"""
# revision identifiers, used by Alembic.
revision = '0117_international_sms_notify'
down_revision = '0116_another_letter_org'
from alembic import op
from datetime import datetime
NOTIFY_SERVICE_ID = 'd6aa2c68-a2d9-4437-ab19-3ae8eb202553'
def upgrade():
op.execute("""
INSERT INTO service_permissions VALUES
('{}', 'international_sms', '{}')
""".format(NOTIFY_SERVICE_ID, datetime.utcnow()))
def downgrade():
op.execute("""
DELETE FROM service_permissions
WHERE
service_id = '{}' AND
permission = 'international_sms'
""".format(NOTIFY_SERVICE_ID))
|
Allow Notify service to send international sms
Right now Notify restricts you to registering with a UK mobile number.
This is because when we built the user registration stuff we couldn’t
send to international mobiles.
However we can send to international mobile numbers, and it’s totally
reasonable to expect employees of the UK government to be working
abroad, and have a foreign mobile phone – we’ve heard from one such
user.
In order for users of Notify to register with an international phone
number, the Notify service needs to have the `international_sms`
permission set. Which this service does, as a data migration."""empty message
Revision ID: 0117_international_sms_notify
Revises: 0116_another_letter_org
Create Date: 2017-08-29 14:09:41.042061
"""
# revision identifiers, used by Alembic.
revision = '0117_international_sms_notify'
down_revision = '0116_another_letter_org'
from alembic import op
from datetime import datetime
NOTIFY_SERVICE_ID = 'd6aa2c68-a2d9-4437-ab19-3ae8eb202553'
def upgrade():
op.execute("""
INSERT INTO service_permissions VALUES
('{}', 'international_sms', '{}')
""".format(NOTIFY_SERVICE_ID, datetime.utcnow()))
def downgrade():
op.execute("""
DELETE FROM service_permissions
WHERE
service_id = '{}' AND
permission = 'international_sms'
""".format(NOTIFY_SERVICE_ID))
|
<commit_before><commit_msg>Allow Notify service to send international sms
Right now Notify restricts you to registering with a UK mobile number.
This is because when we built the user registration stuff we couldn’t
send to international mobiles.
However we can send to international mobile numbers, and it’s totally
reasonable to expect employees of the UK government to be working
abroad, and have a foreign mobile phone – we’ve heard from one such
user.
In order for users of Notify to register with an international phone
number, the Notify service needs to have the `international_sms`
permission set. Which this service does, as a data migration.<commit_after>"""empty message
Revision ID: 0117_international_sms_notify
Revises: 0116_another_letter_org
Create Date: 2017-08-29 14:09:41.042061
"""
# revision identifiers, used by Alembic.
revision = '0117_international_sms_notify'
down_revision = '0116_another_letter_org'
from alembic import op
from datetime import datetime
NOTIFY_SERVICE_ID = 'd6aa2c68-a2d9-4437-ab19-3ae8eb202553'
def upgrade():
op.execute("""
INSERT INTO service_permissions VALUES
('{}', 'international_sms', '{}')
""".format(NOTIFY_SERVICE_ID, datetime.utcnow()))
def downgrade():
op.execute("""
DELETE FROM service_permissions
WHERE
service_id = '{}' AND
permission = 'international_sms'
""".format(NOTIFY_SERVICE_ID))
|
|
c340e6bebcfac3265e67b6bdb333874e5942e4af
|
cronjob.py
|
cronjob.py
|
import re
from parsers import Parse
class CronJob(object):
def __init__(self, line):
# matches five fields separated by whitespace and then everything else
# (the command)
match = re.match(r'^(\S*)\s+(\S*)\s+(\S*)\s+(\S*)\s+(\S*)\s+(.*)$', line);
self.minutes = Parse.minutes(match.group(1))
self.hours = Parse.hours(match.group(2))
self.days_of_month = Parse.days_of_month(match.group(3))
self.months = Parse.months(match.group(4))
self.days_of_week = Parse.days_of_week(match.group(5))
self.command = match.group(6)
if __name__ == '__main__':
c = CronJob('* * * * * awesome-command somefilename');
print c.minutes, c.hours, c.days_of_month, c.months, c.days_of_week
|
Add CronJob class; appears to turn cron-job line into proper data (no weekday/month name aliasing yet)
|
Add CronJob class; appears to turn cron-job line into proper data (no weekday/month name aliasing yet)
|
Python
|
mit
|
ChrisTM/next-crons
|
Add CronJob class; appears to turn cron-job line into proper data (no weekday/month name aliasing yet)
|
import re
from parsers import Parse
class CronJob(object):
def __init__(self, line):
# matches five fields separated by whitespace and then everything else
# (the command)
match = re.match(r'^(\S*)\s+(\S*)\s+(\S*)\s+(\S*)\s+(\S*)\s+(.*)$', line);
self.minutes = Parse.minutes(match.group(1))
self.hours = Parse.hours(match.group(2))
self.days_of_month = Parse.days_of_month(match.group(3))
self.months = Parse.months(match.group(4))
self.days_of_week = Parse.days_of_week(match.group(5))
self.command = match.group(6)
if __name__ == '__main__':
c = CronJob('* * * * * awesome-command somefilename');
print c.minutes, c.hours, c.days_of_month, c.months, c.days_of_week
|
<commit_before><commit_msg>Add CronJob class; appears to turn cron-job line into proper data (no weekday/month name aliasing yet)<commit_after>
|
import re
from parsers import Parse
class CronJob(object):
def __init__(self, line):
# matches five fields separated by whitespace and then everything else
# (the command)
match = re.match(r'^(\S*)\s+(\S*)\s+(\S*)\s+(\S*)\s+(\S*)\s+(.*)$', line);
self.minutes = Parse.minutes(match.group(1))
self.hours = Parse.hours(match.group(2))
self.days_of_month = Parse.days_of_month(match.group(3))
self.months = Parse.months(match.group(4))
self.days_of_week = Parse.days_of_week(match.group(5))
self.command = match.group(6)
if __name__ == '__main__':
c = CronJob('* * * * * awesome-command somefilename');
print c.minutes, c.hours, c.days_of_month, c.months, c.days_of_week
|
Add CronJob class; appears to turn cron-job line into proper data (no weekday/month name aliasing yet)import re
from parsers import Parse
class CronJob(object):
def __init__(self, line):
# matches five fields separated by whitespace and then everything else
# (the command)
match = re.match(r'^(\S*)\s+(\S*)\s+(\S*)\s+(\S*)\s+(\S*)\s+(.*)$', line);
self.minutes = Parse.minutes(match.group(1))
self.hours = Parse.hours(match.group(2))
self.days_of_month = Parse.days_of_month(match.group(3))
self.months = Parse.months(match.group(4))
self.days_of_week = Parse.days_of_week(match.group(5))
self.command = match.group(6)
if __name__ == '__main__':
c = CronJob('* * * * * awesome-command somefilename');
print c.minutes, c.hours, c.days_of_month, c.months, c.days_of_week
|
<commit_before><commit_msg>Add CronJob class; appears to turn cron-job line into proper data (no weekday/month name aliasing yet)<commit_after>import re
from parsers import Parse
class CronJob(object):
def __init__(self, line):
# matches five fields separated by whitespace and then everything else
# (the command)
match = re.match(r'^(\S*)\s+(\S*)\s+(\S*)\s+(\S*)\s+(\S*)\s+(.*)$', line);
self.minutes = Parse.minutes(match.group(1))
self.hours = Parse.hours(match.group(2))
self.days_of_month = Parse.days_of_month(match.group(3))
self.months = Parse.months(match.group(4))
self.days_of_week = Parse.days_of_week(match.group(5))
self.command = match.group(6)
if __name__ == '__main__':
c = CronJob('* * * * * awesome-command somefilename');
print c.minutes, c.hours, c.days_of_month, c.months, c.days_of_week
|
|
f4a04d62de2e83c146caf5237e72967185560ba2
|
regulations/management/commands/setup_cors.py
|
regulations/management/commands/setup_cors.py
|
import boto3
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Set CORS rules on the Notice and Comment attachment bucket'
def handle(self, *args, **options):
session = boto3.Session(
aws_access_key_id=settings.ATTACHMENT_ACCESS_KEY_ID,
aws_secret_access_key=settings.ATTACHMENT_SECRET_ACCESS_KEY,
)
s3 = session.client('s3')
s3.put_bucket_cors(
Bucket=settings.ATTACHMENT_BUCKET,
CORSConfiguration={
'CORSRules': [
{
'AllowedMethods': ['PUT'],
'AllowedOrigins': ['*'],
'AllowedHeaders': ['*'],
},
],
},
)
|
Add command for S3 CORS configuration.
|
Add command for S3 CORS configuration.
To be run on app startup or manually.
[Resolves https://github.com/eregs/notice-and-comment/issues/57]
|
Python
|
cc0-1.0
|
18F/regulations-site,tadhg-ohiggins/regulations-site,eregs/regulations-site,tadhg-ohiggins/regulations-site,eregs/regulations-site,18F/regulations-site,18F/regulations-site,eregs/regulations-site,tadhg-ohiggins/regulations-site,eregs/regulations-site,tadhg-ohiggins/regulations-site,18F/regulations-site
|
Add command for S3 CORS configuration.
To be run on app startup or manually.
[Resolves https://github.com/eregs/notice-and-comment/issues/57]
|
import boto3
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Set CORS rules on the Notice and Comment attachment bucket'
def handle(self, *args, **options):
session = boto3.Session(
aws_access_key_id=settings.ATTACHMENT_ACCESS_KEY_ID,
aws_secret_access_key=settings.ATTACHMENT_SECRET_ACCESS_KEY,
)
s3 = session.client('s3')
s3.put_bucket_cors(
Bucket=settings.ATTACHMENT_BUCKET,
CORSConfiguration={
'CORSRules': [
{
'AllowedMethods': ['PUT'],
'AllowedOrigins': ['*'],
'AllowedHeaders': ['*'],
},
],
},
)
|
<commit_before><commit_msg>Add command for S3 CORS configuration.
To be run on app startup or manually.
[Resolves https://github.com/eregs/notice-and-comment/issues/57]<commit_after>
|
import boto3
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Set CORS rules on the Notice and Comment attachment bucket'
def handle(self, *args, **options):
session = boto3.Session(
aws_access_key_id=settings.ATTACHMENT_ACCESS_KEY_ID,
aws_secret_access_key=settings.ATTACHMENT_SECRET_ACCESS_KEY,
)
s3 = session.client('s3')
s3.put_bucket_cors(
Bucket=settings.ATTACHMENT_BUCKET,
CORSConfiguration={
'CORSRules': [
{
'AllowedMethods': ['PUT'],
'AllowedOrigins': ['*'],
'AllowedHeaders': ['*'],
},
],
},
)
|
Add command for S3 CORS configuration.
To be run on app startup or manually.
[Resolves https://github.com/eregs/notice-and-comment/issues/57]import boto3
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Set CORS rules on the Notice and Comment attachment bucket'
def handle(self, *args, **options):
session = boto3.Session(
aws_access_key_id=settings.ATTACHMENT_ACCESS_KEY_ID,
aws_secret_access_key=settings.ATTACHMENT_SECRET_ACCESS_KEY,
)
s3 = session.client('s3')
s3.put_bucket_cors(
Bucket=settings.ATTACHMENT_BUCKET,
CORSConfiguration={
'CORSRules': [
{
'AllowedMethods': ['PUT'],
'AllowedOrigins': ['*'],
'AllowedHeaders': ['*'],
},
],
},
)
|
<commit_before><commit_msg>Add command for S3 CORS configuration.
To be run on app startup or manually.
[Resolves https://github.com/eregs/notice-and-comment/issues/57]<commit_after>import boto3
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Set CORS rules on the Notice and Comment attachment bucket'
def handle(self, *args, **options):
session = boto3.Session(
aws_access_key_id=settings.ATTACHMENT_ACCESS_KEY_ID,
aws_secret_access_key=settings.ATTACHMENT_SECRET_ACCESS_KEY,
)
s3 = session.client('s3')
s3.put_bucket_cors(
Bucket=settings.ATTACHMENT_BUCKET,
CORSConfiguration={
'CORSRules': [
{
'AllowedMethods': ['PUT'],
'AllowedOrigins': ['*'],
'AllowedHeaders': ['*'],
},
],
},
)
|
|
51c9706c5243d5edc416da097309673cfa0ee495
|
data-wrangling/csv2lua.py
|
data-wrangling/csv2lua.py
|
# Convert a CSV file to a Lua table script that can be imported using `dofile`.
import csv
def csv2lua(in_file, out_file, global_name):
fp_in = open(in_file, 'r')
rows = list(csv.reader(fp_in))
fp_in.close()
headers = rows[0]
lua_rows = []
for row in rows[1:]:
cells = []
print row
for i, cell in enumerate(row):
key = headers[i]
try:
cell = int(cell)
cells.append('%s=%s' % (key, cell))
except ValueError:
cells.append('%s="%s"' % (key, cell))
lua_rows.append(' {' + ', '.join(cells) + '}')
#start_freq, end_freq, allocation, applications = row
#s = ' {start_freq=%s, end_freq=%s, allocation="%s", applications="%s"}' % (start_freq, end_freq, allocation, applications)
#lua_rows.append(s)
s = '%s = {\n%s\n}\n' % (global_name, ',\n'.join(lua_rows))
print s
fp_out = open(out_file, 'w')
fp_out.write(s)
fp_out.close()
def usage():
print "python csv2lua.py <table.csv> <table.lua> <global>"
if __name__ == '__main__':
import sys
if len(sys.argv) != 4:
usage()
else:
csv2lua(sys.argv[1], sys.argv[2], sys.argv[3])
|
Add a conversion script to turn CSV into Lua table.
|
Add a conversion script to turn CSV into Lua table.
|
Python
|
mit
|
silky/frequensea,silky/frequensea,fdb/frequensea,silky/frequensea,fdb/frequensea,fdb/frequensea,silky/frequensea,fdb/frequensea,fdb/frequensea,silky/frequensea
|
Add a conversion script to turn CSV into Lua table.
|
# Convert a CSV file to a Lua table script that can be imported using `dofile`.
import csv
def csv2lua(in_file, out_file, global_name):
fp_in = open(in_file, 'r')
rows = list(csv.reader(fp_in))
fp_in.close()
headers = rows[0]
lua_rows = []
for row in rows[1:]:
cells = []
print row
for i, cell in enumerate(row):
key = headers[i]
try:
cell = int(cell)
cells.append('%s=%s' % (key, cell))
except ValueError:
cells.append('%s="%s"' % (key, cell))
lua_rows.append(' {' + ', '.join(cells) + '}')
#start_freq, end_freq, allocation, applications = row
#s = ' {start_freq=%s, end_freq=%s, allocation="%s", applications="%s"}' % (start_freq, end_freq, allocation, applications)
#lua_rows.append(s)
s = '%s = {\n%s\n}\n' % (global_name, ',\n'.join(lua_rows))
print s
fp_out = open(out_file, 'w')
fp_out.write(s)
fp_out.close()
def usage():
print "python csv2lua.py <table.csv> <table.lua> <global>"
if __name__ == '__main__':
import sys
if len(sys.argv) != 4:
usage()
else:
csv2lua(sys.argv[1], sys.argv[2], sys.argv[3])
|
<commit_before><commit_msg>Add a conversion script to turn CSV into Lua table.<commit_after>
|
# Convert a CSV file to a Lua table script that can be imported using `dofile`.
import csv
def csv2lua(in_file, out_file, global_name):
fp_in = open(in_file, 'r')
rows = list(csv.reader(fp_in))
fp_in.close()
headers = rows[0]
lua_rows = []
for row in rows[1:]:
cells = []
print row
for i, cell in enumerate(row):
key = headers[i]
try:
cell = int(cell)
cells.append('%s=%s' % (key, cell))
except ValueError:
cells.append('%s="%s"' % (key, cell))
lua_rows.append(' {' + ', '.join(cells) + '}')
#start_freq, end_freq, allocation, applications = row
#s = ' {start_freq=%s, end_freq=%s, allocation="%s", applications="%s"}' % (start_freq, end_freq, allocation, applications)
#lua_rows.append(s)
s = '%s = {\n%s\n}\n' % (global_name, ',\n'.join(lua_rows))
print s
fp_out = open(out_file, 'w')
fp_out.write(s)
fp_out.close()
def usage():
print "python csv2lua.py <table.csv> <table.lua> <global>"
if __name__ == '__main__':
import sys
if len(sys.argv) != 4:
usage()
else:
csv2lua(sys.argv[1], sys.argv[2], sys.argv[3])
|
Add a conversion script to turn CSV into Lua table.# Convert a CSV file to a Lua table script that can be imported using `dofile`.
import csv
def csv2lua(in_file, out_file, global_name):
fp_in = open(in_file, 'r')
rows = list(csv.reader(fp_in))
fp_in.close()
headers = rows[0]
lua_rows = []
for row in rows[1:]:
cells = []
print row
for i, cell in enumerate(row):
key = headers[i]
try:
cell = int(cell)
cells.append('%s=%s' % (key, cell))
except ValueError:
cells.append('%s="%s"' % (key, cell))
lua_rows.append(' {' + ', '.join(cells) + '}')
#start_freq, end_freq, allocation, applications = row
#s = ' {start_freq=%s, end_freq=%s, allocation="%s", applications="%s"}' % (start_freq, end_freq, allocation, applications)
#lua_rows.append(s)
s = '%s = {\n%s\n}\n' % (global_name, ',\n'.join(lua_rows))
print s
fp_out = open(out_file, 'w')
fp_out.write(s)
fp_out.close()
def usage():
print "python csv2lua.py <table.csv> <table.lua> <global>"
if __name__ == '__main__':
import sys
if len(sys.argv) != 4:
usage()
else:
csv2lua(sys.argv[1], sys.argv[2], sys.argv[3])
|
<commit_before><commit_msg>Add a conversion script to turn CSV into Lua table.<commit_after># Convert a CSV file to a Lua table script that can be imported using `dofile`.
import csv
def csv2lua(in_file, out_file, global_name):
fp_in = open(in_file, 'r')
rows = list(csv.reader(fp_in))
fp_in.close()
headers = rows[0]
lua_rows = []
for row in rows[1:]:
cells = []
print row
for i, cell in enumerate(row):
key = headers[i]
try:
cell = int(cell)
cells.append('%s=%s' % (key, cell))
except ValueError:
cells.append('%s="%s"' % (key, cell))
lua_rows.append(' {' + ', '.join(cells) + '}')
#start_freq, end_freq, allocation, applications = row
#s = ' {start_freq=%s, end_freq=%s, allocation="%s", applications="%s"}' % (start_freq, end_freq, allocation, applications)
#lua_rows.append(s)
s = '%s = {\n%s\n}\n' % (global_name, ',\n'.join(lua_rows))
print s
fp_out = open(out_file, 'w')
fp_out.write(s)
fp_out.close()
def usage():
print "python csv2lua.py <table.csv> <table.lua> <global>"
if __name__ == '__main__':
import sys
if len(sys.argv) != 4:
usage()
else:
csv2lua(sys.argv[1], sys.argv[2], sys.argv[3])
|
|
4b9996463765fa9bdb18e223cce6e64ffe8810ff
|
fabfile.py
|
fabfile.py
|
from fabric.operations import local
def runserver():
local("java -jar stagger/stagger.jar -modelfile models/swedish.bin -server 127.0.0.1 9000")
def tag(filename):
local("python stagger/scripts/tagtcp.py 127.0.0.1 9000 %s" % filename)
|
Add commands to run tagger server and tag using python.
|
Add commands to run tagger server and tag using python.
|
Python
|
mit
|
EmilStenstrom/json-tagger,EmilStenstrom/json-tagger,EmilStenstrom/json-tagger,EmilStenstrom/json-tagger,EmilStenstrom/json-tagger
|
Add commands to run tagger server and tag using python.
|
from fabric.operations import local
def runserver():
local("java -jar stagger/stagger.jar -modelfile models/swedish.bin -server 127.0.0.1 9000")
def tag(filename):
local("python stagger/scripts/tagtcp.py 127.0.0.1 9000 %s" % filename)
|
<commit_before><commit_msg>Add commands to run tagger server and tag using python.<commit_after>
|
from fabric.operations import local
def runserver():
local("java -jar stagger/stagger.jar -modelfile models/swedish.bin -server 127.0.0.1 9000")
def tag(filename):
local("python stagger/scripts/tagtcp.py 127.0.0.1 9000 %s" % filename)
|
Add commands to run tagger server and tag using python.from fabric.operations import local
def runserver():
local("java -jar stagger/stagger.jar -modelfile models/swedish.bin -server 127.0.0.1 9000")
def tag(filename):
local("python stagger/scripts/tagtcp.py 127.0.0.1 9000 %s" % filename)
|
<commit_before><commit_msg>Add commands to run tagger server and tag using python.<commit_after>from fabric.operations import local
def runserver():
local("java -jar stagger/stagger.jar -modelfile models/swedish.bin -server 127.0.0.1 9000")
def tag(filename):
local("python stagger/scripts/tagtcp.py 127.0.0.1 9000 %s" % filename)
|
|
74a78fc5a48ce834390590031d3d054214609ec0
|
djangocms_blog/cms_app.py
|
djangocms_blog/cms_app.py
|
# -*- coding: utf-8 -*-
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _, get_language
from .menu import BlogCategoryMenu
class BlogApp(CMSApp):
name = _('Blog')
urls = ['djangocms_blog.urls']
app_name = 'djangocms_blog'
menus = [BlogCategoryMenu]
apphook_pool.register(BlogApp)
|
# -*- coding: utf-8 -*-
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from cms.menu_bases import CMSAttachMenu
from menus.base import NavigationNode
from menus.menu_pool import menu_pool
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _, get_language
from .models import BlogCategory
class BlogCategoryMenu(CMSAttachMenu):
name = _('Blog Category menu')
def get_nodes(self, request):
nodes = []
qs = BlogCategory.objects.translated(get_language())
qs = qs.order_by('parent_id', 'translations__name').distinct()
for category in qs:
kwargs = { 'category': category.slug }
node = NavigationNode(
category.name,
reverse('djangocms_blog:posts-category', kwargs=kwargs),
category.pk,
category.parent_id
)
nodes.append(node)
return nodes
menu_pool.register_menu(BlogCategoryMenu)
class BlogApp(CMSApp):
name = _('Blog')
urls = ['djangocms_blog.urls']
app_name = 'djangocms_blog'
menus = [BlogCategoryMenu]
apphook_pool.register(BlogApp)
|
Attach category menu to CMSApp
|
Attach category menu to CMSApp
|
Python
|
bsd-3-clause
|
motleytech/djangocms-blog,vnavascues/djangocms-blog,mistalaba/djangocms-blog,marty3d/djangocms-blog,EnglishConnection/djangocms-blog,jedie/djangocms-blog,kriwil/djangocms-blog,skirsdeda/djangocms-blog,nephila/djangocms-blog,mistalaba/djangocms-blog,sephii/djangocms-blog,dapeng0802/djangocms-blog,nephila/djangocms-blog,ImaginaryLandscape/djangocms-blog,britny/djangocms-blog,ImaginaryLandscape/djangocms-blog,jedie/djangocms-blog,skirsdeda/djangocms-blog,nephila/djangocms-blog,skirsdeda/djangocms-blog,EnglishConnection/djangocms-blog,dapeng0802/djangocms-blog,kriwil/djangocms-blog,sephii/djangocms-blog,vnavascues/djangocms-blog,britny/djangocms-blog,motleytech/djangocms-blog,marty3d/djangocms-blog
|
# -*- coding: utf-8 -*-
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _, get_language
from .menu import BlogCategoryMenu
class BlogApp(CMSApp):
name = _('Blog')
urls = ['djangocms_blog.urls']
app_name = 'djangocms_blog'
menus = [BlogCategoryMenu]
apphook_pool.register(BlogApp)
Attach category menu to CMSApp
|
# -*- coding: utf-8 -*-
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from cms.menu_bases import CMSAttachMenu
from menus.base import NavigationNode
from menus.menu_pool import menu_pool
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _, get_language
from .models import BlogCategory
class BlogCategoryMenu(CMSAttachMenu):
name = _('Blog Category menu')
def get_nodes(self, request):
nodes = []
qs = BlogCategory.objects.translated(get_language())
qs = qs.order_by('parent_id', 'translations__name').distinct()
for category in qs:
kwargs = { 'category': category.slug }
node = NavigationNode(
category.name,
reverse('djangocms_blog:posts-category', kwargs=kwargs),
category.pk,
category.parent_id
)
nodes.append(node)
return nodes
menu_pool.register_menu(BlogCategoryMenu)
class BlogApp(CMSApp):
name = _('Blog')
urls = ['djangocms_blog.urls']
app_name = 'djangocms_blog'
menus = [BlogCategoryMenu]
apphook_pool.register(BlogApp)
|
<commit_before># -*- coding: utf-8 -*-
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _, get_language
from .menu import BlogCategoryMenu
class BlogApp(CMSApp):
name = _('Blog')
urls = ['djangocms_blog.urls']
app_name = 'djangocms_blog'
menus = [BlogCategoryMenu]
apphook_pool.register(BlogApp)
<commit_msg>Attach category menu to CMSApp<commit_after>
|
# -*- coding: utf-8 -*-
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from cms.menu_bases import CMSAttachMenu
from menus.base import NavigationNode
from menus.menu_pool import menu_pool
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _, get_language
from .models import BlogCategory
class BlogCategoryMenu(CMSAttachMenu):
name = _('Blog Category menu')
def get_nodes(self, request):
nodes = []
qs = BlogCategory.objects.translated(get_language())
qs = qs.order_by('parent_id', 'translations__name').distinct()
for category in qs:
kwargs = { 'category': category.slug }
node = NavigationNode(
category.name,
reverse('djangocms_blog:posts-category', kwargs=kwargs),
category.pk,
category.parent_id
)
nodes.append(node)
return nodes
menu_pool.register_menu(BlogCategoryMenu)
class BlogApp(CMSApp):
name = _('Blog')
urls = ['djangocms_blog.urls']
app_name = 'djangocms_blog'
menus = [BlogCategoryMenu]
apphook_pool.register(BlogApp)
|
# -*- coding: utf-8 -*-
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _, get_language
from .menu import BlogCategoryMenu
class BlogApp(CMSApp):
name = _('Blog')
urls = ['djangocms_blog.urls']
app_name = 'djangocms_blog'
menus = [BlogCategoryMenu]
apphook_pool.register(BlogApp)
Attach category menu to CMSApp# -*- coding: utf-8 -*-
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from cms.menu_bases import CMSAttachMenu
from menus.base import NavigationNode
from menus.menu_pool import menu_pool
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _, get_language
from .models import BlogCategory
class BlogCategoryMenu(CMSAttachMenu):
name = _('Blog Category menu')
def get_nodes(self, request):
nodes = []
qs = BlogCategory.objects.translated(get_language())
qs = qs.order_by('parent_id', 'translations__name').distinct()
for category in qs:
kwargs = { 'category': category.slug }
node = NavigationNode(
category.name,
reverse('djangocms_blog:posts-category', kwargs=kwargs),
category.pk,
category.parent_id
)
nodes.append(node)
return nodes
menu_pool.register_menu(BlogCategoryMenu)
class BlogApp(CMSApp):
name = _('Blog')
urls = ['djangocms_blog.urls']
app_name = 'djangocms_blog'
menus = [BlogCategoryMenu]
apphook_pool.register(BlogApp)
|
<commit_before># -*- coding: utf-8 -*-
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _, get_language
from .menu import BlogCategoryMenu
class BlogApp(CMSApp):
name = _('Blog')
urls = ['djangocms_blog.urls']
app_name = 'djangocms_blog'
menus = [BlogCategoryMenu]
apphook_pool.register(BlogApp)
<commit_msg>Attach category menu to CMSApp<commit_after># -*- coding: utf-8 -*-
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from cms.menu_bases import CMSAttachMenu
from menus.base import NavigationNode
from menus.menu_pool import menu_pool
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _, get_language
from .models import BlogCategory
class BlogCategoryMenu(CMSAttachMenu):
name = _('Blog Category menu')
def get_nodes(self, request):
nodes = []
qs = BlogCategory.objects.translated(get_language())
qs = qs.order_by('parent_id', 'translations__name').distinct()
for category in qs:
kwargs = { 'category': category.slug }
node = NavigationNode(
category.name,
reverse('djangocms_blog:posts-category', kwargs=kwargs),
category.pk,
category.parent_id
)
nodes.append(node)
return nodes
menu_pool.register_menu(BlogCategoryMenu)
class BlogApp(CMSApp):
name = _('Blog')
urls = ['djangocms_blog.urls']
app_name = 'djangocms_blog'
menus = [BlogCategoryMenu]
apphook_pool.register(BlogApp)
|
9e0985fec5bf119708c14e2e4d2bbb099eeab4f7
|
Transformations.py
|
Transformations.py
|
from pkgutil import iter_modules
from pathlib import Path
from importlib import import_module
from transformations.SentenceTransformation import SentenceTransformation
def load(module, class_name):
my_class_py = getattr(module, class_name)
my_class = getattr(my_class_py, class_name)
return my_class()
class TransformationsList:
def __init__(self):
# iterate through the modules in the current package
package_dir = Path(__file__).resolve() # --> Transformations.py
for (_, m, _) in iter_modules([package_dir.parent.joinpath("transformations")]):
import_module(f"transformations.{m}")
module = __import__("transformations")
transformations = [load(module, cls.__name__) for cls in SentenceTransformation.__subclasses__()]
self.transformations = transformations
def generate(self, sentence: str):
print(f"Original Input : {sentence}")
generations = {"Original": sentence}
for transformation in self.transformations:
generations[transformation.name()] = transformation.generate(sentence)
return generations
|
Load sub-classes without using explicit names
|
Load sub-classes without using explicit names
|
Python
|
mit
|
GEM-benchmark/NL-Augmenter
|
Load sub-classes without using explicit names
|
from pkgutil import iter_modules
from pathlib import Path
from importlib import import_module
from transformations.SentenceTransformation import SentenceTransformation
def load(module, class_name):
my_class_py = getattr(module, class_name)
my_class = getattr(my_class_py, class_name)
return my_class()
class TransformationsList:
def __init__(self):
# iterate through the modules in the current package
package_dir = Path(__file__).resolve() # --> Transformations.py
for (_, m, _) in iter_modules([package_dir.parent.joinpath("transformations")]):
import_module(f"transformations.{m}")
module = __import__("transformations")
transformations = [load(module, cls.__name__) for cls in SentenceTransformation.__subclasses__()]
self.transformations = transformations
def generate(self, sentence: str):
print(f"Original Input : {sentence}")
generations = {"Original": sentence}
for transformation in self.transformations:
generations[transformation.name()] = transformation.generate(sentence)
return generations
|
<commit_before><commit_msg>Load sub-classes without using explicit names<commit_after>
|
from pkgutil import iter_modules
from pathlib import Path
from importlib import import_module
from transformations.SentenceTransformation import SentenceTransformation
def load(module, class_name):
my_class_py = getattr(module, class_name)
my_class = getattr(my_class_py, class_name)
return my_class()
class TransformationsList:
def __init__(self):
# iterate through the modules in the current package
package_dir = Path(__file__).resolve() # --> Transformations.py
for (_, m, _) in iter_modules([package_dir.parent.joinpath("transformations")]):
import_module(f"transformations.{m}")
module = __import__("transformations")
transformations = [load(module, cls.__name__) for cls in SentenceTransformation.__subclasses__()]
self.transformations = transformations
def generate(self, sentence: str):
print(f"Original Input : {sentence}")
generations = {"Original": sentence}
for transformation in self.transformations:
generations[transformation.name()] = transformation.generate(sentence)
return generations
|
Load sub-classes without using explicit namesfrom pkgutil import iter_modules
from pathlib import Path
from importlib import import_module
from transformations.SentenceTransformation import SentenceTransformation
def load(module, class_name):
my_class_py = getattr(module, class_name)
my_class = getattr(my_class_py, class_name)
return my_class()
class TransformationsList:
def __init__(self):
# iterate through the modules in the current package
package_dir = Path(__file__).resolve() # --> Transformations.py
for (_, m, _) in iter_modules([package_dir.parent.joinpath("transformations")]):
import_module(f"transformations.{m}")
module = __import__("transformations")
transformations = [load(module, cls.__name__) for cls in SentenceTransformation.__subclasses__()]
self.transformations = transformations
def generate(self, sentence: str):
print(f"Original Input : {sentence}")
generations = {"Original": sentence}
for transformation in self.transformations:
generations[transformation.name()] = transformation.generate(sentence)
return generations
|
<commit_before><commit_msg>Load sub-classes without using explicit names<commit_after>from pkgutil import iter_modules
from pathlib import Path
from importlib import import_module
from transformations.SentenceTransformation import SentenceTransformation
def load(module, class_name):
my_class_py = getattr(module, class_name)
my_class = getattr(my_class_py, class_name)
return my_class()
class TransformationsList:
def __init__(self):
# iterate through the modules in the current package
package_dir = Path(__file__).resolve() # --> Transformations.py
for (_, m, _) in iter_modules([package_dir.parent.joinpath("transformations")]):
import_module(f"transformations.{m}")
module = __import__("transformations")
transformations = [load(module, cls.__name__) for cls in SentenceTransformation.__subclasses__()]
self.transformations = transformations
def generate(self, sentence: str):
print(f"Original Input : {sentence}")
generations = {"Original": sentence}
for transformation in self.transformations:
generations[transformation.name()] = transformation.generate(sentence)
return generations
|
|
acaf9980766a3ded7894dec32df09e9f73f626bf
|
test/test_expressions.py
|
test/test_expressions.py
|
import genson
def test_unary():
g = genson.loads('{ "p" : sin(-1) } ')
assert( g['p'].args[0] == -1)
g = genson.loads('{ "p" : sin(+1) } ')
assert( g['p'].args[0] == 1)
def test_binary():
g = genson.loads('{ "p" : gaussian(1+1,1) } ')
assert( g['p'].mean == 2)
assert( g['p'].stdev == 1)
|
Add tests for genson expression
|
Add tests for genson expression
|
Python
|
mit
|
davidcox/genson
|
Add tests for genson expression
|
import genson
def test_unary():
g = genson.loads('{ "p" : sin(-1) } ')
assert( g['p'].args[0] == -1)
g = genson.loads('{ "p" : sin(+1) } ')
assert( g['p'].args[0] == 1)
def test_binary():
g = genson.loads('{ "p" : gaussian(1+1,1) } ')
assert( g['p'].mean == 2)
assert( g['p'].stdev == 1)
|
<commit_before><commit_msg>Add tests for genson expression<commit_after>
|
import genson
def test_unary():
g = genson.loads('{ "p" : sin(-1) } ')
assert( g['p'].args[0] == -1)
g = genson.loads('{ "p" : sin(+1) } ')
assert( g['p'].args[0] == 1)
def test_binary():
g = genson.loads('{ "p" : gaussian(1+1,1) } ')
assert( g['p'].mean == 2)
assert( g['p'].stdev == 1)
|
Add tests for genson expressionimport genson
def test_unary():
g = genson.loads('{ "p" : sin(-1) } ')
assert( g['p'].args[0] == -1)
g = genson.loads('{ "p" : sin(+1) } ')
assert( g['p'].args[0] == 1)
def test_binary():
g = genson.loads('{ "p" : gaussian(1+1,1) } ')
assert( g['p'].mean == 2)
assert( g['p'].stdev == 1)
|
<commit_before><commit_msg>Add tests for genson expression<commit_after>import genson
def test_unary():
g = genson.loads('{ "p" : sin(-1) } ')
assert( g['p'].args[0] == -1)
g = genson.loads('{ "p" : sin(+1) } ')
assert( g['p'].args[0] == 1)
def test_binary():
g = genson.loads('{ "p" : gaussian(1+1,1) } ')
assert( g['p'].mean == 2)
assert( g['p'].stdev == 1)
|
|
0cefbaa9355887ab1a03008f434ecb315bbe32ba
|
test/statements/import6.py
|
test/statements/import6.py
|
from __future__ import generator_stop
from : keyword.control.flow.python, source.python
: source.python
__future__ : source.python, support.variable.magic.python
: source.python
import : keyword.control.import.python, source.python
: source.python
generator_stop : source.python
|
Add a test for "from __future__ import .."
|
Add a test for "from __future__ import .."
|
Python
|
mit
|
MagicStack/MagicPython,MagicStack/MagicPython,MagicStack/MagicPython
|
Add a test for "from __future__ import .."
|
from __future__ import generator_stop
from : keyword.control.flow.python, source.python
: source.python
__future__ : source.python, support.variable.magic.python
: source.python
import : keyword.control.import.python, source.python
: source.python
generator_stop : source.python
|
<commit_before><commit_msg>Add a test for "from __future__ import .."<commit_after>
|
from __future__ import generator_stop
from : keyword.control.flow.python, source.python
: source.python
__future__ : source.python, support.variable.magic.python
: source.python
import : keyword.control.import.python, source.python
: source.python
generator_stop : source.python
|
Add a test for "from __future__ import .."from __future__ import generator_stop
from : keyword.control.flow.python, source.python
: source.python
__future__ : source.python, support.variable.magic.python
: source.python
import : keyword.control.import.python, source.python
: source.python
generator_stop : source.python
|
<commit_before><commit_msg>Add a test for "from __future__ import .."<commit_after>from __future__ import generator_stop
from : keyword.control.flow.python, source.python
: source.python
__future__ : source.python, support.variable.magic.python
: source.python
import : keyword.control.import.python, source.python
: source.python
generator_stop : source.python
|
|
92e0616c7158f0e38b3dab8b5f347e8eef6d899c
|
balance_newlines.py
|
balance_newlines.py
|
#!/usr/bin/env python
import sys
def main():
def width(lines):
return max(map(len, [' '.join(l) for l in lines]))
lines = [x.split(' ') for x in sys.stdin.read().strip().split('\n')]
print >>sys.stderr, 'Before - max width:', width(lines)
making_progress = True
while making_progress:
making_progress = False
for i, l in enumerate(lines):
if not len(l):
continue
if i > 0:
ow = width(lines[i-1:i+1])
lines[i-1].append(l.pop(0))
nw = width(lines[i-1:i+1])
if nw < ow:
making_progress = True
break
l.insert(0, lines[i-1].pop(-1))
if i < len(lines) - 1:
ow = width(lines[i:i+2])
lines[i+1].insert(0, l.pop(-1))
nw = width(lines[i:i+2])
if nw < ow:
making_progress = True
break
l.append(lines[i+1].pop(0))
print >>sys.stderr, 'After - max width:', width(lines)
for l in lines:
print ' '.join(l)
if __name__ == '__main__':
main()
|
Add python script to balance the line length in some text
|
Add python script to balance the line length in some text
Useful to find the optimal newline placement in titles that have
wrapped to balance the line length, for instance:
"This is a long long title that is just ever so slightly longer than one
line."
Becomes:
"This is a long long title that is just
ever so slightly longer than one line."
|
Python
|
mit
|
DarkStarSword/junk,DarkStarSword/junk,DarkStarSword/junk,DarkStarSword/junk,DarkStarSword/junk
|
Add python script to balance the line length in some text
Useful to find the optimal newline placement in titles that have
wrapped to balance the line length, for instance:
"This is a long long title that is just ever so slightly longer than one
line."
Becomes:
"This is a long long title that is just
ever so slightly longer than one line."
|
#!/usr/bin/env python
import sys
def main():
def width(lines):
return max(map(len, [' '.join(l) for l in lines]))
lines = [x.split(' ') for x in sys.stdin.read().strip().split('\n')]
print >>sys.stderr, 'Before - max width:', width(lines)
making_progress = True
while making_progress:
making_progress = False
for i, l in enumerate(lines):
if not len(l):
continue
if i > 0:
ow = width(lines[i-1:i+1])
lines[i-1].append(l.pop(0))
nw = width(lines[i-1:i+1])
if nw < ow:
making_progress = True
break
l.insert(0, lines[i-1].pop(-1))
if i < len(lines) - 1:
ow = width(lines[i:i+2])
lines[i+1].insert(0, l.pop(-1))
nw = width(lines[i:i+2])
if nw < ow:
making_progress = True
break
l.append(lines[i+1].pop(0))
print >>sys.stderr, 'After - max width:', width(lines)
for l in lines:
print ' '.join(l)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add python script to balance the line length in some text
Useful to find the optimal newline placement in titles that have
wrapped to balance the line length, for instance:
"This is a long long title that is just ever so slightly longer than one
line."
Becomes:
"This is a long long title that is just
ever so slightly longer than one line."<commit_after>
|
#!/usr/bin/env python
import sys
def main():
def width(lines):
return max(map(len, [' '.join(l) for l in lines]))
lines = [x.split(' ') for x in sys.stdin.read().strip().split('\n')]
print >>sys.stderr, 'Before - max width:', width(lines)
making_progress = True
while making_progress:
making_progress = False
for i, l in enumerate(lines):
if not len(l):
continue
if i > 0:
ow = width(lines[i-1:i+1])
lines[i-1].append(l.pop(0))
nw = width(lines[i-1:i+1])
if nw < ow:
making_progress = True
break
l.insert(0, lines[i-1].pop(-1))
if i < len(lines) - 1:
ow = width(lines[i:i+2])
lines[i+1].insert(0, l.pop(-1))
nw = width(lines[i:i+2])
if nw < ow:
making_progress = True
break
l.append(lines[i+1].pop(0))
print >>sys.stderr, 'After - max width:', width(lines)
for l in lines:
print ' '.join(l)
if __name__ == '__main__':
main()
|
Add python script to balance the line length in some text
Useful to find the optimal newline placement in titles that have
wrapped to balance the line length, for instance:
"This is a long long title that is just ever so slightly longer than one
line."
Becomes:
"This is a long long title that is just
ever so slightly longer than one line."#!/usr/bin/env python
import sys
def main():
def width(lines):
return max(map(len, [' '.join(l) for l in lines]))
lines = [x.split(' ') for x in sys.stdin.read().strip().split('\n')]
print >>sys.stderr, 'Before - max width:', width(lines)
making_progress = True
while making_progress:
making_progress = False
for i, l in enumerate(lines):
if not len(l):
continue
if i > 0:
ow = width(lines[i-1:i+1])
lines[i-1].append(l.pop(0))
nw = width(lines[i-1:i+1])
if nw < ow:
making_progress = True
break
l.insert(0, lines[i-1].pop(-1))
if i < len(lines) - 1:
ow = width(lines[i:i+2])
lines[i+1].insert(0, l.pop(-1))
nw = width(lines[i:i+2])
if nw < ow:
making_progress = True
break
l.append(lines[i+1].pop(0))
print >>sys.stderr, 'After - max width:', width(lines)
for l in lines:
print ' '.join(l)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add python script to balance the line length in some text
Useful to find the optimal newline placement in titles that have
wrapped to balance the line length, for instance:
"This is a long long title that is just ever so slightly longer than one
line."
Becomes:
"This is a long long title that is just
ever so slightly longer than one line."<commit_after>#!/usr/bin/env python
import sys
def main():
def width(lines):
return max(map(len, [' '.join(l) for l in lines]))
lines = [x.split(' ') for x in sys.stdin.read().strip().split('\n')]
print >>sys.stderr, 'Before - max width:', width(lines)
making_progress = True
while making_progress:
making_progress = False
for i, l in enumerate(lines):
if not len(l):
continue
if i > 0:
ow = width(lines[i-1:i+1])
lines[i-1].append(l.pop(0))
nw = width(lines[i-1:i+1])
if nw < ow:
making_progress = True
break
l.insert(0, lines[i-1].pop(-1))
if i < len(lines) - 1:
ow = width(lines[i:i+2])
lines[i+1].insert(0, l.pop(-1))
nw = width(lines[i:i+2])
if nw < ow:
making_progress = True
break
l.append(lines[i+1].pop(0))
print >>sys.stderr, 'After - max width:', width(lines)
for l in lines:
print ' '.join(l)
if __name__ == '__main__':
main()
|
|
cd7e5f0b5107be3d64e80ed840eb7c32c96a19da
|
tests/functional/registration/test_discovery.py
|
tests/functional/registration/test_discovery.py
|
"""
Test discovering of registered languages and generators.
"""
import subprocess
def test_list_languages_cli():
"""
Test list-languages command.
"""
output = subprocess.check_output(['textx', 'list-languages'],
stderr=subprocess.STDOUT)
assert b'flow-dsl' in output
assert b'*.eflow' in output
assert b'data-dsl' in output
def test_list_generators_cli():
"""
Test list-generators command.
"""
output = subprocess.check_output(['textx', 'list-generators'],
stderr=subprocess.STDOUT)
assert b'flow-dsl -> PlantUML' in output
|
Add initial tests for discovery
|
Add initial tests for discovery
|
Python
|
mit
|
igordejanovic/textX,igordejanovic/textX,igordejanovic/textX
|
Add initial tests for discovery
|
"""
Test discovering of registered languages and generators.
"""
import subprocess
def test_list_languages_cli():
"""
Test list-languages command.
"""
output = subprocess.check_output(['textx', 'list-languages'],
stderr=subprocess.STDOUT)
assert b'flow-dsl' in output
assert b'*.eflow' in output
assert b'data-dsl' in output
def test_list_generators_cli():
"""
Test list-generators command.
"""
output = subprocess.check_output(['textx', 'list-generators'],
stderr=subprocess.STDOUT)
assert b'flow-dsl -> PlantUML' in output
|
<commit_before><commit_msg>Add initial tests for discovery<commit_after>
|
"""
Test discovering of registered languages and generators.
"""
import subprocess
def test_list_languages_cli():
"""
Test list-languages command.
"""
output = subprocess.check_output(['textx', 'list-languages'],
stderr=subprocess.STDOUT)
assert b'flow-dsl' in output
assert b'*.eflow' in output
assert b'data-dsl' in output
def test_list_generators_cli():
"""
Test list-generators command.
"""
output = subprocess.check_output(['textx', 'list-generators'],
stderr=subprocess.STDOUT)
assert b'flow-dsl -> PlantUML' in output
|
Add initial tests for discovery"""
Test discovering of registered languages and generators.
"""
import subprocess
def test_list_languages_cli():
"""
Test list-languages command.
"""
output = subprocess.check_output(['textx', 'list-languages'],
stderr=subprocess.STDOUT)
assert b'flow-dsl' in output
assert b'*.eflow' in output
assert b'data-dsl' in output
def test_list_generators_cli():
"""
Test list-generators command.
"""
output = subprocess.check_output(['textx', 'list-generators'],
stderr=subprocess.STDOUT)
assert b'flow-dsl -> PlantUML' in output
|
<commit_before><commit_msg>Add initial tests for discovery<commit_after>"""
Test discovering of registered languages and generators.
"""
import subprocess
def test_list_languages_cli():
"""
Test list-languages command.
"""
output = subprocess.check_output(['textx', 'list-languages'],
stderr=subprocess.STDOUT)
assert b'flow-dsl' in output
assert b'*.eflow' in output
assert b'data-dsl' in output
def test_list_generators_cli():
"""
Test list-generators command.
"""
output = subprocess.check_output(['textx', 'list-generators'],
stderr=subprocess.STDOUT)
assert b'flow-dsl -> PlantUML' in output
|
|
bb8150d7174ae9329fe3a2fcc1937bb72d3e9ddf
|
liwc2es.py
|
liwc2es.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to put a folia xml file in ElasticSearch.
"""
import codecs
import os
import time
from datetime import datetime
from elasticsearch import Elasticsearch, client
from lxml import etree
from bs4 import BeautifulSoup
from emotools.plays import extract_character_name, xml_id2play_id
from emotools.bs4_helpers import sentence, note, entity
if __name__ == '__main__':
# Load liwc dict
with codecs.open('historic_Dutch_LIWC.dic', 'rb', 'utf8') as f:
lines = f.readlines()
liwc_categories = {}
liwc_dict = {}
for line in lines:
# LIWC category
if line[0].isdigit():
entry = line.split()
# remove 0 from strings like 01
c = str(int(entry[0]))
liwc_categories[c] = entry[1]
# word
elif line[0].isalpha():
entry = line.split()
term = entry[0]
categories = entry[1:]
liwc_dict[term] = categories
# Make dictionary of the form {liwc category: [word, word, word, ...]}
liwc = {}
for term, cats in liwc_dict.iteritems():
for c in cats:
cat = liwc_categories.get(c)
if cat not in liwc.keys():
liwc[cat] = []
liwc[cat].append(term)
# TODO: ES host + port as script arguments
es = Elasticsearch()
# TODO: index name as script argument
index_name = 'embodied_emotions'
type_name = 'entitycategory'
mapping = {
'entitycategory': {
'_id': {
'path': 'name'
},
'properties': {
'name': {
'type': 'string',
'index': 'not_analyzed'
},
'words': {
'type': 'string',
'index': 'not_analyzed'
},
}
}
}
es.indices.put_mapping(index=index_name, doc_type=type_name, body=mapping)
for cat, words in liwc.iteritems():
print cat
doc = {
'name': cat,
'words': words
}
es.index(index=index_name, doc_type=type_name, body=doc)
|
Add script to save liwc categories + words to elasticsearch
|
Add script to save liwc categories + words to elasticsearch
Added a script that saves the liwc categories + their words to
elasticsearch. The liwc dictionary is saved in the type entitycategory,
with fields 'name', 'words'. This data is saved in order to be able to
get an overview of all words that belong to a certain category.
|
Python
|
apache-2.0
|
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
|
Add script to save liwc categories + words to elasticsearch
Added a script that saves the liwc categories + their words to
elasticsearch. The liwc dictionary is saved in the type entitycategory,
with fields 'name', 'words'. This data is saved in order to be able to
get an overview of all words that belong to a certain category.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to put a folia xml file in ElasticSearch.
"""
import codecs
import os
import time
from datetime import datetime
from elasticsearch import Elasticsearch, client
from lxml import etree
from bs4 import BeautifulSoup
from emotools.plays import extract_character_name, xml_id2play_id
from emotools.bs4_helpers import sentence, note, entity
if __name__ == '__main__':
# Load liwc dict
with codecs.open('historic_Dutch_LIWC.dic', 'rb', 'utf8') as f:
lines = f.readlines()
liwc_categories = {}
liwc_dict = {}
for line in lines:
# LIWC category
if line[0].isdigit():
entry = line.split()
# remove 0 from strings like 01
c = str(int(entry[0]))
liwc_categories[c] = entry[1]
# word
elif line[0].isalpha():
entry = line.split()
term = entry[0]
categories = entry[1:]
liwc_dict[term] = categories
# Make dictionary of the form {liwc category: [word, word, word, ...]}
liwc = {}
for term, cats in liwc_dict.iteritems():
for c in cats:
cat = liwc_categories.get(c)
if cat not in liwc.keys():
liwc[cat] = []
liwc[cat].append(term)
# TODO: ES host + port as script arguments
es = Elasticsearch()
# TODO: index name as script argument
index_name = 'embodied_emotions'
type_name = 'entitycategory'
mapping = {
'entitycategory': {
'_id': {
'path': 'name'
},
'properties': {
'name': {
'type': 'string',
'index': 'not_analyzed'
},
'words': {
'type': 'string',
'index': 'not_analyzed'
},
}
}
}
es.indices.put_mapping(index=index_name, doc_type=type_name, body=mapping)
for cat, words in liwc.iteritems():
print cat
doc = {
'name': cat,
'words': words
}
es.index(index=index_name, doc_type=type_name, body=doc)
|
<commit_before><commit_msg>Add script to save liwc categories + words to elasticsearch
Added a script that saves the liwc categories + their words to
elasticsearch. The liwc dictionary is saved in the type entitycategory,
with fields 'name', 'words'. This data is saved in order to be able to
get an overview of all words that belong to a certain category.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to put a folia xml file in ElasticSearch.
"""
import codecs
import os
import time
from datetime import datetime
from elasticsearch import Elasticsearch, client
from lxml import etree
from bs4 import BeautifulSoup
from emotools.plays import extract_character_name, xml_id2play_id
from emotools.bs4_helpers import sentence, note, entity
if __name__ == '__main__':
# Load liwc dict
with codecs.open('historic_Dutch_LIWC.dic', 'rb', 'utf8') as f:
lines = f.readlines()
liwc_categories = {}
liwc_dict = {}
for line in lines:
# LIWC category
if line[0].isdigit():
entry = line.split()
# remove 0 from strings like 01
c = str(int(entry[0]))
liwc_categories[c] = entry[1]
# word
elif line[0].isalpha():
entry = line.split()
term = entry[0]
categories = entry[1:]
liwc_dict[term] = categories
# Make dictionary of the form {liwc category: [word, word, word, ...]}
liwc = {}
for term, cats in liwc_dict.iteritems():
for c in cats:
cat = liwc_categories.get(c)
if cat not in liwc.keys():
liwc[cat] = []
liwc[cat].append(term)
# TODO: ES host + port as script arguments
es = Elasticsearch()
# TODO: index name as script argument
index_name = 'embodied_emotions'
type_name = 'entitycategory'
mapping = {
'entitycategory': {
'_id': {
'path': 'name'
},
'properties': {
'name': {
'type': 'string',
'index': 'not_analyzed'
},
'words': {
'type': 'string',
'index': 'not_analyzed'
},
}
}
}
es.indices.put_mapping(index=index_name, doc_type=type_name, body=mapping)
for cat, words in liwc.iteritems():
print cat
doc = {
'name': cat,
'words': words
}
es.index(index=index_name, doc_type=type_name, body=doc)
|
Add script to save liwc categories + words to elasticsearch
Added a script that saves the liwc categories + their words to
elasticsearch. The liwc dictionary is saved in the type entitycategory,
with fields 'name', 'words'. This data is saved in order to be able to
get an overview of all words that belong to a certain category.#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to put a folia xml file in ElasticSearch.
"""
import codecs
import os
import time
from datetime import datetime
from elasticsearch import Elasticsearch, client
from lxml import etree
from bs4 import BeautifulSoup
from emotools.plays import extract_character_name, xml_id2play_id
from emotools.bs4_helpers import sentence, note, entity
if __name__ == '__main__':
# Load liwc dict
with codecs.open('historic_Dutch_LIWC.dic', 'rb', 'utf8') as f:
lines = f.readlines()
liwc_categories = {}
liwc_dict = {}
for line in lines:
# LIWC category
if line[0].isdigit():
entry = line.split()
# remove 0 from strings like 01
c = str(int(entry[0]))
liwc_categories[c] = entry[1]
# word
elif line[0].isalpha():
entry = line.split()
term = entry[0]
categories = entry[1:]
liwc_dict[term] = categories
# Make dictionary of the form {liwc category: [word, word, word, ...]}
liwc = {}
for term, cats in liwc_dict.iteritems():
for c in cats:
cat = liwc_categories.get(c)
if cat not in liwc.keys():
liwc[cat] = []
liwc[cat].append(term)
# TODO: ES host + port as script arguments
es = Elasticsearch()
# TODO: index name as script argument
index_name = 'embodied_emotions'
type_name = 'entitycategory'
mapping = {
'entitycategory': {
'_id': {
'path': 'name'
},
'properties': {
'name': {
'type': 'string',
'index': 'not_analyzed'
},
'words': {
'type': 'string',
'index': 'not_analyzed'
},
}
}
}
es.indices.put_mapping(index=index_name, doc_type=type_name, body=mapping)
for cat, words in liwc.iteritems():
print cat
doc = {
'name': cat,
'words': words
}
es.index(index=index_name, doc_type=type_name, body=doc)
|
<commit_before><commit_msg>Add script to save liwc categories + words to elasticsearch
Added a script that saves the liwc categories + their words to
elasticsearch. The liwc dictionary is saved in the type entitycategory,
with fields 'name', 'words'. This data is saved in order to be able to
get an overview of all words that belong to a certain category.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to put a folia xml file in ElasticSearch.
"""
import codecs
import os
import time
from datetime import datetime
from elasticsearch import Elasticsearch, client
from lxml import etree
from bs4 import BeautifulSoup
from emotools.plays import extract_character_name, xml_id2play_id
from emotools.bs4_helpers import sentence, note, entity
if __name__ == '__main__':
# Load liwc dict
with codecs.open('historic_Dutch_LIWC.dic', 'rb', 'utf8') as f:
lines = f.readlines()
liwc_categories = {}
liwc_dict = {}
for line in lines:
# LIWC category
if line[0].isdigit():
entry = line.split()
# remove 0 from strings like 01
c = str(int(entry[0]))
liwc_categories[c] = entry[1]
# word
elif line[0].isalpha():
entry = line.split()
term = entry[0]
categories = entry[1:]
liwc_dict[term] = categories
# Make dictionary of the form {liwc category: [word, word, word, ...]}
liwc = {}
for term, cats in liwc_dict.iteritems():
for c in cats:
cat = liwc_categories.get(c)
if cat not in liwc.keys():
liwc[cat] = []
liwc[cat].append(term)
# TODO: ES host + port as script arguments
es = Elasticsearch()
# TODO: index name as script argument
index_name = 'embodied_emotions'
type_name = 'entitycategory'
mapping = {
'entitycategory': {
'_id': {
'path': 'name'
},
'properties': {
'name': {
'type': 'string',
'index': 'not_analyzed'
},
'words': {
'type': 'string',
'index': 'not_analyzed'
},
}
}
}
es.indices.put_mapping(index=index_name, doc_type=type_name, body=mapping)
for cat, words in liwc.iteritems():
print cat
doc = {
'name': cat,
'words': words
}
es.index(index=index_name, doc_type=type_name, body=doc)
|
|
8aff9f37380444e929f54ebbb7679e4692d14a82
|
get-waagent.py
|
get-waagent.py
|
#!/usr/bin/env python
#
# Windows Azure Linux Agent setup.py
#
# Copyright 2013 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import urllib2
import zipfile
import subprocess
def DownloadAndSaveFile(uri, file_path):
src = urllib2.urlopen(uri)
dest = open(file_path, 'wb')
buf_size = 1024
buf = src.read(buf_size)
while(buf):
dest.write(buf)
buf = src.read(buf_size)
def Main():
User='yuezh'
Project='WALinuxAgent'
Ref='2.1'
TargetDir='.'
Depo="{0}/{1}".format(User, Project)
ZipFile='{0}-{1}.zip'.format(Project, Ref)
ZipFileUri='https://github.com/{0}/archive/{1}.zip'.format(Depo, Ref)
print "Download zip file..."
DownloadAndSaveFile(ZipFileUri, ZipFile)
zfile = zipfile.ZipFile(ZipFile)
zfile.extractall(TargetDir)
os.remove(ZipFile)
if __name__ == '__main__':
Main()
|
Add 'one script to go' support
|
Add 'one script to go' support
|
Python
|
apache-2.0
|
rjschwei/WALinuxAgent,hglkrijger/WALinuxAgent,nathanleclaire/WALinuxAgent,hglkrijger/WALinuxAgent,rjschwei/WALinuxAgent,Azure/WALinuxAgent,nathanleclaire/WALinuxAgent,andyliuliming/WALinuxAgent,Azure/WALinuxAgent,andyliuliming/WALinuxAgent
|
Add 'one script to go' support
|
#!/usr/bin/env python
#
# Windows Azure Linux Agent setup.py
#
# Copyright 2013 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import urllib2
import zipfile
import subprocess
def DownloadAndSaveFile(uri, file_path):
src = urllib2.urlopen(uri)
dest = open(file_path, 'wb')
buf_size = 1024
buf = src.read(buf_size)
while(buf):
dest.write(buf)
buf = src.read(buf_size)
def Main():
User='yuezh'
Project='WALinuxAgent'
Ref='2.1'
TargetDir='.'
Depo="{0}/{1}".format(User, Project)
ZipFile='{0}-{1}.zip'.format(Project, Ref)
ZipFileUri='https://github.com/{0}/archive/{1}.zip'.format(Depo, Ref)
print "Download zip file..."
DownloadAndSaveFile(ZipFileUri, ZipFile)
zfile = zipfile.ZipFile(ZipFile)
zfile.extractall(TargetDir)
os.remove(ZipFile)
if __name__ == '__main__':
Main()
|
<commit_before><commit_msg>Add 'one script to go' support<commit_after>
|
#!/usr/bin/env python
#
# Windows Azure Linux Agent setup.py
#
# Copyright 2013 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import urllib2
import zipfile
import subprocess
def DownloadAndSaveFile(uri, file_path):
src = urllib2.urlopen(uri)
dest = open(file_path, 'wb')
buf_size = 1024
buf = src.read(buf_size)
while(buf):
dest.write(buf)
buf = src.read(buf_size)
def Main():
User='yuezh'
Project='WALinuxAgent'
Ref='2.1'
TargetDir='.'
Depo="{0}/{1}".format(User, Project)
ZipFile='{0}-{1}.zip'.format(Project, Ref)
ZipFileUri='https://github.com/{0}/archive/{1}.zip'.format(Depo, Ref)
print "Download zip file..."
DownloadAndSaveFile(ZipFileUri, ZipFile)
zfile = zipfile.ZipFile(ZipFile)
zfile.extractall(TargetDir)
os.remove(ZipFile)
if __name__ == '__main__':
Main()
|
Add 'one script to go' support#!/usr/bin/env python
#
# Windows Azure Linux Agent setup.py
#
# Copyright 2013 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import urllib2
import zipfile
import subprocess
def DownloadAndSaveFile(uri, file_path):
src = urllib2.urlopen(uri)
dest = open(file_path, 'wb')
buf_size = 1024
buf = src.read(buf_size)
while(buf):
dest.write(buf)
buf = src.read(buf_size)
def Main():
User='yuezh'
Project='WALinuxAgent'
Ref='2.1'
TargetDir='.'
Depo="{0}/{1}".format(User, Project)
ZipFile='{0}-{1}.zip'.format(Project, Ref)
ZipFileUri='https://github.com/{0}/archive/{1}.zip'.format(Depo, Ref)
print "Download zip file..."
DownloadAndSaveFile(ZipFileUri, ZipFile)
zfile = zipfile.ZipFile(ZipFile)
zfile.extractall(TargetDir)
os.remove(ZipFile)
if __name__ == '__main__':
Main()
|
<commit_before><commit_msg>Add 'one script to go' support<commit_after>#!/usr/bin/env python
#
# Windows Azure Linux Agent setup.py
#
# Copyright 2013 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import urllib2
import zipfile
import subprocess
def DownloadAndSaveFile(uri, file_path):
src = urllib2.urlopen(uri)
dest = open(file_path, 'wb')
buf_size = 1024
buf = src.read(buf_size)
while(buf):
dest.write(buf)
buf = src.read(buf_size)
def Main():
User='yuezh'
Project='WALinuxAgent'
Ref='2.1'
TargetDir='.'
Depo="{0}/{1}".format(User, Project)
ZipFile='{0}-{1}.zip'.format(Project, Ref)
ZipFileUri='https://github.com/{0}/archive/{1}.zip'.format(Depo, Ref)
print "Download zip file..."
DownloadAndSaveFile(ZipFileUri, ZipFile)
zfile = zipfile.ZipFile(ZipFile)
zfile.extractall(TargetDir)
os.remove(ZipFile)
if __name__ == '__main__':
Main()
|
|
896880f84dcf6615fe33123dd1e6fe24bc1a7896
|
tests/manual/check_model_utils.py
|
tests/manual/check_model_utils.py
|
from __future__ import absolute_import
from __future__ import print_function
from keras.models import Sequential, Graph
from keras.layers.core import Layer, Activation, Dense, Flatten, Reshape, Merge
from keras.layers.convolutional import Convolution2D, MaxPooling2D
import keras.utils.model_utils as model_utils
print('-- Sequential model')
left = Sequential()
left.add(Convolution2D(32, 1, 3, 3, border_mode='valid'))
left.add(MaxPooling2D(poolsize=(2, 2)))
left.add(Flatten())
left.add(Dense(32 * 13 * 13, 50))
left.add(Activation('relu'))
right = Sequential()
right.add(Dense(784, 30))
right.add(Activation('relu'))
model = Sequential()
model.add(Merge([left, right], mode='concat'))
model.add(Dense(80, 10))
model.add(Activation('softmax'))
model_utils.print_model_layer_shapes(model, [(1, 1, 28, 28), (1, 784)])
print('-- Graph model')
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_input(name='input2', ndim=4)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(16, 4), name='dense3', input='dense1')
graph.add_node(Convolution2D(32, 1, 3, 3), name='conv1', input='input2')
graph.add_node(Flatten(), name='flatten1', input='conv1')
graph.add_node(Dense(32 * 13 * 13, 10), name='dense4', input='flatten1')
graph.add_output(name='output1', inputs=['dense1', 'dense3'], merge_mode='sum')
graph.add_output(name='output2', inputs=['dense1', 'dense4'], merge_mode='concat')
model_utils.print_graph_layer_shapes(graph,
{'input1':(1, 32), 'input2':(1, 1, 28, 28)})
print('Test script complete')
|
Add a test script for model_utils
|
Add a test script for model_utils
|
Python
|
mit
|
harshhemani/keras,cheng6076/keras,marchick209/keras,untom/keras,daviddiazvico/keras,printedheart/keras,Cadene/keras,dolaameng/keras,gavinmh/keras,Smerity/keras,jalexvig/keras,MagicSen/keras,zhmz90/keras,tencrance/keras,iamtrask/keras,nt/keras,iScienceLuvr/keras,mikekestemont/keras,rodrigob/keras,imcomking/Convolutional-GRU-keras-extension-,saurav111/keras,EderSantana/keras,Yingmin-Li/keras,fmacias64/keras,gamer13/keras,xiaoda99/keras,Aureliu/keras,keras-team/keras,meanmee/keras,hhaoyan/keras,dribnet/keras,eulerreich/keras,keskarnitish/keras,nebw/keras,jayhetee/keras,pthaike/keras,nzer0/keras,keras-team/keras,ml-lab/keras,jiumem/keras,sjuvekar/keras,bottler/keras,zxytim/keras,chenych11/keras,wubr2000/keras,zhangxujinsh/keras,rudaoshi/keras,kod3r/keras,navyjeff/keras,brainwater/keras,DLlearn/keras,ashhher3/keras,rlkelly/keras,florentchandelier/keras,JasonTam/keras,abayowbo/keras,jimgoo/keras,kuza55/keras,kemaswill/keras,dxj19831029/keras,asampat3090/keras,ogrisel/keras,wxs/keras,dhruvparamhans/keras,yingzha/keras,OlafLee/keras,happyboy310/keras,pjadzinsky/keras,amy12xx/keras,nehz/keras,danielforsyth/keras,cvfish/keras,llcao/keras,3dconv/keras,ekamioka/keras,ledbetdr/keras,vseledkin/keras,LIBOTAO/keras,jasonyaw/keras,relh/keras,bboalimoe/keras,johmathe/keras,stephenbalaban/keras,why11002526/keras,jbolinge/keras,zxsted/keras,DeepGnosis/keras,xurantju/keras
|
Add a test script for model_utils
|
from __future__ import absolute_import
from __future__ import print_function
from keras.models import Sequential, Graph
from keras.layers.core import Layer, Activation, Dense, Flatten, Reshape, Merge
from keras.layers.convolutional import Convolution2D, MaxPooling2D
import keras.utils.model_utils as model_utils
print('-- Sequential model')
left = Sequential()
left.add(Convolution2D(32, 1, 3, 3, border_mode='valid'))
left.add(MaxPooling2D(poolsize=(2, 2)))
left.add(Flatten())
left.add(Dense(32 * 13 * 13, 50))
left.add(Activation('relu'))
right = Sequential()
right.add(Dense(784, 30))
right.add(Activation('relu'))
model = Sequential()
model.add(Merge([left, right], mode='concat'))
model.add(Dense(80, 10))
model.add(Activation('softmax'))
model_utils.print_model_layer_shapes(model, [(1, 1, 28, 28), (1, 784)])
print('-- Graph model')
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_input(name='input2', ndim=4)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(16, 4), name='dense3', input='dense1')
graph.add_node(Convolution2D(32, 1, 3, 3), name='conv1', input='input2')
graph.add_node(Flatten(), name='flatten1', input='conv1')
graph.add_node(Dense(32 * 13 * 13, 10), name='dense4', input='flatten1')
graph.add_output(name='output1', inputs=['dense1', 'dense3'], merge_mode='sum')
graph.add_output(name='output2', inputs=['dense1', 'dense4'], merge_mode='concat')
model_utils.print_graph_layer_shapes(graph,
{'input1':(1, 32), 'input2':(1, 1, 28, 28)})
print('Test script complete')
|
<commit_before><commit_msg>Add a test script for model_utils<commit_after>
|
from __future__ import absolute_import
from __future__ import print_function
from keras.models import Sequential, Graph
from keras.layers.core import Layer, Activation, Dense, Flatten, Reshape, Merge
from keras.layers.convolutional import Convolution2D, MaxPooling2D
import keras.utils.model_utils as model_utils
print('-- Sequential model')
left = Sequential()
left.add(Convolution2D(32, 1, 3, 3, border_mode='valid'))
left.add(MaxPooling2D(poolsize=(2, 2)))
left.add(Flatten())
left.add(Dense(32 * 13 * 13, 50))
left.add(Activation('relu'))
right = Sequential()
right.add(Dense(784, 30))
right.add(Activation('relu'))
model = Sequential()
model.add(Merge([left, right], mode='concat'))
model.add(Dense(80, 10))
model.add(Activation('softmax'))
model_utils.print_model_layer_shapes(model, [(1, 1, 28, 28), (1, 784)])
print('-- Graph model')
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_input(name='input2', ndim=4)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(16, 4), name='dense3', input='dense1')
graph.add_node(Convolution2D(32, 1, 3, 3), name='conv1', input='input2')
graph.add_node(Flatten(), name='flatten1', input='conv1')
graph.add_node(Dense(32 * 13 * 13, 10), name='dense4', input='flatten1')
graph.add_output(name='output1', inputs=['dense1', 'dense3'], merge_mode='sum')
graph.add_output(name='output2', inputs=['dense1', 'dense4'], merge_mode='concat')
model_utils.print_graph_layer_shapes(graph,
{'input1':(1, 32), 'input2':(1, 1, 28, 28)})
print('Test script complete')
|
Add a test script for model_utilsfrom __future__ import absolute_import
from __future__ import print_function
from keras.models import Sequential, Graph
from keras.layers.core import Layer, Activation, Dense, Flatten, Reshape, Merge
from keras.layers.convolutional import Convolution2D, MaxPooling2D
import keras.utils.model_utils as model_utils
print('-- Sequential model')
left = Sequential()
left.add(Convolution2D(32, 1, 3, 3, border_mode='valid'))
left.add(MaxPooling2D(poolsize=(2, 2)))
left.add(Flatten())
left.add(Dense(32 * 13 * 13, 50))
left.add(Activation('relu'))
right = Sequential()
right.add(Dense(784, 30))
right.add(Activation('relu'))
model = Sequential()
model.add(Merge([left, right], mode='concat'))
model.add(Dense(80, 10))
model.add(Activation('softmax'))
model_utils.print_model_layer_shapes(model, [(1, 1, 28, 28), (1, 784)])
print('-- Graph model')
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_input(name='input2', ndim=4)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(16, 4), name='dense3', input='dense1')
graph.add_node(Convolution2D(32, 1, 3, 3), name='conv1', input='input2')
graph.add_node(Flatten(), name='flatten1', input='conv1')
graph.add_node(Dense(32 * 13 * 13, 10), name='dense4', input='flatten1')
graph.add_output(name='output1', inputs=['dense1', 'dense3'], merge_mode='sum')
graph.add_output(name='output2', inputs=['dense1', 'dense4'], merge_mode='concat')
model_utils.print_graph_layer_shapes(graph,
{'input1':(1, 32), 'input2':(1, 1, 28, 28)})
print('Test script complete')
|
<commit_before><commit_msg>Add a test script for model_utils<commit_after>from __future__ import absolute_import
from __future__ import print_function
from keras.models import Sequential, Graph
from keras.layers.core import Layer, Activation, Dense, Flatten, Reshape, Merge
from keras.layers.convolutional import Convolution2D, MaxPooling2D
import keras.utils.model_utils as model_utils
print('-- Sequential model')
left = Sequential()
left.add(Convolution2D(32, 1, 3, 3, border_mode='valid'))
left.add(MaxPooling2D(poolsize=(2, 2)))
left.add(Flatten())
left.add(Dense(32 * 13 * 13, 50))
left.add(Activation('relu'))
right = Sequential()
right.add(Dense(784, 30))
right.add(Activation('relu'))
model = Sequential()
model.add(Merge([left, right], mode='concat'))
model.add(Dense(80, 10))
model.add(Activation('softmax'))
model_utils.print_model_layer_shapes(model, [(1, 1, 28, 28), (1, 784)])
print('-- Graph model')
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_input(name='input2', ndim=4)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(16, 4), name='dense3', input='dense1')
graph.add_node(Convolution2D(32, 1, 3, 3), name='conv1', input='input2')
graph.add_node(Flatten(), name='flatten1', input='conv1')
graph.add_node(Dense(32 * 13 * 13, 10), name='dense4', input='flatten1')
graph.add_output(name='output1', inputs=['dense1', 'dense3'], merge_mode='sum')
graph.add_output(name='output2', inputs=['dense1', 'dense4'], merge_mode='concat')
model_utils.print_graph_layer_shapes(graph,
{'input1':(1, 32), 'input2':(1, 1, 28, 28)})
print('Test script complete')
|
|
ab64c3b060a55f1ddcceed3613628a7ba88113bc
|
testing/test_storm_c.py
|
testing/test_storm_c.py
|
#! /usr/bin/env python
#
# Tests for the C version of `storm`.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import shutil
from subprocess import call
# Global
start_dir = os.getcwd()
data_dir = os.path.join(start_dir, 'testing', 'data')
c_dir = os.path.join(start_dir, 'c')
input_file = 'wind.in'
output_files = ('wdir.data', 'windx.data', 'windy.data')
output_file_lengths = (100, 104, 104)
# Fixtures -------------------------------------------------------------
def setup_module():
'''
Called before any tests are performed.
'''
print('*** C tests')
os.chdir(c_dir)
def teardown_module():
'''
Called after all tests have completed.
'''
call(['make', 'cleaner'])
os.chdir(start_dir)
def setup():
'''
Called at start of any test using it @with_setup()
'''
shutil.copy(os.path.join(data_dir, input_file), c_dir)
def teardown():
'''
Called at end of any test using it @with_setup()
'''
os.remove(input_file)
# Tests ----------------------------------------------------------------
def test_compile():
'''
Test whether `storm` compiles
'''
call(['make'])
def test_no_input_file():
'''
Check that storm fails without input file
'''
r = call(['./storm'])
assert_not_equal(r, 0)
@with_setup(setup, teardown)
def test_output_files_exist():
'''
Test for creation of the three named output files
'''
r = call(['./storm'])
for fname in output_files:
assert_true(os.path.exists(fname))
@with_setup(setup, teardown)
def test_output_file_lengths():
'''
Test the lengths of the named output files
'''
r = call(['./storm'])
for i in range(len(output_files)):
n_lines = sum(1 for line in open(output_files[i]))
assert_equal(n_lines, output_file_lengths[i])
|
Add unit tests for C version of
|
Add unit tests for C version of
|
Python
|
mit
|
mdpiper/storm,csdms-contrib/storm,csdms-contrib/storm,mdpiper/storm
|
Add unit tests for C version of
|
#! /usr/bin/env python
#
# Tests for the C version of `storm`.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import shutil
from subprocess import call
# Global
start_dir = os.getcwd()
data_dir = os.path.join(start_dir, 'testing', 'data')
c_dir = os.path.join(start_dir, 'c')
input_file = 'wind.in'
output_files = ('wdir.data', 'windx.data', 'windy.data')
output_file_lengths = (100, 104, 104)
# Fixtures -------------------------------------------------------------
def setup_module():
'''
Called before any tests are performed.
'''
print('*** C tests')
os.chdir(c_dir)
def teardown_module():
'''
Called after all tests have completed.
'''
call(['make', 'cleaner'])
os.chdir(start_dir)
def setup():
'''
Called at start of any test using it @with_setup()
'''
shutil.copy(os.path.join(data_dir, input_file), c_dir)
def teardown():
'''
Called at end of any test using it @with_setup()
'''
os.remove(input_file)
# Tests ----------------------------------------------------------------
def test_compile():
'''
Test whether `storm` compiles
'''
call(['make'])
def test_no_input_file():
'''
Check that storm fails without input file
'''
r = call(['./storm'])
assert_not_equal(r, 0)
@with_setup(setup, teardown)
def test_output_files_exist():
'''
Test for creation of the three named output files
'''
r = call(['./storm'])
for fname in output_files:
assert_true(os.path.exists(fname))
@with_setup(setup, teardown)
def test_output_file_lengths():
'''
Test the lengths of the named output files
'''
r = call(['./storm'])
for i in range(len(output_files)):
n_lines = sum(1 for line in open(output_files[i]))
assert_equal(n_lines, output_file_lengths[i])
|
<commit_before><commit_msg>Add unit tests for C version of<commit_after>
|
#! /usr/bin/env python
#
# Tests for the C version of `storm`.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import shutil
from subprocess import call
# Global
start_dir = os.getcwd()
data_dir = os.path.join(start_dir, 'testing', 'data')
c_dir = os.path.join(start_dir, 'c')
input_file = 'wind.in'
output_files = ('wdir.data', 'windx.data', 'windy.data')
output_file_lengths = (100, 104, 104)
# Fixtures -------------------------------------------------------------
def setup_module():
'''
Called before any tests are performed.
'''
print('*** C tests')
os.chdir(c_dir)
def teardown_module():
'''
Called after all tests have completed.
'''
call(['make', 'cleaner'])
os.chdir(start_dir)
def setup():
'''
Called at start of any test using it @with_setup()
'''
shutil.copy(os.path.join(data_dir, input_file), c_dir)
def teardown():
'''
Called at end of any test using it @with_setup()
'''
os.remove(input_file)
# Tests ----------------------------------------------------------------
def test_compile():
'''
Test whether `storm` compiles
'''
call(['make'])
def test_no_input_file():
'''
Check that storm fails without input file
'''
r = call(['./storm'])
assert_not_equal(r, 0)
@with_setup(setup, teardown)
def test_output_files_exist():
'''
Test for creation of the three named output files
'''
r = call(['./storm'])
for fname in output_files:
assert_true(os.path.exists(fname))
@with_setup(setup, teardown)
def test_output_file_lengths():
'''
Test the lengths of the named output files
'''
r = call(['./storm'])
for i in range(len(output_files)):
n_lines = sum(1 for line in open(output_files[i]))
assert_equal(n_lines, output_file_lengths[i])
|
Add unit tests for C version of#! /usr/bin/env python
#
# Tests for the C version of `storm`.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import shutil
from subprocess import call
# Global
start_dir = os.getcwd()
data_dir = os.path.join(start_dir, 'testing', 'data')
c_dir = os.path.join(start_dir, 'c')
input_file = 'wind.in'
output_files = ('wdir.data', 'windx.data', 'windy.data')
output_file_lengths = (100, 104, 104)
# Fixtures -------------------------------------------------------------
def setup_module():
'''
Called before any tests are performed.
'''
print('*** C tests')
os.chdir(c_dir)
def teardown_module():
'''
Called after all tests have completed.
'''
call(['make', 'cleaner'])
os.chdir(start_dir)
def setup():
'''
Called at start of any test using it @with_setup()
'''
shutil.copy(os.path.join(data_dir, input_file), c_dir)
def teardown():
'''
Called at end of any test using it @with_setup()
'''
os.remove(input_file)
# Tests ----------------------------------------------------------------
def test_compile():
'''
Test whether `storm` compiles
'''
call(['make'])
def test_no_input_file():
'''
Check that storm fails without input file
'''
r = call(['./storm'])
assert_not_equal(r, 0)
@with_setup(setup, teardown)
def test_output_files_exist():
'''
Test for creation of the three named output files
'''
r = call(['./storm'])
for fname in output_files:
assert_true(os.path.exists(fname))
@with_setup(setup, teardown)
def test_output_file_lengths():
'''
Test the lengths of the named output files
'''
r = call(['./storm'])
for i in range(len(output_files)):
n_lines = sum(1 for line in open(output_files[i]))
assert_equal(n_lines, output_file_lengths[i])
|
<commit_before><commit_msg>Add unit tests for C version of<commit_after>#! /usr/bin/env python
#
# Tests for the C version of `storm`.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import shutil
from subprocess import call
# Global
start_dir = os.getcwd()
data_dir = os.path.join(start_dir, 'testing', 'data')
c_dir = os.path.join(start_dir, 'c')
input_file = 'wind.in'
output_files = ('wdir.data', 'windx.data', 'windy.data')
output_file_lengths = (100, 104, 104)
# Fixtures -------------------------------------------------------------
def setup_module():
'''
Called before any tests are performed.
'''
print('*** C tests')
os.chdir(c_dir)
def teardown_module():
'''
Called after all tests have completed.
'''
call(['make', 'cleaner'])
os.chdir(start_dir)
def setup():
'''
Called at start of any test using it @with_setup()
'''
shutil.copy(os.path.join(data_dir, input_file), c_dir)
def teardown():
'''
Called at end of any test using it @with_setup()
'''
os.remove(input_file)
# Tests ----------------------------------------------------------------
def test_compile():
'''
Test whether `storm` compiles
'''
call(['make'])
def test_no_input_file():
'''
Check that storm fails without input file
'''
r = call(['./storm'])
assert_not_equal(r, 0)
@with_setup(setup, teardown)
def test_output_files_exist():
'''
Test for creation of the three named output files
'''
r = call(['./storm'])
for fname in output_files:
assert_true(os.path.exists(fname))
@with_setup(setup, teardown)
def test_output_file_lengths():
'''
Test the lengths of the named output files
'''
r = call(['./storm'])
for i in range(len(output_files)):
n_lines = sum(1 for line in open(output_files[i]))
assert_equal(n_lines, output_file_lengths[i])
|
|
f8ee77368da560d1becb6738ef4d8aca9d7e9ba8
|
annotation_statistics.py
|
annotation_statistics.py
|
"""Count the numbers of annotated entities and emotional sentences in the
corpus that was manually annotated.
Usage: python annotation_statistics.py <dir containing the folia files with
EmbodiedEmotions annotations>
"""
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import sentence, note
import argparse
import os
from collections import Counter
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dir_name', help='the name of the dir containing the '
'FoLiA XML files that should be processed.')
args = parser.parse_args()
dir_name = args.dir_name
entity_class = u'EmbodiedEmotions'
act_tag = '{http://ilk.uvt.nl/folia}div'
os.chdir(dir_name)
folia_counter = 0
num_sent = 0
num_emotional = 0
stats = Counter()
for file_name in os.listdir(dir_name):
folia_counter += 1
print '({}) {}'.format(folia_counter, file_name)
sents = set()
# load document
context = etree.iterparse(file_name,
events=('start', 'end'),
tag=act_tag,
huge_tree=True)
delete = True
for event, elem in context:
if event == 'start' and elem.get('class') == 'act':
delete = False
if event == 'end' and elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
sentences = act_xml.find_all(sentence)
s = None
for sent in sentences:
if not note(sent.parent):
# some t elements appear to be empty (this is not
# allowed, but it happens). So, check whether there is
# a string to add before adding it.
if sent.t:
if sent.t.string:
s = sent.t.string
# calculate stats only for unique sentences in text
if s and s not in sents:
sents.add(s)
num_sent += 1
entities = sent.find_all('entity')
emotional = False
for entity in entities:
e = entity.attrs.get('class')
if e.startswith(entity_class):
emotional = True
stats[e] += 1
if emotional:
num_emotional += 1
delete = True
# clear memory
#if delete:
# elem.clear()
# while elem.getprevious() is not None:
# del elem.getparent()[0]
# del context
# print stats
print '{} sentences in {} files'.format(num_sent, folia_counter)
print '{} emotional sentences'.format(num_emotional)
for tag, freq in stats.most_common():
print '{}\t{}'.format(tag, freq)
|
Add script to calculate annotation statistics
|
Add script to calculate annotation statistics
Added a script that counts the number of annotated entities and
emotional vs. non-emotional sentences.
|
Python
|
apache-2.0
|
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
|
Add script to calculate annotation statistics
Added a script that counts the number of annotated entities and
emotional vs. non-emotional sentences.
|
"""Count the numbers of annotated entities and emotional sentences in the
corpus that was manually annotated.
Usage: python annotation_statistics.py <dir containing the folia files with
EmbodiedEmotions annotations>
"""
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import sentence, note
import argparse
import os
from collections import Counter
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dir_name', help='the name of the dir containing the '
'FoLiA XML files that should be processed.')
args = parser.parse_args()
dir_name = args.dir_name
entity_class = u'EmbodiedEmotions'
act_tag = '{http://ilk.uvt.nl/folia}div'
os.chdir(dir_name)
folia_counter = 0
num_sent = 0
num_emotional = 0
stats = Counter()
for file_name in os.listdir(dir_name):
folia_counter += 1
print '({}) {}'.format(folia_counter, file_name)
sents = set()
# load document
context = etree.iterparse(file_name,
events=('start', 'end'),
tag=act_tag,
huge_tree=True)
delete = True
for event, elem in context:
if event == 'start' and elem.get('class') == 'act':
delete = False
if event == 'end' and elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
sentences = act_xml.find_all(sentence)
s = None
for sent in sentences:
if not note(sent.parent):
# some t elements appear to be empty (this is not
# allowed, but it happens). So, check whether there is
# a string to add before adding it.
if sent.t:
if sent.t.string:
s = sent.t.string
# calculate stats only for unique sentences in text
if s and s not in sents:
sents.add(s)
num_sent += 1
entities = sent.find_all('entity')
emotional = False
for entity in entities:
e = entity.attrs.get('class')
if e.startswith(entity_class):
emotional = True
stats[e] += 1
if emotional:
num_emotional += 1
delete = True
# clear memory
#if delete:
# elem.clear()
# while elem.getprevious() is not None:
# del elem.getparent()[0]
# del context
# print stats
print '{} sentences in {} files'.format(num_sent, folia_counter)
print '{} emotional sentences'.format(num_emotional)
for tag, freq in stats.most_common():
print '{}\t{}'.format(tag, freq)
|
<commit_before><commit_msg>Add script to calculate annotation statistics
Added a script that counts the number of annotated entities and
emotional vs. non-emotional sentences.<commit_after>
|
"""Count the numbers of annotated entities and emotional sentences in the
corpus that was manually annotated.
Usage: python annotation_statistics.py <dir containing the folia files with
EmbodiedEmotions annotations>
"""
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import sentence, note
import argparse
import os
from collections import Counter
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dir_name', help='the name of the dir containing the '
'FoLiA XML files that should be processed.')
args = parser.parse_args()
dir_name = args.dir_name
entity_class = u'EmbodiedEmotions'
act_tag = '{http://ilk.uvt.nl/folia}div'
os.chdir(dir_name)
folia_counter = 0
num_sent = 0
num_emotional = 0
stats = Counter()
for file_name in os.listdir(dir_name):
folia_counter += 1
print '({}) {}'.format(folia_counter, file_name)
sents = set()
# load document
context = etree.iterparse(file_name,
events=('start', 'end'),
tag=act_tag,
huge_tree=True)
delete = True
for event, elem in context:
if event == 'start' and elem.get('class') == 'act':
delete = False
if event == 'end' and elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
sentences = act_xml.find_all(sentence)
s = None
for sent in sentences:
if not note(sent.parent):
# some t elements appear to be empty (this is not
# allowed, but it happens). So, check whether there is
# a string to add before adding it.
if sent.t:
if sent.t.string:
s = sent.t.string
# calculate stats only for unique sentences in text
if s and s not in sents:
sents.add(s)
num_sent += 1
entities = sent.find_all('entity')
emotional = False
for entity in entities:
e = entity.attrs.get('class')
if e.startswith(entity_class):
emotional = True
stats[e] += 1
if emotional:
num_emotional += 1
delete = True
# clear memory
#if delete:
# elem.clear()
# while elem.getprevious() is not None:
# del elem.getparent()[0]
# del context
# print stats
print '{} sentences in {} files'.format(num_sent, folia_counter)
print '{} emotional sentences'.format(num_emotional)
for tag, freq in stats.most_common():
print '{}\t{}'.format(tag, freq)
|
Add script to calculate annotation statistics
Added a script that counts the number of annotated entities and
emotional vs. non-emotional sentences."""Count the numbers of annotated entities and emotional sentences in the
corpus that was manually annotated.
Usage: python annotation_statistics.py <dir containing the folia files with
EmbodiedEmotions annotations>
"""
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import sentence, note
import argparse
import os
from collections import Counter
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dir_name', help='the name of the dir containing the '
'FoLiA XML files that should be processed.')
args = parser.parse_args()
dir_name = args.dir_name
entity_class = u'EmbodiedEmotions'
act_tag = '{http://ilk.uvt.nl/folia}div'
os.chdir(dir_name)
folia_counter = 0
num_sent = 0
num_emotional = 0
stats = Counter()
for file_name in os.listdir(dir_name):
folia_counter += 1
print '({}) {}'.format(folia_counter, file_name)
sents = set()
# load document
context = etree.iterparse(file_name,
events=('start', 'end'),
tag=act_tag,
huge_tree=True)
delete = True
for event, elem in context:
if event == 'start' and elem.get('class') == 'act':
delete = False
if event == 'end' and elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
sentences = act_xml.find_all(sentence)
s = None
for sent in sentences:
if not note(sent.parent):
# some t elements appear to be empty (this is not
# allowed, but it happens). So, check whether there is
# a string to add before adding it.
if sent.t:
if sent.t.string:
s = sent.t.string
# calculate stats only for unique sentences in text
if s and s not in sents:
sents.add(s)
num_sent += 1
entities = sent.find_all('entity')
emotional = False
for entity in entities:
e = entity.attrs.get('class')
if e.startswith(entity_class):
emotional = True
stats[e] += 1
if emotional:
num_emotional += 1
delete = True
# clear memory
#if delete:
# elem.clear()
# while elem.getprevious() is not None:
# del elem.getparent()[0]
# del context
# print stats
print '{} sentences in {} files'.format(num_sent, folia_counter)
print '{} emotional sentences'.format(num_emotional)
for tag, freq in stats.most_common():
print '{}\t{}'.format(tag, freq)
|
<commit_before><commit_msg>Add script to calculate annotation statistics
Added a script that counts the number of annotated entities and
emotional vs. non-emotional sentences.<commit_after>"""Count the numbers of annotated entities and emotional sentences in the
corpus that was manually annotated.
Usage: python annotation_statistics.py <dir containing the folia files with
EmbodiedEmotions annotations>
"""
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import sentence, note
import argparse
import os
from collections import Counter
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dir_name', help='the name of the dir containing the '
'FoLiA XML files that should be processed.')
args = parser.parse_args()
dir_name = args.dir_name
entity_class = u'EmbodiedEmotions'
act_tag = '{http://ilk.uvt.nl/folia}div'
os.chdir(dir_name)
folia_counter = 0
num_sent = 0
num_emotional = 0
stats = Counter()
for file_name in os.listdir(dir_name):
folia_counter += 1
print '({}) {}'.format(folia_counter, file_name)
sents = set()
# load document
context = etree.iterparse(file_name,
events=('start', 'end'),
tag=act_tag,
huge_tree=True)
delete = True
for event, elem in context:
if event == 'start' and elem.get('class') == 'act':
delete = False
if event == 'end' and elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
sentences = act_xml.find_all(sentence)
s = None
for sent in sentences:
if not note(sent.parent):
# some t elements appear to be empty (this is not
# allowed, but it happens). So, check whether there is
# a string to add before adding it.
if sent.t:
if sent.t.string:
s = sent.t.string
# calculate stats only for unique sentences in text
if s and s not in sents:
sents.add(s)
num_sent += 1
entities = sent.find_all('entity')
emotional = False
for entity in entities:
e = entity.attrs.get('class')
if e.startswith(entity_class):
emotional = True
stats[e] += 1
if emotional:
num_emotional += 1
delete = True
# clear memory
#if delete:
# elem.clear()
# while elem.getprevious() is not None:
# del elem.getparent()[0]
# del context
# print stats
print '{} sentences in {} files'.format(num_sent, folia_counter)
print '{} emotional sentences'.format(num_emotional)
for tag, freq in stats.most_common():
print '{}\t{}'.format(tag, freq)
|
|
df719f08efdbbadc5694454ffceed21c7c54e8c7
|
tests/test_config_gauge.py
|
tests/test_config_gauge.py
|
#!/usr/bin/env python3
"""Test config parsing"""
import logging
import os
import shutil
import tempfile
import unittest
from faucet import config_parser as cp
LOGNAME = '/dev/null'
class TestGaugeConfig(unittest.TestCase):
"""Test gauge.yaml config parsing."""
DEFAULT_FAUCET_CONFIG = """
dps:
dp1:
dp_id: 1
interfaces:
1:
native_vlan: v1
dp2:
dp_id: 2
interfaces:
1:
native_vlan: v1
vlans:
v1:
vid: 1
"""
GAUGE_CONFIG_HEADER = """
faucet_configs:
- '{}'
"""
def setUp(self):
logging.disable(logging.CRITICAL)
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
logging.disable(logging.NOTSET)
shutil.rmtree(self.tmpdir)
def conf_file_name(self, faucet=False):
if faucet:
return os.path.join(self.tmpdir, 'faucet.yaml')
else:
return os.path.join(self.tmpdir, 'gauge.yaml')
def create_config_files(self, config, faucet_config=None):
"""Returns file path to file containing the config parameter."""
gauge_file_name = self.conf_file_name()
faucet_file_name = self.conf_file_name(faucet=True)
with open(gauge_file_name, 'w') as conf_file:
conf_file.write(config.format(faucet_file_name))
with open(faucet_file_name, 'w') as conf_file:
if faucet_config:
conf_file.write(faucet_config)
else:
conf_file.write(self.DEFAULT_FAUCET_CONFIG)
return (gauge_file_name, faucet_file_name)
def get_config(self, conf_suffix):
return self.GAUGE_CONFIG_HEADER + conf_suffix
def test_all_dps(self):
GAUGE_CONF = """
watchers:
port_stats_poller:
type: 'port_stats'
all_dps: True
interval: 10
db: 'prometheus'
dbs:
prometheus:
type: 'prometheus'
"""
conf = self.get_config(GAUGE_CONF)
gauge_file, faucet_file = self.create_config_files(conf)
watcher_confs = cp.watcher_parser(gauge_file, 'gauge_config_test', None)
self.assertEqual(len(watcher_confs), 2, 'failed to create config for each dp')
for watcher_conf in watcher_confs:
msg = 'all_dps config not applied to each dp'
self.assertEqual(watcher_conf.type, 'port_stats', msg)
self.assertEqual(watcher_conf.interval, 10, msg)
self.assertEqual(watcher_conf.db_type, 'prometheus', msg)
if __name__ == "__main__":
unittest.main()
|
Add unit tests for gauge config
|
Add unit tests for gauge config
Add test coverage for https://github.com/faucetsdn/faucet/issues/1441
|
Python
|
apache-2.0
|
anarkiwi/faucet,gizmoguy/faucet,shivarammysore/faucet,wackerly/faucet,faucetsdn/faucet,trungdtbk/faucet,mwutzke/faucet,mwutzke/faucet,trentindav/faucet,wackerly/faucet,shivarammysore/faucet,trentindav/faucet,gizmoguy/faucet,REANNZ/faucet,REANNZ/faucet,anarkiwi/faucet,faucetsdn/faucet,trungdtbk/faucet
|
Add unit tests for gauge config
Add test coverage for https://github.com/faucetsdn/faucet/issues/1441
|
#!/usr/bin/env python3
"""Test config parsing"""
import logging
import os
import shutil
import tempfile
import unittest
from faucet import config_parser as cp
LOGNAME = '/dev/null'
class TestGaugeConfig(unittest.TestCase):
"""Test gauge.yaml config parsing."""
DEFAULT_FAUCET_CONFIG = """
dps:
dp1:
dp_id: 1
interfaces:
1:
native_vlan: v1
dp2:
dp_id: 2
interfaces:
1:
native_vlan: v1
vlans:
v1:
vid: 1
"""
GAUGE_CONFIG_HEADER = """
faucet_configs:
- '{}'
"""
def setUp(self):
logging.disable(logging.CRITICAL)
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
logging.disable(logging.NOTSET)
shutil.rmtree(self.tmpdir)
def conf_file_name(self, faucet=False):
if faucet:
return os.path.join(self.tmpdir, 'faucet.yaml')
else:
return os.path.join(self.tmpdir, 'gauge.yaml')
def create_config_files(self, config, faucet_config=None):
"""Returns file path to file containing the config parameter."""
gauge_file_name = self.conf_file_name()
faucet_file_name = self.conf_file_name(faucet=True)
with open(gauge_file_name, 'w') as conf_file:
conf_file.write(config.format(faucet_file_name))
with open(faucet_file_name, 'w') as conf_file:
if faucet_config:
conf_file.write(faucet_config)
else:
conf_file.write(self.DEFAULT_FAUCET_CONFIG)
return (gauge_file_name, faucet_file_name)
def get_config(self, conf_suffix):
return self.GAUGE_CONFIG_HEADER + conf_suffix
def test_all_dps(self):
GAUGE_CONF = """
watchers:
port_stats_poller:
type: 'port_stats'
all_dps: True
interval: 10
db: 'prometheus'
dbs:
prometheus:
type: 'prometheus'
"""
conf = self.get_config(GAUGE_CONF)
gauge_file, faucet_file = self.create_config_files(conf)
watcher_confs = cp.watcher_parser(gauge_file, 'gauge_config_test', None)
self.assertEqual(len(watcher_confs), 2, 'failed to create config for each dp')
for watcher_conf in watcher_confs:
msg = 'all_dps config not applied to each dp'
self.assertEqual(watcher_conf.type, 'port_stats', msg)
self.assertEqual(watcher_conf.interval, 10, msg)
self.assertEqual(watcher_conf.db_type, 'prometheus', msg)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unit tests for gauge config
Add test coverage for https://github.com/faucetsdn/faucet/issues/1441<commit_after>
|
#!/usr/bin/env python3
"""Test config parsing"""
import logging
import os
import shutil
import tempfile
import unittest
from faucet import config_parser as cp
LOGNAME = '/dev/null'
class TestGaugeConfig(unittest.TestCase):
"""Test gauge.yaml config parsing."""
DEFAULT_FAUCET_CONFIG = """
dps:
dp1:
dp_id: 1
interfaces:
1:
native_vlan: v1
dp2:
dp_id: 2
interfaces:
1:
native_vlan: v1
vlans:
v1:
vid: 1
"""
GAUGE_CONFIG_HEADER = """
faucet_configs:
- '{}'
"""
def setUp(self):
logging.disable(logging.CRITICAL)
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
logging.disable(logging.NOTSET)
shutil.rmtree(self.tmpdir)
def conf_file_name(self, faucet=False):
if faucet:
return os.path.join(self.tmpdir, 'faucet.yaml')
else:
return os.path.join(self.tmpdir, 'gauge.yaml')
def create_config_files(self, config, faucet_config=None):
"""Returns file path to file containing the config parameter."""
gauge_file_name = self.conf_file_name()
faucet_file_name = self.conf_file_name(faucet=True)
with open(gauge_file_name, 'w') as conf_file:
conf_file.write(config.format(faucet_file_name))
with open(faucet_file_name, 'w') as conf_file:
if faucet_config:
conf_file.write(faucet_config)
else:
conf_file.write(self.DEFAULT_FAUCET_CONFIG)
return (gauge_file_name, faucet_file_name)
def get_config(self, conf_suffix):
return self.GAUGE_CONFIG_HEADER + conf_suffix
def test_all_dps(self):
GAUGE_CONF = """
watchers:
port_stats_poller:
type: 'port_stats'
all_dps: True
interval: 10
db: 'prometheus'
dbs:
prometheus:
type: 'prometheus'
"""
conf = self.get_config(GAUGE_CONF)
gauge_file, faucet_file = self.create_config_files(conf)
watcher_confs = cp.watcher_parser(gauge_file, 'gauge_config_test', None)
self.assertEqual(len(watcher_confs), 2, 'failed to create config for each dp')
for watcher_conf in watcher_confs:
msg = 'all_dps config not applied to each dp'
self.assertEqual(watcher_conf.type, 'port_stats', msg)
self.assertEqual(watcher_conf.interval, 10, msg)
self.assertEqual(watcher_conf.db_type, 'prometheus', msg)
if __name__ == "__main__":
unittest.main()
|
Add unit tests for gauge config
Add test coverage for https://github.com/faucetsdn/faucet/issues/1441#!/usr/bin/env python3
"""Test config parsing"""
import logging
import os
import shutil
import tempfile
import unittest
from faucet import config_parser as cp
LOGNAME = '/dev/null'
class TestGaugeConfig(unittest.TestCase):
"""Test gauge.yaml config parsing."""
DEFAULT_FAUCET_CONFIG = """
dps:
dp1:
dp_id: 1
interfaces:
1:
native_vlan: v1
dp2:
dp_id: 2
interfaces:
1:
native_vlan: v1
vlans:
v1:
vid: 1
"""
GAUGE_CONFIG_HEADER = """
faucet_configs:
- '{}'
"""
def setUp(self):
logging.disable(logging.CRITICAL)
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
logging.disable(logging.NOTSET)
shutil.rmtree(self.tmpdir)
def conf_file_name(self, faucet=False):
if faucet:
return os.path.join(self.tmpdir, 'faucet.yaml')
else:
return os.path.join(self.tmpdir, 'gauge.yaml')
def create_config_files(self, config, faucet_config=None):
"""Returns file path to file containing the config parameter."""
gauge_file_name = self.conf_file_name()
faucet_file_name = self.conf_file_name(faucet=True)
with open(gauge_file_name, 'w') as conf_file:
conf_file.write(config.format(faucet_file_name))
with open(faucet_file_name, 'w') as conf_file:
if faucet_config:
conf_file.write(faucet_config)
else:
conf_file.write(self.DEFAULT_FAUCET_CONFIG)
return (gauge_file_name, faucet_file_name)
def get_config(self, conf_suffix):
return self.GAUGE_CONFIG_HEADER + conf_suffix
def test_all_dps(self):
GAUGE_CONF = """
watchers:
port_stats_poller:
type: 'port_stats'
all_dps: True
interval: 10
db: 'prometheus'
dbs:
prometheus:
type: 'prometheus'
"""
conf = self.get_config(GAUGE_CONF)
gauge_file, faucet_file = self.create_config_files(conf)
watcher_confs = cp.watcher_parser(gauge_file, 'gauge_config_test', None)
self.assertEqual(len(watcher_confs), 2, 'failed to create config for each dp')
for watcher_conf in watcher_confs:
msg = 'all_dps config not applied to each dp'
self.assertEqual(watcher_conf.type, 'port_stats', msg)
self.assertEqual(watcher_conf.interval, 10, msg)
self.assertEqual(watcher_conf.db_type, 'prometheus', msg)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unit tests for gauge config
Add test coverage for https://github.com/faucetsdn/faucet/issues/1441<commit_after>#!/usr/bin/env python3
"""Test config parsing"""
import logging
import os
import shutil
import tempfile
import unittest
from faucet import config_parser as cp
LOGNAME = '/dev/null'
class TestGaugeConfig(unittest.TestCase):
"""Test gauge.yaml config parsing."""
DEFAULT_FAUCET_CONFIG = """
dps:
dp1:
dp_id: 1
interfaces:
1:
native_vlan: v1
dp2:
dp_id: 2
interfaces:
1:
native_vlan: v1
vlans:
v1:
vid: 1
"""
GAUGE_CONFIG_HEADER = """
faucet_configs:
- '{}'
"""
def setUp(self):
logging.disable(logging.CRITICAL)
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
logging.disable(logging.NOTSET)
shutil.rmtree(self.tmpdir)
def conf_file_name(self, faucet=False):
if faucet:
return os.path.join(self.tmpdir, 'faucet.yaml')
else:
return os.path.join(self.tmpdir, 'gauge.yaml')
def create_config_files(self, config, faucet_config=None):
"""Returns file path to file containing the config parameter."""
gauge_file_name = self.conf_file_name()
faucet_file_name = self.conf_file_name(faucet=True)
with open(gauge_file_name, 'w') as conf_file:
conf_file.write(config.format(faucet_file_name))
with open(faucet_file_name, 'w') as conf_file:
if faucet_config:
conf_file.write(faucet_config)
else:
conf_file.write(self.DEFAULT_FAUCET_CONFIG)
return (gauge_file_name, faucet_file_name)
def get_config(self, conf_suffix):
return self.GAUGE_CONFIG_HEADER + conf_suffix
def test_all_dps(self):
GAUGE_CONF = """
watchers:
port_stats_poller:
type: 'port_stats'
all_dps: True
interval: 10
db: 'prometheus'
dbs:
prometheus:
type: 'prometheus'
"""
conf = self.get_config(GAUGE_CONF)
gauge_file, faucet_file = self.create_config_files(conf)
watcher_confs = cp.watcher_parser(gauge_file, 'gauge_config_test', None)
self.assertEqual(len(watcher_confs), 2, 'failed to create config for each dp')
for watcher_conf in watcher_confs:
msg = 'all_dps config not applied to each dp'
self.assertEqual(watcher_conf.type, 'port_stats', msg)
self.assertEqual(watcher_conf.interval, 10, msg)
self.assertEqual(watcher_conf.db_type, 'prometheus', msg)
if __name__ == "__main__":
unittest.main()
|
|
65074a6edb390aaf01aab018f166540d583ee86a
|
clubs/migrations/0010_add_missing_colleges.py
|
clubs/migrations/0010_add_missing_colleges.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_colleges(apps, schema_editor):
College = apps.get_model('clubs', 'College')
# Variables are named: <city_code>_<college_code>_<gender_code>.
College.objects.create(city='R', section='NG', name='A', gender='F')
College.objects.create(city='R', section='NG', name='P', gender='F')
# Collge of Public Health was created manually on enjazportal.com.
# Let's make sure all other installations have this college as
# well:
if not College.objects.filter(city='R', section='NG', name='I',
gender='M'):
College.objects.create(city='R', section='NG', name='I',
gender='M')
def remove_colleges(apps, schema_editor):
College = apps.get_model('clubs', 'College')
College.objects.get(city='R', section='NG', name='A',
gender='F').delete()
College.objects.get(city='R', section='NG', name='P',
gender='F').delete()
College.objects.get(city='R', section='NG', name='I',
gender='M').delete()
class Migration(migrations.Migration):
dependencies = [
('clubs', '0009_more_sections_and_club_gender'),
]
operations = [
migrations.RunPython(
add_colleges,
reverse_code=remove_colleges),
]
|
Add new and missing colleges.
|
Add new and missing colleges.
|
Python
|
agpl-3.0
|
osamak/student-portal,osamak/student-portal,enjaz/enjaz,enjaz/enjaz,osamak/student-portal,enjaz/enjaz,enjaz/enjaz,osamak/student-portal,enjaz/enjaz,osamak/student-portal
|
Add new and missing colleges.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_colleges(apps, schema_editor):
College = apps.get_model('clubs', 'College')
# Variables are named: <city_code>_<college_code>_<gender_code>.
College.objects.create(city='R', section='NG', name='A', gender='F')
College.objects.create(city='R', section='NG', name='P', gender='F')
    # College of Public Health was created manually on enjazportal.com.
# Let's make sure all other installations have this college as
# well:
if not College.objects.filter(city='R', section='NG', name='I',
gender='M'):
College.objects.create(city='R', section='NG', name='I',
gender='M')
def remove_colleges(apps, schema_editor):
College = apps.get_model('clubs', 'College')
College.objects.get(city='R', section='NG', name='A',
gender='F').delete()
College.objects.get(city='R', section='NG', name='P',
gender='F').delete()
College.objects.get(city='R', section='NG', name='I',
gender='M').delete()
class Migration(migrations.Migration):
dependencies = [
('clubs', '0009_more_sections_and_club_gender'),
]
operations = [
migrations.RunPython(
add_colleges,
reverse_code=remove_colleges),
]
|
<commit_before><commit_msg>Add new and missing colleges.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_colleges(apps, schema_editor):
College = apps.get_model('clubs', 'College')
# Variables are named: <city_code>_<college_code>_<gender_code>.
College.objects.create(city='R', section='NG', name='A', gender='F')
College.objects.create(city='R', section='NG', name='P', gender='F')
    # College of Public Health was created manually on enjazportal.com.
# Let's make sure all other installations have this college as
# well:
if not College.objects.filter(city='R', section='NG', name='I',
gender='M'):
College.objects.create(city='R', section='NG', name='I',
gender='M')
def remove_colleges(apps, schema_editor):
College = apps.get_model('clubs', 'College')
College.objects.get(city='R', section='NG', name='A',
gender='F').delete()
College.objects.get(city='R', section='NG', name='P',
gender='F').delete()
College.objects.get(city='R', section='NG', name='I',
gender='M').delete()
class Migration(migrations.Migration):
dependencies = [
('clubs', '0009_more_sections_and_club_gender'),
]
operations = [
migrations.RunPython(
add_colleges,
reverse_code=remove_colleges),
]
|
Add new and missing colleges.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_colleges(apps, schema_editor):
College = apps.get_model('clubs', 'College')
# Variables are named: <city_code>_<college_code>_<gender_code>.
College.objects.create(city='R', section='NG', name='A', gender='F')
College.objects.create(city='R', section='NG', name='P', gender='F')
    # College of Public Health was created manually on enjazportal.com.
# Let's make sure all other installations have this college as
# well:
if not College.objects.filter(city='R', section='NG', name='I',
gender='M'):
College.objects.create(city='R', section='NG', name='I',
gender='M')
def remove_colleges(apps, schema_editor):
College = apps.get_model('clubs', 'College')
College.objects.get(city='R', section='NG', name='A',
gender='F').delete()
College.objects.get(city='R', section='NG', name='P',
gender='F').delete()
College.objects.get(city='R', section='NG', name='I',
gender='M').delete()
class Migration(migrations.Migration):
dependencies = [
('clubs', '0009_more_sections_and_club_gender'),
]
operations = [
migrations.RunPython(
add_colleges,
reverse_code=remove_colleges),
]
|
<commit_before><commit_msg>Add new and missing colleges.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_colleges(apps, schema_editor):
College = apps.get_model('clubs', 'College')
# Variables are named: <city_code>_<college_code>_<gender_code>.
College.objects.create(city='R', section='NG', name='A', gender='F')
College.objects.create(city='R', section='NG', name='P', gender='F')
    # College of Public Health was created manually on enjazportal.com.
# Let's make sure all other installations have this college as
# well:
if not College.objects.filter(city='R', section='NG', name='I',
gender='M'):
College.objects.create(city='R', section='NG', name='I',
gender='M')
def remove_colleges(apps, schema_editor):
College = apps.get_model('clubs', 'College')
College.objects.get(city='R', section='NG', name='A',
gender='F').delete()
College.objects.get(city='R', section='NG', name='P',
gender='F').delete()
College.objects.get(city='R', section='NG', name='I',
gender='M').delete()
class Migration(migrations.Migration):
dependencies = [
('clubs', '0009_more_sections_and_club_gender'),
]
operations = [
migrations.RunPython(
add_colleges,
reverse_code=remove_colleges),
]
|
|
8cabf2cb2922979bf5dede4e1084978e82df092e
|
apps/submission/forms.py
|
apps/submission/forms.py
|
from django import forms
from django.utils.translation import ugettext as _
from apps.core.models import Tag
from .models import SubmissionProcess
class SubmissionTagsForm(forms.ModelForm):
def _get_tags():
return [(t.name, t.name) for t in Tag.objects.all()]
experiment_tags = forms.ChoiceField(
label=_("Select one or more tags"),
choices=_get_tags,
widget=forms.CheckboxSelectMultiple,
required=False,
)
analysis_tags = forms.ChoiceField(
label=_("Select one or more tags"),
choices=_get_tags,
widget=forms.CheckboxSelectMultiple,
required=False,
)
new_experiment_tags = forms.CharField(
label=_("and/or add new tags"),
help_text=_(
"Type coma-separated tags. Tree level separator is the slash. "
"Example: 'NGS, single-cell/RNA' will add the 'NGS' tag and the "
"'RNA' tag that has 'single-cell' as parent tag in the tag tree."
),
widget=forms.TextInput(
attrs={
'placeholder': _("NGS, single-cell/RNA")
},
),
required=False,
)
new_analysis_tags = forms.CharField(
label=_("and/or add new tags"),
help_text=_(
"Type coma-separated tags. Tree level separator is the slash. "
"Example: 'NGS, single-cell/RNA' will add the 'NGS' tag and the "
"'RNA' tag that has 'single-cell' as parent tag in the tag tree."
),
widget=forms.TextInput(
attrs={
'placeholder': _("NGS, single-cell/RNA")
},
),
required=False,
)
class Meta:
model = SubmissionProcess
fields = ['tags', ]
widgets = {
'tags': forms.HiddenInput,
}
|
Add draft submission tags form
|
Add draft submission tags form
|
Python
|
bsd-3-clause
|
Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel
|
Add draft submission tags form
|
from django import forms
from django.utils.translation import ugettext as _
from apps.core.models import Tag
from .models import SubmissionProcess
class SubmissionTagsForm(forms.ModelForm):
def _get_tags():
return [(t.name, t.name) for t in Tag.objects.all()]
experiment_tags = forms.ChoiceField(
label=_("Select one or more tags"),
choices=_get_tags,
widget=forms.CheckboxSelectMultiple,
required=False,
)
analysis_tags = forms.ChoiceField(
label=_("Select one or more tags"),
choices=_get_tags,
widget=forms.CheckboxSelectMultiple,
required=False,
)
new_experiment_tags = forms.CharField(
label=_("and/or add new tags"),
help_text=_(
"Type coma-separated tags. Tree level separator is the slash. "
"Example: 'NGS, single-cell/RNA' will add the 'NGS' tag and the "
"'RNA' tag that has 'single-cell' as parent tag in the tag tree."
),
widget=forms.TextInput(
attrs={
'placeholder': _("NGS, single-cell/RNA")
},
),
required=False,
)
new_analysis_tags = forms.CharField(
label=_("and/or add new tags"),
help_text=_(
"Type coma-separated tags. Tree level separator is the slash. "
"Example: 'NGS, single-cell/RNA' will add the 'NGS' tag and the "
"'RNA' tag that has 'single-cell' as parent tag in the tag tree."
),
widget=forms.TextInput(
attrs={
'placeholder': _("NGS, single-cell/RNA")
},
),
required=False,
)
class Meta:
model = SubmissionProcess
fields = ['tags', ]
widgets = {
'tags': forms.HiddenInput,
}
|
<commit_before><commit_msg>Add draft submission tags form<commit_after>
|
from django import forms
from django.utils.translation import ugettext as _
from apps.core.models import Tag
from .models import SubmissionProcess
class SubmissionTagsForm(forms.ModelForm):
def _get_tags():
return [(t.name, t.name) for t in Tag.objects.all()]
experiment_tags = forms.ChoiceField(
label=_("Select one or more tags"),
choices=_get_tags,
widget=forms.CheckboxSelectMultiple,
required=False,
)
analysis_tags = forms.ChoiceField(
label=_("Select one or more tags"),
choices=_get_tags,
widget=forms.CheckboxSelectMultiple,
required=False,
)
new_experiment_tags = forms.CharField(
label=_("and/or add new tags"),
help_text=_(
"Type coma-separated tags. Tree level separator is the slash. "
"Example: 'NGS, single-cell/RNA' will add the 'NGS' tag and the "
"'RNA' tag that has 'single-cell' as parent tag in the tag tree."
),
widget=forms.TextInput(
attrs={
'placeholder': _("NGS, single-cell/RNA")
},
),
required=False,
)
new_analysis_tags = forms.CharField(
label=_("and/or add new tags"),
help_text=_(
"Type coma-separated tags. Tree level separator is the slash. "
"Example: 'NGS, single-cell/RNA' will add the 'NGS' tag and the "
"'RNA' tag that has 'single-cell' as parent tag in the tag tree."
),
widget=forms.TextInput(
attrs={
'placeholder': _("NGS, single-cell/RNA")
},
),
required=False,
)
class Meta:
model = SubmissionProcess
fields = ['tags', ]
widgets = {
'tags': forms.HiddenInput,
}
|
Add draft submission tags formfrom django import forms
from django.utils.translation import ugettext as _
from apps.core.models import Tag
from .models import SubmissionProcess
class SubmissionTagsForm(forms.ModelForm):
def _get_tags():
return [(t.name, t.name) for t in Tag.objects.all()]
experiment_tags = forms.ChoiceField(
label=_("Select one or more tags"),
choices=_get_tags,
widget=forms.CheckboxSelectMultiple,
required=False,
)
analysis_tags = forms.ChoiceField(
label=_("Select one or more tags"),
choices=_get_tags,
widget=forms.CheckboxSelectMultiple,
required=False,
)
new_experiment_tags = forms.CharField(
label=_("and/or add new tags"),
help_text=_(
"Type coma-separated tags. Tree level separator is the slash. "
"Example: 'NGS, single-cell/RNA' will add the 'NGS' tag and the "
"'RNA' tag that has 'single-cell' as parent tag in the tag tree."
),
widget=forms.TextInput(
attrs={
'placeholder': _("NGS, single-cell/RNA")
},
),
required=False,
)
new_analysis_tags = forms.CharField(
label=_("and/or add new tags"),
help_text=_(
"Type coma-separated tags. Tree level separator is the slash. "
"Example: 'NGS, single-cell/RNA' will add the 'NGS' tag and the "
"'RNA' tag that has 'single-cell' as parent tag in the tag tree."
),
widget=forms.TextInput(
attrs={
'placeholder': _("NGS, single-cell/RNA")
},
),
required=False,
)
class Meta:
model = SubmissionProcess
fields = ['tags', ]
widgets = {
'tags': forms.HiddenInput,
}
|
<commit_before><commit_msg>Add draft submission tags form<commit_after>from django import forms
from django.utils.translation import ugettext as _
from apps.core.models import Tag
from .models import SubmissionProcess
class SubmissionTagsForm(forms.ModelForm):
def _get_tags():
return [(t.name, t.name) for t in Tag.objects.all()]
experiment_tags = forms.ChoiceField(
label=_("Select one or more tags"),
choices=_get_tags,
widget=forms.CheckboxSelectMultiple,
required=False,
)
analysis_tags = forms.ChoiceField(
label=_("Select one or more tags"),
choices=_get_tags,
widget=forms.CheckboxSelectMultiple,
required=False,
)
new_experiment_tags = forms.CharField(
label=_("and/or add new tags"),
help_text=_(
"Type coma-separated tags. Tree level separator is the slash. "
"Example: 'NGS, single-cell/RNA' will add the 'NGS' tag and the "
"'RNA' tag that has 'single-cell' as parent tag in the tag tree."
),
widget=forms.TextInput(
attrs={
'placeholder': _("NGS, single-cell/RNA")
},
),
required=False,
)
new_analysis_tags = forms.CharField(
label=_("and/or add new tags"),
help_text=_(
"Type coma-separated tags. Tree level separator is the slash. "
"Example: 'NGS, single-cell/RNA' will add the 'NGS' tag and the "
"'RNA' tag that has 'single-cell' as parent tag in the tag tree."
),
widget=forms.TextInput(
attrs={
'placeholder': _("NGS, single-cell/RNA")
},
),
required=False,
)
class Meta:
model = SubmissionProcess
fields = ['tags', ]
widgets = {
'tags': forms.HiddenInput,
}
|
|
1d78e83150373a1417b0aad517e7bf178e3ab633
|
tests/chainer_tests/optimizers_tests/test_optimizers.py
|
tests/chainer_tests/optimizers_tests/test_optimizers.py
|
import unittest
import six
import chainer
from chainer import optimizers
from chainer import testing
@testing.parameterize(*testing.product({
'impl': [
optimizers.AdaDelta,
optimizers.AdaGrad,
optimizers.Adam,
optimizers.MomentumSGD,
optimizers.NesterovAG,
optimizers.RMSprop,
optimizers.RMSpropGraves,
optimizers.SGD,
optimizers.SMORMS3,
]
}))
class TestOptimizerHyperparameter(unittest.TestCase):
def setUp(self):
self.target = chainer.Link(w=())
def create(self, *args, **kwargs):
self.optimizer = self.impl(*args, **kwargs)
self.optimizer.setup(self.target)
def get_hyperparam(self, name):
return getattr(self.target.w.update_rule.hyperparam, name)
def test_hyperparams(self):
self.create()
default = self.optimizer.hyperparam.get_dict()
for name, default_value in six.iteritems(default):
print('test param "{}"'.format(name))
self.create()
self.assertEqual(self.get_hyperparam(name), default_value)
new_value = default_value + 0.1
self.create(**{name: new_value})
self.assertEqual(self.get_hyperparam(name), new_value)
testing.run_module(__name__, __file__)
|
Add a test of using non-default hyperparameter for each optimizer implementation
|
Add a test of using non-default hyperparameter for each optimizer implementation
|
Python
|
mit
|
okuta/chainer,hvy/chainer,chainer/chainer,rezoo/chainer,wkentaro/chainer,wkentaro/chainer,jnishi/chainer,niboshi/chainer,niboshi/chainer,chainer/chainer,keisuke-umezawa/chainer,okuta/chainer,kiyukuta/chainer,okuta/chainer,niboshi/chainer,wkentaro/chainer,wkentaro/chainer,chainer/chainer,okuta/chainer,jnishi/chainer,hvy/chainer,ysekky/chainer,ktnyt/chainer,ktnyt/chainer,hvy/chainer,delta2323/chainer,ktnyt/chainer,jnishi/chainer,hvy/chainer,niboshi/chainer,pfnet/chainer,keisuke-umezawa/chainer,keisuke-umezawa/chainer,aonotas/chainer,ktnyt/chainer,anaruse/chainer,chainer/chainer,keisuke-umezawa/chainer,kashif/chainer,jnishi/chainer,tkerola/chainer,ronekko/chainer
|
Add a test of using non-default hyperparameter for each optimizer implementation
|
import unittest
import six
import chainer
from chainer import optimizers
from chainer import testing
@testing.parameterize(*testing.product({
'impl': [
optimizers.AdaDelta,
optimizers.AdaGrad,
optimizers.Adam,
optimizers.MomentumSGD,
optimizers.NesterovAG,
optimizers.RMSprop,
optimizers.RMSpropGraves,
optimizers.SGD,
optimizers.SMORMS3,
]
}))
class TestOptimizerHyperparameter(unittest.TestCase):
def setUp(self):
self.target = chainer.Link(w=())
def create(self, *args, **kwargs):
self.optimizer = self.impl(*args, **kwargs)
self.optimizer.setup(self.target)
def get_hyperparam(self, name):
return getattr(self.target.w.update_rule.hyperparam, name)
def test_hyperparams(self):
self.create()
default = self.optimizer.hyperparam.get_dict()
for name, default_value in six.iteritems(default):
print('test param "{}"'.format(name))
self.create()
self.assertEqual(self.get_hyperparam(name), default_value)
new_value = default_value + 0.1
self.create(**{name: new_value})
self.assertEqual(self.get_hyperparam(name), new_value)
testing.run_module(__name__, __file__)
|
<commit_before><commit_msg>Add a test of using non-default hyperparameter for each optimizer implementation<commit_after>
|
import unittest
import six
import chainer
from chainer import optimizers
from chainer import testing
@testing.parameterize(*testing.product({
'impl': [
optimizers.AdaDelta,
optimizers.AdaGrad,
optimizers.Adam,
optimizers.MomentumSGD,
optimizers.NesterovAG,
optimizers.RMSprop,
optimizers.RMSpropGraves,
optimizers.SGD,
optimizers.SMORMS3,
]
}))
class TestOptimizerHyperparameter(unittest.TestCase):
def setUp(self):
self.target = chainer.Link(w=())
def create(self, *args, **kwargs):
self.optimizer = self.impl(*args, **kwargs)
self.optimizer.setup(self.target)
def get_hyperparam(self, name):
return getattr(self.target.w.update_rule.hyperparam, name)
def test_hyperparams(self):
self.create()
default = self.optimizer.hyperparam.get_dict()
for name, default_value in six.iteritems(default):
print('test param "{}"'.format(name))
self.create()
self.assertEqual(self.get_hyperparam(name), default_value)
new_value = default_value + 0.1
self.create(**{name: new_value})
self.assertEqual(self.get_hyperparam(name), new_value)
testing.run_module(__name__, __file__)
|
Add a test of using non-default hyperparameter for each optimizer implementationimport unittest
import six
import chainer
from chainer import optimizers
from chainer import testing
@testing.parameterize(*testing.product({
'impl': [
optimizers.AdaDelta,
optimizers.AdaGrad,
optimizers.Adam,
optimizers.MomentumSGD,
optimizers.NesterovAG,
optimizers.RMSprop,
optimizers.RMSpropGraves,
optimizers.SGD,
optimizers.SMORMS3,
]
}))
class TestOptimizerHyperparameter(unittest.TestCase):
def setUp(self):
self.target = chainer.Link(w=())
def create(self, *args, **kwargs):
self.optimizer = self.impl(*args, **kwargs)
self.optimizer.setup(self.target)
def get_hyperparam(self, name):
return getattr(self.target.w.update_rule.hyperparam, name)
def test_hyperparams(self):
self.create()
default = self.optimizer.hyperparam.get_dict()
for name, default_value in six.iteritems(default):
print('test param "{}"'.format(name))
self.create()
self.assertEqual(self.get_hyperparam(name), default_value)
new_value = default_value + 0.1
self.create(**{name: new_value})
self.assertEqual(self.get_hyperparam(name), new_value)
testing.run_module(__name__, __file__)
|
<commit_before><commit_msg>Add a test of using non-default hyperparameter for each optimizer implementation<commit_after>import unittest
import six
import chainer
from chainer import optimizers
from chainer import testing
@testing.parameterize(*testing.product({
'impl': [
optimizers.AdaDelta,
optimizers.AdaGrad,
optimizers.Adam,
optimizers.MomentumSGD,
optimizers.NesterovAG,
optimizers.RMSprop,
optimizers.RMSpropGraves,
optimizers.SGD,
optimizers.SMORMS3,
]
}))
class TestOptimizerHyperparameter(unittest.TestCase):
def setUp(self):
self.target = chainer.Link(w=())
def create(self, *args, **kwargs):
self.optimizer = self.impl(*args, **kwargs)
self.optimizer.setup(self.target)
def get_hyperparam(self, name):
return getattr(self.target.w.update_rule.hyperparam, name)
def test_hyperparams(self):
self.create()
default = self.optimizer.hyperparam.get_dict()
for name, default_value in six.iteritems(default):
print('test param "{}"'.format(name))
self.create()
self.assertEqual(self.get_hyperparam(name), default_value)
new_value = default_value + 0.1
self.create(**{name: new_value})
self.assertEqual(self.get_hyperparam(name), new_value)
testing.run_module(__name__, __file__)
|
|
94459df7f3abc81c2b66e4ea8bf60eebf387de31
|
votes/urls.py
|
votes/urls.py
|
from django.conf.urls import include, url
from django.views.generic import TemplateView
from votes.views import VoteView
urlpatterns = [
url(r'^(?P<vote_name>[\w-]+)$', VoteView.as_view()),
]
|
Add URL scheme for votes app
|
Add URL scheme for votes app
|
Python
|
mit
|
kuboschek/jay,kuboschek/jay,kuboschek/jay,OpenJUB/jay,OpenJUB/jay,OpenJUB/jay
|
Add URL scheme for votes app
|
from django.conf.urls import include, url
from django.views.generic import TemplateView
from votes.views import VoteView
urlpatterns = [
url(r'^(?P<vote_name>[\w-]+)$', VoteView.as_view()),
]
|
<commit_before><commit_msg>Add URL scheme for votes app<commit_after>
|
from django.conf.urls import include, url
from django.views.generic import TemplateView
from votes.views import VoteView
urlpatterns = [
url(r'^(?P<vote_name>[\w-]+)$', VoteView.as_view()),
]
|
Add URL scheme for votes appfrom django.conf.urls import include, url
from django.views.generic import TemplateView
from votes.views import VoteView
urlpatterns = [
url(r'^(?P<vote_name>[\w-]+)$', VoteView.as_view()),
]
|
<commit_before><commit_msg>Add URL scheme for votes app<commit_after>from django.conf.urls import include, url
from django.views.generic import TemplateView
from votes.views import VoteView
urlpatterns = [
url(r'^(?P<vote_name>[\w-]+)$', VoteView.as_view()),
]
|
|
3715583d29374ca8a1d041319385f56431f4e477
|
zephyr/management/commands/expunge_db.py
|
zephyr/management/commands/expunge_db.py
|
from django.core.management.base import BaseCommand
from zephyr.retention_policy import get_UserMessages_to_expunge
from zephyr.models import Message
class Command(BaseCommand):
help = ('Expunge old UserMessages and Messages from the database, '
+ 'according to the retention policy.')
def handle(self, *args, **kwargs):
get_UserMessages_to_expunge().delete()
Message.remove_unreachable()
|
Implement a command to expunge old UserMessages and Messages from the database
|
Implement a command to expunge old UserMessages and Messages from the database
(imported from commit a4873dfa8737c483411d12f30daaebebebf859f9)
|
Python
|
apache-2.0
|
grave-w-grave/zulip,wavelets/zulip,kou/zulip,krtkmj/zulip,alliejones/zulip,tiansiyuan/zulip,jimmy54/zulip,Cheppers/zulip,brockwhittaker/zulip,dnmfarrell/zulip,AZtheAsian/zulip,arpitpanwar/zulip,Galexrt/zulip,jonesgithub/zulip,dxq-git/zulip,proliming/zulip,arpitpanwar/zulip,niftynei/zulip,Gabriel0402/zulip,showell/zulip,natanovia/zulip,Drooids/zulip,brockwhittaker/zulip,Diptanshu8/zulip,Batterfii/zulip,hafeez3000/zulip,willingc/zulip,showell/zulip,krtkmj/zulip,developerfm/zulip,wavelets/zulip,jackrzhang/zulip,bastianh/zulip,adnanh/zulip,timabbott/zulip,luyifan/zulip,glovebx/zulip,brainwane/zulip,krtkmj/zulip,andersk/zulip,ericzhou2008/zulip,dotcool/zulip,isht3/zulip,Drooids/zulip,zachallaun/zulip,sharmaeklavya2/zulip,luyifan/zulip,karamcnair/zulip,ufosky-server/zulip,amanharitsh123/zulip,zulip/zulip,themass/zulip,ApsOps/zulip,AZtheAsian/zulip,jerryge/zulip,Galexrt/zulip,he15his/zulip,KingxBanana/zulip,wangdeshui/zulip,JanzTam/zulip,voidException/zulip,gkotian/zulip,PaulPetring/zulip,peiwei/zulip,bluesea/zulip,jainayush975/zulip,saitodisse/zulip,tiansiyuan/zulip,alliejones/zulip,Gabriel0402/zulip,easyfmxu/zulip,jackrzhang/zulip,ApsOps/zulip,he15his/zulip,sonali0901/zulip,deer-hope/zulip,hengqujushi/zulip,gigawhitlocks/zulip,timabbott/zulip,rht/zulip,dattatreya303/zulip,amyliu345/zulip,armooo/zulip,voidException/zulip,kaiyuanheshang/zulip,armooo/zulip,niftynei/zulip,sonali0901/zulip,moria/zulip,paxapy/zulip,christi3k/zulip,amallia/zulip,mohsenSy/zulip,ryanbackman/zulip,avastu/zulip,andersk/zulip,dawran6/zulip,bastianh/zulip,joshisa/zulip,ahmadassaf/zulip,ApsOps/zulip,adnanh/zulip,ahmadassaf/zulip,JanzTam/zulip,fw1121/zulip,zofuthan/zulip,Vallher/zulip,LAndreas/zulip,wdaher/zulip,tbutter/zulip,mdavid/zulip,Batterfii/zulip,akuseru/zulip,jphilipsen05/zulip,gkotian/zulip,arpith/zulip,Cheppers/zulip,MayB/zulip,yocome/zulip,cosmicAsymmetry/zulip,wavelets/zulip,Qgap/zulip,shaunstanislaus/zulip,amallia/zulip,vikas-parashar/zulip,amallia/zulip,avastu/zulip,showell/zulip,ryansnowboarder/zulip,babbage/zulip,zacps/zulip,Galexrt/zulip,zorojean/zulip,aps-sids/zulip,ashwinirudrappa/zulip,souravbadami/zulip,vikas-parashar/zulip,bluesea/zulip,RobotCaleb/zulip,voidException/zulip,natanovia/zulip,timabbott/zulip,RobotCaleb/zulip,stamhe/zulip,bluesea/zulip,udxxabp/zulip,glovebx/zulip,johnny9/zulip,swinghu/zulip,souravbadami/zulip,MayB/zulip,synicalsyntax/zulip,dhcrzf/zulip,udxxabp/zulip,so0k/zulip,bastianh/zulip,xuanhan863/zulip,m1ssou/zulip,kaiyuanheshang/zulip,grave-w-grave/zulip,luyifan/zulip,peguin40/zulip,alliejones/zulip,jrowan/zulip,hackerkid/zulip,luyifan/zulip,bitemyapp/zulip,isht3/zulip,MariaFaBella85/zulip,ApsOps/zulip,joyhchen/zulip,Galexrt/zulip,aps-sids/zulip,vabs22/zulip,johnny9/zulip,sup95/zulip,ryansnowboarder/zulip,saitodisse/zulip,tdr130/zulip,samatdav/zulip,LeeRisk/zulip,firstblade/zulip,jeffcao/zulip,peiwei/zulip,dnmfarrell/zulip,LAndreas/zulip,LeeRisk/zulip,mdavid/zulip,ahmadassaf/zulip,Frouk/zulip,LeeRisk/zulip,so0k/zulip,rishig/zulip,wweiradio/zulip,zhaoweigg/zulip,noroot/zulip,gkotian/zulip,vabs22/zulip,Frouk/zulip,jessedhillon/zulip,xuxiao/zulip,mahim97/zulip,pradiptad/zulip,xuanhan863/zulip,deer-hope/zulip,mohsenSy/zulip,andersk/zulip,peguin40/zulip,shubhamdhama/zulip,shubhamdhama/zulip,ufosky-server/zulip,levixie/zulip,levixie/zulip,amanharitsh123/zulip,avastu/zulip,MayB/zulip,shaunstanislaus/zulip,brainwane/zulip,eastlhu/zulip,calvinleenyc/zulip,PhilSk/zulip,LAndreas/zulip,MariaFaBella85/zulip,verma-varsha/zulip,dawran6/zulip,AZtheAsian/zulip,dxq-git/zulip,shubhamdhama/zulip,he15his/z
ulip,esander91/zulip,bowlofstew/zulip,EasonYi/zulip,alliejones/zulip,Batterfii/zulip,suxinde2009/zulip,lfranchi/zulip,hayderimran7/zulip,zacps/zulip,esander91/zulip,joyhchen/zulip,Diptanshu8/zulip,peguin40/zulip,cosmicAsymmetry/zulip,Juanvulcano/zulip,codeKonami/zulip,vaidap/zulip,ericzhou2008/zulip,mahim97/zulip,mansilladev/zulip,reyha/zulip,hafeez3000/zulip,Vallher/zulip,vaidap/zulip,andersk/zulip,showell/zulip,ryanbackman/zulip,brainwane/zulip,Qgap/zulip,Suninus/zulip,zulip/zulip,tiansiyuan/zulip,kou/zulip,umkay/zulip,stamhe/zulip,bitemyapp/zulip,nicholasbs/zulip,arpith/zulip,voidException/zulip,zhaoweigg/zulip,Frouk/zulip,udxxabp/zulip,calvinleenyc/zulip,so0k/zulip,xuanhan863/zulip,technicalpickles/zulip,willingc/zulip,isht3/zulip,souravbadami/zulip,Qgap/zulip,PaulPetring/zulip,akuseru/zulip,joyhchen/zulip,Jianchun1/zulip,mdavid/zulip,Batterfii/zulip,mahim97/zulip,bitemyapp/zulip,ApsOps/zulip,jimmy54/zulip,ryansnowboarder/zulip,joyhchen/zulip,tbutter/zulip,xuxiao/zulip,jerryge/zulip,babbage/zulip,ApsOps/zulip,bssrdf/zulip,gigawhitlocks/zulip,souravbadami/zulip,blaze225/zulip,willingc/zulip,joyhchen/zulip,timabbott/zulip,alliejones/zulip,zhaoweigg/zulip,levixie/zulip,technicalpickles/zulip,swinghu/zulip,dawran6/zulip,suxinde2009/zulip,vaidap/zulip,fw1121/zulip,dotcool/zulip,bssrdf/zulip,littledogboy/zulip,amallia/zulip,luyifan/zulip,shrikrishnaholla/zulip,luyifan/zulip,atomic-labs/zulip,wavelets/zulip,wweiradio/zulip,JanzTam/zulip,jphilipsen05/zulip,avastu/zulip,dxq-git/zulip,TigorC/zulip,AZtheAsian/zulip,bowlofstew/zulip,verma-varsha/zulip,Vallher/zulip,qq1012803704/zulip,qq1012803704/zulip,esander91/zulip,so0k/zulip,amanharitsh123/zulip,christi3k/zulip,susansls/zulip,shrikrishnaholla/zulip,ipernet/zulip,karamcnair/zulip,adnanh/zulip,KJin99/zulip,wweiradio/zulip,avastu/zulip,punchagan/zulip,christi3k/zulip,jrowan/zulip,aakash-cr7/zulip,vikas-parashar/zulip,KJin99/zulip,yocome/zulip,zwily/zulip,dawran6/zulip,technicalpickles/zulip,wdaher/zulip,rht/zulip,sonali0901/zulip,ashwinirudrappa/zulip,johnnygaddarr/zulip,yocome/zulip,dnmfarrell/zulip,themass/zulip,isht3/zulip,paxapy/zulip,jimmy54/zulip,vikas-parashar/zulip,ikasumiwt/zulip,saitodisse/zulip,natanovia/zulip,DazWorrall/zulip,zulip/zulip,sonali0901/zulip,bluesea/zulip,zofuthan/zulip,fw1121/zulip,JanzTam/zulip,xuxiao/zulip,atomic-labs/zulip,brockwhittaker/zulip,MariaFaBella85/zulip,jrowan/zulip,hafeez3000/zulip,tdr130/zulip,Diptanshu8/zulip,hayderimran7/zulip,samatdav/zulip,hafeez3000/zulip,xuanhan863/zulip,so0k/zulip,jackrzhang/zulip,jessedhillon/zulip,krtkmj/zulip,johnnygaddarr/zulip,peiwei/zulip,firstblade/zulip,eeshangarg/zulip,tdr130/zulip,brockwhittaker/zulip,pradiptad/zulip,ahmadassaf/zulip,mohsenSy/zulip,SmartPeople/zulip,RobotCaleb/zulip,susansls/zulip,alliejones/zulip,saitodisse/zulip,saitodisse/zulip,Diptanshu8/zulip,reyha/zulip,j831/zulip,bowlofstew/zulip,Suninus/zulip,wangdeshui/zulip,deer-hope/zulip,seapasulli/zulip,zwily/zulip,sharmaeklavya2/zulip,ashwinirudrappa/zulip,moria/zulip,jrowan/zulip,gkotian/zulip,hengqujushi/zulip,brainwane/zulip,ikasumiwt/zulip,themass/zulip,mdavid/zulip,arpitpanwar/zulip,armooo/zulip,PhilSk/zulip,souravbadami/zulip,stamhe/zulip,brockwhittaker/zulip,hj3938/zulip,wdaher/zulip,susansls/zulip,zofuthan/zulip,Galexrt/zulip,schatt/zulip,Frouk/zulip,zofuthan/zulip,ryanbackman/zulip,LAndreas/zulip,umkay/zulip,zhaoweigg/zulip,tiansiyuan/zulip,jonesgithub/zulip,easyfmxu/zulip,j831/zulip,he15his/zulip,KingxBanana/zulip,hafeez3000/zulip,vaidap/zulip,hj3938/zulip,zachallaun/zulip,Vallher/zulip,Suninus/zuli
p,EasonYi/zulip,developerfm/zulip,noroot/zulip,ikasumiwt/zulip,umkay/zulip,zacps/zulip,vakila/zulip,ufosky-server/zulip,calvinleenyc/zulip,hayderimran7/zulip,thomasboyt/zulip,moria/zulip,kou/zulip,grave-w-grave/zulip,arpitpanwar/zulip,themass/zulip,ryansnowboarder/zulip,m1ssou/zulip,voidException/zulip,peguin40/zulip,dotcool/zulip,wangdeshui/zulip,m1ssou/zulip,nicholasbs/zulip,jainayush975/zulip,amyliu345/zulip,SmartPeople/zulip,dnmfarrell/zulip,ufosky-server/zulip,SmartPeople/zulip,proliming/zulip,huangkebo/zulip,wdaher/zulip,jackrzhang/zulip,zorojean/zulip,gigawhitlocks/zulip,proliming/zulip,johnny9/zulip,hackerkid/zulip,aliceriot/zulip,EasonYi/zulip,bastianh/zulip,babbage/zulip,JPJPJPOPOP/zulip,eastlhu/zulip,themass/zulip,jeffcao/zulip,RobotCaleb/zulip,aakash-cr7/zulip,mahim97/zulip,hackerkid/zulip,Batterfii/zulip,codeKonami/zulip,jonesgithub/zulip,adnanh/zulip,shubhamdhama/zulip,developerfm/zulip,brockwhittaker/zulip,hafeez3000/zulip,proliming/zulip,vakila/zulip,zorojean/zulip,ericzhou2008/zulip,zachallaun/zulip,lfranchi/zulip,glovebx/zulip,AZtheAsian/zulip,aliceriot/zulip,bowlofstew/zulip,Drooids/zulip,zhaoweigg/zulip,kokoar/zulip,mansilladev/zulip,sharmaeklavya2/zulip,swinghu/zulip,MariaFaBella85/zulip,mansilladev/zulip,wavelets/zulip,MayB/zulip,stamhe/zulip,KingxBanana/zulip,udxxabp/zulip,guiquanz/zulip,zulip/zulip,TigorC/zulip,dwrpayne/zulip,aliceriot/zulip,jonesgithub/zulip,umkay/zulip,willingc/zulip,bluesea/zulip,johnny9/zulip,shrikrishnaholla/zulip,natanovia/zulip,esander91/zulip,KJin99/zulip,Qgap/zulip,rht/zulip,tommyip/zulip,Frouk/zulip,tommyip/zulip,tbutter/zulip,jessedhillon/zulip,arpith/zulip,tiansiyuan/zulip,codeKonami/zulip,firstblade/zulip,jessedhillon/zulip,kokoar/zulip,PaulPetring/zulip,susansls/zulip,peiwei/zulip,tommyip/zulip,ericzhou2008/zulip,udxxabp/zulip,itnihao/zulip,jimmy54/zulip,verma-varsha/zulip,eeshangarg/zulip,proliming/zulip,krtkmj/zulip,kaiyuanheshang/zulip,jonesgithub/zulip,Juanvulcano/zulip,Drooids/zulip,DazWorrall/zulip,verma-varsha/zulip,dattatreya303/zulip,huangkebo/zulip,akuseru/zulip,mahim97/zulip,ryanbackman/zulip,akuseru/zulip,andersk/zulip,blaze225/zulip,m1ssou/zulip,PhilSk/zulip,wweiradio/zulip,zachallaun/zulip,thomasboyt/zulip,firstblade/zulip,adnanh/zulip,firstblade/zulip,dotcool/zulip,tdr130/zulip,Batterfii/zulip,verma-varsha/zulip,dattatreya303/zulip,aps-sids/zulip,zacps/zulip,itnihao/zulip,karamcnair/zulip,zhaoweigg/zulip,swinghu/zulip,seapasulli/zulip,jainayush975/zulip,mohsenSy/zulip,wdaher/zulip,vabs22/zulip,ryanbackman/zulip,aps-sids/zulip,dxq-git/zulip,moria/zulip,jeffcao/zulip,ipernet/zulip,fw1121/zulip,Qgap/zulip,suxinde2009/zulip,samatdav/zulip,Cheppers/zulip,grave-w-grave/zulip,cosmicAsymmetry/zulip,wweiradio/zulip,mansilladev/zulip,dxq-git/zulip,huangkebo/zulip,huangkebo/zulip,MariaFaBella85/zulip,amallia/zulip,kaiyuanheshang/zulip,dnmfarrell/zulip,hj3938/zulip,RobotCaleb/zulip,paxapy/zulip,natanovia/zulip,udxxabp/zulip,babbage/zulip,shaunstanislaus/zulip,shubhamdhama/zulip,schatt/zulip,dattatreya303/zulip,tbutter/zulip,vakila/zulip,guiquanz/zulip,sonali0901/zulip,eeshangarg/zulip,peguin40/zulip,paxapy/zulip,hustlzp/zulip,littledogboy/zulip,joyhchen/zulip,noroot/zulip,wdaher/zulip,aps-sids/zulip,xuanhan863/zulip,tommyip/zulip,swinghu/zulip,LeeRisk/zulip,jainayush975/zulip,moria/zulip,joshisa/zulip,PaulPetring/zulip,Cheppers/zulip,hustlzp/zulip,themass/zulip,levixie/zulip,bitemyapp/zulip,guiquanz/zulip,zorojean/zulip,arpith/zulip,natanovia/zulip,shaunstanislaus/zulip,themass/zulip,zorojean/zulip,he15his/zulip,Jianchun1/zulip,jimmy
54/zulip,dwrpayne/zulip,PaulPetring/zulip,yocome/zulip,mdavid/zulip,proliming/zulip,itnihao/zulip,aliceriot/zulip,kou/zulip,sharmaeklavya2/zulip,dwrpayne/zulip,codeKonami/zulip,vakila/zulip,nicholasbs/zulip,jainayush975/zulip,proliming/zulip,m1ssou/zulip,Cheppers/zulip,amanharitsh123/zulip,dattatreya303/zulip,huangkebo/zulip,ryanbackman/zulip,schatt/zulip,wangdeshui/zulip,aps-sids/zulip,joshisa/zulip,zwily/zulip,gigawhitlocks/zulip,KingxBanana/zulip,xuxiao/zulip,littledogboy/zulip,rishig/zulip,huangkebo/zulip,cosmicAsymmetry/zulip,littledogboy/zulip,luyifan/zulip,hackerkid/zulip,Juanvulcano/zulip,umkay/zulip,LAndreas/zulip,Frouk/zulip,peiwei/zulip,zorojean/zulip,jackrzhang/zulip,KJin99/zulip,rishig/zulip,willingc/zulip,susansls/zulip,voidException/zulip,eastlhu/zulip,wavelets/zulip,Vallher/zulip,armooo/zulip,SmartPeople/zulip,niftynei/zulip,wdaher/zulip,esander91/zulip,Diptanshu8/zulip,dwrpayne/zulip,ericzhou2008/zulip,punchagan/zulip,xuxiao/zulip,dwrpayne/zulip,eastlhu/zulip,Jianchun1/zulip,samatdav/zulip,KingxBanana/zulip,m1ssou/zulip,rht/zulip,mahim97/zulip,guiquanz/zulip,akuseru/zulip,mohsenSy/zulip,jphilipsen05/zulip,hj3938/zulip,thomasboyt/zulip,jimmy54/zulip,sup95/zulip,EasonYi/zulip,jrowan/zulip,Juanvulcano/zulip,niftynei/zulip,johnny9/zulip,dattatreya303/zulip,zhaoweigg/zulip,tbutter/zulip,niftynei/zulip,voidException/zulip,johnny9/zulip,gkotian/zulip,eastlhu/zulip,moria/zulip,atomic-labs/zulip,dhcrzf/zulip,bssrdf/zulip,DazWorrall/zulip,samatdav/zulip,JPJPJPOPOP/zulip,kou/zulip,Suninus/zulip,levixie/zulip,atomic-labs/zulip,j831/zulip,PhilSk/zulip,gigawhitlocks/zulip,synicalsyntax/zulip,zofuthan/zulip,tbutter/zulip,ashwinirudrappa/zulip,kokoar/zulip,JanzTam/zulip,tommyip/zulip,amanharitsh123/zulip,ipernet/zulip,lfranchi/zulip,hengqujushi/zulip,jerryge/zulip,ryansnowboarder/zulip,technicalpickles/zulip,ufosky-server/zulip,vakila/zulip,armooo/zulip,yocome/zulip,hackerkid/zulip,codeKonami/zulip,kou/zulip,alliejones/zulip,Diptanshu8/zulip,dwrpayne/zulip,RobotCaleb/zulip,johnnygaddarr/zulip,qq1012803704/zulip,jessedhillon/zulip,TigorC/zulip,ashwinirudrappa/zulip,yocome/zulip,hengqujushi/zulip,jackrzhang/zulip,Vallher/zulip,shrikrishnaholla/zulip,joshisa/zulip,bastianh/zulip,itnihao/zulip,synicalsyntax/zulip,timabbott/zulip,christi3k/zulip,EasonYi/zulip,timabbott/zulip,codeKonami/zulip,KJin99/zulip,pradiptad/zulip,dhcrzf/zulip,zofuthan/zulip,hj3938/zulip,ipernet/zulip,praveenaki/zulip,LeeRisk/zulip,TigorC/zulip,Cheppers/zulip,bowlofstew/zulip,Cheppers/zulip,christi3k/zulip,ikasumiwt/zulip,RobotCaleb/zulip,saitodisse/zulip,stamhe/zulip,synicalsyntax/zulip,punchagan/zulip,bssrdf/zulip,amyliu345/zulip,johnny9/zulip,xuanhan863/zulip,reyha/zulip,sharmaeklavya2/zulip,adnanh/zulip,zwily/zulip,fw1121/zulip,dnmfarrell/zulip,praveenaki/zulip,praveenaki/zulip,ikasumiwt/zulip,nicholasbs/zulip,TigorC/zulip,so0k/zulip,grave-w-grave/zulip,reyha/zulip,aakash-cr7/zulip,jimmy54/zulip,samatdav/zulip,amyliu345/zulip,shrikrishnaholla/zulip,vakila/zulip,rishig/zulip,arpith/zulip,johnnygaddarr/zulip,vabs22/zulip,MariaFaBella85/zulip,kokoar/zulip,SmartPeople/zulip,LeeRisk/zulip,firstblade/zulip,jeffcao/zulip,itnihao/zulip,karamcnair/zulip,avastu/zulip,vabs22/zulip,EasonYi/zulip,Gabriel0402/zulip,developerfm/zulip,deer-hope/zulip,umkay/zulip,DazWorrall/zulip,Drooids/zulip,arpitpanwar/zulip,dhcrzf/zulip,jerryge/zulip,gigawhitlocks/zulip,bssrdf/zulip,niftynei/zulip,JanzTam/zulip,mansilladev/zulip,karamcnair/zulip,noroot/zulip,tdr130/zulip,ahmadassaf/zulip,seapasulli/zulip,bluesea/zulip,mdavid/zulip,deer-hope/zulip,
punchagan/zulip,ahmadassaf/zulip,hengqujushi/zulip,stamhe/zulip,susansls/zulip,KJin99/zulip,hj3938/zulip,so0k/zulip,Qgap/zulip,jphilipsen05/zulip,joshisa/zulip,peiwei/zulip,noroot/zulip,huangkebo/zulip,moria/zulip,isht3/zulip,MariaFaBella85/zulip,gkotian/zulip,Suninus/zulip,arpitpanwar/zulip,easyfmxu/zulip,tommyip/zulip,dhcrzf/zulip,wavelets/zulip,itnihao/zulip,cosmicAsymmetry/zulip,dawran6/zulip,vikas-parashar/zulip,esander91/zulip,yuvipanda/zulip,krtkmj/zulip,Jianchun1/zulip,zwily/zulip,Frouk/zulip,technicalpickles/zulip,xuxiao/zulip,hustlzp/zulip,technicalpickles/zulip,ashwinirudrappa/zulip,ApsOps/zulip,calvinleenyc/zulip,Gabriel0402/zulip,m1ssou/zulip,vakila/zulip,tiansiyuan/zulip,bowlofstew/zulip,pradiptad/zulip,punchagan/zulip,jrowan/zulip,itnihao/zulip,jphilipsen05/zulip,natanovia/zulip,hustlzp/zulip,paxapy/zulip,wweiradio/zulip,arpith/zulip,seapasulli/zulip,suxinde2009/zulip,blaze225/zulip,willingc/zulip,KingxBanana/zulip,johnnygaddarr/zulip,seapasulli/zulip,kaiyuanheshang/zulip,yuvipanda/zulip,eastlhu/zulip,hj3938/zulip,dawran6/zulip,rht/zulip,akuseru/zulip,shaunstanislaus/zulip,timabbott/zulip,LAndreas/zulip,calvinleenyc/zulip,jessedhillon/zulip,easyfmxu/zulip,synicalsyntax/zulip,SmartPeople/zulip,jerryge/zulip,peiwei/zulip,bastianh/zulip,krtkmj/zulip,ikasumiwt/zulip,zorojean/zulip,udxxabp/zulip,aakash-cr7/zulip,ryansnowboarder/zulip,dotcool/zulip,dotcool/zulip,cosmicAsymmetry/zulip,punchagan/zulip,nicholasbs/zulip,qq1012803704/zulip,rht/zulip,zacps/zulip,praveenaki/zulip,praveenaki/zulip,PhilSk/zulip,punchagan/zulip,bluesea/zulip,aakash-cr7/zulip,souravbadami/zulip,LAndreas/zulip,sup95/zulip,kaiyuanheshang/zulip,praveenaki/zulip,aliceriot/zulip,JPJPJPOPOP/zulip,rishig/zulip,shubhamdhama/zulip,Gabriel0402/zulip,swinghu/zulip,showell/zulip,wangdeshui/zulip,sup95/zulip,paxapy/zulip,amallia/zulip,j831/zulip,shrikrishnaholla/zulip,nicholasbs/zulip,Qgap/zulip,seapasulli/zulip,PhilSk/zulip,easyfmxu/zulip,zulip/zulip,suxinde2009/zulip,dhcrzf/zulip,developerfm/zulip,fw1121/zulip,xuxiao/zulip,armooo/zulip,kokoar/zulip,hustlzp/zulip,tdr130/zulip,zulip/zulip,ipernet/zulip,gkotian/zulip,j831/zulip,hustlzp/zulip,sharmaeklavya2/zulip,saitodisse/zulip,wweiradio/zulip,Galexrt/zulip,zachallaun/zulip,dxq-git/zulip,dotcool/zulip,babbage/zulip,showell/zulip,hengqujushi/zulip,xuanhan863/zulip,andersk/zulip,eeshangarg/zulip,brainwane/zulip,aliceriot/zulip,littledogboy/zulip,levixie/zulip,eeshangarg/zulip,PaulPetring/zulip,fw1121/zulip,ericzhou2008/zulip,tiansiyuan/zulip,Gabriel0402/zulip,bastianh/zulip,Galexrt/zulip,guiquanz/zulip,akuseru/zulip,hayderimran7/zulip,rishig/zulip,dwrpayne/zulip,johnnygaddarr/zulip,shubhamdhama/zulip,aakash-cr7/zulip,LeeRisk/zulip,hustlzp/zulip,hayderimran7/zulip,lfranchi/zulip,shrikrishnaholla/zulip,wangdeshui/zulip,levixie/zulip,yuvipanda/zulip,joshisa/zulip,deer-hope/zulip,bssrdf/zulip,KJin99/zulip,yuvipanda/zulip,TigorC/zulip,developerfm/zulip,hafeez3000/zulip,ericzhou2008/zulip,MayB/zulip,Suninus/zulip,swinghu/zulip,vaidap/zulip,AZtheAsian/zulip,Juanvulcano/zulip,suxinde2009/zulip,pradiptad/zulip,thomasboyt/zulip,schatt/zulip,developerfm/zulip,tbutter/zulip,jphilipsen05/zulip,zulip/zulip,glovebx/zulip,tdr130/zulip,schatt/zulip,j831/zulip,wangdeshui/zulip,rishig/zulip,Batterfii/zulip,adnanh/zulip,mansilladev/zulip,DazWorrall/zulip,Juanvulcano/zulip,firstblade/zulip,blaze225/zulip,joshisa/zulip,ufosky-server/zulip,blaze225/zulip,thomasboyt/zulip,seapasulli/zulip,verma-varsha/zulip,thomasboyt/zulip,hayderimran7/zulip,grave-w-grave/zulip,eeshangarg/zulip,qq1012803704/zulip
,atomic-labs/zulip,mansilladev/zulip,JanzTam/zulip,esander91/zulip,karamcnair/zulip,ryansnowboarder/zulip,ipernet/zulip,amallia/zulip,MayB/zulip,zacps/zulip,codeKonami/zulip,atomic-labs/zulip,babbage/zulip,technicalpickles/zulip,karamcnair/zulip,hayderimran7/zulip,thomasboyt/zulip,deer-hope/zulip,brainwane/zulip,JPJPJPOPOP/zulip,amyliu345/zulip,yuvipanda/zulip,kou/zulip,jonesgithub/zulip,yocome/zulip,MayB/zulip,isht3/zulip,christi3k/zulip,jessedhillon/zulip,kokoar/zulip,amyliu345/zulip,bitemyapp/zulip,andersk/zulip,zachallaun/zulip,mohsenSy/zulip,Jianchun1/zulip,willingc/zulip,eeshangarg/zulip,ikasumiwt/zulip,he15his/zulip,guiquanz/zulip,shaunstanislaus/zulip,yuvipanda/zulip,tommyip/zulip,jeffcao/zulip,hackerkid/zulip,vikas-parashar/zulip,atomic-labs/zulip,rht/zulip,synicalsyntax/zulip,bssrdf/zulip,DazWorrall/zulip,lfranchi/zulip,praveenaki/zulip,Jianchun1/zulip,jainayush975/zulip,suxinde2009/zulip,jerryge/zulip,glovebx/zulip,sup95/zulip,littledogboy/zulip,zwily/zulip,amanharitsh123/zulip,Drooids/zulip,jeffcao/zulip,ashwinirudrappa/zulip,sup95/zulip,littledogboy/zulip,jerryge/zulip,qq1012803704/zulip,JPJPJPOPOP/zulip,aliceriot/zulip,schatt/zulip,lfranchi/zulip,arpitpanwar/zulip,jeffcao/zulip,johnnygaddarr/zulip,babbage/zulip,ahmadassaf/zulip,armooo/zulip,aps-sids/zulip,calvinleenyc/zulip,glovebx/zulip,dhcrzf/zulip,Suninus/zulip,dxq-git/zulip,showell/zulip,DazWorrall/zulip,schatt/zulip,noroot/zulip,bowlofstew/zulip,zwily/zulip,vabs22/zulip,vaidap/zulip,avastu/zulip,bitemyapp/zulip,EasonYi/zulip,dnmfarrell/zulip,guiquanz/zulip,umkay/zulip,hackerkid/zulip,Drooids/zulip,PaulPetring/zulip,reyha/zulip,kokoar/zulip,qq1012803704/zulip,he15his/zulip,yuvipanda/zulip,mdavid/zulip,noroot/zulip,nicholasbs/zulip,lfranchi/zulip,brainwane/zulip,blaze225/zulip,synicalsyntax/zulip,pradiptad/zulip,reyha/zulip,jonesgithub/zulip,easyfmxu/zulip,kaiyuanheshang/zulip,jackrzhang/zulip,easyfmxu/zulip,JPJPJPOPOP/zulip,ufosky-server/zulip,stamhe/zulip,peguin40/zulip,ipernet/zulip,glovebx/zulip,Vallher/zulip,gigawhitlocks/zulip,bitemyapp/zulip,sonali0901/zulip,eastlhu/zulip,Gabriel0402/zulip,shaunstanislaus/zulip,pradiptad/zulip,hengqujushi/zulip,zachallaun/zulip,zofuthan/zulip
|
Implement a command to expunge old UserMessages and Messages from the database
(imported from commit a4873dfa8737c483411d12f30daaebebebf859f9)
|
from django.core.management.base import BaseCommand
from zephyr.retention_policy import get_UserMessages_to_expunge
from zephyr.models import Message
class Command(BaseCommand):
help = ('Expunge old UserMessages and Messages from the database, '
+ 'according to the retention policy.')
def handle(self, *args, **kwargs):
get_UserMessages_to_expunge().delete()
Message.remove_unreachable()
|
<commit_before><commit_msg>Implement a command to expunge old UserMessages and Messages from the database
(imported from commit a4873dfa8737c483411d12f30daaebebebf859f9)<commit_after>
|
from django.core.management.base import BaseCommand
from zephyr.retention_policy import get_UserMessages_to_expunge
from zephyr.models import Message
class Command(BaseCommand):
help = ('Expunge old UserMessages and Messages from the database, '
+ 'according to the retention policy.')
def handle(self, *args, **kwargs):
get_UserMessages_to_expunge().delete()
Message.remove_unreachable()
|
Implement a command to expunge old UserMessages and Messages from the database
(imported from commit a4873dfa8737c483411d12f30daaebebebf859f9)from django.core.management.base import BaseCommand
from zephyr.retention_policy import get_UserMessages_to_expunge
from zephyr.models import Message
class Command(BaseCommand):
help = ('Expunge old UserMessages and Messages from the database, '
+ 'according to the retention policy.')
def handle(self, *args, **kwargs):
get_UserMessages_to_expunge().delete()
Message.remove_unreachable()
|
<commit_before><commit_msg>Implement a command to expunge old UserMessages and Messages from the database
(imported from commit a4873dfa8737c483411d12f30daaebebebf859f9)<commit_after>from django.core.management.base import BaseCommand
from zephyr.retention_policy import get_UserMessages_to_expunge
from zephyr.models import Message
class Command(BaseCommand):
help = ('Expunge old UserMessages and Messages from the database, '
+ 'according to the retention policy.')
def handle(self, *args, **kwargs):
get_UserMessages_to_expunge().delete()
Message.remove_unreachable()
|
|
523e818a9391151ce6f236ae44d558d2ee779851
|
pcm2wav.py
|
pcm2wav.py
|
#!/usr/bin/env python
import re
import os
import sys
import wave
if len(sys.argv) != 2:
print "Usage:%s pcm file" % (sys.argv[0])
print "A wave will be created use same name. For example, input file is a.pcm, a.wav will be generated"
sys.exit(1)
if not os.path.isfile(sys.argv[1]):
print "input param is not a file"
sys.exit(1)
fA = re.split("[.]", sys.argv[1])
if fA[-1] != "pcm":
print "input file is not a pcm file"
sys.exit(1)
pcmf = open(sys.argv[1], 'rb')
pcmdata = pcmf.read()
pcmf.close()
wavName = ".".join(fA[:-1])
wavName += ".wav"
wavfile = wave.open(wavName, 'wb')
wavfile.setparams((1, 2, 22050, 0, 'NONE', 'NONE'))
wavfile.writeframes(pcmdata)
wavfile.close()
|
Convert pcm files to wav files.
|
Convert pcm files to wav files.
|
Python
|
mit
|
JeffpanUK/NuPyTools,JeffpanUK/NuPyTools
|
Convert pcm files to wav files.
|
#!/usr/bin/env python
import re
import os
import sys
import wave
if len(sys.argv) != 2:
print "Usage:%s pcm file" % (sys.argv[0])
print "A wave will be created use same name. For example, input file is a.pcm, a.wav will be generated"
sys.exit(1)
if not os.path.isfile(sys.argv[1]):
print "input param is not a file"
sys.exit(1)
fA = re.split("[.]", sys.argv[1])
if fA[-1] != "pcm":
print "input file is not a pcm file"
sys.exit(1)
pcmf = open(sys.argv[1], 'rb')
pcmdata = pcmf.read()
pcmf.close()
wavName = ".".join(fA[:-1])
wavName += ".wav"
wavfile = wave.open(wavName, 'wb')
wavfile.setparams((1, 2, 22050, 0, 'NONE', 'NONE'))
wavfile.writeframes(pcmdata)
wavfile.close()
|
<commit_before><commit_msg>Convert pcm files to wav files.<commit_after>
|
#!/usr/bin/env python
import re
import os
import sys
import wave
if len(sys.argv) != 2:
print "Usage:%s pcm file" % (sys.argv[0])
print "A wave will be created use same name. For example, input file is a.pcm, a.wav will be generated"
sys.exit(1)
if not os.path.isfile(sys.argv[1]):
print "input param is not a file"
sys.exit(1)
fA = re.split("[.]", sys.argv[1])
if fA[-1] != "pcm":
print "input file is not a pcm file"
sys.exit(1)
pcmf = open(sys.argv[1], 'rb')
pcmdata = pcmf.read()
pcmf.close()
wavName = ".".join(fA[:-1])
wavName += ".wav"
wavfile = wave.open(wavName, 'wb')
wavfile.setparams((1, 2, 22050, 0, 'NONE', 'NONE'))
wavfile.writeframes(pcmdata)
wavfile.close()
|
Convert pcm files to wav files.#!/usr/bin/env python
import re
import os
import sys
import wave
if len(sys.argv) != 2:
print "Usage:%s pcm file" % (sys.argv[0])
print "A wave will be created use same name. For example, input file is a.pcm, a.wav will be generated"
sys.exit(1)
if not os.path.isfile(sys.argv[1]):
print "input param is not a file"
sys.exit(1)
fA = re.split("[.]", sys.argv[1])
if fA[-1] != "pcm":
print "input file is not a pcm file"
sys.exit(1)
pcmf = open(sys.argv[1], 'rb')
pcmdata = pcmf.read()
pcmf.close()
wavName = ".".join(fA[:-1])
wavName += ".wav"
wavfile = wave.open(wavName, 'wb')
wavfile.setparams((1, 2, 22050, 0, 'NONE', 'NONE'))
wavfile.writeframes(pcmdata)
wavfile.close()
|
<commit_before><commit_msg>Convert pcm files to wav files.<commit_after>#!/usr/bin/env python
import re
import os
import sys
import wave
if len(sys.argv) != 2:
print "Usage:%s pcm file" % (sys.argv[0])
print "A wave will be created use same name. For example, input file is a.pcm, a.wav will be generated"
sys.exit(1)
if not os.path.isfile(sys.argv[1]):
print "input param is not a file"
sys.exit(1)
fA = re.split("[.]", sys.argv[1])
if fA[-1] != "pcm":
print "input file is not a pcm file"
sys.exit(1)
pcmf = open(sys.argv[1], 'rb')
pcmdata = pcmf.read()
pcmf.close()
wavName = ".".join(fA[:-1])
wavName += ".wav"
wavfile = wave.open(wavName, 'wb')
wavfile.setparams((1, 2, 22050, 0, 'NONE', 'NONE'))
wavfile.writeframes(pcmdata)
wavfile.close()
|
|
188d59f6a37cd5b8420bdcf93a5c5dd51493d95d
|
tests/test_pipeline_genome.py
|
tests/test_pipeline_genome.py
|
"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_genome import process_genome
@pytest.mark.genome
@pytest.mark.pipeline
def test_tb_pipeline():
"""
Test case to ensure that the ChIP-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \
--lang=python \
--library_path=${HOME}/bin \
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
--log_level=debug \
process_chipseq.py \
--taxon_id 9606 \
--genome /<dataset_dir>/Human.GCA_000001405.22.fasta \
--assembly GRCh38 \
--file /<dataset_dir>/DRR000150.22.fastq
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
genome_handle = process_genome()
genome_files, genome_meta = genome_handle.run(
[resource_path + 'macs2.Human.GCA_000001405.22.fasta'],
{'assembly' : 'GRCh38'},
[]
)
print(genome_files[6:11])
# Add tests for all files created
for f_out in genome_files:
print("GENOME RESULTS FILE:", f_out)
assert os.path.isfile(f_out) is True
assert os.path.getsize(f_out) > 0
|
Test the pipeline code for genome indexing
|
Test the pipeline code for genome indexing
|
Python
|
apache-2.0
|
Multiscale-Genomics/mg-process-fastq,Multiscale-Genomics/mg-process-fastq,Multiscale-Genomics/mg-process-fastq
|
Test the pipeline code for genome indexing
|
"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_genome import process_genome
@pytest.mark.genome
@pytest.mark.pipeline
def test_tb_pipeline():
"""
Test case to ensure that the ChIP-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \
--lang=python \
--library_path=${HOME}/bin \
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
--log_level=debug \
process_chipseq.py \
--taxon_id 9606 \
--genome /<dataset_dir>/Human.GCA_000001405.22.fasta \
--assembly GRCh38 \
--file /<dataset_dir>/DRR000150.22.fastq
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
genome_handle = process_genome()
genome_files, genome_meta = genome_handle.run(
[resource_path + 'macs2.Human.GCA_000001405.22.fasta'],
{'assembly' : 'GRCh38'},
[]
)
print(genome_files[6:11])
# Add tests for all files created
for f_out in genome_files:
print("GENOME RESULTS FILE:", f_out)
assert os.path.isfile(f_out) is True
assert os.path.getsize(f_out) > 0
|
<commit_before><commit_msg>Test the pipeline code for genome indexing<commit_after>
|
"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_genome import process_genome
@pytest.mark.genome
@pytest.mark.pipeline
def test_tb_pipeline():
"""
Test case to ensure that the ChIP-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \
--lang=python \
--library_path=${HOME}/bin \
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
--log_level=debug \
process_chipseq.py \
--taxon_id 9606 \
--genome /<dataset_dir>/Human.GCA_000001405.22.fasta \
--assembly GRCh38 \
--file /<dataset_dir>/DRR000150.22.fastq
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
genome_handle = process_genome()
genome_files, genome_meta = genome_handle.run(
[resource_path + 'macs2.Human.GCA_000001405.22.fasta'],
{'assembly' : 'GRCh38'},
[]
)
print(genome_files[6:11])
# Add tests for all files created
for f_out in genome_files:
print("GENOME RESULTS FILE:", f_out)
assert os.path.isfile(f_out) is True
assert os.path.getsize(f_out) > 0
|
Test the pipeline code for genome indexing"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_genome import process_genome
@pytest.mark.genome
@pytest.mark.pipeline
def test_tb_pipeline():
"""
Test case to ensure that the ChIP-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \
--lang=python \
--library_path=${HOME}/bin \
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
--log_level=debug \
process_chipseq.py \
--taxon_id 9606 \
--genome /<dataset_dir>/Human.GCA_000001405.22.fasta \
--assembly GRCh38 \
--file /<dataset_dir>/DRR000150.22.fastq
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
genome_handle = process_genome()
genome_files, genome_meta = genome_handle.run(
[resource_path + 'macs2.Human.GCA_000001405.22.fasta'],
{'assembly' : 'GRCh38'},
[]
)
print(genome_files[6:11])
# Add tests for all files created
for f_out in genome_files:
print("GENOME RESULTS FILE:", f_out)
assert os.path.isfile(f_out) is True
assert os.path.getsize(f_out) > 0
|
<commit_before><commit_msg>Test the pipeline code for genome indexing<commit_after>"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_genome import process_genome
@pytest.mark.genome
@pytest.mark.pipeline
def test_tb_pipeline():
"""
Test case to ensure that the ChIP-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \
--lang=python \
--library_path=${HOME}/bin \
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
--log_level=debug \
process_chipseq.py \
--taxon_id 9606 \
--genome /<dataset_dir>/Human.GCA_000001405.22.fasta \
--assembly GRCh38 \
--file /<dataset_dir>/DRR000150.22.fastq
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
genome_handle = process_genome()
genome_files, genome_meta = genome_handle.run(
[resource_path + 'macs2.Human.GCA_000001405.22.fasta'],
{'assembly' : 'GRCh38'},
[]
)
print(genome_files[6:11])
# Add tests for all files created
for f_out in genome_files:
print("GENOME RESULTS FILE:", f_out)
assert os.path.isfile(f_out) is True
assert os.path.getsize(f_out) > 0
|
|
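The test record above relies on pytest markers; a minimal sketch of selecting it by marker through pytest's Python API follows (the 'tests/' path is an assumption, only the marker names come from the decorators above):

import pytest
# run only tests carrying both markers used above; 'tests/' is an illustrative path
pytest.main(['-m', 'genome and pipeline', 'tests/'])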
5260bc673c7fb12c208ab44e6d6044be3f0357ef
|
wdim/server/__init__.py
|
wdim/server/__init__.py
|
import os
import asyncio
import logging
import tornado.web
import tornado.httpserver
import tornado.platform.asyncio
from tornado.options import define
from tornado.options import options
import asyncio_mongo
from wdim.server.api import v1
logger = logging.getLogger(__name__)
define('debug', default=True, help='Debug mode')
define('port', default=1212, help='The port to listen on')
define('host', default='127.0.0.01', help='The host to listen on')
define('db', default='127.0.0.1:27017', help='TokuMX URI')
define('collection', default='wdim', help='The Mongo collection to use')
define('dbname', default='wdim20150921', help='The Mongo database to use')
def api_to_handlers(api, **kwargs):
logger.info('Loading api module {}'.format(api))
return [
(os.path.join('/', api.PREFIX, pattern.lstrip('/')), handler, kwargs)
for (pattern, handler) in api.HANDLERS
]
def make_app(debug):
db, port = options.db.split(':')
database = asyncio.get_event_loop().run_until_complete(
asyncio_mongo.Connection.create(db, int(port))
)[options.dbname]
collection = database[options.collection]
return tornado.web.Application(
api_to_handlers(v1, database=database, collection=collection),
debug=options.debug,
)
def serve():
tornado.platform.asyncio.AsyncIOMainLoop().install()
app = make_app(options.debug) # Debug mode
logger.info('Listening on {}:{}'.format(options.host, options.port))
app.listen(options.port, options.host)
asyncio.get_event_loop().set_debug(options.debug)
asyncio.get_event_loop().run_forever()
|
Use asyncio_mongo for the server
|
Use asyncio_mongo for the server
|
Python
|
mit
|
chrisseto/Still
|
Use asyncio_mongo for the server
|
import os
import asyncio
import logging
import tornado.web
import tornado.httpserver
import tornado.platform.asyncio
from tornado.options import define
from tornado.options import options
import asyncio_mongo
from wdim.server.api import v1
logger = logging.getLogger(__name__)
define('debug', default=True, help='Debug mode')
define('port', default=1212, help='The port to listen on')
define('host', default='127.0.0.01', help='The host to listen on')
define('db', default='127.0.0.1:27017', help='TokuMX URI')
define('collection', default='wdim', help='The Mongo collection to use')
define('dbname', default='wdim20150921', help='The Mongo database to use')
def api_to_handlers(api, **kwargs):
logger.info('Loading api module {}'.format(api))
return [
(os.path.join('/', api.PREFIX, pattern.lstrip('/')), handler, kwargs)
for (pattern, handler) in api.HANDLERS
]
def make_app(debug):
db, port = options.db.split(':')
database = asyncio.get_event_loop().run_until_complete(
asyncio_mongo.Connection.create(db, int(port))
)[options.dbname]
collection = database[options.collection]
return tornado.web.Application(
api_to_handlers(v1, database=database, collection=collection),
debug=options.debug,
)
def serve():
tornado.platform.asyncio.AsyncIOMainLoop().install()
app = make_app(options.debug) # Debug mode
logger.info('Listening on {}:{}'.format(options.host, options.port))
app.listen(options.port, options.host)
asyncio.get_event_loop().set_debug(options.debug)
asyncio.get_event_loop().run_forever()
|
<commit_before><commit_msg>Use asyncio_mongo for the server<commit_after>
|
import os
import asyncio
import logging
import tornado.web
import tornado.httpserver
import tornado.platform.asyncio
from tornado.options import define
from tornado.options import options
import asyncio_mongo
from wdim.server.api import v1
logger = logging.getLogger(__name__)
define('debug', default=True, help='Debug mode')
define('port', default=1212, help='The port to listen on')
define('host', default='127.0.0.01', help='The host to listen on')
define('db', default='127.0.0.1:27017', help='TokuMX URI')
define('collection', default='wdim', help='The Mongo collection to use')
define('dbname', default='wdim20150921', help='The Mongo database to use')
def api_to_handlers(api, **kwargs):
logger.info('Loading api module {}'.format(api))
return [
(os.path.join('/', api.PREFIX, pattern.lstrip('/')), handler, kwargs)
for (pattern, handler) in api.HANDLERS
]
def make_app(debug):
db, port = options.db.split(':')
database = asyncio.get_event_loop().run_until_complete(
asyncio_mongo.Connection.create(db, int(port))
)[options.dbname]
collection = database[options.collection]
return tornado.web.Application(
api_to_handlers(v1, database=database, collection=collection),
debug=options.debug,
)
def serve():
tornado.platform.asyncio.AsyncIOMainLoop().install()
app = make_app(options.debug) # Debug mode
logger.info('Listening on {}:{}'.format(options.host, options.port))
app.listen(options.port, options.host)
asyncio.get_event_loop().set_debug(options.debug)
asyncio.get_event_loop().run_forever()
|
Use asyncio_mongo for the server
import os
import asyncio
import logging
import tornado.web
import tornado.httpserver
import tornado.platform.asyncio
from tornado.options import define
from tornado.options import options
import asyncio_mongo
from wdim.server.api import v1
logger = logging.getLogger(__name__)
define('debug', default=True, help='Debug mode')
define('port', default=1212, help='The port to listen on')
define('host', default='127.0.0.01', help='The host to listen on')
define('db', default='127.0.0.1:27017', help='TokuMX URI')
define('collection', default='wdim', help='The Mongo collection to use')
define('dbname', default='wdim20150921', help='The Mongo database to use')
def api_to_handlers(api, **kwargs):
logger.info('Loading api module {}'.format(api))
return [
(os.path.join('/', api.PREFIX, pattern.lstrip('/')), handler, kwargs)
for (pattern, handler) in api.HANDLERS
]
def make_app(debug):
db, port = options.db.split(':')
database = asyncio.get_event_loop().run_until_complete(
asyncio_mongo.Connection.create(db, int(port))
)[options.dbname]
collection = database[options.collection]
return tornado.web.Application(
api_to_handlers(v1, database=database, collection=collection),
debug=options.debug,
)
def serve():
tornado.platform.asyncio.AsyncIOMainLoop().install()
app = make_app(options.debug) # Debug mode
logger.info('Listening on {}:{}'.format(options.host, options.port))
app.listen(options.port, options.host)
asyncio.get_event_loop().set_debug(options.debug)
asyncio.get_event_loop().run_forever()
|
<commit_before><commit_msg>Use asyncio_mongo for the server<commit_after>import os
import asyncio
import logging
import tornado.web
import tornado.httpserver
import tornado.platform.asyncio
from tornado.options import define
from tornado.options import options
import asyncio_mongo
from wdim.server.api import v1
logger = logging.getLogger(__name__)
define('debug', default=True, help='Debug mode')
define('port', default=1212, help='The port to listen on')
define('host', default='127.0.0.01', help='The host to listen on')
define('db', default='127.0.0.1:27017', help='TokuMX URI')
define('collection', default='wdim', help='The Mongo collection to use')
define('dbname', default='wdim20150921', help='The Mongo database to use')
def api_to_handlers(api, **kwargs):
logger.info('Loading api module {}'.format(api))
return [
(os.path.join('/', api.PREFIX, pattern.lstrip('/')), handler, kwargs)
for (pattern, handler) in api.HANDLERS
]
def make_app(debug):
db, port = options.db.split(':')
database = asyncio.get_event_loop().run_until_complete(
asyncio_mongo.Connection.create(db, int(port))
)[options.dbname]
collection = database[options.collection]
return tornado.web.Application(
api_to_handlers(v1, database=database, collection=collection),
debug=options.debug,
)
def serve():
tornado.platform.asyncio.AsyncIOMainLoop().install()
app = make_app(options.debug) # Debug mode
logger.info('Listening on {}:{}'.format(options.host, options.port))
app.listen(options.port, options.host)
asyncio.get_event_loop().set_debug(options.debug)
asyncio.get_event_loop().run_forever()
|
|
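A minimal sketch of invoking the serve() entrypoint defined in the record above; the command-line parsing step is an assumption, not part of the commit:

from tornado.options import parse_command_line
from wdim.server import serve

if __name__ == '__main__':
    parse_command_line()  # picks up the --port, --host, --db, --dbname, --collection defines
    serve()               # installs the asyncio IOLoop and blocks in run_forever()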
374c1e31c5ce417221bc55da0eb1ffcb023e8493
|
slope_vs_dist/npz2pkl.py
|
slope_vs_dist/npz2pkl.py
|
"""
Load all the results and configuration data from directories containing
multiple .npz files and save them in a pickle. Strict data structure is
assumed, as created from DataGenTaskFarmMC.py.
"""
from __future__ import print_function
import os
import sys
import pickle
import numpy as np
import time
directories = sys.argv[1:]
results = []
configs = []
for datadir in directories:
print("Loading files from %s ..." % (datadir))
filesindir = os.listdir(datadir)
npzindir = [fid for fid in filesindir if fid.endswith("npz")]
npzcount = len(npzindir)
for idx, npzfile in enumerate(npzindir):
npzdata = np.load(os.path.join(datadir, npzfile))
res = npzdata["results"].item()
conf = npzdata["config"].item()
results.append(res)
configs.append(conf)
print("Finished reading %s (%i/%i)" % (npzfile, idx+1, npzcount),
end="\r")
print("")
# Load the data from the old pickle
oldpickle_file = "pkl/metric_comp_results.pkl"
if not os.path.exists(oldpickle_file):
print("Old pickle data not found. Leaving results and configs as is.")
else:
print("Loading data from old pickle file: %s" % (oldpickle_file))
oldpickle_data = pickle.load(open(oldpickle_file))
results = oldpickle_data
print("Organising configs into lists ...")
N_in = [conf["N_in"] for conf in configs]
S_in = [conf["S_in"] for conf in configs]
f_in = [conf["f_in"] for conf in configs]
uuid = [conf["id"] for conf in configs]
sigma = [conf["sigma"] for conf in configs]
weight = [conf["weight"] for conf in configs]
configs = {"N_in": N_in,
"S_in": S_in,
"f_in": f_in,
"id": uuid,
"sigma": sigma,
"weight": weight, }
conf_res = {"config": configs,
"results": results, }
datenow = time.localtime()
isodate = "%i-%02i-%02i" % (datenow.tm_year, datenow.tm_mon, datenow.tm_mday)
newpickle_file = "pkl/metric_comp_results_%s.pkl" % (isodate)
pickle.dump(conf_res, open(newpickle_file, "w"))
print("Saved everything to %s" % (newpickle_file))
print("Done!")
|
Load npz files and dump them to a pickle
|
Load npz files and dump them to a pickle
|
Python
|
apache-2.0
|
achilleas-k/brian-scripts,achilleas-k/brian-scripts
|
Load npz files and dump them to a pickle
|
"""
Load all the results and configuration data from directories containing
multiple .npz files and save them in a pickle. Strict data structure is
assumed, as created from DataGenTaskFarmMC.py.
"""
from __future__ import print_function
import os
import sys
import pickle
import numpy as np
import time
directories = sys.argv[1:]
results = []
configs = []
for datadir in directories:
print("Loading files from %s ..." % (datadir))
filesindir = os.listdir(datadir)
npzindir = [fid for fid in filesindir if fid.endswith("npz")]
npzcount = len(npzindir)
for idx, npzfile in enumerate(npzindir):
npzdata = np.load(os.path.join(datadir, npzfile))
res = npzdata["results"].item()
conf = npzdata["config"].item()
results.append(res)
configs.append(conf)
print("Finished reading %s (%i/%i)" % (npzfile, idx+1, npzcount),
end="\r")
print("")
# Load the data from the old pickle
oldpickle_file = "pkl/metric_comp_results.pkl"
if not os.path.exists(oldpickle_file):
print("Old pickle data not found. Leaving results and configs as is.")
else:
print("Loading data from old pickle file: %s" % (oldpickle_file))
oldpickle_data = pickle.load(open(oldpickle_file))
results = oldpickle_data
print("Organising configs into lists ...")
N_in = [conf["N_in"] for conf in configs]
S_in = [conf["S_in"] for conf in configs]
f_in = [conf["f_in"] for conf in configs]
uuid = [conf["id"] for conf in configs]
sigma = [conf["sigma"] for conf in configs]
weight = [conf["weight"] for conf in configs]
configs = {"N_in": N_in,
"S_in": S_in,
"f_in": f_in,
"id": uuid,
"sigma": sigma,
"weight": weight, }
conf_res = {"config": configs,
"results": results, }
datenow = time.localtime()
isodate = "%i-%02i-%02i" % (datenow.tm_year, datenow.tm_mon, datenow.tm_mday)
newpickle_file = "pkl/metric_comp_results_%s.pkl" % (isodate)
pickle.dump(conf_res, open(newpickle_file, "w"))
print("Saved everything to %s" % (newpickle_file))
print("Done!")
|
<commit_before><commit_msg>Load npz files and dump them to a pickle<commit_after>
|
"""
Load all the results and configuration data from directories containing
multiple .npz files and save them in a pickle. Strict data structure is
assumed, as created from DataGenTaskFarmMC.py.
"""
from __future__ import print_function
import os
import sys
import pickle
import numpy as np
import time
directories = sys.argv[1:]
results = []
configs = []
for datadir in directories:
print("Loading files from %s ..." % (datadir))
filesindir = os.listdir(datadir)
npzindir = [fid for fid in filesindir if fid.endswith("npz")]
npzcount = len(npzindir)
for idx, npzfile in enumerate(npzindir):
npzdata = np.load(os.path.join(datadir, npzfile))
res = npzdata["results"].item()
conf = npzdata["config"].item()
results.append(res)
configs.append(conf)
print("Finished reading %s (%i/%i)" % (npzfile, idx+1, npzcount),
end="\r")
print("")
# Load the data from the old pickle
oldpickle_file = "pkl/metric_comp_results.pkl"
if not os.path.exists(oldpickle_file):
print("Old pickle data not found. Leaving results and configs as is.")
else:
print("Loading data from old pickle file: %s" % (oldpickle_file))
oldpickle_data = pickle.load(open(oldpickle_file))
results = oldpickle_data
print("Organising configs into lists ...")
N_in = [conf["N_in"] for conf in configs]
S_in = [conf["S_in"] for conf in configs]
f_in = [conf["f_in"] for conf in configs]
uuid = [conf["id"] for conf in configs]
sigma = [conf["sigma"] for conf in configs]
weight = [conf["weight"] for conf in configs]
configs = {"N_in": N_in,
"S_in": S_in,
"f_in": f_in,
"id": uuid,
"sigma": sigma,
"weight": weight, }
conf_res = {"config": configs,
"results": results, }
datenow = time.localtime()
isodate = "%i-%02i-%02i" % (datenow.tm_year, datenow.tm_mon, datenow.tm_mday)
newpickle_file = "pkl/metric_comp_results_%s.pkl" % (isodate)
pickle.dump(conf_res, open(newpickle_file, "w"))
print("Saved everything to %s" % (newpickle_file))
print("Done!")
|
Load npz files and dump them to a pickle
"""
Load all the results and configuration data from directories containing
multiple .npz files and save them in a pickle. Strict data structure is
assumed, as created from DataGenTaskFarmMC.py.
"""
from __future__ import print_function
import os
import sys
import pickle
import numpy as np
import time
directories = sys.argv[1:]
results = []
configs = []
for datadir in directories:
print("Loading files from %s ..." % (datadir))
filesindir = os.listdir(datadir)
npzindir = [fid for fid in filesindir if fid.endswith("npz")]
npzcount = len(npzindir)
for idx, npzfile in enumerate(npzindir):
npzdata = np.load(os.path.join(datadir, npzfile))
res = npzdata["results"].item()
conf = npzdata["config"].item()
results.append(res)
configs.append(conf)
print("Finished reading %s (%i/%i)" % (npzfile, idx+1, npzcount),
end="\r")
print("")
# Load the data from the old pickle
oldpickle_file = "pkl/metric_comp_results.pkl"
if not os.path.exists(oldpickle_file):
print("Old pickle data not found. Leaving results and configs as is.")
else:
print("Loading data from old pickle file: %s" % (oldpickle_file))
oldpickle_data = pickle.load(open(oldpickle_file))
results = oldpickle_data
print("Organising configs into lists ...")
N_in = [conf["N_in"] for conf in configs]
S_in = [conf["S_in"] for conf in configs]
f_in = [conf["f_in"] for conf in configs]
uuid = [conf["id"] for conf in configs]
sigma = [conf["sigma"] for conf in configs]
weight = [conf["weight"] for conf in configs]
configs = {"N_in": N_in,
"S_in": S_in,
"f_in": f_in,
"id": uuid,
"sigma": sigma,
"weight": weight, }
conf_res = {"config": configs,
"results": results, }
datenow = time.localtime()
isodate = "%i-%02i-%02i" % (datenow.tm_year, datenow.tm_mon, datenow.tm_mday)
newpickle_file = "pkl/metric_comp_results_%s.pkl" % (isodate)
pickle.dump(conf_res, open(newpickle_file, "w"))
print("Saved everything to %s" % (newpickle_file))
print("Done!")
|
<commit_before><commit_msg>Load npz files and dump them to a pickle<commit_after>"""
Load all the results and configuration data from directories containing
multiple .npz files and save them in a pickle. Strict data structure is
assumed, as created from DataGenTaskFarmMC.py.
"""
from __future__ import print_function
import os
import sys
import pickle
import numpy as np
import time
directories = sys.argv[1:]
results = []
configs = []
for datadir in directories:
print("Loading files from %s ..." % (datadir))
filesindir = os.listdir(datadir)
npzindir = [fid for fid in filesindir if fid.endswith("npz")]
npzcount = len(npzindir)
for idx, npzfile in enumerate(npzindir):
npzdata = np.load(os.path.join(datadir, npzfile))
res = npzdata["results"].item()
conf = npzdata["config"].item()
results.append(res)
configs.append(conf)
print("Finished reading %s (%i/%i)" % (npzfile, idx+1, npzcount),
end="\r")
print("")
# Load the data from the old pickle
oldpickle_file = "pkl/metric_comp_results.pkl"
if not os.path.exists(oldpickle_file):
print("Old pickle data not found. Leaving results and configs as is.")
else:
print("Loading data from old pickle file: %s" % (oldpickle_file))
oldpickle_data = pickle.load(open(oldpickle_file))
results = oldpickle_data
print("Organising configs into lists ...")
N_in = [conf["N_in"] for conf in configs]
S_in = [conf["S_in"] for conf in configs]
f_in = [conf["f_in"] for conf in configs]
uuid = [conf["id"] for conf in configs]
sigma = [conf["sigma"] for conf in configs]
weight = [conf["weight"] for conf in configs]
configs = {"N_in": N_in,
"S_in": S_in,
"f_in": f_in,
"id": uuid,
"sigma": sigma,
"weight": weight, }
conf_res = {"config": configs,
"results": results, }
datenow = time.localtime()
isodate = "%i-%02i-%02i" % (datenow.tm_year, datenow.tm_mon, datenow.tm_mday)
newpickle_file = "pkl/metric_comp_results_%s.pkl" % (isodate)
pickle.dump(conf_res, open(newpickle_file, "w"))
print("Saved everything to %s" % (newpickle_file))
print("Done!")
|
|
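A short sketch of reading back the pickle written by the script above; the date-stamped file name is illustrative, and the keys follow the conf_res dict built in the code:

import pickle
with open('pkl/metric_comp_results_2014-01-01.pkl') as pkl:  # hypothetical file name
    conf_res = pickle.load(pkl)
configs = conf_res['config']   # dict of lists: N_in, S_in, f_in, id, sigma, weight
results = conf_res['results']  # list of per-run result dicts
print(len(configs['N_in']), 'configurations loaded')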
7e242c02dd9b1bcd1d0281153fb2591d2846fc60
|
tests/integration/test_old_report_redirect.py
|
tests/integration/test_old_report_redirect.py
|
"""Test old search links work.
e.g: /report?q=dnstwister.report goes to /search/dnstwister.report
"""
def test_redirect(webapp):
"""Test we can redirect."""
response = webapp.get('/report?q=dnstwister.report')
assert response.status_code == 302
assert response.headers['location'] == 'http://localhost:80/search/dnstwister.report'
def test_no_redirect(webapp):
"""Test we only redirect where valid."""
response = webapp.get('/report?p=3')
assert response.status_code == 302
assert response.headers['location'] == 'http://localhost:80/error/1'
|
Test of the old bookmarked links being redirected
|
Test of the old bookmarked links being redirected
|
Python
|
unlicense
|
thisismyrobot/dnstwister,thisismyrobot/dnstwister,thisismyrobot/dnstwister
|
Test of the old bookmarked links being redirected
|
"""Test old search links work.
e.g: /report?q=dnstwister.report goes to /search/dnstwister.report
"""
def test_redirect(webapp):
"""Test we can redirect."""
response = webapp.get('/report?q=dnstwister.report')
assert response.status_code == 302
assert response.headers['location'] == 'http://localhost:80/search/dnstwister.report'
def test_no_redirect(webapp):
"""Test we only redirect where valid."""
response = webapp.get('/report?p=3')
assert response.status_code == 302
assert response.headers['location'] == 'http://localhost:80/error/1'
|
<commit_before><commit_msg>Test of the old bookmarked links being redirected<commit_after>
|
"""Test old search links work.
e.g: /report?q=dnstwister.report goes to /search/dnstwister.report
"""
def test_redirect(webapp):
"""Test we can redirect."""
response = webapp.get('/report?q=dnstwister.report')
assert response.status_code == 302
assert response.headers['location'] == 'http://localhost:80/search/dnstwister.report'
def test_no_redirect(webapp):
"""Test we only redirect where valid."""
response = webapp.get('/report?p=3')
assert response.status_code == 302
assert response.headers['location'] == 'http://localhost:80/error/1'
|
Test of the old bookmarked links being redirected
"""Test old search links work.
e.g: /report?q=dnstwister.report goes to /search/dnstwister.report
"""
def test_redirect(webapp):
"""Test we can redirect."""
response = webapp.get('/report?q=dnstwister.report')
assert response.status_code == 302
assert response.headers['location'] == 'http://localhost:80/search/dnstwister.report'
def test_no_redirect(webapp):
"""Test we only redirect where valid."""
response = webapp.get('/report?p=3')
assert response.status_code == 302
assert response.headers['location'] == 'http://localhost:80/error/1'
|
<commit_before><commit_msg>Test of the old bookmarked links being redirected<commit_after>"""Test old search links work.
e.g: /report?q=dnstwister.report goes to /search/dnstwister.report
"""
def test_redirect(webapp):
"""Test we can redirect."""
response = webapp.get('/report?q=dnstwister.report')
assert response.status_code == 302
assert response.headers['location'] == 'http://localhost:80/search/dnstwister.report'
def test_no_redirect(webapp):
"""Test we only redirect where valid."""
response = webapp.get('/report?p=3')
assert response.status_code == 302
assert response.headers['location'] == 'http://localhost:80/error/1'
|
|
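The tests above only pin down behaviour; a Flask-style handler that would satisfy them might look like the sketch below (illustrative only, not taken from the dnstwister codebase):

import flask
app = flask.Flask(__name__)

@app.route('/report')
def old_report_redirect():
    domain = flask.request.args.get('q')
    if domain is None:
        return flask.redirect('/error/1')        # no q parameter -> error page
    return flask.redirect('/search/' + domain)   # old bookmark -> new search URL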
d7387869c4500c4ddf8df728007cbc1f09dc767f
|
migrations/versions/0341_new_letter_rates.py
|
migrations/versions/0341_new_letter_rates.py
|
"""
Revision ID: 0341_new_letter_rates
Revises: 0340_stub_training_broadcasts
Create Date: 2021-01-27 11:58:21.393227
"""
import itertools
import uuid
from datetime import datetime
from alembic import op
from sqlalchemy.sql import text
from app.models import LetterRate
revision = '0341_new_letter_rates'
down_revision = '0340_stub_training_broadcasts'
CHANGEOVER_DATE = datetime(2021, 2, 1, 0, 0)
def get_new_rate(sheet_count, post_class):
base_prices = {
'second': 34,
'first': 64,
'europe': 81,
'rest-of-world': 81,
}
multiplier = 5 if post_class in ('first', 'second') else 8
return (base_prices[post_class] + (multiplier * sheet_count)) / 100.0
def upgrade():
conn = op.get_bind()
conn.execute(text("UPDATE letter_rates SET end_date = :start WHERE end_date IS NULL"), start=CHANGEOVER_DATE)
op.bulk_insert(LetterRate.__table__, [
{
'id': uuid.uuid4(),
'start_date': CHANGEOVER_DATE,
'end_date': None,
'sheet_count': sheet_count,
'rate': get_new_rate(sheet_count, post_class),
'crown': crown,
'post_class': post_class,
}
for sheet_count, crown, post_class in itertools.product(
range(1, 6),
[True, False],
['first', 'second', 'europe', 'rest-of-world']
)
])
def downgrade():
# Make sure you've thought about billing implications etc before downgrading!
conn = op.get_bind()
conn.execute(text("DELETE FROM letter_rates WHERE start_date = :start"), start=CHANGEOVER_DATE)
conn.execute(text("UPDATE letter_rates SET end_date = NULL WHERE end_date = :start"), start=CHANGEOVER_DATE)
|
Add February 2021 letter rates
|
Add February 2021 letter rates
All rates are changing, so we add an end date for the current rates and
insert new rates for every post_class, sheet count and crown status.
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add February 2021 letter rates
All rates are changing, so we add an end date for the current rates and
insert new rates for every post_class, sheet count and crown status.
|
"""
Revision ID: 0341_new_letter_rates
Revises: 0340_stub_training_broadcasts
Create Date: 2021-01-27 11:58:21.393227
"""
import itertools
import uuid
from datetime import datetime
from alembic import op
from sqlalchemy.sql import text
from app.models import LetterRate
revision = '0341_new_letter_rates'
down_revision = '0340_stub_training_broadcasts'
CHANGEOVER_DATE = datetime(2021, 2, 1, 0, 0)
def get_new_rate(sheet_count, post_class):
base_prices = {
'second': 34,
'first': 64,
'europe': 81,
'rest-of-world': 81,
}
multiplier = 5 if post_class in ('first', 'second') else 8
return (base_prices[post_class] + (multiplier * sheet_count)) / 100.0
def upgrade():
conn = op.get_bind()
conn.execute(text("UPDATE letter_rates SET end_date = :start WHERE end_date IS NULL"), start=CHANGEOVER_DATE)
op.bulk_insert(LetterRate.__table__, [
{
'id': uuid.uuid4(),
'start_date': CHANGEOVER_DATE,
'end_date': None,
'sheet_count': sheet_count,
'rate': get_new_rate(sheet_count, post_class),
'crown': crown,
'post_class': post_class,
}
for sheet_count, crown, post_class in itertools.product(
range(1, 6),
[True, False],
['first', 'second', 'europe', 'rest-of-world']
)
])
def downgrade():
# Make sure you've thought about billing implications etc before downgrading!
conn = op.get_bind()
conn.execute(text("DELETE FROM letter_rates WHERE start_date = :start"), start=CHANGEOVER_DATE)
conn.execute(text("UPDATE letter_rates SET end_date = NULL WHERE end_date = :start"), start=CHANGEOVER_DATE)
|
<commit_before><commit_msg>Add February 2021 letter rates
All rates are changing, so we add an end date for the current rates and
insert new rates for every post_class, sheet count and crown status.<commit_after>
|
"""
Revision ID: 0341_new_letter_rates
Revises: 0340_stub_training_broadcasts
Create Date: 2021-01-27 11:58:21.393227
"""
import itertools
import uuid
from datetime import datetime
from alembic import op
from sqlalchemy.sql import text
from app.models import LetterRate
revision = '0341_new_letter_rates'
down_revision = '0340_stub_training_broadcasts'
CHANGEOVER_DATE = datetime(2021, 2, 1, 0, 0)
def get_new_rate(sheet_count, post_class):
base_prices = {
'second': 34,
'first': 64,
'europe': 81,
'rest-of-world': 81,
}
multiplier = 5 if post_class in ('first', 'second') else 8
return (base_prices[post_class] + (multiplier * sheet_count)) / 100.0
def upgrade():
conn = op.get_bind()
conn.execute(text("UPDATE letter_rates SET end_date = :start WHERE end_date IS NULL"), start=CHANGEOVER_DATE)
op.bulk_insert(LetterRate.__table__, [
{
'id': uuid.uuid4(),
'start_date': CHANGEOVER_DATE,
'end_date': None,
'sheet_count': sheet_count,
'rate': get_new_rate(sheet_count, post_class),
'crown': crown,
'post_class': post_class,
}
for sheet_count, crown, post_class in itertools.product(
range(1, 6),
[True, False],
['first', 'second', 'europe', 'rest-of-world']
)
])
def downgrade():
# Make sure you've thought about billing implications etc before downgrading!
conn = op.get_bind()
conn.execute(text("DELETE FROM letter_rates WHERE start_date = :start"), start=CHANGEOVER_DATE)
conn.execute(text("UPDATE letter_rates SET end_date = NULL WHERE end_date = :start"), start=CHANGEOVER_DATE)
|
Add February 2021 letter rates
All rates are changing, so we add an end date for the current rates and
insert new rates for every post_class, sheet count and crown status.
"""
Revision ID: 0341_new_letter_rates
Revises: 0340_stub_training_broadcasts
Create Date: 2021-01-27 11:58:21.393227
"""
import itertools
import uuid
from datetime import datetime
from alembic import op
from sqlalchemy.sql import text
from app.models import LetterRate
revision = '0341_new_letter_rates'
down_revision = '0340_stub_training_broadcasts'
CHANGEOVER_DATE = datetime(2021, 2, 1, 0, 0)
def get_new_rate(sheet_count, post_class):
base_prices = {
'second': 34,
'first': 64,
'europe': 81,
'rest-of-world': 81,
}
multiplier = 5 if post_class in ('first', 'second') else 8
return (base_prices[post_class] + (multiplier * sheet_count)) / 100.0
def upgrade():
conn = op.get_bind()
conn.execute(text("UPDATE letter_rates SET end_date = :start WHERE end_date IS NULL"), start=CHANGEOVER_DATE)
op.bulk_insert(LetterRate.__table__, [
{
'id': uuid.uuid4(),
'start_date': CHANGEOVER_DATE,
'end_date': None,
'sheet_count': sheet_count,
'rate': get_new_rate(sheet_count, post_class),
'crown': crown,
'post_class': post_class,
}
for sheet_count, crown, post_class in itertools.product(
range(1, 6),
[True, False],
['first', 'second', 'europe', 'rest-of-world']
)
])
def downgrade():
# Make sure you've thought about billing implications etc before downgrading!
conn = op.get_bind()
conn.execute(text("DELETE FROM letter_rates WHERE start_date = :start"), start=CHANGEOVER_DATE)
conn.execute(text("UPDATE letter_rates SET end_date = NULL WHERE end_date = :start"), start=CHANGEOVER_DATE)
|
<commit_before><commit_msg>Add February 2021 letter rates
All rates are changing, so we add an end date for the current rates and
insert new rates for every post_class, sheet count and crown status.<commit_after>"""
Revision ID: 0341_new_letter_rates
Revises: 0340_stub_training_broadcasts
Create Date: 2021-01-27 11:58:21.393227
"""
import itertools
import uuid
from datetime import datetime
from alembic import op
from sqlalchemy.sql import text
from app.models import LetterRate
revision = '0341_new_letter_rates'
down_revision = '0340_stub_training_broadcasts'
CHANGEOVER_DATE = datetime(2021, 2, 1, 0, 0)
def get_new_rate(sheet_count, post_class):
base_prices = {
'second': 34,
'first': 64,
'europe': 81,
'rest-of-world': 81,
}
multiplier = 5 if post_class in ('first', 'second') else 8
return (base_prices[post_class] + (multiplier * sheet_count)) / 100.0
def upgrade():
conn = op.get_bind()
conn.execute(text("UPDATE letter_rates SET end_date = :start WHERE end_date IS NULL"), start=CHANGEOVER_DATE)
op.bulk_insert(LetterRate.__table__, [
{
'id': uuid.uuid4(),
'start_date': CHANGEOVER_DATE,
'end_date': None,
'sheet_count': sheet_count,
'rate': get_new_rate(sheet_count, post_class),
'crown': crown,
'post_class': post_class,
}
for sheet_count, crown, post_class in itertools.product(
range(1, 6),
[True, False],
['first', 'second', 'europe', 'rest-of-world']
)
])
def downgrade():
# Make sure you've thought about billing implications etc before downgrading!
conn = op.get_bind()
conn.execute(text("DELETE FROM letter_rates WHERE start_date = :start"), start=CHANGEOVER_DATE)
conn.execute(text("UPDATE letter_rates SET end_date = NULL WHERE end_date = :start"), start=CHANGEOVER_DATE)
|
|
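For reference, the get_new_rate() formula above works out to values like these (derived directly from the base prices and multipliers in the code):

get_new_rate(1, 'second')         # (34 + 5 * 1) / 100.0 == 0.39
get_new_rate(3, 'first')          # (64 + 5 * 3) / 100.0 == 0.79
get_new_rate(5, 'rest-of-world')  # (81 + 8 * 5) / 100.0 == 1.21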
55a75d62bfd77c9df817e8a600a89bc4f9594f9c
|
Lib/ejm_rcparams.py
|
Lib/ejm_rcparams.py
|
import numpy as np
import matplotlib
from matplotlib import rcParams
from matplotlib.backends.backend_pgf import FigureCanvasPgf
matplotlib.backend_bases.register_backend('pdf', FigureCanvasPgf)
rcParams['axes.labelsize'] = 10
rcParams['xtick.labelsize'] = 9
rcParams['ytick.labelsize'] = 9
rcParams['legend.fontsize'] = 10
rcParams['font.family'] = 'serif'
rcParams['font.serif'] = ['Computer Modern Roman']
rcParams['text.usetex'] = True
preamble = [r'\usepackage{siunitx}']
rcParams['text.latex.preamble'] = preamble
rcParams['pgf.preamble'] = preamble
def get_figsize(width=512, factor=0.6):
fig_width_pt = width * factor
inches_per_pt = 1.0 / 72.27
golden_ratio = (np.sqrt(5) - 1.0) / 2.0 # because it looks good
fig_width_in = fig_width_pt * inches_per_pt # figure width in inches
fig_height_in = fig_width_in * golden_ratio # figure height in inches
figsize = [fig_width_in, fig_height_in] # fig dims as a list
return figsize
|
Add module to set up plots for journally output
|
Add module to set up plots for journally output
|
Python
|
bsd-3-clause
|
eddiejessup/ciabatta
|
Add module to set up plots for journally output
|
import numpy as np
import matplotlib
from matplotlib import rcParams
from matplotlib.backends.backend_pgf import FigureCanvasPgf
matplotlib.backend_bases.register_backend('pdf', FigureCanvasPgf)
rcParams['axes.labelsize'] = 10
rcParams['xtick.labelsize'] = 9
rcParams['ytick.labelsize'] = 9
rcParams['legend.fontsize'] = 10
rcParams['font.family'] = 'serif'
rcParams['font.serif'] = ['Computer Modern Roman']
rcParams['text.usetex'] = True
preamble = [r'\usepackage{siunitx}']
rcParams['text.latex.preamble'] = preamble
rcParams['pgf.preamble'] = preamble
def get_figsize(width=512, factor=0.6):
fig_width_pt = width * factor
inches_per_pt = 1.0 / 72.27
golden_ratio = (np.sqrt(5) - 1.0) / 2.0 # because it looks good
fig_width_in = fig_width_pt * inches_per_pt # figure width in inches
fig_height_in = fig_width_in * golden_ratio # figure height in inches
figsize = [fig_width_in, fig_height_in] # fig dims as a list
return figsize
|
<commit_before><commit_msg>Add module to set up plots for journally output<commit_after>
|
import numpy as np
import matplotlib
from matplotlib import rcParams
from matplotlib.backends.backend_pgf import FigureCanvasPgf
matplotlib.backend_bases.register_backend('pdf', FigureCanvasPgf)
rcParams['axes.labelsize'] = 10
rcParams['xtick.labelsize'] = 9
rcParams['ytick.labelsize'] = 9
rcParams['legend.fontsize'] = 10
rcParams['font.family'] = 'serif'
rcParams['font.serif'] = ['Computer Modern Roman']
rcParams['text.usetex'] = True
preamble = [r'\usepackage{siunitx}']
rcParams['text.latex.preamble'] = preamble
rcParams['pgf.preamble'] = preamble
def get_figsize(width=512, factor=0.6):
fig_width_pt = width * factor
inches_per_pt = 1.0 / 72.27
golden_ratio = (np.sqrt(5) - 1.0) / 2.0 # because it looks good
fig_width_in = fig_width_pt * inches_per_pt # figure width in inches
fig_height_in = fig_width_in * golden_ratio # figure height in inches
figsize = [fig_width_in, fig_height_in] # fig dims as a list
return figsize
|
Add module to set up plots for journally output
import numpy as np
import matplotlib
from matplotlib import rcParams
from matplotlib.backends.backend_pgf import FigureCanvasPgf
matplotlib.backend_bases.register_backend('pdf', FigureCanvasPgf)
rcParams['axes.labelsize'] = 10
rcParams['xtick.labelsize'] = 9
rcParams['ytick.labelsize'] = 9
rcParams['legend.fontsize'] = 10
rcParams['font.family'] = 'serif'
rcParams['font.serif'] = ['Computer Modern Roman']
rcParams['text.usetex'] = True
preamble = [r'\usepackage{siunitx}']
rcParams['text.latex.preamble'] = preamble
rcParams['pgf.preamble'] = preamble
def get_figsize(width=512, factor=0.6):
fig_width_pt = width * factor
inches_per_pt = 1.0 / 72.27
golden_ratio = (np.sqrt(5) - 1.0) / 2.0 # because it looks good
fig_width_in = fig_width_pt * inches_per_pt # figure width in inches
fig_height_in = fig_width_in * golden_ratio # figure height in inches
figsize = [fig_width_in, fig_height_in] # fig dims as a list
return figsize
|
<commit_before><commit_msg>Add module to set up plots for journally output<commit_after>import numpy as np
import matplotlib
from matplotlib import rcParams
from matplotlib.backends.backend_pgf import FigureCanvasPgf
matplotlib.backend_bases.register_backend('pdf', FigureCanvasPgf)
rcParams['axes.labelsize'] = 10
rcParams['xtick.labelsize'] = 9
rcParams['ytick.labelsize'] = 9
rcParams['legend.fontsize'] = 10
rcParams['font.family'] = 'serif'
rcParams['font.serif'] = ['Computer Modern Roman']
rcParams['text.usetex'] = True
preamble = [r'\usepackage{siunitx}']
rcParams['text.latex.preamble'] = preamble
rcParams['pgf.preamble'] = preamble
def get_figsize(width=512, factor=0.6):
fig_width_pt = width * factor
inches_per_pt = 1.0 / 72.27
golden_ratio = (np.sqrt(5) - 1.0) / 2.0 # because it looks good
fig_width_in = fig_width_pt * inches_per_pt # figure width in inches
fig_height_in = fig_width_in * golden_ratio # figure height in inches
figsize = [fig_width_in, fig_height_in] # fig dims as a list
return figsize
|
|
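A brief usage sketch for the module above; the import name follows the file name and assumes Lib/ is on the path, the rest is standard matplotlib:

import matplotlib.pyplot as plt
import ejm_rcparams  # importing applies the rcParams above as a side effect

fig, ax = plt.subplots(figsize=ejm_rcparams.get_figsize(width=512, factor=0.6))
ax.plot([0, 1], [0, 1])
ax.set_xlabel(r'Distance / \si{\metre}')  # siunitx comes from the LaTeX preamble above
fig.savefig('figure.pdf')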
96c339ba26d2a09576eee19ad62e5d03d5fa8a0f
|
process_pic.py
|
process_pic.py
|
from pic import Picture
from path import Path
import argparse, json
parser = argparse.ArgumentParser(
description=
"Process the picture or the directory, given the json config file")
parser.add_argument("path", help="Path for the picture or the directory")
parser.add_argument(
"-n",
"--nb_faces",
type=int,
help=
"0 | 1 | n to don't search for any face | search and expect exactly one face | search for multiple face"
)
def process(dir_path, nb_faces=1, margin=0.4):
dir = Path(dir_path)
print(dir)
if __name__ == '__main__':
args = parser.parse_args()
print(args)
|
Add a script to use the pic.py module
|
Add a script to use the pic.py module
|
Python
|
mit
|
Dixneuf19/fuzzy-octo-disco
|
Add a script to use the pic.py module
|
from pic import Picture
from path import Path
import argparse, json
parser = argparse.ArgumentParser(
description=
"Process the picture or the directory, given the json config file")
parser.add_argument("path", help="Path for the picture or the directory")
parser.add_argument(
"-n",
"--nb_faces",
type=int,
help=
"0 | 1 | n to don't search for any face | search and expect exactly one face | search for multiple face"
)
def process(dir_path, nb_faces=1, margin=0.4):
dir = Path(dir_path)
print(dir)
if __name__ == '__main__':
args = parser.parse_args()
print(args)
|
<commit_before><commit_msg>Add a script to use the pic.py module<commit_after>
|
from pic import Picture
from path import Path
import argparse, json
parser = argparse.ArgumentParser(
description=
"Process the picture or the directory, given the json config file")
parser.add_argument("path", help="Path for the picture or the directory")
parser.add_argument(
"-n",
"--nb_faces",
type=int,
help=
"0 | 1 | n to don't search for any face | search and expect exactly one face | search for multiple face"
)
def process(dir_path, nb_faces=1, margin=0.4):
dir = Path(dir_path)
print(dir)
if __name__ == '__main__':
args = parser.parse_args()
print(args)
|
Add a script to use the pic.py module
from pic import Picture
from path import Path
import argparse, json
parser = argparse.ArgumentParser(
description=
"Process the picture or the directory, given the json config file")
parser.add_argument("path", help="Path for the picture or the directory")
parser.add_argument(
"-n",
"--nb_faces",
type=int,
help=
"0 | 1 | n to don't search for any face | search and expect exactly one face | search for multiple face"
)
def process(dir_path, nb_faces=1, margin=0.4):
dir = Path(dir_path)
print(dir)
if __name__ == '__main__':
args = parser.parse_args()
print(args)
|
<commit_before><commit_msg>Add a script to use the pic.py module<commit_after>from pic import Picture
from path import Path
import argparse, json
parser = argparse.ArgumentParser(
description=
"Process the picture or the directory, given the json config file")
parser.add_argument("path", help="Path for the picture or the directory")
parser.add_argument(
"-n",
"--nb_faces",
type=int,
help=
"0 | 1 | n to don't search for any face | search and expect exactly one face | search for multiple face"
)
def process(dir_path, nb_faces=1, margin=0.4):
dir = Path(dir_path)
print(dir)
if __name__ == '__main__':
args = parser.parse_args()
print(args)
|
|
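process() is still a stub in the commit above; a hedged sketch of how the parsed arguments might eventually be wired in (an assumption, not the author's implementation):

if __name__ == '__main__':
    args = parser.parse_args()
    nb_faces = args.nb_faces if args.nb_faces is not None else 1
    process(args.path, nb_faces=nb_faces)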
191170a6b1db2dd9a6c71482d0d7bf171b4c4c9a
|
download.py
|
download.py
|
#!/usr/bin/env python
import json
import sys
import urllib2
def get_url(url):
req = urllib2.Request(url)
return urllib2.urlopen(req).fp.read()
def download_url(url, file_name):
with open(file_name, 'w') as f:
f.write(get_url(url))
DROPBOX_BASE_URL = 'https://dl.dropboxusercontent.com'
def get_dropbox_download_info(url):
page = get_url(url)
download_token_index = page.index('token_hash') + 11
download_token = page[download_token_index:page.index('"',
download_token_index)]
download_url = '%s%s?dl=1&token_hash=%s' % (DROPBOX_BASE_URL,
url.split('dropbox.com')[1], download_token)
file_name = url.split('/')[-1]
return download_url, file_name
GOOGLE_DRIVE_BASE_URL = 'https://www.googleapis.com/drive/v2/files/'
def get_google_drive_download_info(url, api_key):
file_id = url.split('d/')[1].split('/')[0]
req_url = GOOGLE_DRIVE_BASE_URL + file_id + '?key=' + api_key
file_data = json.loads(get_url(req_url))
download_url = file_data['exportLinks']['application/pdf']
file_name = file_data['title'] + '.pdf'
return download_url, file_name
if __name__ == '__main__':
if sys.argv[1] == 'dropbox':
url = sys.argv[2]
download_url(*get_dropbox_download_info(url))
elif sys.argv[1] == 'drive':
url = sys.argv[2]
api_key = sys.argv[3]
download_url(*get_google_drive_download_info(url, api_key))
|
Add support for Dropbox and Google Drive
|
Add support for Dropbox and Google Drive
|
Python
|
mit
|
tomshen/cloud-download
|
Add support for Dropbox and Google Drive
|
#!/usr/bin/env python
import json
import sys
import urllib2
def get_url(url):
req = urllib2.Request(url)
return urllib2.urlopen(req).fp.read()
def download_url(url, file_name):
with open(file_name, 'w') as f:
f.write(get_url(url))
DROPBOX_BASE_URL = 'https://dl.dropboxusercontent.com'
def get_dropbox_download_info(url):
page = get_url(url)
download_token_index = page.index('token_hash') + 11
download_token = page[download_token_index:page.index('"',
download_token_index)]
download_url = '%s%s?dl=1&token_hash=%s' % (DROPBOX_BASE_URL,
url.split('dropbox.com')[1], download_token)
file_name = url.split('/')[-1]
return download_url, file_name
GOOGLE_DRIVE_BASE_URL = 'https://www.googleapis.com/drive/v2/files/'
def get_google_drive_download_info(url, api_key):
file_id = url.split('d/')[1].split('/')[0]
req_url = GOOGLE_DRIVE_BASE_URL + file_id + '?key=' + api_key
file_data = json.loads(get_url(req_url))
download_url = file_data['exportLinks']['application/pdf']
file_name = file_data['title'] + '.pdf'
return download_url, file_name
if __name__ == '__main__':
if sys.argv[1] == 'dropbox':
url = sys.argv[2]
download_url(*get_dropbox_download_info(url))
elif sys.argv[1] == 'drive':
url = sys.argv[2]
api_key = sys.argv[3]
download_url(*get_google_drive_download_info(url, api_key))
|
<commit_before><commit_msg>Add support for Dropbox and Google Drive<commit_after>
|
#!/usr/bin/env python
import json
import sys
import urllib2
def get_url(url):
req = urllib2.Request(url)
return urllib2.urlopen(req).fp.read()
def download_url(url, file_name):
with open(file_name, 'w') as f:
f.write(get_url(url))
DROPBOX_BASE_URL = 'https://dl.dropboxusercontent.com'
def get_dropbox_download_info(url):
page = get_url(url)
download_token_index = page.index('token_hash') + 11
download_token = page[download_token_index:page.index('"',
download_token_index)]
download_url = '%s%s?dl=1&token_hash=%s' % (DROPBOX_BASE_URL,
url.split('dropbox.com')[1], download_token)
file_name = url.split('/')[-1]
return download_url, file_name
GOOGLE_DRIVE_BASE_URL = 'https://www.googleapis.com/drive/v2/files/'
def get_google_drive_download_info(url, api_key):
file_id = url.split('d/')[1].split('/')[0]
req_url = GOOGLE_DRIVE_BASE_URL + file_id + '?key=' + api_key
file_data = json.loads(get_url(req_url))
download_url = file_data['exportLinks']['application/pdf']
file_name = file_data['title'] + '.pdf'
return download_url, file_name
if __name__ == '__main__':
if sys.argv[1] == 'dropbox':
url = sys.argv[2]
download_url(*get_dropbox_download_info(url))
elif sys.argv[1] == 'drive':
url = sys.argv[2]
api_key = sys.argv[3]
download_url(*get_google_drive_download_info(url, api_key))
|
Add support for Dropbox and Google Drive
#!/usr/bin/env python
import json
import sys
import urllib2
def get_url(url):
req = urllib2.Request(url)
return urllib2.urlopen(req).fp.read()
def download_url(url, file_name):
with open(file_name, 'w') as f:
f.write(get_url(url))
DROPBOX_BASE_URL = 'https://dl.dropboxusercontent.com'
def get_dropbox_download_info(url):
page = get_url(url)
download_token_index = page.index('token_hash') + 11
download_token = page[download_token_index:page.index('"',
download_token_index)]
download_url = '%s%s?dl=1&token_hash=%s' % (DROPBOX_BASE_URL,
url.split('dropbox.com')[1], download_token)
file_name = url.split('/')[-1]
return download_url, file_name
GOOGLE_DRIVE_BASE_URL = 'https://www.googleapis.com/drive/v2/files/'
def get_google_drive_download_info(url, api_key):
file_id = url.split('d/')[1].split('/')[0]
req_url = GOOGLE_DRIVE_BASE_URL + file_id + '?key=' + api_key
file_data = json.loads(get_url(req_url))
download_url = file_data['exportLinks']['application/pdf']
file_name = file_data['title'] + '.pdf'
return download_url, file_name
if __name__ == '__main__':
if sys.argv[1] == 'dropbox':
url = sys.argv[2]
download_url(*get_dropbox_download_info(url))
elif sys.argv[1] == 'drive':
url = sys.argv[2]
api_key = sys.argv[3]
download_url(*get_google_drive_download_info(url, api_key))
|
<commit_before><commit_msg>Add support for Dropbox and Google Drive<commit_after>#!/usr/bin/env python
import json
import sys
import urllib2
def get_url(url):
req = urllib2.Request(url)
return urllib2.urlopen(req).fp.read()
def download_url(url, file_name):
with open(file_name, 'w') as f:
f.write(get_url(url))
DROPBOX_BASE_URL = 'https://dl.dropboxusercontent.com'
def get_dropbox_download_info(url):
page = get_url(url)
download_token_index = page.index('token_hash') + 11
download_token = page[download_token_index:page.index('"',
download_token_index)]
download_url = '%s%s?dl=1&token_hash=%s' % (DROPBOX_BASE_URL,
url.split('dropbox.com')[1], download_token)
file_name = url.split('/')[-1]
return download_url, file_name
GOOGLE_DRIVE_BASE_URL = 'https://www.googleapis.com/drive/v2/files/'
def get_google_drive_download_info(url, api_key):
file_id = url.split('d/')[1].split('/')[0]
req_url = GOOGLE_DRIVE_BASE_URL + file_id + '?key=' + api_key
file_data = json.loads(get_url(req_url))
download_url = file_data['exportLinks']['application/pdf']
file_name = file_data['title'] + '.pdf'
return download_url, file_name
if __name__ == '__main__':
if sys.argv[1] == 'dropbox':
url = sys.argv[2]
download_url(*get_dropbox_download_info(url))
elif sys.argv[1] == 'drive':
url = sys.argv[2]
api_key = sys.argv[3]
download_url(*get_google_drive_download_info(url, api_key))
|
|
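Example calls for the helpers above; the shared link, file id and API key are placeholders:

# Dropbox: placeholder shared link
url, name = get_dropbox_download_info('https://www.dropbox.com/s/abc123/report.pdf')
download_url(url, name)

# Google Drive: placeholder document URL and API key
url, name = get_google_drive_download_info(
    'https://docs.google.com/document/d/FILE_ID/edit', 'YOUR_API_KEY')
download_url(url, name)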
651b523fa8f8aba9a7d05b697eb4287cdc52e5ed
|
lintcode/Medium/184_Largest_Num.py
|
lintcode/Medium/184_Largest_Num.py
|
class Solution:
#@param num: A list of non negative integers
#@return: A string
def largestNumber(self, num):
# write your code here
def quickSort(arr):
if (len(arr) <= 1):
return arr
mid = str(arr[0])
smaller = filter(lambda a: str(a) + mid > mid + str(a), arr[1:])
larger = filter(lambda a: str(a) + mid <= mid + str(a), arr[1:])
return quickSort(smaller) + [mid] + quickSort(larger)
sorted_num = quickSort(num)
sorted_num = map(lambda a: str(a), sorted_num)
res = ''.join(sorted_num)
return "0" if int(res) == 0 else res
|
Add solution to lintcode question 184
|
Add solution to lintcode question 184
|
Python
|
mit
|
Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode
|
Add solution to lintcode question 184
|
class Solution:
#@param num: A list of non negative integers
#@return: A string
def largestNumber(self, num):
# write your code here
def quickSort(arr):
if (len(arr) <= 1):
return arr
mid = str(arr[0])
smaller = filter(lambda a: str(a) + mid > mid + str(a), arr[1:])
larger = filter(lambda a: str(a) + mid <= mid + str(a), arr[1:])
return quickSort(smaller) + [mid] + quickSort(larger)
sorted_num = quickSort(num)
sorted_num = map(lambda a: str(a), sorted_num)
res = ''.join(sorted_num)
return "0" if int(res) == 0 else res
|
<commit_before><commit_msg>Add solution to lintcode question 184<commit_after>
|
class Solution:
#@param num: A list of non negative integers
#@return: A string
def largestNumber(self, num):
# write your code here
def quickSort(arr):
if (len(arr) <= 1):
return arr
mid = str(arr[0])
smaller = filter(lambda a: str(a) + mid > mid + str(a), arr[1:])
larger = filter(lambda a: str(a) + mid <= mid + str(a), arr[1:])
return quickSort(smaller) + [mid] + quickSort(larger)
sorted_num = quickSort(num)
sorted_num = map(lambda a: str(a), sorted_num)
res = ''.join(sorted_num)
return "0" if int(res) == 0 else res
|
Add solution to lintcode question 184
class Solution:
#@param num: A list of non negative integers
#@return: A string
def largestNumber(self, num):
# write your code here
def quickSort(arr):
if (len(arr) <= 1):
return arr
mid = str(arr[0])
smaller = filter(lambda a: str(a) + mid > mid + str(a), arr[1:])
larger = filter(lambda a: str(a) + mid <= mid + str(a), arr[1:])
return quickSort(smaller) + [mid] + quickSort(larger)
sorted_num = quickSort(num)
sorted_num = map(lambda a: str(a), sorted_num)
res = ''.join(sorted_num)
return "0" if int(res) == 0 else res
|
<commit_before><commit_msg>Add solution to lintcode question 184<commit_after>class Solution:
#@param num: A list of non negative integers
#@return: A string
def largestNumber(self, num):
# write your code here
def quickSort(arr):
if (len(arr) <= 1):
return arr
mid = str(arr[0])
smaller = filter(lambda a: str(a) + mid > mid + str(a), arr[1:])
larger = filter(lambda a: str(a) + mid <= mid + str(a), arr[1:])
return quickSort(smaller) + [mid] + quickSort(larger)
sorted_num = quickSort(num)
sorted_num = map(lambda a: str(a), sorted_num)
res = ''.join(sorted_num)
return "0" if int(res) == 0 else res
|
|
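A quick usage check for the solution above (Python 2, where filter() returns a list as the recursion expects):

print(Solution().largestNumber([1, 20, 23, 4, 8]))  # "8423201" - ordering 8, 4, 23, 20, 1
print(Solution().largestNumber([0, 0]))             # "0" - all-zero input collapses to "0"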
1c695ee438c348140354a323a99bd5e186905140
|
tempest/tests/services/compute/test_aggregates_client.py
|
tempest/tests/services/compute/test_aggregates_client.py
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslotest import mockpatch
from tempest.services.compute.json import aggregates_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestAggregatesClient(base.TestCase):
def setUp(self):
super(TestAggregatesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = aggregates_client.AggregatesClient(
fake_auth, 'compute', 'regionOne')
def _test_list_aggregates(self, bytes_body=False):
body = '{"aggregates": []}'
if bytes_body:
body = body.encode('utf-8')
expected = []
response = (httplib2.Response({'status': 200}), body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=response))
self.assertEqual(expected, self.client.list_aggregates())
def test_list_aggregates_with_str_body(self):
self._test_list_aggregates()
def test_list_aggregates_with_bytes_body(self):
self._test_list_aggregates(bytes_body=True)
|
Add unit test for method list_aggregates
|
Add unit test for method list_aggregates
This patch adds unit test for aggregates_client.
Specific about method list_aggregates.
Change-Id: If66b8f8688432e6bd28c7ab6a4d0551675ef5114
|
Python
|
apache-2.0
|
pczerkas/tempest,alinbalutoiu/tempest,zsoltdudas/lis-tempest,varunarya10/tempest,JioCloud/tempest,cisco-openstack/tempest,vedujoshi/tempest,xbezdick/tempest,flyingfish007/tempest,pandeyop/tempest,pczerkas/tempest,manasi24/tempest,tonyli71/tempest,sebrandon1/tempest,roopali8/tempest,akash1808/tempest,varunarya10/tempest,LIS/lis-tempest,manasi24/jiocloud-tempest-qatempest,izadorozhna/tempest,bigswitch/tempest,LIS/lis-tempest,Juniper/tempest,manasi24/jiocloud-tempest-qatempest,pandeyop/tempest,flyingfish007/tempest,rakeshmi/tempest,masayukig/tempest,akash1808/tempest,hayderimran7/tempest,roopali8/tempest,alinbalutoiu/tempest,tudorvio/tempest,izadorozhna/tempest,rakeshmi/tempest,hayderimran7/tempest,Tesora/tesora-tempest,cisco-openstack/tempest,openstack/tempest,sebrandon1/tempest,vedujoshi/tempest,Juniper/tempest,dkalashnik/tempest,JioCloud/tempest,NexusIS/tempest,dkalashnik/tempest,masayukig/tempest,Tesora/tesora-tempest,nunogt/tempest,NexusIS/tempest,openstack/tempest,xbezdick/tempest,zsoltdudas/lis-tempest,manasi24/tempest,bigswitch/tempest,tudorvio/tempest,tonyli71/tempest,nunogt/tempest
|
Add unit test for method list_aggregates
This patch adds unit test for aggregates_client.
Specific about method list_aggregates.
Change-Id: If66b8f8688432e6bd28c7ab6a4d0551675ef5114
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslotest import mockpatch
from tempest.services.compute.json import aggregates_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestAggregatesClient(base.TestCase):
def setUp(self):
super(TestAggregatesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = aggregates_client.AggregatesClient(
fake_auth, 'compute', 'regionOne')
def _test_list_aggregates(self, bytes_body=False):
body = '{"aggregates": []}'
if bytes_body:
body = body.encode('utf-8')
expected = []
response = (httplib2.Response({'status': 200}), body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=response))
self.assertEqual(expected, self.client.list_aggregates())
def test_list_aggregates_with_str_body(self):
self._test_list_aggregates()
def test_list_aggregates_with_bytes_body(self):
self._test_list_aggregates(bytes_body=True)
|
<commit_before><commit_msg>Add unit test for method list_aggregates
This patch adds unit test for aggregates_client.
Specific about method list_aggregates.
Change-Id: If66b8f8688432e6bd28c7ab6a4d0551675ef5114<commit_after>
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslotest import mockpatch
from tempest.services.compute.json import aggregates_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestAggregatesClient(base.TestCase):
def setUp(self):
super(TestAggregatesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = aggregates_client.AggregatesClient(
fake_auth, 'compute', 'regionOne')
def _test_list_aggregates(self, bytes_body=False):
body = '{"aggregates": []}'
if bytes_body:
body = body.encode('utf-8')
expected = []
response = (httplib2.Response({'status': 200}), body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=response))
self.assertEqual(expected, self.client.list_aggregates())
def test_list_aggregates_with_str_body(self):
self._test_list_aggregates()
def test_list_aggregates_with_bytes_body(self):
self._test_list_aggregates(bytes_body=True)
|
Add unit test for method list_aggregates
This patch adds unit test for aggregates_client.
Specific about method list_aggregates.
Change-Id: If66b8f8688432e6bd28c7ab6a4d0551675ef5114
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslotest import mockpatch
from tempest.services.compute.json import aggregates_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestAggregatesClient(base.TestCase):
def setUp(self):
super(TestAggregatesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = aggregates_client.AggregatesClient(
fake_auth, 'compute', 'regionOne')
def _test_list_aggregates(self, bytes_body=False):
body = '{"aggregates": []}'
if bytes_body:
body = body.encode('utf-8')
expected = []
response = (httplib2.Response({'status': 200}), body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=response))
self.assertEqual(expected, self.client.list_aggregates())
def test_list_aggregates_with_str_body(self):
self._test_list_aggregates()
def test_list_aggregates_with_bytes_body(self):
self._test_list_aggregates(bytes_body=True)
|
<commit_before><commit_msg>Add unit test for method list_aggregates
This patch adds unit test for aggregates_client.
Specific about method list_aggregates.
Change-Id: If66b8f8688432e6bd28c7ab6a4d0551675ef5114<commit_after># Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslotest import mockpatch
from tempest.services.compute.json import aggregates_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestAggregatesClient(base.TestCase):
def setUp(self):
super(TestAggregatesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = aggregates_client.AggregatesClient(
fake_auth, 'compute', 'regionOne')
def _test_list_aggregates(self, bytes_body=False):
body = '{"aggregates": []}'
if bytes_body:
body = body.encode('utf-8')
expected = []
response = (httplib2.Response({'status': 200}), body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=response))
self.assertEqual(expected, self.client.list_aggregates())
def test_list_aggregates_with_str_body(self):
self._test_list_aggregates()
def test_list_aggregates_with_bytes_body(self):
self._test_list_aggregates(bytes_body=True)
|
|
a4b0ecd0c1a3287d97fed6ae9418032698c1cfe0
|
read_launch_plists.py
|
read_launch_plists.py
|
#!/usr/bin/env python
#
# This script reads system launch daemons and agents.
#
# Python 3.4 is required to read binary plists, or convert them first with,
# find /System/Library/Launch* -type f -exec sudo plutil -convert xml1 {} \;
import glob
import os
import plistlib
header ='filename,label,program,runatload,comment'
location = '/System/Library/Launch%s/*.plist'
def LoadPlist(filename):
"""Plists can be read with plistlib."""
try:
return plistlib.readPlist(filename)
except:
print('python3.4 is required to read binary plist %s, skipping' % filename)
return None
def GetStatus(plist):
"""Plists may have a RunAtLoad key."""
try:
return plist['RunAtLoad']
except KeyError:
return 'False'
def GetLabel(plist):
"""Plists have a label."""
return plist['Label']
def GetProgram(plist):
"""Plists have either a Program or ProgramArguments key,
if the executable requires command line options.
"""
try:
return plist['Program']
except KeyError:
return plist['ProgramArguments']
def main():
"""Main function."""
print(header)
for kind in ['Daemons', 'Agents']:
for filename in glob.glob(location % kind):
p = LoadPlist(filename)
if p:
e = (filename, GetLabel(p), '"%s"' % GetProgram(p), GetStatus(p))
print('%s,%s,%s,%s,' % (e))
if __name__ == '__main__':
main()
|
Add script for reading plists.
|
Add script for reading plists.
|
Python
|
mit
|
drduh/OS-X-Yosemite-Security-and-Privacy-Guide,ScheerMT/OS-X-Yosemite-Security-and-Privacy-Guide,drduh/macOS-Security-and-Privacy-Guide,drduh/OS-X-Security-and-Privacy-Guide,DeadLion/macOS-Security-and-Privacy-Guide
|
Add script for reading plists.
|
#!/usr/bin/env python
#
# This script reads system launch daemons and agents.
#
# Python 3.4 is required to read binary plists, or convert them first with,
# find /System/Library/Launch* -type f -exec sudo plutil -convert xml1 {} \;
import glob
import os
import plistlib
header ='filename,label,program,runatload,comment'
location = '/System/Library/Launch%s/*.plist'
def LoadPlist(filename):
"""Plists can be read with plistlib."""
try:
return plistlib.readPlist(filename)
except:
print('python3.4 is required to read binary plist %s, skipping' % filename)
return None
def GetStatus(plist):
"""Plists may have a RunAtLoad key."""
try:
return plist['RunAtLoad']
except KeyError:
return 'False'
def GetLabel(plist):
"""Plists have a label."""
return plist['Label']
def GetProgram(plist):
"""Plists have either a Program or ProgramArguments key,
if the executable requires command line options.
"""
try:
return plist['Program']
except KeyError:
return plist['ProgramArguments']
def main():
"""Main function."""
print(header)
for kind in ['Daemons', 'Agents']:
for filename in glob.glob(location % kind):
p = LoadPlist(filename)
if p:
e = (filename, GetLabel(p), '"%s"' % GetProgram(p), GetStatus(p))
print('%s,%s,%s,%s,' % (e))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for reading plists.<commit_after>
|
#!/usr/bin/env python
#
# This script reads system launch daemons and agents.
#
# Python 3.4 is required to read binary plists, or convert them first with,
# find /System/Library/Launch* -type f -exec sudo plutil -convert xml1 {} \;
import glob
import os
import plistlib
header ='filename,label,program,runatload,comment'
location = '/System/Library/Launch%s/*.plist'
def LoadPlist(filename):
"""Plists can be read with plistlib."""
try:
return plistlib.readPlist(filename)
except:
print('python3.4 is required to read binary plist %s, skipping' % filename)
return None
def GetStatus(plist):
"""Plists may have a RunAtLoad key."""
try:
return plist['RunAtLoad']
except KeyError:
return 'False'
def GetLabel(plist):
"""Plists have a label."""
return plist['Label']
def GetProgram(plist):
"""Plists have either a Program or ProgramArguments key,
if the executable requires command line options.
"""
try:
return plist['Program']
except KeyError:
return plist['ProgramArguments']
def main():
"""Main function."""
print(header)
for kind in ['Daemons', 'Agents']:
for filename in glob.glob(location % kind):
p = LoadPlist(filename)
if p:
e = (filename, GetLabel(p), '"%s"' % GetProgram(p), GetStatus(p))
print('%s,%s,%s,%s,' % (e))
if __name__ == '__main__':
main()
|
Add script for reading plists.#!/usr/bin/env python
#
# This script reads system launch daemons and agents.
#
# Python 3.4 is required to read binary plists, or convert them first with,
# find /System/Library/Launch* -type f -exec sudo plutil -convert xml1 {} \;
import glob
import os
import plistlib
header ='filename,label,program,runatload,comment'
location = '/System/Library/Launch%s/*.plist'
def LoadPlist(filename):
"""Plists can be read with plistlib."""
try:
return plistlib.readPlist(filename)
except:
print('python3.4 is required to read binary plist %s, skipping' % filename)
return None
def GetStatus(plist):
"""Plists may have a RunAtLoad key."""
try:
return plist['RunAtLoad']
except KeyError:
return 'False'
def GetLabel(plist):
"""Plists have a label."""
return plist['Label']
def GetProgram(plist):
"""Plists have either a Program or ProgramArguments key,
if the executable requires command line options.
"""
try:
return plist['Program']
except KeyError:
return plist['ProgramArguments']
def main():
"""Main function."""
print(header)
for kind in ['Daemons', 'Agents']:
for filename in glob.glob(location % kind):
p = LoadPlist(filename)
if p:
e = (filename, GetLabel(p), '"%s"' % GetProgram(p), GetStatus(p))
print('%s,%s,%s,%s,' % (e))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for reading plists.<commit_after>#!/usr/bin/env python
#
# This script reads system launch daemons and agents.
#
# Python 3.4 is required to read binary plists, or convert them first with,
# find /System/Library/Launch* -type f -exec sudo plutil -convert xml1 {} \;
import glob
import os
import plistlib
header ='filename,label,program,runatload,comment'
location = '/System/Library/Launch%s/*.plist'
def LoadPlist(filename):
"""Plists can be read with plistlib."""
try:
return plistlib.readPlist(filename)
except:
print('python3.4 is required to read binary plist %s, skipping' % filename)
return None
def GetStatus(plist):
"""Plists may have a RunAtLoad key."""
try:
return plist['RunAtLoad']
except KeyError:
return 'False'
def GetLabel(plist):
"""Plists have a label."""
return plist['Label']
def GetProgram(plist):
"""Plists have either a Program or ProgramArguments key,
if the executable requires command line options.
"""
try:
return plist['Program']
except KeyError:
return plist['ProgramArguments']
def main():
"""Main function."""
print(header)
for kind in ['Daemons', 'Agents']:
for filename in glob.glob(location % kind):
p = LoadPlist(filename)
if p:
e = (filename, GetLabel(p), '"%s"' % GetProgram(p), GetStatus(p))
print('%s,%s,%s,%s,' % (e))
if __name__ == '__main__':
main()
|
|
8a43d4d603a3bbad0c2f368c6c1d327a6c09b793
|
src/python/gcld3_test.py
|
src/python/gcld3_test.py
|
"""Tests for gcld3."""
import gcld3
import unittest
class NnetLanguageIdentifierTest(unittest.TestCase):
def testLangIdentification(self):
detector = gcld3.NNetLanguageIdentifier(min_num_bytes=0, max_num_bytes=1000)
sample = "This text is written in English."
result = detector.FindLanguage(text=sample)
self.assertEqual(result.language, "en")
self.assertTrue(result.is_reliable)
self.assertGreater(result.proportion, 0.99)
self.assertGreater(result.probability, 0.90)
def testEmptyString(self):
detector = gcld3.NNetLanguageIdentifier(
min_num_bytes=10, max_num_bytes=1000)
sample = ""
result = detector.FindLanguage(text=sample)
self.assertEqual(result.language, "und")
self.assertFalse(result.is_reliable)
self.assertEqual(result.proportion, 0.0)
self.assertEqual(result.probability, 0.00)
def testLangsIdentification(self):
detector = gcld3.NNetLanguageIdentifier(min_num_bytes=0, max_num_bytes=1000)
sample = ("This piece of text is in English. Този текст е на " "Български.")
results = detector.FindTopNMostFreqLangs(text=sample, num_langs=2)
self.assertEqual(results[0].language, "bg")
self.assertTrue(results[0].is_reliable)
self.assertLess(results[0].proportion, 0.75)
self.assertGreater(results[0].probability, 0.90)
self.assertEqual(results[1].language, "en")
self.assertTrue(results[1].is_reliable)
self.assertLess(results[1].proportion, 0.75)
self.assertGreater(results[1].probability, 0.90)
if __name__ == "__main__":
unittest.main()
|
Add a python unit test
|
Add a python unit test
|
Python
|
apache-2.0
|
google/cld3,google/cld3
|
Add a python unit test
|
"""Tests for gcld3."""
import gcld3
import unittest
class NnetLanguageIdentifierTest(unittest.TestCase):
def testLangIdentification(self):
detector = gcld3.NNetLanguageIdentifier(min_num_bytes=0, max_num_bytes=1000)
sample = "This text is written in English."
result = detector.FindLanguage(text=sample)
self.assertEqual(result.language, "en")
self.assertTrue(result.is_reliable)
self.assertGreater(result.proportion, 0.99)
self.assertGreater(result.probability, 0.90)
def testEmptyString(self):
detector = gcld3.NNetLanguageIdentifier(
min_num_bytes=10, max_num_bytes=1000)
sample = ""
result = detector.FindLanguage(text=sample)
self.assertEqual(result.language, "und")
self.assertFalse(result.is_reliable)
self.assertEqual(result.proportion, 0.0)
self.assertEqual(result.probability, 0.00)
def testLangsIdentification(self):
detector = gcld3.NNetLanguageIdentifier(min_num_bytes=0, max_num_bytes=1000)
sample = ("This piece of text is in English. Този текст е на " "Български.")
results = detector.FindTopNMostFreqLangs(text=sample, num_langs=2)
self.assertEqual(results[0].language, "bg")
self.assertTrue(results[0].is_reliable)
self.assertLess(results[0].proportion, 0.75)
self.assertGreater(results[0].probability, 0.90)
self.assertEqual(results[1].language, "en")
self.assertTrue(results[1].is_reliable)
self.assertLess(results[1].proportion, 0.75)
self.assertGreater(results[1].probability, 0.90)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add a python unit test<commit_after>
|
"""Tests for gcld3."""
import gcld3
import unittest
class NnetLanguageIdentifierTest(unittest.TestCase):
def testLangIdentification(self):
detector = gcld3.NNetLanguageIdentifier(min_num_bytes=0, max_num_bytes=1000)
sample = "This text is written in English."
result = detector.FindLanguage(text=sample)
self.assertEqual(result.language, "en")
self.assertTrue(result.is_reliable)
self.assertGreater(result.proportion, 0.99)
self.assertGreater(result.probability, 0.90)
def testEmptyString(self):
detector = gcld3.NNetLanguageIdentifier(
min_num_bytes=10, max_num_bytes=1000)
sample = ""
result = detector.FindLanguage(text=sample)
self.assertEqual(result.language, "und")
self.assertFalse(result.is_reliable)
self.assertEqual(result.proportion, 0.0)
self.assertEqual(result.probability, 0.00)
def testLangsIdentification(self):
detector = gcld3.NNetLanguageIdentifier(min_num_bytes=0, max_num_bytes=1000)
sample = ("This piece of text is in English. Този текст е на " "Български.")
results = detector.FindTopNMostFreqLangs(text=sample, num_langs=2)
self.assertEqual(results[0].language, "bg")
self.assertTrue(results[0].is_reliable)
self.assertLess(results[0].proportion, 0.75)
self.assertGreater(results[0].probability, 0.90)
self.assertEqual(results[1].language, "en")
self.assertTrue(results[1].is_reliable)
self.assertLess(results[1].proportion, 0.75)
self.assertGreater(results[1].probability, 0.90)
if __name__ == "__main__":
unittest.main()
|
Add a python unit test"""Tests for gcld3."""
import gcld3
import unittest
class NnetLanguageIdentifierTest(unittest.TestCase):
def testLangIdentification(self):
detector = gcld3.NNetLanguageIdentifier(min_num_bytes=0, max_num_bytes=1000)
sample = "This text is written in English."
result = detector.FindLanguage(text=sample)
self.assertEqual(result.language, "en")
self.assertTrue(result.is_reliable)
self.assertGreater(result.proportion, 0.99)
self.assertGreater(result.probability, 0.90)
def testEmptyString(self):
detector = gcld3.NNetLanguageIdentifier(
min_num_bytes=10, max_num_bytes=1000)
sample = ""
result = detector.FindLanguage(text=sample)
self.assertEqual(result.language, "und")
self.assertFalse(result.is_reliable)
self.assertEqual(result.proportion, 0.0)
self.assertEqual(result.probability, 0.00)
def testLangsIdentification(self):
detector = gcld3.NNetLanguageIdentifier(min_num_bytes=0, max_num_bytes=1000)
sample = ("This piece of text is in English. Този текст е на " "Български.")
results = detector.FindTopNMostFreqLangs(text=sample, num_langs=2)
self.assertEqual(results[0].language, "bg")
self.assertTrue(results[0].is_reliable)
self.assertLess(results[0].proportion, 0.75)
self.assertGreater(results[0].probability, 0.90)
self.assertEqual(results[1].language, "en")
self.assertTrue(results[1].is_reliable)
self.assertLess(results[1].proportion, 0.75)
self.assertGreater(results[1].probability, 0.90)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add a python unit test<commit_after>"""Tests for gcld3."""
import gcld3
import unittest
class NnetLanguageIdentifierTest(unittest.TestCase):
def testLangIdentification(self):
detector = gcld3.NNetLanguageIdentifier(min_num_bytes=0, max_num_bytes=1000)
sample = "This text is written in English."
result = detector.FindLanguage(text=sample)
self.assertEqual(result.language, "en")
self.assertTrue(result.is_reliable)
self.assertGreater(result.proportion, 0.99)
self.assertGreater(result.probability, 0.90)
def testEmptyString(self):
detector = gcld3.NNetLanguageIdentifier(
min_num_bytes=10, max_num_bytes=1000)
sample = ""
result = detector.FindLanguage(text=sample)
self.assertEqual(result.language, "und")
self.assertFalse(result.is_reliable)
self.assertEqual(result.proportion, 0.0)
self.assertEqual(result.probability, 0.00)
def testLangsIdentification(self):
detector = gcld3.NNetLanguageIdentifier(min_num_bytes=0, max_num_bytes=1000)
sample = ("This piece of text is in English. Този текст е на " "Български.")
results = detector.FindTopNMostFreqLangs(text=sample, num_langs=2)
self.assertEqual(results[0].language, "bg")
self.assertTrue(results[0].is_reliable)
self.assertLess(results[0].proportion, 0.75)
self.assertGreater(results[0].probability, 0.90)
self.assertEqual(results[1].language, "en")
self.assertTrue(results[1].is_reliable)
self.assertLess(results[1].proportion, 0.75)
self.assertGreater(results[1].probability, 0.90)
if __name__ == "__main__":
unittest.main()
|