Column schema (lengths and class counts as reported by the dataset viewer):

| column | type | length range / distinct values |
|---|---|---|
| commit | stringlengths | 40–40 |
| old_file | stringlengths | 4–118 |
| new_file | stringlengths | 4–118 |
| old_contents | stringlengths | 0–2.94k |
| new_contents | stringlengths | 1–4.43k |
| subject | stringlengths | 15–444 |
| message | stringlengths | 16–3.45k |
| lang | stringclasses | 1 value |
| license | stringclasses | 13 values |
| repos | stringlengths | 5–43.2k |
| prompt | stringlengths | 17–4.58k |
| response | stringlengths | 1–4.43k |
| prompt_tagged | stringlengths | 58–4.62k |
| response_tagged | stringlengths | 1–4.43k |
| text | stringlengths | 132–7.29k |
| text_tagged | stringlengths | 173–7.33k |
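Each row below is one commit record with the columns above. A minimal loading sketch using the `datasets` library; the dataset identifier here is a placeholder, not the real repository name:

```python
# Minimal loading sketch; "user/commit-dataset" is a placeholder identifier.
from datasets import load_dataset

ds = load_dataset("user/commit-dataset", split="train")
row = ds[0]
print(row["subject"])       # commit subject line
print(row["new_contents"])  # contents of the added/changed file
```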
6329cb17fdd0c2e0df8c0fe057972169f97c737f
|
pretty_json.py
|
pretty_json.py
|
#!/usr/bin/env python
import json, sys
def main():
nargs = len(sys.argv)
if nargs == 1:
f = sys.stdin
elif nargs == 2:
f = open(sys.argv[1], 'r')
else:
print('Usage: %s file' % sys.argv[0])
return
json.dump(json.load(f), sys.stdout, indent=2)
if __name__ == '__main__':
main()
|
Add script to reformat json files for humans
|
Add script to reformat json files for humans
I'm often working with json data that is omits newlines and indentation
for consumption by a machine, but I often need to make sense of it
myself as well. This script is a small wrapper around the Python parser
to reformat the json data with newlines and indentation.
|
Python
|
mit
|
DarkStarSword/junk,DarkStarSword/junk,DarkStarSword/junk,DarkStarSword/junk,DarkStarSword/junk
|
Add script to reformat json files for humans
I'm often working with json data that is omits newlines and indentation
for consumption by a machine, but I often need to make sense of it
myself as well. This script is a small wrapper around the Python parser
to reformat the json data with newlines and indentation.
|
#!/usr/bin/env python
import json, sys
def main():
nargs = len(sys.argv)
if nargs == 1:
f = sys.stdin
elif nargs == 2:
f = open(sys.argv[1], 'r')
else:
print('Usage: %s file' % sys.argv[0])
return
json.dump(json.load(f), sys.stdout, indent=2)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to reformat json files for humans
I'm often working with json data that is omits newlines and indentation
for consumption by a machine, but I often need to make sense of it
myself as well. This script is a small wrapper around the Python parser
to reformat the json data with newlines and indentation.<commit_after>
|
#!/usr/bin/env python
import json, sys
def main():
nargs = len(sys.argv)
if nargs == 1:
f = sys.stdin
elif nargs == 2:
f = open(sys.argv[1], 'r')
else:
print('Usage: %s file' % sys.argv[0])
return
json.dump(json.load(f), sys.stdout, indent=2)
if __name__ == '__main__':
main()
|
Add script to reformat json files for humans
I'm often working with json data that is omits newlines and indentation
for consumption by a machine, but I often need to make sense of it
myself as well. This script is a small wrapper around the Python parser
to reformat the json data with newlines and indentation.#!/usr/bin/env python
import json, sys
def main():
nargs = len(sys.argv)
if nargs == 1:
f = sys.stdin
elif nargs == 2:
f = open(sys.argv[1], 'r')
else:
print('Usage: %s file' % sys.argv[0])
return
json.dump(json.load(f), sys.stdout, indent=2)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to reformat json files for humans
I'm often working with json data that is omits newlines and indentation
for consumption by a machine, but I often need to make sense of it
myself as well. This script is a small wrapper around the Python parser
to reformat the json data with newlines and indentation.<commit_after>#!/usr/bin/env python
import json, sys
def main():
nargs = len(sys.argv)
if nargs == 1:
f = sys.stdin
elif nargs == 2:
f = open(sys.argv[1], 'r')
else:
print('Usage: %s file' % sys.argv[0])
return
json.dump(json.load(f), sys.stdout, indent=2)
if __name__ == '__main__':
main()
|
|
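A minimal standard-library sketch of what the pretty_json.py record above does, without the file/stdin handling:

```python
# Re-indent compact JSON for human readers, as the script above does.
import json
import sys

compact = '{"name":"example","values":[1,2,3]}'
json.dump(json.loads(compact), sys.stdout, indent=2)
```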
5f750b969928c0644712b06eea6d10f6860f567a
|
oedb_datamodels/versions/1c6e2fb3d3b6_popularity_tracking.py
|
oedb_datamodels/versions/1c6e2fb3d3b6_popularity_tracking.py
|
"""Add columns to track popularity of tags
Revision ID: 1c6e2fb3d3b6
Revises: 6887c442bbee
Create Date: 2019-04-29 11:30:45.528110
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1c6e2fb3d3b6'
down_revision = '6887c442bbee'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('tags', sa.Column('usage_count', sa.BigInteger(), server_default='0', nullable=True))
op.add_column('tags', sa.Column('usage_tracked_since', sa.DateTime(), server_default=sa.text('now()'), nullable=True))
def downgrade():
op.drop_column('tags', 'usage_tracked_since')
op.drop_column('tags', 'usage_count')
|
Add tag popularity to alembic
|
Add tag popularity to alembic
|
Python
|
agpl-3.0
|
openego/oeplatform,openego/oeplatform,openego/oeplatform,openego/oeplatform
|
Add tag popularity to alembic
|
"""Add columns to track popularity of tags
Revision ID: 1c6e2fb3d3b6
Revises: 6887c442bbee
Create Date: 2019-04-29 11:30:45.528110
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1c6e2fb3d3b6'
down_revision = '6887c442bbee'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('tags', sa.Column('usage_count', sa.BigInteger(), server_default='0', nullable=True))
op.add_column('tags', sa.Column('usage_tracked_since', sa.DateTime(), server_default=sa.text('now()'), nullable=True))
def downgrade():
op.drop_column('tags', 'usage_tracked_since')
op.drop_column('tags', 'usage_count')
|
<commit_before><commit_msg>Add tag popularity to alembic<commit_after>
|
"""Add columns to track popularity of tags
Revision ID: 1c6e2fb3d3b6
Revises: 6887c442bbee
Create Date: 2019-04-29 11:30:45.528110
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1c6e2fb3d3b6'
down_revision = '6887c442bbee'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('tags', sa.Column('usage_count', sa.BigInteger(), server_default='0', nullable=True))
op.add_column('tags', sa.Column('usage_tracked_since', sa.DateTime(), server_default=sa.text('now()'), nullable=True))
def downgrade():
op.drop_column('tags', 'usage_tracked_since')
op.drop_column('tags', 'usage_count')
|
Add tag popularity to alembic"""Add columns to track popularity of tags
Revision ID: 1c6e2fb3d3b6
Revises: 6887c442bbee
Create Date: 2019-04-29 11:30:45.528110
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1c6e2fb3d3b6'
down_revision = '6887c442bbee'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('tags', sa.Column('usage_count', sa.BigInteger(), server_default='0', nullable=True))
op.add_column('tags', sa.Column('usage_tracked_since', sa.DateTime(), server_default=sa.text('now()'), nullable=True))
def downgrade():
op.drop_column('tags', 'usage_tracked_since')
op.drop_column('tags', 'usage_count')
|
<commit_before><commit_msg>Add tag popularity to alembic<commit_after>"""Add columns to track popularity of tags
Revision ID: 1c6e2fb3d3b6
Revises: 6887c442bbee
Create Date: 2019-04-29 11:30:45.528110
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1c6e2fb3d3b6'
down_revision = '6887c442bbee'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('tags', sa.Column('usage_count', sa.BigInteger(), server_default='0', nullable=True))
op.add_column('tags', sa.Column('usage_tracked_since', sa.DateTime(), server_default=sa.text('now()'), nullable=True))
def downgrade():
op.drop_column('tags', 'usage_tracked_since')
op.drop_column('tags', 'usage_count')
|
|
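A rough sketch of the `tags` columns implied by the migration above; only the two column names and defaults come from the record, the table definition itself is illustrative:

```python
# Illustrative SQLAlchemy table definition matching upgrade() above.
import sqlalchemy as sa

metadata = sa.MetaData()
tags = sa.Table(
    'tags', metadata,
    sa.Column('usage_count', sa.BigInteger(), server_default='0', nullable=True),
    sa.Column('usage_tracked_since', sa.DateTime(), server_default=sa.text('now()'), nullable=True),
)
```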
dc546e01bab2f338fff40d38a529d515657ae725
|
dot_ipython/private_profile_default/private_startup/private_00-pandas.py
|
dot_ipython/private_profile_default/private_startup/private_00-pandas.py
|
import sys
import pandas as pd
pd.options.display.max_columns = 100
pd.options.display.min_rows = 20
pd.options.display.width = None if sys.stdout.isatty() else sys.maxsize
|
Add ipython pandas startup script.
|
Add ipython pandas startup script.
|
Python
|
mit
|
Li9htmare/dotfiles
|
Add ipython pandas startup script.
|
import sys
import pandas as pd
pd.options.display.max_columns = 100
pd.options.display.min_rows = 20
pd.options.display.width = None if sys.stdout.isatty() else sys.maxsize
|
<commit_before><commit_msg>Add ipython pandas startup script.<commit_after>
|
import sys
import pandas as pd
pd.options.display.max_columns = 100
pd.options.display.min_rows = 20
pd.options.display.width = None if sys.stdout.isatty() else sys.maxsize
|
Add ipython pandas startup script.import sys
import pandas as pd
pd.options.display.max_columns = 100
pd.options.display.min_rows = 20
pd.options.display.width = None if sys.stdout.isatty() else sys.maxsize
|
<commit_before><commit_msg>Add ipython pandas startup script.<commit_after>import sys
import pandas as pd
pd.options.display.max_columns = 100
pd.options.display.min_rows = 20
pd.options.display.width = None if sys.stdout.isatty() else sys.maxsize
|
|
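A short sketch (pandas assumed installed) of what the startup options above change: raising max_columns keeps wide frames from being truncated in their repr:

```python
import pandas as pd

pd.options.display.max_columns = 100  # as in the startup script above
df = pd.DataFrame({"col%d" % i: range(3) for i in range(30)})
print(df)  # all 30 columns are shown instead of being elided with "..."
```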
d78e84fdcb977e856406e47adcfbeccffad020dc
|
mysite/extra_translations.py
|
mysite/extra_translations.py
|
# This module exists just to list strings for translation to be picked
# up by makemessages.
from django.utils.translation import ugettext as _
# Labels for the extra fields which are defined in the database.
# Costa Rica:
_('Profession')
_('Important Roles')
_('Standing for re-election')
|
Add some strings for translation to be picked up by makemessages
|
Add some strings for translation to be picked up by makemessages
|
Python
|
agpl-3.0
|
neavouli/yournextrepresentative,DemocracyClub/yournextrepresentative,neavouli/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextrepresentative,mysociety/yournextmp-popit,DemocracyClub/yournextrepresentative,neavouli/yournextrepresentative,mysociety/yournextrepresentative,DemocracyClub/yournextrepresentative,neavouli/yournextrepresentative,neavouli/yournextrepresentative
|
Add some strings for translation to be picked up by makemessages
|
# This module exists just to list strings for translation to be picked
# up by makemessages.
from django.utils.translation import ugettext as _
# Labels for the extra fields which are defined in the database.
# Costa Rica:
_('Profession')
_('Important Roles')
_('Standing for re-election')
|
<commit_before><commit_msg>Add some strings for translation to be picked up by makemessages<commit_after>
|
# This module exists just to list strings for translation to be picked
# up by makemessages.
from django.utils.translation import ugettext as _
# Labels for the extra fields which are defined in the database.
# Costa Rica:
_('Profession')
_('Important Roles')
_('Standing for re-election')
|
Add some strings for translation to be picked up by makemessages# This module exists just to list strings for translation to be picked
# up by makemessages.
from django.utils.translation import ugettext as _
# Labels for the extra fields which are defined in the database.
# Costa Rica:
_('Profession')
_('Important Roles')
_('Standing for re-election')
|
<commit_before><commit_msg>Add some strings for translation to be picked up by makemessages<commit_after># This module exists just to list strings for translation to be picked
# up by makemessages.
from django.utils.translation import ugettext as _
# Labels for the extra fields which are defined in the database.
# Costa Rica:
_('Profession')
_('Important Roles')
_('Standing for re-election')
|
|
b078084f99a5e7afeee80cfd8b370ed18ef88060
|
corehq/apps/reminders/management/commands/find_reminder_usage.py
|
corehq/apps/reminders/management/commands/find_reminder_usage.py
|
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import defaultdict
from corehq.apps.reminders.models import CaseReminderHandler, REMINDER_TYPE_KEYWORD_INITIATED
from django.core.management.base import BaseCommand
class DomainResult(object):
def __init__(self):
self.num_active = 0
self.num_inactive = 0
self.types_and_counts = defaultdict(lambda: 0)
class Command(BaseCommand):
def handle(self, **options):
handlers = CaseReminderHandler.view(
'reminders/handlers_by_domain_case_type',
include_docs=True
).all()
result = defaultdict(lambda: DomainResult())
for handler in handlers:
if handler.reminder_type != REMINDER_TYPE_KEYWORD_INITIATED:
if handler.active:
result[handler.domain].num_active += 1
else:
result[handler.domain].num_inactive += 1
result[handler.domain].types_and_counts[handler.reminder_type] += 1
# Sort by num_active and then domain
sorted_result = sorted(
result.items(),
key=lambda two_tuple: (two_tuple[1].num_active, two_tuple[0])
)
with open('reminder_status.log', 'w') as f:
for domain, result in sorted_result:
f.write('{}\t{}\t{}\n'.format(domain, result.num_active, result.num_inactive))
for reminder_type, count in result.types_and_counts.items():
f.write('\t{}\t{}\n'.format(reminder_type, count))
|
Add script to find old reminder framework usage
|
Add script to find old reminder framework usage
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add script to find old reminder framework usage
|
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import defaultdict
from corehq.apps.reminders.models import CaseReminderHandler, REMINDER_TYPE_KEYWORD_INITIATED
from django.core.management.base import BaseCommand
class DomainResult(object):
def __init__(self):
self.num_active = 0
self.num_inactive = 0
self.types_and_counts = defaultdict(lambda: 0)
class Command(BaseCommand):
def handle(self, **options):
handlers = CaseReminderHandler.view(
'reminders/handlers_by_domain_case_type',
include_docs=True
).all()
result = defaultdict(lambda: DomainResult())
for handler in handlers:
if handler.reminder_type != REMINDER_TYPE_KEYWORD_INITIATED:
if handler.active:
result[handler.domain].num_active += 1
else:
result[handler.domain].num_inactive += 1
result[handler.domain].types_and_counts[handler.reminder_type] += 1
# Sort by num_active and then domain
sorted_result = sorted(
result.items(),
key=lambda two_tuple: (two_tuple[1].num_active, two_tuple[0])
)
with open('reminder_status.log', 'w') as f:
for domain, result in sorted_result:
f.write('{}\t{}\t{}\n'.format(domain, result.num_active, result.num_inactive))
for reminder_type, count in result.types_and_counts.items():
f.write('\t{}\t{}\n'.format(reminder_type, count))
|
<commit_before><commit_msg>Add script to find old reminder framework usage<commit_after>
|
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import defaultdict
from corehq.apps.reminders.models import CaseReminderHandler, REMINDER_TYPE_KEYWORD_INITIATED
from django.core.management.base import BaseCommand
class DomainResult(object):
def __init__(self):
self.num_active = 0
self.num_inactive = 0
self.types_and_counts = defaultdict(lambda: 0)
class Command(BaseCommand):
def handle(self, **options):
handlers = CaseReminderHandler.view(
'reminders/handlers_by_domain_case_type',
include_docs=True
).all()
result = defaultdict(lambda: DomainResult())
for handler in handlers:
if handler.reminder_type != REMINDER_TYPE_KEYWORD_INITIATED:
if handler.active:
result[handler.domain].num_active += 1
else:
result[handler.domain].num_inactive += 1
result[handler.domain].types_and_counts[handler.reminder_type] += 1
# Sort by num_active and then domain
sorted_result = sorted(
result.items(),
key=lambda two_tuple: (two_tuple[1].num_active, two_tuple[0])
)
with open('reminder_status.log', 'w') as f:
for domain, result in sorted_result:
f.write('{}\t{}\t{}\n'.format(domain, result.num_active, result.num_inactive))
for reminder_type, count in result.types_and_counts.items():
f.write('\t{}\t{}\n'.format(reminder_type, count))
|
Add script to find old reminder framework usagefrom __future__ import absolute_import
from __future__ import unicode_literals
from collections import defaultdict
from corehq.apps.reminders.models import CaseReminderHandler, REMINDER_TYPE_KEYWORD_INITIATED
from django.core.management.base import BaseCommand
class DomainResult(object):
def __init__(self):
self.num_active = 0
self.num_inactive = 0
self.types_and_counts = defaultdict(lambda: 0)
class Command(BaseCommand):
def handle(self, **options):
handlers = CaseReminderHandler.view(
'reminders/handlers_by_domain_case_type',
include_docs=True
).all()
result = defaultdict(lambda: DomainResult())
for handler in handlers:
if handler.reminder_type != REMINDER_TYPE_KEYWORD_INITIATED:
if handler.active:
result[handler.domain].num_active += 1
else:
result[handler.domain].num_inactive += 1
result[handler.domain].types_and_counts[handler.reminder_type] += 1
# Sort by num_active and then domain
sorted_result = sorted(
result.items(),
key=lambda two_tuple: (two_tuple[1].num_active, two_tuple[0])
)
with open('reminder_status.log', 'w') as f:
for domain, result in sorted_result:
f.write('{}\t{}\t{}\n'.format(domain, result.num_active, result.num_inactive))
for reminder_type, count in result.types_and_counts.items():
f.write('\t{}\t{}\n'.format(reminder_type, count))
|
<commit_before><commit_msg>Add script to find old reminder framework usage<commit_after>from __future__ import absolute_import
from __future__ import unicode_literals
from collections import defaultdict
from corehq.apps.reminders.models import CaseReminderHandler, REMINDER_TYPE_KEYWORD_INITIATED
from django.core.management.base import BaseCommand
class DomainResult(object):
def __init__(self):
self.num_active = 0
self.num_inactive = 0
self.types_and_counts = defaultdict(lambda: 0)
class Command(BaseCommand):
def handle(self, **options):
handlers = CaseReminderHandler.view(
'reminders/handlers_by_domain_case_type',
include_docs=True
).all()
result = defaultdict(lambda: DomainResult())
for handler in handlers:
if handler.reminder_type != REMINDER_TYPE_KEYWORD_INITIATED:
if handler.active:
result[handler.domain].num_active += 1
else:
result[handler.domain].num_inactive += 1
result[handler.domain].types_and_counts[handler.reminder_type] += 1
# Sort by num_active and then domain
sorted_result = sorted(
result.items(),
key=lambda two_tuple: (two_tuple[1].num_active, two_tuple[0])
)
with open('reminder_status.log', 'w') as f:
for domain, result in sorted_result:
f.write('{}\t{}\t{}\n'.format(domain, result.num_active, result.num_inactive))
for reminder_type, count in result.types_and_counts.items():
f.write('\t{}\t{}\n'.format(reminder_type, count))
|
|
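A framework-free sketch of the grouping and sorting pattern used by the management command above; the handler data here is made up:

```python
from collections import defaultdict

class DomainResult(object):
    def __init__(self):
        self.num_active = 0
        self.num_inactive = 0

# (domain, active) pairs standing in for CaseReminderHandler objects
handlers = [('alpha', True), ('alpha', False), ('beta', True)]
result = defaultdict(DomainResult)
for domain, active in handlers:
    if active:
        result[domain].num_active += 1
    else:
        result[domain].num_inactive += 1

# Sort by num_active and then domain, as in the command above
for domain, res in sorted(result.items(), key=lambda kv: (kv[1].num_active, kv[0])):
    print(domain, res.num_active, res.num_inactive)
```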
bfbfa9179fe58e90b8edd9ce138a99d5cac993cc
|
test/api/test_build.py
|
test/api/test_build.py
|
import mock
from piper.api.build import Build
class TestBuildGet(object):
def setup_method(self, method):
self.build = Build()
self.build.db = mock.Mock()
self.build_id = 871263487612384761243 # ?
def test_found(self):
ret = self.build.get(self.build_id)
assert ret is self.build.db.get_build.return_value
self.build.db.get_build.assert_called_once_with(self.build_id)
def test_not_found(self):
self.build.db.get_build.return_value = None
ret, code = self.build.get(1)
assert ret == {}
assert code == 404
|
Add tests for Build() API
|
Add tests for Build() API
|
Python
|
mit
|
thiderman/piper
|
Add tests for Build() API
|
import mock
from piper.api.build import Build
class TestBuildGet(object):
def setup_method(self, method):
self.build = Build()
self.build.db = mock.Mock()
self.build_id = 871263487612384761243 # ?
def test_found(self):
ret = self.build.get(self.build_id)
assert ret is self.build.db.get_build.return_value
self.build.db.get_build.assert_called_once_with(self.build_id)
def test_not_found(self):
self.build.db.get_build.return_value = None
ret, code = self.build.get(1)
assert ret == {}
assert code == 404
|
<commit_before><commit_msg>Add tests for Build() API<commit_after>
|
import mock
from piper.api.build import Build
class TestBuildGet(object):
def setup_method(self, method):
self.build = Build()
self.build.db = mock.Mock()
self.build_id = 871263487612384761243 # ?
def test_found(self):
ret = self.build.get(self.build_id)
assert ret is self.build.db.get_build.return_value
self.build.db.get_build.assert_called_once_with(self.build_id)
def test_not_found(self):
self.build.db.get_build.return_value = None
ret, code = self.build.get(1)
assert ret == {}
assert code == 404
|
Add tests for Build() APIimport mock
from piper.api.build import Build
class TestBuildGet(object):
def setup_method(self, method):
self.build = Build()
self.build.db = mock.Mock()
self.build_id = 871263487612384761243 # ?
def test_found(self):
ret = self.build.get(self.build_id)
assert ret is self.build.db.get_build.return_value
self.build.db.get_build.assert_called_once_with(self.build_id)
def test_not_found(self):
self.build.db.get_build.return_value = None
ret, code = self.build.get(1)
assert ret == {}
assert code == 404
|
<commit_before><commit_msg>Add tests for Build() API<commit_after>import mock
from piper.api.build import Build
class TestBuildGet(object):
def setup_method(self, method):
self.build = Build()
self.build.db = mock.Mock()
self.build_id = 871263487612384761243 # ?
def test_found(self):
ret = self.build.get(self.build_id)
assert ret is self.build.db.get_build.return_value
self.build.db.get_build.assert_called_once_with(self.build_id)
def test_not_found(self):
self.build.db.get_build.return_value = None
ret, code = self.build.get(1)
assert ret == {}
assert code == 404
|
|
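A minimal sketch of the mocking pattern those tests rely on: the database layer is replaced with a `Mock`, and the return value plus call arguments are asserted:

```python
from unittest import mock

db = mock.Mock()
db.get_build.return_value = {'id': 1}

assert db.get_build(1) == {'id': 1}
db.get_build.assert_called_once_with(1)
```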
80270e691597b679d4975c807234bd3c4f50eaae
|
data-analysis/snippet_chart_area_at_each_time_step.py
|
data-analysis/snippet_chart_area_at_each_time_step.py
|
# Unneeded charting code that was removed from a jupyter notebook. Stored here as an example for later
# Draws a chart with the total area of multiple protin receptors at each time step
tmpdf = totals65.loc[(totals65['Receptor'].isin(['M1', 'M5', 'M7', 'M22', 'M26'])) &
(totals65['Experiment Step'] == '-nl-post')]
pal = ['black', 'blue', 'red', 'orange', 'green']
g = sns.FacetGrid(tmpdf, col='Time Point', col_wrap=3,
size=5, ylim=(0,100), xlim=(0,100),
palette=pal,
hue='Receptor',
hue_order=['M1', 'M5', 'M7', 'M22', 'M26'],
hue_kws=dict(marker=['^', 'v', '*', '+', 'x']))
g.map(plt.scatter, 'Total Number Scaled', 'Total Area Scaled')
g.add_legend()
g.savefig('mutants_by_time_nl-post.png')
|
Add snippet - chart total area at each time step
|
Add snippet - chart total area at each time step
|
Python
|
mit
|
daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various
|
Add snippet - chart total area at each time step
|
# Unneeded charting code that was removed from a jupyter notebook. Stored here as an example for later
# Draws a chart with the total area of multiple protin receptors at each time step
tmpdf = totals65.loc[(totals65['Receptor'].isin(['M1', 'M5', 'M7', 'M22', 'M26'])) &
(totals65['Experiment Step'] == '-nl-post')]
pal = ['black', 'blue', 'red', 'orange', 'green']
g = sns.FacetGrid(tmpdf, col='Time Point', col_wrap=3,
size=5, ylim=(0,100), xlim=(0,100),
palette=pal,
hue='Receptor',
hue_order=['M1', 'M5', 'M7', 'M22', 'M26'],
hue_kws=dict(marker=['^', 'v', '*', '+', 'x']))
g.map(plt.scatter, 'Total Number Scaled', 'Total Area Scaled')
g.add_legend()
g.savefig('mutants_by_time_nl-post.png')
|
<commit_before><commit_msg>Add snippet - chart total area at each time step<commit_after>
|
# Unneeded charting code that was removed from a jupyter notebook. Stored here as an example for later
# Draws a chart with the total area of multiple protin receptors at each time step
tmpdf = totals65.loc[(totals65['Receptor'].isin(['M1', 'M5', 'M7', 'M22', 'M26'])) &
(totals65['Experiment Step'] == '-nl-post')]
pal = ['black', 'blue', 'red', 'orange', 'green']
g = sns.FacetGrid(tmpdf, col='Time Point', col_wrap=3,
size=5, ylim=(0,100), xlim=(0,100),
palette=pal,
hue='Receptor',
hue_order=['M1', 'M5', 'M7', 'M22', 'M26'],
hue_kws=dict(marker=['^', 'v', '*', '+', 'x']))
g.map(plt.scatter, 'Total Number Scaled', 'Total Area Scaled')
g.add_legend()
g.savefig('mutants_by_time_nl-post.png')
|
Add snippet - chart total area at each time step# Unneeded charting code that was removed from a jupyter notebook. Stored here as an example for later
# Draws a chart with the total area of multiple protin receptors at each time step
tmpdf = totals65.loc[(totals65['Receptor'].isin(['M1', 'M5', 'M7', 'M22', 'M26'])) &
(totals65['Experiment Step'] == '-nl-post')]
pal = ['black', 'blue', 'red', 'orange', 'green']
g = sns.FacetGrid(tmpdf, col='Time Point', col_wrap=3,
size=5, ylim=(0,100), xlim=(0,100),
palette=pal,
hue='Receptor',
hue_order=['M1', 'M5', 'M7', 'M22', 'M26'],
hue_kws=dict(marker=['^', 'v', '*', '+', 'x']))
g.map(plt.scatter, 'Total Number Scaled', 'Total Area Scaled')
g.add_legend()
g.savefig('mutants_by_time_nl-post.png')
|
<commit_before><commit_msg>Add snippet - chart total area at each time step<commit_after># Unneeded charting code that was removed from a jupyter notebook. Stored here as an example for later
# Draws a chart with the total area of multiple protin receptors at each time step
tmpdf = totals65.loc[(totals65['Receptor'].isin(['M1', 'M5', 'M7', 'M22', 'M26'])) &
(totals65['Experiment Step'] == '-nl-post')]
pal = ['black', 'blue', 'red', 'orange', 'green']
g = sns.FacetGrid(tmpdf, col='Time Point', col_wrap=3,
size=5, ylim=(0,100), xlim=(0,100),
palette=pal,
hue='Receptor',
hue_order=['M1', 'M5', 'M7', 'M22', 'M26'],
hue_kws=dict(marker=['^', 'v', '*', '+', 'x']))
g.map(plt.scatter, 'Total Number Scaled', 'Total Area Scaled')
g.add_legend()
g.savefig('mutants_by_time_nl-post.png')
|
|
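A self-contained sketch of the FacetGrid pattern used in the snippet above, on made-up data (the original `totals65` frame is not part of the record):

```python
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.DataFrame({
    'Time Point': [1, 1, 2, 2],
    'Receptor': ['M1', 'M5', 'M1', 'M5'],
    'Total Number Scaled': [10, 20, 30, 40],
    'Total Area Scaled': [15, 25, 35, 45],
})
g = sns.FacetGrid(df, col='Time Point', hue='Receptor')  # one panel per time point
g.map(plt.scatter, 'Total Number Scaled', 'Total Area Scaled')
g.add_legend()
plt.show()
```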
0e6c2ec976be5381c8f679657afe8c02af28d9fa
|
FindingLaneLines/ColorSelector.py
|
FindingLaneLines/ColorSelector.py
|
# Modify the values of the variables red_threshold, green_threshold,
# and blue_threshold until you are able to retain as much of the lane
# lines as possible, while getting rid of most of the other stuff.
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
def ColorSelector():
# Read in the image and print out some stats
image = (mpimg.imread('test.png') * 255).astype('uint8')
print('This image is: ', type(image),
'with dimensions:', image.shape)
# Grab the x and y size and make a copy of the image
ysize = image.shape[0]
xsize = image.shape[1]
color_select = np.copy(image)
# Define color selection criteria
# MODIFY THESE VARIABLES TO MAKE YOUR COLOR SELECTION
red_threshold = 200
green_threshold = 200
blue_threshold = 200
rgb_threshold = [red_threshold, green_threshold, blue_threshold]
print('Esta es la variable rgb_threshold: ', rgb_threshold)
# Do a bitwise or with the "|" character to identify
# pixels below the thresholds
thresholds = (image[:, :, 0] < rgb_threshold[0]) \
| (image[:, :, 1] < rgb_threshold[1]) \
| (image[:, :, 2] < rgb_threshold[2])
print('Esta es la variable thresholds: ', thresholds)
color_select[thresholds] = [0, 0, 0]
# plt.imshow(color_select)
# Uncomment the following code if you are running the code
# locally and wish to save the image
mpimg.imsave("test-after.png", color_select)
# Display the image
plt.imshow(color_select)
plt.show()
if __name__ == '__main__':
ColorSelector()
|
Add script to modify the values of the variables red_threshold, green_threshold, and blue_threshold until you are able to retain as much of the lane lines as possible, while getting rid of most of the other stuff
|
feat: Add script to modify the values of the variables
red_threshold, green_threshold, and blue_threshold until you are able
to retain as much of the lane lines as possible, while getting rid
of most of the other stuff
|
Python
|
mit
|
aguijarro/SelfDrivingCar
|
feat: Add script to modify the values of the variables
red_threshold, green_threshold, and blue_threshold until you are able
to retain as much of the lane lines as possible, while getting rid
of most of the other stuff
|
# Modify the values of the variables red_threshold, green_threshold,
# and blue_threshold until you are able to retain as much of the lane
# lines as possible, while getting rid of most of the other stuff.
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
def ColorSelector():
# Read in the image and print out some stats
image = (mpimg.imread('test.png') * 255).astype('uint8')
print('This image is: ', type(image),
'with dimensions:', image.shape)
# Grab the x and y size and make a copy of the image
ysize = image.shape[0]
xsize = image.shape[1]
color_select = np.copy(image)
# Define color selection criteria
# MODIFY THESE VARIABLES TO MAKE YOUR COLOR SELECTION
red_threshold = 200
green_threshold = 200
blue_threshold = 200
rgb_threshold = [red_threshold, green_threshold, blue_threshold]
print('Esta es la variable rgb_threshold: ', rgb_threshold)
# Do a bitwise or with the "|" character to identify
# pixels below the thresholds
thresholds = (image[:, :, 0] < rgb_threshold[0]) \
| (image[:, :, 1] < rgb_threshold[1]) \
| (image[:, :, 2] < rgb_threshold[2])
print('Esta es la variable thresholds: ', thresholds)
color_select[thresholds] = [0, 0, 0]
# plt.imshow(color_select)
# Uncomment the following code if you are running the code
# locally and wish to save the image
mpimg.imsave("test-after.png", color_select)
# Display the image
plt.imshow(color_select)
plt.show()
if __name__ == '__main__':
ColorSelector()
|
<commit_before><commit_msg>feat: Add script to modify the values of the variables
red_threshold, green_threshold, and blue_threshold until you are able
to retain as much of the lane lines as possible, while getting rid
of most of the other stuff<commit_after>
|
# Modify the values of the variables red_threshold, green_threshold,
# and blue_threshold until you are able to retain as much of the lane
# lines as possible, while getting rid of most of the other stuff.
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
def ColorSelector():
# Read in the image and print out some stats
image = (mpimg.imread('test.png') * 255).astype('uint8')
print('This image is: ', type(image),
'with dimensions:', image.shape)
# Grab the x and y size and make a copy of the image
ysize = image.shape[0]
xsize = image.shape[1]
color_select = np.copy(image)
# Define color selection criteria
# MODIFY THESE VARIABLES TO MAKE YOUR COLOR SELECTION
red_threshold = 200
green_threshold = 200
blue_threshold = 200
rgb_threshold = [red_threshold, green_threshold, blue_threshold]
print('Esta es la variable rgb_threshold: ', rgb_threshold)
# Do a bitwise or with the "|" character to identify
# pixels below the thresholds
thresholds = (image[:, :, 0] < rgb_threshold[0]) \
| (image[:, :, 1] < rgb_threshold[1]) \
| (image[:, :, 2] < rgb_threshold[2])
print('Esta es la variable thresholds: ', thresholds)
color_select[thresholds] = [0, 0, 0]
# plt.imshow(color_select)
# Uncomment the following code if you are running the code
# locally and wish to save the image
mpimg.imsave("test-after.png", color_select)
# Display the image
plt.imshow(color_select)
plt.show()
if __name__ == '__main__':
ColorSelector()
|
feat: Add script to modify the values of the variables
red_threshold, green_threshold, and blue_threshold until you are able
to retain as much of the lane lines as possible, while getting rid
of most of the other stuff# Modify the values of the variables red_threshold, green_threshold,
# and blue_threshold until you are able to retain as much of the lane
# lines as possible, while getting rid of most of the other stuff.
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
def ColorSelector():
# Read in the image and print out some stats
image = (mpimg.imread('test.png') * 255).astype('uint8')
print('This image is: ', type(image),
'with dimensions:', image.shape)
# Grab the x and y size and make a copy of the image
ysize = image.shape[0]
xsize = image.shape[1]
color_select = np.copy(image)
# Define color selection criteria
# MODIFY THESE VARIABLES TO MAKE YOUR COLOR SELECTION
red_threshold = 200
green_threshold = 200
blue_threshold = 200
rgb_threshold = [red_threshold, green_threshold, blue_threshold]
print('Esta es la variable rgb_threshold: ', rgb_threshold)
# Do a bitwise or with the "|" character to identify
# pixels below the thresholds
thresholds = (image[:, :, 0] < rgb_threshold[0]) \
| (image[:, :, 1] < rgb_threshold[1]) \
| (image[:, :, 2] < rgb_threshold[2])
print('Esta es la variable thresholds: ', thresholds)
color_select[thresholds] = [0, 0, 0]
# plt.imshow(color_select)
# Uncomment the following code if you are running the code
# locally and wish to save the image
mpimg.imsave("test-after.png", color_select)
# Display the image
plt.imshow(color_select)
plt.show()
if __name__ == '__main__':
ColorSelector()
|
<commit_before><commit_msg>feat: Add script to modify the values of the variables
red_threshold, green_threshold, and blue_threshold until you are able
to retain as much of the lane lines as possible, while getting rid
of most of the other stuff<commit_after># Modify the values of the variables red_threshold, green_threshold,
# and blue_threshold until you are able to retain as much of the lane
# lines as possible, while getting rid of most of the other stuff.
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
def ColorSelector():
# Read in the image and print out some stats
image = (mpimg.imread('test.png') * 255).astype('uint8')
print('This image is: ', type(image),
'with dimensions:', image.shape)
# Grab the x and y size and make a copy of the image
ysize = image.shape[0]
xsize = image.shape[1]
color_select = np.copy(image)
# Define color selection criteria
# MODIFY THESE VARIABLES TO MAKE YOUR COLOR SELECTION
red_threshold = 200
green_threshold = 200
blue_threshold = 200
rgb_threshold = [red_threshold, green_threshold, blue_threshold]
print('Esta es la variable rgb_threshold: ', rgb_threshold)
# Do a bitwise or with the "|" character to identify
# pixels below the thresholds
thresholds = (image[:, :, 0] < rgb_threshold[0]) \
| (image[:, :, 1] < rgb_threshold[1]) \
| (image[:, :, 2] < rgb_threshold[2])
print('Esta es la variable thresholds: ', thresholds)
color_select[thresholds] = [0, 0, 0]
# plt.imshow(color_select)
# Uncomment the following code if you are running the code
# locally and wish to save the image
mpimg.imsave("test-after.png", color_select)
# Display the image
plt.imshow(color_select)
plt.show()
if __name__ == '__main__':
ColorSelector()
|
|
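A tiny self-contained sketch of the boolean-mask colour selection used above, run on a 2x2 synthetic image instead of test.png:

```python
import numpy as np

image = np.array([[[250, 250, 250], [10, 10, 10]],
                  [[220, 230, 240], [100, 255, 255]]], dtype='uint8')
rgb_threshold = [200, 200, 200]
color_select = np.copy(image)

# True wherever any channel falls below its threshold; those pixels are blacked out
thresholds = (image[:, :, 0] < rgb_threshold[0]) \
    | (image[:, :, 1] < rgb_threshold[1]) \
    | (image[:, :, 2] < rgb_threshold[2])
color_select[thresholds] = [0, 0, 0]
print(color_select)
```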
ce34a3dbaa824429b91af76ed5882ddffc2d3b2b
|
examples/happy_birthday.py
|
examples/happy_birthday.py
|
"""A basic (single function) API written using Hug"""
import hug
@hug.get('/happy_birthday')
def happy_birthday(name, age:hug.types.number, **kwargs):
"""Says happy birthday to a user"""
return "Happy {age} Birthday {name}!".format(**locals())
|
"""A basic (single function) API written using Hug"""
import hug
@hug.get('/happy_birthday', example="name=HUG&page=1")
def happy_birthday(name, age:hug.types.number, **kwargs):
"""Says happy birthday to a user"""
return "Happy {age} Birthday {name}!".format(**locals())
|
Add example argument, for direct url
|
Add example argument, for direct url
|
Python
|
mit
|
STANAPO/hug,origingod/hug,jean/hug,MuhammadAlkarouri/hug,philiptzou/hug,jean/hug,giserh/hug,STANAPO/hug,MuhammadAlkarouri/hug,gbn972/hug,yasoob/hug,timothycrosley/hug,philiptzou/hug,yasoob/hug,giserh/hug,timothycrosley/hug,MuhammadAlkarouri/hug,shaunstanislaus/hug,timothycrosley/hug,janusnic/hug,janusnic/hug,alisaifee/hug,shaunstanislaus/hug,alisaifee/hug,gbn972/hug,origingod/hug
|
"""A basic (single function) API written using Hug"""
import hug
@hug.get('/happy_birthday')
def happy_birthday(name, age:hug.types.number, **kwargs):
"""Says happy birthday to a user"""
return "Happy {age} Birthday {name}!".format(**locals())
Add example argument, for direct url
|
"""A basic (single function) API written using Hug"""
import hug
@hug.get('/happy_birthday', example="name=HUG&page=1")
def happy_birthday(name, age:hug.types.number, **kwargs):
"""Says happy birthday to a user"""
return "Happy {age} Birthday {name}!".format(**locals())
|
<commit_before>"""A basic (single function) API written using Hug"""
import hug
@hug.get('/happy_birthday')
def happy_birthday(name, age:hug.types.number, **kwargs):
"""Says happy birthday to a user"""
return "Happy {age} Birthday {name}!".format(**locals())
<commit_msg>Add example argument, for direct url<commit_after>
|
"""A basic (single function) API written using Hug"""
import hug
@hug.get('/happy_birthday', example="name=HUG&page=1")
def happy_birthday(name, age:hug.types.number, **kwargs):
"""Says happy birthday to a user"""
return "Happy {age} Birthday {name}!".format(**locals())
|
"""A basic (single function) API written using Hug"""
import hug
@hug.get('/happy_birthday')
def happy_birthday(name, age:hug.types.number, **kwargs):
"""Says happy birthday to a user"""
return "Happy {age} Birthday {name}!".format(**locals())
Add example argument, for direct url"""A basic (single function) API written using Hug"""
import hug
@hug.get('/happy_birthday', example="name=HUG&page=1")
def happy_birthday(name, age:hug.types.number, **kwargs):
"""Says happy birthday to a user"""
return "Happy {age} Birthday {name}!".format(**locals())
|
<commit_before>"""A basic (single function) API written using Hug"""
import hug
@hug.get('/happy_birthday')
def happy_birthday(name, age:hug.types.number, **kwargs):
"""Says happy birthday to a user"""
return "Happy {age} Birthday {name}!".format(**locals())
<commit_msg>Add example argument, for direct url<commit_after>"""A basic (single function) API written using Hug"""
import hug
@hug.get('/happy_birthday', example="name=HUG&page=1")
def happy_birthday(name, age:hug.types.number, **kwargs):
"""Says happy birthday to a user"""
return "Happy {age} Birthday {name}!".format(**locals())
|
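A sketch of exercising the endpoint above through hug's test client; the module name `happy_birthday` is a hypothetical import for the example file:

```python
import hug
import happy_birthday  # hypothetical module name for the example file above

result = hug.test.get(happy_birthday, 'happy_birthday', {'name': 'HUG', 'age': 1})
print(result.data)  # expected: "Happy 1 Birthday HUG!"
```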
744fffddc5655de6d7630e6c92111b6ce5af46f1
|
pombola/south_africa/management/commands/south_africa_sync_everypolitician_uuid.py
|
pombola/south_africa/management/commands/south_africa_sync_everypolitician_uuid.py
|
from everypolitician import EveryPolitician
from django.core.management.base import BaseCommand
from pombola.core.models import Person
class Command(BaseCommand):
help = "Sync EveryPolitician UUID to Person's identifiers array"
def add_arguments(self, parser):
parser.add_argument('everypolitician_countries_json_git_ref',
default='master', nargs='?',
help="A git ref from the everypolitician-data repo")
def handle(self, **options):
verbose_level = options['verbosity']
url_template = ('https://cdn.rawgit.com/everypolitician/everypolitician-data/'
'{git_ref}/countries.json')
url = url_template.format(git_ref=options['everypolitician_countries_json_git_ref'])
ep = EveryPolitician(countries_json_url=url)
south_africa_assembly = ep.country('South-Africa').legislature('Assembly').popolo()
id_lookup = {}
for popolo_person in south_africa_assembly.persons:
id_lookup[popolo_person.identifier_value('peoples_assembly')] = popolo_person.id
error_msg = u"No EveryPolitician UUID found for {0.id} {0.name} https://www.pa.org.za/person/{0.slug}/\n"
for person in Person.objects.filter(hidden=False):
uuid = id_lookup.get(str(person.id))
if uuid is None:
verbose_level > 1 and self.stderr.write(error_msg.format(person))
continue
identifier, created = person.identifiers.get_or_create(
scheme='everypolitician',
identifier=uuid,
)
if verbose_level > 0:
if created:
msg = u"Created new identifier for {name}: {identifier}"
else:
msg = u"Existing identifier found for {name}: {identifier}"
self.stdout.write(msg.format(name=person.name, identifier=identifier.identifier))
|
Add management command to sync EveryPolitician UUID
|
Add management command to sync EveryPolitician UUID
|
Python
|
agpl-3.0
|
mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola
|
Add management command to sync EveryPolitician UUID
|
from everypolitician import EveryPolitician
from django.core.management.base import BaseCommand
from pombola.core.models import Person
class Command(BaseCommand):
help = "Sync EveryPolitician UUID to Person's identifiers array"
def add_arguments(self, parser):
parser.add_argument('everypolitician_countries_json_git_ref',
default='master', nargs='?',
help="A git ref from the everypolitician-data repo")
def handle(self, **options):
verbose_level = options['verbosity']
url_template = ('https://cdn.rawgit.com/everypolitician/everypolitician-data/'
'{git_ref}/countries.json')
url = url_template.format(git_ref=options['everypolitician_countries_json_git_ref'])
ep = EveryPolitician(countries_json_url=url)
south_africa_assembly = ep.country('South-Africa').legislature('Assembly').popolo()
id_lookup = {}
for popolo_person in south_africa_assembly.persons:
id_lookup[popolo_person.identifier_value('peoples_assembly')] = popolo_person.id
error_msg = u"No EveryPolitician UUID found for {0.id} {0.name} https://www.pa.org.za/person/{0.slug}/\n"
for person in Person.objects.filter(hidden=False):
uuid = id_lookup.get(str(person.id))
if uuid is None:
verbose_level > 1 and self.stderr.write(error_msg.format(person))
continue
identifier, created = person.identifiers.get_or_create(
scheme='everypolitician',
identifier=uuid,
)
if verbose_level > 0:
if created:
msg = u"Created new identifier for {name}: {identifier}"
else:
msg = u"Existing identifier found for {name}: {identifier}"
self.stdout.write(msg.format(name=person.name, identifier=identifier.identifier))
|
<commit_before><commit_msg>Add management command to sync EveryPolitician UUID<commit_after>
|
from everypolitician import EveryPolitician
from django.core.management.base import BaseCommand
from pombola.core.models import Person
class Command(BaseCommand):
help = "Sync EveryPolitician UUID to Person's identifiers array"
def add_arguments(self, parser):
parser.add_argument('everypolitician_countries_json_git_ref',
default='master', nargs='?',
help="A git ref from the everypolitician-data repo")
def handle(self, **options):
verbose_level = options['verbosity']
url_template = ('https://cdn.rawgit.com/everypolitician/everypolitician-data/'
'{git_ref}/countries.json')
url = url_template.format(git_ref=options['everypolitician_countries_json_git_ref'])
ep = EveryPolitician(countries_json_url=url)
south_africa_assembly = ep.country('South-Africa').legislature('Assembly').popolo()
id_lookup = {}
for popolo_person in south_africa_assembly.persons:
id_lookup[popolo_person.identifier_value('peoples_assembly')] = popolo_person.id
error_msg = u"No EveryPolitician UUID found for {0.id} {0.name} https://www.pa.org.za/person/{0.slug}/\n"
for person in Person.objects.filter(hidden=False):
uuid = id_lookup.get(str(person.id))
if uuid is None:
verbose_level > 1 and self.stderr.write(error_msg.format(person))
continue
identifier, created = person.identifiers.get_or_create(
scheme='everypolitician',
identifier=uuid,
)
if verbose_level > 0:
if created:
msg = u"Created new identifier for {name}: {identifier}"
else:
msg = u"Existing identifier found for {name}: {identifier}"
self.stdout.write(msg.format(name=person.name, identifier=identifier.identifier))
|
Add management command to sync EveryPolitician UUIDfrom everypolitician import EveryPolitician
from django.core.management.base import BaseCommand
from pombola.core.models import Person
class Command(BaseCommand):
help = "Sync EveryPolitician UUID to Person's identifiers array"
def add_arguments(self, parser):
parser.add_argument('everypolitician_countries_json_git_ref',
default='master', nargs='?',
help="A git ref from the everypolitician-data repo")
def handle(self, **options):
verbose_level = options['verbosity']
url_template = ('https://cdn.rawgit.com/everypolitician/everypolitician-data/'
'{git_ref}/countries.json')
url = url_template.format(git_ref=options['everypolitician_countries_json_git_ref'])
ep = EveryPolitician(countries_json_url=url)
south_africa_assembly = ep.country('South-Africa').legislature('Assembly').popolo()
id_lookup = {}
for popolo_person in south_africa_assembly.persons:
id_lookup[popolo_person.identifier_value('peoples_assembly')] = popolo_person.id
error_msg = u"No EveryPolitician UUID found for {0.id} {0.name} https://www.pa.org.za/person/{0.slug}/\n"
for person in Person.objects.filter(hidden=False):
uuid = id_lookup.get(str(person.id))
if uuid is None:
verbose_level > 1 and self.stderr.write(error_msg.format(person))
continue
identifier, created = person.identifiers.get_or_create(
scheme='everypolitician',
identifier=uuid,
)
if verbose_level > 0:
if created:
msg = u"Created new identifier for {name}: {identifier}"
else:
msg = u"Existing identifier found for {name}: {identifier}"
self.stdout.write(msg.format(name=person.name, identifier=identifier.identifier))
|
<commit_before><commit_msg>Add management command to sync EveryPolitician UUID<commit_after>from everypolitician import EveryPolitician
from django.core.management.base import BaseCommand
from pombola.core.models import Person
class Command(BaseCommand):
help = "Sync EveryPolitician UUID to Person's identifiers array"
def add_arguments(self, parser):
parser.add_argument('everypolitician_countries_json_git_ref',
default='master', nargs='?',
help="A git ref from the everypolitician-data repo")
def handle(self, **options):
verbose_level = options['verbosity']
url_template = ('https://cdn.rawgit.com/everypolitician/everypolitician-data/'
'{git_ref}/countries.json')
url = url_template.format(git_ref=options['everypolitician_countries_json_git_ref'])
ep = EveryPolitician(countries_json_url=url)
south_africa_assembly = ep.country('South-Africa').legislature('Assembly').popolo()
id_lookup = {}
for popolo_person in south_africa_assembly.persons:
id_lookup[popolo_person.identifier_value('peoples_assembly')] = popolo_person.id
error_msg = u"No EveryPolitician UUID found for {0.id} {0.name} https://www.pa.org.za/person/{0.slug}/\n"
for person in Person.objects.filter(hidden=False):
uuid = id_lookup.get(str(person.id))
if uuid is None:
verbose_level > 1 and self.stderr.write(error_msg.format(person))
continue
identifier, created = person.identifiers.get_or_create(
scheme='everypolitician',
identifier=uuid,
)
if verbose_level > 0:
if created:
msg = u"Created new identifier for {name}: {identifier}"
else:
msg = u"Existing identifier found for {name}: {identifier}"
self.stdout.write(msg.format(name=person.name, identifier=identifier.identifier))
|
|
b2c51babee88a53704219cb4c2a639c8e71ad621
|
tests/functions_tests/test_copy.py
|
tests/functions_tests/test_copy.py
|
import unittest
import numpy
import chainer
from chainer import functions
from chainer import gradient_check
class Copy(unittest.TestCase):
def setUp(self):
self.x_data = numpy.random.uniform(
-1, 1, (10, 5)).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, (10, 5)).astype(numpy.float32)
def test_check_forward_cpu(self):
x = chainer.Variable(self.x_data)
y = functions.copy(x, -1)
gradient_check.assert_allclose(self.x_data, y.data, atol=0, rtol=0)
def test_check_backward_cpu(self):
x = chainer.Variable(self.x_data)
y = functions.copy(x, -1)
y.grad = self.gy
y.backward()
gradient_check.assert_allclose(x.grad, self.gy, atol=0, rtol=0)
|
Add unittest for Copy function
|
Add unittest for Copy function
|
Python
|
mit
|
tscohen/chainer,okuta/chainer,tkerola/chainer,keisuke-umezawa/chainer,sou81821/chainer,ktnyt/chainer,aonotas/chainer,truongdq/chainer,jnishi/chainer,keisuke-umezawa/chainer,hvy/chainer,kiyukuta/chainer,keisuke-umezawa/chainer,1986ks/chainer,wkentaro/chainer,laysakura/chainer,niboshi/chainer,kuwa32/chainer,kikusu/chainer,jnishi/chainer,elviswf/chainer,yanweifu/chainer,chainer/chainer,wavelets/chainer,cemoody/chainer,cupy/cupy,rezoo/chainer,sinhrks/chainer,ktnyt/chainer,t-abe/chainer,anaruse/chainer,cupy/cupy,ytoyama/yans_chainer_hackathon,minhpqn/chainer,wkentaro/chainer,pfnet/chainer,ikasumi/chainer,keisuke-umezawa/chainer,wkentaro/chainer,t-abe/chainer,bayerj/chainer,ktnyt/chainer,benob/chainer,ktnyt/chainer,chainer/chainer,ysekky/chainer,muupan/chainer,niboshi/chainer,woodshop/complex-chainer,hvy/chainer,woodshop/chainer,AlpacaDB/chainer,jfsantos/chainer,okuta/chainer,muupan/chainer,hidenori-t/chainer,jnishi/chainer,benob/chainer,okuta/chainer,truongdq/chainer,cupy/cupy,sinhrks/chainer,chainer/chainer,masia02/chainer,delta2323/chainer,jnishi/chainer,hvy/chainer,chainer/chainer,Kaisuke5/chainer,wkentaro/chainer,cupy/cupy,kikusu/chainer,niboshi/chainer,tigerneil/chainer,kashif/chainer,AlpacaDB/chainer,okuta/chainer,umitanuki/chainer,niboshi/chainer,hvy/chainer,ronekko/chainer
|
Add unittest for Copy function
|
import unittest
import numpy
import chainer
from chainer import functions
from chainer import gradient_check
class Copy(unittest.TestCase):
def setUp(self):
self.x_data = numpy.random.uniform(
-1, 1, (10, 5)).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, (10, 5)).astype(numpy.float32)
def test_check_forward_cpu(self):
x = chainer.Variable(self.x_data)
y = functions.copy(x, -1)
gradient_check.assert_allclose(self.x_data, y.data, atol=0, rtol=0)
def test_check_backward_cpu(self):
x = chainer.Variable(self.x_data)
y = functions.copy(x, -1)
y.grad = self.gy
y.backward()
gradient_check.assert_allclose(x.grad, self.gy, atol=0, rtol=0)
|
<commit_before><commit_msg>Add unittest for Copy function<commit_after>
|
import unittest
import numpy
import chainer
from chainer import functions
from chainer import gradient_check
class Copy(unittest.TestCase):
def setUp(self):
self.x_data = numpy.random.uniform(
-1, 1, (10, 5)).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, (10, 5)).astype(numpy.float32)
def test_check_forward_cpu(self):
x = chainer.Variable(self.x_data)
y = functions.copy(x, -1)
gradient_check.assert_allclose(self.x_data, y.data, atol=0, rtol=0)
def test_check_backward_cpu(self):
x = chainer.Variable(self.x_data)
y = functions.copy(x, -1)
y.grad = self.gy
y.backward()
gradient_check.assert_allclose(x.grad, self.gy, atol=0, rtol=0)
|
Add unittest for Copy functionimport unittest
import numpy
import chainer
from chainer import functions
from chainer import gradient_check
class Copy(unittest.TestCase):
def setUp(self):
self.x_data = numpy.random.uniform(
-1, 1, (10, 5)).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, (10, 5)).astype(numpy.float32)
def test_check_forward_cpu(self):
x = chainer.Variable(self.x_data)
y = functions.copy(x, -1)
gradient_check.assert_allclose(self.x_data, y.data, atol=0, rtol=0)
def test_check_backward_cpu(self):
x = chainer.Variable(self.x_data)
y = functions.copy(x, -1)
y.grad = self.gy
y.backward()
gradient_check.assert_allclose(x.grad, self.gy, atol=0, rtol=0)
|
<commit_before><commit_msg>Add unittest for Copy function<commit_after>import unittest
import numpy
import chainer
from chainer import functions
from chainer import gradient_check
class Copy(unittest.TestCase):
def setUp(self):
self.x_data = numpy.random.uniform(
-1, 1, (10, 5)).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, (10, 5)).astype(numpy.float32)
def test_check_forward_cpu(self):
x = chainer.Variable(self.x_data)
y = functions.copy(x, -1)
gradient_check.assert_allclose(self.x_data, y.data, atol=0, rtol=0)
def test_check_backward_cpu(self):
x = chainer.Variable(self.x_data)
y = functions.copy(x, -1)
y.grad = self.gy
y.backward()
gradient_check.assert_allclose(x.grad, self.gy, atol=0, rtol=0)
|
|
d386e327a2b04392ff98e7df9ff53c50322feec5
|
database/lawParsing.py
|
database/lawParsing.py
|
# encoding=utf8
import sys
from lxml import etree
reload(sys)
sys.setdefaultencoding('utf8')
tree = etree.parse("BJNR002190897.xml")
root = tree.getroot()
for child in root:
metadaten = child[0]
paragraph = metadaten.find("enbez")
if paragraph is not None:
paragraph = paragraph.text
if not "§" in paragraph:
continue
try:
titel = metadaten.find("titel").text
except:
titel = "kein Titel"
if titel and titel == "(weggefallen)":
gesetztesText = ""
print paragraph
print titel
print gesetztesText
continue
content = child[1][0].find("Content")
if content is not None:
gesetztesText = ""
for absatz in content:
try:
gesetztesText += absatz.text
except:
pass
print paragraph
print titel
print gesetztesText
|
Add basic xml parser for laws
|
Add basic xml parser for laws
|
Python
|
lgpl-2.1
|
Bensk1/jurassist,Bensk1/jurassist
|
Add basic xml parser for laws
|
# encoding=utf8
import sys
from lxml import etree
reload(sys)
sys.setdefaultencoding('utf8')
tree = etree.parse("BJNR002190897.xml")
root = tree.getroot()
for child in root:
metadaten = child[0]
paragraph = metadaten.find("enbez")
if paragraph is not None:
paragraph = paragraph.text
if not "§" in paragraph:
continue
try:
titel = metadaten.find("titel").text
except:
titel = "kein Titel"
if titel and titel == "(weggefallen)":
gesetztesText = ""
print paragraph
print titel
print gesetztesText
continue
content = child[1][0].find("Content")
if content is not None:
gesetztesText = ""
for absatz in content:
try:
gesetztesText += absatz.text
except:
pass
print paragraph
print titel
print gesetztesText
|
<commit_before><commit_msg>Add basic xml parser for laws<commit_after>
|
# encoding=utf8
import sys
from lxml import etree
reload(sys)
sys.setdefaultencoding('utf8')
tree = etree.parse("BJNR002190897.xml")
root = tree.getroot()
for child in root:
metadaten = child[0]
paragraph = metadaten.find("enbez")
if paragraph is not None:
paragraph = paragraph.text
if not "§" in paragraph:
continue
try:
titel = metadaten.find("titel").text
except:
titel = "kein Titel"
if titel and titel == "(weggefallen)":
gesetztesText = ""
print paragraph
print titel
print gesetztesText
continue
content = child[1][0].find("Content")
if content is not None:
gesetztesText = ""
for absatz in content:
try:
gesetztesText += absatz.text
except:
pass
print paragraph
print titel
print gesetztesText
|
Add basic xml parser for laws# encoding=utf8
import sys
from lxml import etree
reload(sys)
sys.setdefaultencoding('utf8')
tree = etree.parse("BJNR002190897.xml")
root = tree.getroot()
for child in root:
metadaten = child[0]
paragraph = metadaten.find("enbez")
if paragraph is not None:
paragraph = paragraph.text
if not "§" in paragraph:
continue
try:
titel = metadaten.find("titel").text
except:
titel = "kein Titel"
if titel and titel == "(weggefallen)":
gesetztesText = ""
print paragraph
print titel
print gesetztesText
continue
content = child[1][0].find("Content")
if content is not None:
gesetztesText = ""
for absatz in content:
try:
gesetztesText += absatz.text
except:
pass
print paragraph
print titel
print gesetztesText
|
<commit_before><commit_msg>Add basic xml parser for laws<commit_after># encoding=utf8
import sys
from lxml import etree
reload(sys)
sys.setdefaultencoding('utf8')
tree = etree.parse("BJNR002190897.xml")
root = tree.getroot()
for child in root:
metadaten = child[0]
paragraph = metadaten.find("enbez")
if paragraph is not None:
paragraph = paragraph.text
if not "§" in paragraph:
continue
try:
titel = metadaten.find("titel").text
except:
titel = "kein Titel"
if titel and titel == "(weggefallen)":
gesetztesText = ""
print paragraph
print titel
print gesetztesText
continue
content = child[1][0].find("Content")
if content is not None:
gesetztesText = ""
for absatz in content:
try:
gesetztesText += absatz.text
except:
pass
print paragraph
print titel
print gesetztesText
|
|
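A small Python 3 sketch of the lxml lookup pattern used above, on an inline XML snippet instead of the BJNR002190897.xml file:

```python
from lxml import etree

xml = """<norm>
  <metadaten>
    <enbez>§ 1</enbez>
    <titel>Beispiel</titel>
  </metadaten>
</norm>"""
root = etree.fromstring(xml)
metadaten = root[0]
print(metadaten.find("enbez").text)  # § 1
print(metadaten.find("titel").text)  # Beispiel
```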
7feb7301b2cefd568fa65ee9907a1a179ea41f1c
|
tests/basics/op_precedence.py
|
tests/basics/op_precedence.py
|
# see https://docs.python.org/3/reference/expressions.html#operator-precedence
# '|' is the least binding numeric operator
# '^'
# OK: 1 | (2 ^ 3) = 1 | 1 = 1
# BAD: (1 | 2) ^ 3 = 3 ^ 3 = 0
print(1 | 2 ^ 3)
# '&'
# OK: 3 ^ (2 & 1) = 3 ^ 0 = 3
# BAD: (3 ^ 2) & 1 = 1 & 1 = 1
print(3 ^ 2 & 1)
# '<<', '>>'
# OK: 2 & (3 << 1) = 2 & 6 = 2
# BAD: (2 & 3) << 1 = 2 << 1 = 4
print(2 & 3 << 1)
# OK: 6 & (4 >> 1) = 6 & 2 = 2
# BAD: (6 & 4) >> 1 = 2 >> 1 = 1
print(6 & 4 >> 1)
# '+', '-'
# OK: 1 << (1 + 1) = 1 << 2 = 4
# BAD: (1 << 1) + 1 = 2 + 1 = 3
print(1 << 1 + 1)
# '*', '/', '//', '%'
# OK: 2 + (2 * 2) = 2 + 4 = 6
# BAD: (2 + 2) * 2 = 4 * 2 = 8
print(2 + 2 * 2)
# '+x', '-x', '~x'
# '**'
# OK: -(2**2) = -4
# BAD: (-2)**2 = 4
print(-2**2)
# OK: 2**(-1) = 0.5
print(2**-1)
# (expr...)
print((2 + 2) * 2)
|
Add tests for arithmetic operators precedence.
|
tests/basics: Add tests for arithmetic operators precedence.
|
Python
|
mit
|
trezor/micropython,deshipu/micropython,AriZuu/micropython,selste/micropython,trezor/micropython,pfalcon/micropython,adafruit/circuitpython,HenrikSolver/micropython,tralamazza/micropython,torwag/micropython,dmazzella/micropython,SHA2017-badge/micropython-esp32,oopy/micropython,TDAbboud/micropython,kerneltask/micropython,tralamazza/micropython,ryannathans/micropython,lowRISC/micropython,kerneltask/micropython,AriZuu/micropython,adafruit/circuitpython,SHA2017-badge/micropython-esp32,henriknelson/micropython,bvernoux/micropython,TDAbboud/micropython,oopy/micropython,oopy/micropython,selste/micropython,Timmenem/micropython,HenrikSolver/micropython,ryannathans/micropython,micropython/micropython-esp32,chrisdearman/micropython,pozetroninc/micropython,MrSurly/micropython-esp32,PappaPeppar/micropython,pramasoul/micropython,pozetroninc/micropython,Timmenem/micropython,torwag/micropython,selste/micropython,tobbad/micropython,oopy/micropython,infinnovation/micropython,MrSurly/micropython-esp32,torwag/micropython,pozetroninc/micropython,pramasoul/micropython,micropython/micropython-esp32,trezor/micropython,AriZuu/micropython,alex-robbins/micropython,adafruit/circuitpython,lowRISC/micropython,pfalcon/micropython,blazewicz/micropython,tralamazza/micropython,pramasoul/micropython,chrisdearman/micropython,pfalcon/micropython,swegener/micropython,MrSurly/micropython,SHA2017-badge/micropython-esp32,lowRISC/micropython,adafruit/circuitpython,henriknelson/micropython,pfalcon/micropython,infinnovation/micropython,PappaPeppar/micropython,dmazzella/micropython,AriZuu/micropython,kerneltask/micropython,PappaPeppar/micropython,tobbad/micropython,torwag/micropython,oopy/micropython,trezor/micropython,blazewicz/micropython,chrisdearman/micropython,TDAbboud/micropython,bvernoux/micropython,chrisdearman/micropython,alex-robbins/micropython,Timmenem/micropython,blazewicz/micropython,ryannathans/micropython,tobbad/micropython,pramasoul/micropython,swegener/micropython,AriZuu/micropython,HenrikSolver/micropython,infinnovation/micropython,henriknelson/micropython,dmazzella/micropython,MrSurly/micropython-esp32,infinnovation/micropython,alex-robbins/micropython,bvernoux/micropython,adafruit/micropython,SHA2017-badge/micropython-esp32,trezor/micropython,MrSurly/micropython,lowRISC/micropython,blazewicz/micropython,deshipu/micropython,micropython/micropython-esp32,selste/micropython,dmazzella/micropython,chrisdearman/micropython,infinnovation/micropython,bvernoux/micropython,henriknelson/micropython,swegener/micropython,deshipu/micropython,TDAbboud/micropython,ryannathans/micropython,tralamazza/micropython,adafruit/circuitpython,pfalcon/micropython,adafruit/circuitpython,ryannathans/micropython,blazewicz/micropython,MrSurly/micropython,swegener/micropython,micropython/micropython-esp32,swegener/micropython,adafruit/micropython,TDAbboud/micropython,pozetroninc/micropython,pramasoul/micropython,Timmenem/micropython,PappaPeppar/micropython,tobbad/micropython,MrSurly/micropython,torwag/micropython,adafruit/micropython,tobbad/micropython,deshipu/micropython,kerneltask/micropython,adafruit/micropython,micropython/micropython-esp32,bvernoux/micropython,pozetroninc/micropython,kerneltask/micropython,HenrikSolver/micropython,PappaPeppar/micropython,alex-robbins/micropython,alex-robbins/micropython,lowRISC/micropython,deshipu/micropython,henriknelson/micropython,adafruit/micropython,MrSurly/micropython-esp32,MrSurly/micropython,SHA2017-badge/micropython-esp32,Timmenem/micropython,HenrikSolver/micropython,selste/micropython,MrSurly/micro
python-esp32
|
tests/basics: Add tests for arithmetic operators precedence.
|
# see https://docs.python.org/3/reference/expressions.html#operator-precedence
# '|' is the least binding numeric operator
# '^'
# OK: 1 | (2 ^ 3) = 1 | 1 = 1
# BAD: (1 | 2) ^ 3 = 3 ^ 3 = 0
print(1 | 2 ^ 3)
# '&'
# OK: 3 ^ (2 & 1) = 3 ^ 0 = 3
# BAD: (3 ^ 2) & 1 = 1 & 1 = 1
print(3 ^ 2 & 1)
# '<<', '>>'
# OK: 2 & (3 << 1) = 2 & 6 = 2
# BAD: (2 & 3) << 1 = 2 << 1 = 4
print(2 & 3 << 1)
# OK: 6 & (4 >> 1) = 6 & 2 = 2
# BAD: (6 & 4) >> 1 = 2 >> 1 = 1
print(6 & 4 >> 1)
# '+', '-'
# OK: 1 << (1 + 1) = 1 << 2 = 4
# BAD: (1 << 1) + 1 = 2 + 1 = 3
print(1 << 1 + 1)
# '*', '/', '//', '%'
# OK: 2 + (2 * 2) = 2 + 4 = 6
# BAD: (2 + 2) * 2 = 4 * 2 = 8
print(2 + 2 * 2)
# '+x', '-x', '~x'
# '**'
# OK: -(2**2) = -4
# BAD: (-2)**2 = 4
print(-2**2)
# OK: 2**(-1) = 0.5
print(2**-1)
# (expr...)
print((2 + 2) * 2)
|
<commit_before><commit_msg>tests/basics: Add tests for arithmetic operators precedence.<commit_after>
|
# see https://docs.python.org/3/reference/expressions.html#operator-precedence
# '|' is the least binding numeric operator
# '^'
# OK: 1 | (2 ^ 3) = 1 | 1 = 1
# BAD: (1 | 2) ^ 3 = 3 ^ 3 = 0
print(1 | 2 ^ 3)
# '&'
# OK: 3 ^ (2 & 1) = 3 ^ 0 = 3
# BAD: (3 ^ 2) & 1 = 1 & 1 = 1
print(3 ^ 2 & 1)
# '<<', '>>'
# OK: 2 & (3 << 1) = 2 & 6 = 2
# BAD: (2 & 3) << 1 = 2 << 1 = 4
print(2 & 3 << 1)
# OK: 6 & (4 >> 1) = 6 & 2 = 2
# BAD: (6 & 4) >> 1 = 2 >> 1 = 1
print(6 & 4 >> 1)
# '+', '-'
# OK: 1 << (1 + 1) = 1 << 2 = 4
# BAD: (1 << 1) + 1 = 2 + 1 = 3
print(1 << 1 + 1)
# '*', '/', '//', '%'
# OK: 2 + (2 * 2) = 2 + 4 = 6
# BAD: (2 + 2) * 2 = 4 * 2 = 8
print(2 + 2 * 2)
# '+x', '-x', '~x'
# '**'
# OK: -(2**2) = -4
# BAD: (-2)**2 = 4
print(-2**2)
# OK: 2**(-1) = 0.5
print(2**-1)
# (expr...)
print((2 + 2) * 2)
|
tests/basics: Add tests for arithmetic operators precedence.# see https://docs.python.org/3/reference/expressions.html#operator-precedence
# '|' is the least binding numeric operator
# '^'
# OK: 1 | (2 ^ 3) = 1 | 1 = 1
# BAD: (1 | 2) ^ 3 = 3 ^ 3 = 0
print(1 | 2 ^ 3)
# '&'
# OK: 3 ^ (2 & 1) = 3 ^ 0 = 3
# BAD: (3 ^ 2) & 1 = 1 & 1 = 1
print(3 ^ 2 & 1)
# '<<', '>>'
# OK: 2 & (3 << 1) = 2 & 6 = 2
# BAD: (2 & 3) << 1 = 2 << 1 = 4
print(2 & 3 << 1)
# OK: 6 & (4 >> 1) = 6 & 2 = 2
# BAD: (6 & 4) >> 1 = 2 >> 1 = 1
print(6 & 4 >> 1)
# '+', '-'
# OK: 1 << (1 + 1) = 1 << 2 = 4
# BAD: (1 << 1) + 1 = 2 + 1 = 3
print(1 << 1 + 1)
# '*', '/', '//', '%'
# OK: 2 + (2 * 2) = 2 + 4 = 6
# BAD: (2 + 2) * 2 = 4 * 2 = 8
print(2 + 2 * 2)
# '+x', '-x', '~x'
# '**'
# OK: -(2**2) = -4
# BAD: (-2)**2 = 4
print(-2**2)
# OK: 2**(-1) = 0.5
print(2**-1)
# (expr...)
print((2 + 2) * 2)
|
<commit_before><commit_msg>tests/basics: Add tests for arithmetic operators precedence.<commit_after># see https://docs.python.org/3/reference/expressions.html#operator-precedence
# '|' is the least binding numeric operator
# '^'
# OK: 1 | (2 ^ 3) = 1 | 1 = 1
# BAD: (1 | 2) ^ 3 = 3 ^ 3 = 0
print(1 | 2 ^ 3)
# '&'
# OK: 3 ^ (2 & 1) = 3 ^ 0 = 3
# BAD: (3 ^ 2) & 1 = 1 & 1 = 1
print(3 ^ 2 & 1)
# '<<', '>>'
# OK: 2 & (3 << 1) = 2 & 6 = 2
# BAD: (2 & 3) << 1 = 2 << 1 = 4
print(2 & 3 << 1)
# OK: 6 & (4 >> 1) = 6 & 2 = 2
# BAD: (6 & 4) >> 1 = 2 >> 1 = 1
print(6 & 4 >> 1)
# '+', '-'
# OK: 1 << (1 + 1) = 1 << 2 = 4
# BAD: (1 << 1) + 1 = 2 + 1 = 3
print(1 << 1 + 1)
# '*', '/', '//', '%'
# OK: 2 + (2 * 2) = 2 + 4 = 6
# BAD: (2 + 2) * 2 = 4 * 2 = 8
print(2 + 2 * 2)
# '+x', '-x', '~x'
# '**'
# OK: -(2**2) = -4
# BAD: (-2)**2 = 4
print(-2**2)
# OK: 2**(-1) = 0.5
print(2**-1)
# (expr...)
print((2 + 2) * 2)
|
|
60c1935b3d91f361b072c23889596d4d98ee9ea3
|
spreadsheet.py
|
spreadsheet.py
|
from database import DeveloperInfo
import csv
import re
def get_names(full_name):
match = re.match(r'(\w+) ([\w ]+)', full_name)
if match is None:
return None
return match.group(1), match.group(2)
def load_roster(db_session, csv_file):
spreadsheet = csv.DictReader(csv_file)
devs = []
for row in spreadsheet:
# Try to get the dev, if we already have them in the database
d = db_session.query(DeveloperInfo).filter(DeveloperInfo.email==row['Email']).first()
if d is None:
d = DeveloperInfo()
d.id = max([DeveloperInfo.next_id()] + [x.id + 1 for x in devs])
# Try to split the name into first/last
names = get_names(row['Full'])
if names is None:
print '\tUnable to split name "{}"'.format(row['Full'])
continue
d.first_name, d.last_name = names
d.preferred_name=row['Preferred']
d.email=row['Email']
d.github_username=row['Username']
d.team=row['Team']
d.planning_to_compete=row['Wants to Compete'] == "Yep"
d.added_manually = False
devs.append(d)
return devs
|
Add a module for loading Google Sheets
|
Add a module for loading Google Sheets
|
Python
|
bsd-3-clause
|
siggame/ng-attendance,siggame/ng-attendance
|
Add a module for loading Google Sheets
|
from database import DeveloperInfo
import csv
import re
def get_names(full_name):
match = re.match(r'(\w+) ([\w ]+)', full_name)
if match is None:
return None
return match.group(1), match.group(2)
def load_roster(db_session, csv_file):
spreadsheet = csv.DictReader(csv_file)
devs = []
for row in spreadsheet:
# Try to get the dev, if we already have them in the database
d = db_session.query(DeveloperInfo).filter(DeveloperInfo.email==row['Email']).first()
if d is None:
d = DeveloperInfo()
d.id = max([DeveloperInfo.next_id()] + [x.id + 1 for x in devs])
# Try to split the name into first/last
names = get_names(row['Full'])
if names is None:
print '\tUnable to split name "{}"'.format(row['Full'])
continue
d.first_name, d.last_name = names
d.preferred_name=row['Preferred']
d.email=row['Email']
d.github_username=row['Username']
d.team=row['Team']
d.planning_to_compete=row['Wants to Compete'] == "Yep"
d.added_manually = False
devs.append(d)
return devs
|
<commit_before><commit_msg>Add a module for loading Google Sheets<commit_after>
|
from database import DeveloperInfo
import csv
import re
def get_names(full_name):
match = re.match(r'(\w+) ([\w ]+)', full_name)
if match is None:
return None
return match.group(1), match.group(2)
def load_roster(db_session, csv_file):
spreadsheet = csv.DictReader(csv_file)
devs = []
for row in spreadsheet:
# Try to get the dev, if we already have them in the database
d = db_session.query(DeveloperInfo).filter(DeveloperInfo.email==row['Email']).first()
if d is None:
d = DeveloperInfo()
d.id = max([DeveloperInfo.next_id()] + [x.id + 1 for x in devs])
# Try to split the name into first/last
names = get_names(row['Full'])
if names is None:
print '\tUnable to split name "{}"'.format(row['Full'])
continue
d.first_name, d.last_name = names
d.preferred_name=row['Preferred']
d.email=row['Email']
d.github_username=row['Username']
d.team=row['Team']
d.planning_to_compete=row['Wants to Compete'] == "Yep"
d.added_manually = False
devs.append(d)
return devs
|
Add a module for loading Google Sheetsfrom database import DeveloperInfo
import csv
import re
def get_names(full_name):
match = re.match(r'(\w+) ([\w ]+)', full_name)
if match is None:
return None
return match.group(1), match.group(2)
def load_roster(db_session, csv_file):
spreadsheet = csv.DictReader(csv_file)
devs = []
for row in spreadsheet:
# Try to get the dev, if we already have them in the database
d = db_session.query(DeveloperInfo).filter(DeveloperInfo.email==row['Email']).first()
if d is None:
d = DeveloperInfo()
d.id = max([DeveloperInfo.next_id()] + [x.id + 1 for x in devs])
# Try to split the name into first/last
names = get_names(row['Full'])
if names is None:
print '\tUnable to split name "{}"'.format(row['Full'])
continue
d.first_name, d.last_name = names
d.preferred_name=row['Preferred']
d.email=row['Email']
d.github_username=row['Username']
d.team=row['Team']
d.planning_to_compete=row['Wants to Compete'] == "Yep"
d.added_manually = False
devs.append(d)
return devs
|
<commit_before><commit_msg>Add a module for loading Google Sheets<commit_after>from database import DeveloperInfo
import csv
import re
def get_names(full_name):
match = re.match(r'(\w+) ([\w ]+)', full_name)
if match is None:
return None
return match.group(1), match.group(2)
def load_roster(db_session, csv_file):
spreadsheet = csv.DictReader(csv_file)
devs = []
for row in spreadsheet:
# Try to get the dev, if we already have them in the database
d = db_session.query(DeveloperInfo).filter(DeveloperInfo.email==row['Email']).first()
if d is None:
d = DeveloperInfo()
d.id = max([DeveloperInfo.next_id()] + [x.id + 1 for x in devs])
# Try to split the name into first/last
names = get_names(row['Full'])
if names is None:
print '\tUnable to split name "{}"'.format(row['Full'])
continue
d.first_name, d.last_name = names
d.preferred_name=row['Preferred']
d.email=row['Email']
d.github_username=row['Username']
d.team=row['Team']
d.planning_to_compete=row['Wants to Compete'] == "Yep"
d.added_manually = False
devs.append(d)
return devs
|
|
78912c9e22e98b4d9330625723b06c866617f14f
|
wagtail/migrations/0075_populate_latest_revision_and_revision_object_str.py
|
wagtail/migrations/0075_populate_latest_revision_and_revision_object_str.py
|
# Generated by Django 4.0.3 on 2022-05-26 13:58
from django.db import migrations, models
from django.db.models.functions import Cast
def populate_latest_revision(apps, schema_editor):
Page = apps.get_model("wagtailcore.Page")
Revision = apps.get_model("wagtailcore.Revision")
latest_revision_id = models.Subquery(
Revision.objects.filter(
content_type_id=models.OuterRef("content_type_id"),
object_id=Cast(models.OuterRef("pk"), models.CharField()),
)
.order_by("-created_at", "-id")
.values("pk")[:1]
)
Page.objects.all().update(latest_revision_id=latest_revision_id)
def populate_revision_object_str(apps, schema_editor):
Revision = apps.get_model("wagtailcore.Revision")
Revision.objects.all().update(object_str=models.F("content__title"))
class Migration(migrations.Migration):
dependencies = [
("wagtailcore", "0074_revision_object_str"),
]
operations = [
migrations.RunPython(
populate_latest_revision,
migrations.RunPython.noop,
),
migrations.RunPython(
populate_revision_object_str,
migrations.RunPython.noop,
),
]
|
Make migration for backfilling pages' `latest_revision` and revisions' `object_str`
|
Make migration for backfilling pages' `latest_revision` and revisions' `object_str`
|
Python
|
bsd-3-clause
|
wagtail/wagtail,rsalmaso/wagtail,thenewguy/wagtail,wagtail/wagtail,zerolab/wagtail,wagtail/wagtail,thenewguy/wagtail,zerolab/wagtail,zerolab/wagtail,wagtail/wagtail,thenewguy/wagtail,rsalmaso/wagtail,thenewguy/wagtail,thenewguy/wagtail,zerolab/wagtail,rsalmaso/wagtail,wagtail/wagtail,rsalmaso/wagtail,rsalmaso/wagtail,zerolab/wagtail
|
Make migration for backfilling pages' `latest_revision` and revisions' `object_str`
|
# Generated by Django 4.0.3 on 2022-05-26 13:58
from django.db import migrations, models
from django.db.models.functions import Cast
def populate_latest_revision(apps, schema_editor):
Page = apps.get_model("wagtailcore.Page")
Revision = apps.get_model("wagtailcore.Revision")
latest_revision_id = models.Subquery(
Revision.objects.filter(
content_type_id=models.OuterRef("content_type_id"),
object_id=Cast(models.OuterRef("pk"), models.CharField()),
)
.order_by("-created_at", "-id")
.values("pk")[:1]
)
Page.objects.all().update(latest_revision_id=latest_revision_id)
def populate_revision_object_str(apps, schema_editor):
Revision = apps.get_model("wagtailcore.Revision")
Revision.objects.all().update(object_str=models.F("content__title"))
class Migration(migrations.Migration):
dependencies = [
("wagtailcore", "0074_revision_object_str"),
]
operations = [
migrations.RunPython(
populate_latest_revision,
migrations.RunPython.noop,
),
migrations.RunPython(
populate_revision_object_str,
migrations.RunPython.noop,
),
]
|
<commit_before><commit_msg>Make migration for backfilling pages' `latest_revision` and revisions' `object_str`<commit_after>
|
# Generated by Django 4.0.3 on 2022-05-26 13:58
from django.db import migrations, models
from django.db.models.functions import Cast
def populate_latest_revision(apps, schema_editor):
Page = apps.get_model("wagtailcore.Page")
Revision = apps.get_model("wagtailcore.Revision")
latest_revision_id = models.Subquery(
Revision.objects.filter(
content_type_id=models.OuterRef("content_type_id"),
object_id=Cast(models.OuterRef("pk"), models.CharField()),
)
.order_by("-created_at", "-id")
.values("pk")[:1]
)
Page.objects.all().update(latest_revision_id=latest_revision_id)
def populate_revision_object_str(apps, schema_editor):
Revision = apps.get_model("wagtailcore.Revision")
Revision.objects.all().update(object_str=models.F("content__title"))
class Migration(migrations.Migration):
dependencies = [
("wagtailcore", "0074_revision_object_str"),
]
operations = [
migrations.RunPython(
populate_latest_revision,
migrations.RunPython.noop,
),
migrations.RunPython(
populate_revision_object_str,
migrations.RunPython.noop,
),
]
|
Make migration for backfilling pages' `latest_revision` and revisions' `object_str`# Generated by Django 4.0.3 on 2022-05-26 13:58
from django.db import migrations, models
from django.db.models.functions import Cast
def populate_latest_revision(apps, schema_editor):
Page = apps.get_model("wagtailcore.Page")
Revision = apps.get_model("wagtailcore.Revision")
latest_revision_id = models.Subquery(
Revision.objects.filter(
content_type_id=models.OuterRef("content_type_id"),
object_id=Cast(models.OuterRef("pk"), models.CharField()),
)
.order_by("-created_at", "-id")
.values("pk")[:1]
)
Page.objects.all().update(latest_revision_id=latest_revision_id)
def populate_revision_object_str(apps, schema_editor):
Revision = apps.get_model("wagtailcore.Revision")
Revision.objects.all().update(object_str=models.F("content__title"))
class Migration(migrations.Migration):
dependencies = [
("wagtailcore", "0074_revision_object_str"),
]
operations = [
migrations.RunPython(
populate_latest_revision,
migrations.RunPython.noop,
),
migrations.RunPython(
populate_revision_object_str,
migrations.RunPython.noop,
),
]
|
<commit_before><commit_msg>Make migration for backfilling pages' `latest_revision` and revisions' `object_str`<commit_after># Generated by Django 4.0.3 on 2022-05-26 13:58
from django.db import migrations, models
from django.db.models.functions import Cast
def populate_latest_revision(apps, schema_editor):
Page = apps.get_model("wagtailcore.Page")
Revision = apps.get_model("wagtailcore.Revision")
latest_revision_id = models.Subquery(
Revision.objects.filter(
content_type_id=models.OuterRef("content_type_id"),
object_id=Cast(models.OuterRef("pk"), models.CharField()),
)
.order_by("-created_at", "-id")
.values("pk")[:1]
)
Page.objects.all().update(latest_revision_id=latest_revision_id)
def populate_revision_object_str(apps, schema_editor):
Revision = apps.get_model("wagtailcore.Revision")
Revision.objects.all().update(object_str=models.F("content__title"))
class Migration(migrations.Migration):
dependencies = [
("wagtailcore", "0074_revision_object_str"),
]
operations = [
migrations.RunPython(
populate_latest_revision,
migrations.RunPython.noop,
),
migrations.RunPython(
populate_revision_object_str,
migrations.RunPython.noop,
),
]
|
|
eea38231360115a1495948ccf18ddac16fc8630a
|
src/wellsfargo/tests/api/test_inquiry.py
|
src/wellsfargo/tests/api/test_inquiry.py
|
from rest_framework import status
from rest_framework.reverse import reverse
from wellsfargo.tests.base import BaseTest
from wellsfargo.tests import responses
import mock
class CreditLineInquiryTest(BaseTest):
@mock.patch('soap.get_transport')
def test_inquiry_successful(self, get_transport):
get_transport.return_value = self._build_transport_with_reply(responses.inquiry_successful)
url = reverse('wfrs-api-acct-inquiry')
data = {
'account_number': '9999999999999991'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['account_number'], '9999999999999991')
self.assertEqual(response.data['status'], 'I0')
self.assertEqual(response.data['first_name'], 'John')
self.assertEqual(response.data['middle_initial'], 'Q')
self.assertEqual(response.data['last_name'], 'Smith')
self.assertEqual(response.data['phone_number'], '+15559998888')
self.assertEqual(response.data['address'], '123 First Street')
self.assertEqual(response.data['credit_limit'], '5000.00')
self.assertEqual(response.data['balance'], '0.00')
self.assertEqual(response.data['open_to_buy'], '5000.00')
self.assertEqual(response.data['last_payment_date'], None)
self.assertEqual(response.data['last_payment_amount'], '0.00')
self.assertEqual(response.data['payment_due_date'], None)
self.assertEqual(response.data['payment_due_amount'], '0.00')
@mock.patch('soap.get_transport')
def test_inquiry_failed(self, get_transport):
get_transport.return_value = self._build_transport_with_reply(responses.inquiry_failed)
url = reverse('wfrs-api-acct-inquiry')
data = {
'account_number': '9999999999999991'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['account_number'], ['DEMO INVALID ACCOUNT'])
|
Add tests for inquiry API
|
Add tests for inquiry API
|
Python
|
isc
|
thelabnyc/django-oscar-wfrs,thelabnyc/django-oscar-wfrs
|
Add tests for inquiry API
|
from rest_framework import status
from rest_framework.reverse import reverse
from wellsfargo.tests.base import BaseTest
from wellsfargo.tests import responses
import mock
class CreditLineInquiryTest(BaseTest):
@mock.patch('soap.get_transport')
def test_inquiry_successful(self, get_transport):
get_transport.return_value = self._build_transport_with_reply(responses.inquiry_successful)
url = reverse('wfrs-api-acct-inquiry')
data = {
'account_number': '9999999999999991'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['account_number'], '9999999999999991')
self.assertEqual(response.data['status'], 'I0')
self.assertEqual(response.data['first_name'], 'John')
self.assertEqual(response.data['middle_initial'], 'Q')
self.assertEqual(response.data['last_name'], 'Smith')
self.assertEqual(response.data['phone_number'], '+15559998888')
self.assertEqual(response.data['address'], '123 First Street')
self.assertEqual(response.data['credit_limit'], '5000.00')
self.assertEqual(response.data['balance'], '0.00')
self.assertEqual(response.data['open_to_buy'], '5000.00')
self.assertEqual(response.data['last_payment_date'], None)
self.assertEqual(response.data['last_payment_amount'], '0.00')
self.assertEqual(response.data['payment_due_date'], None)
self.assertEqual(response.data['payment_due_amount'], '0.00')
@mock.patch('soap.get_transport')
def test_inquiry_failed(self, get_transport):
get_transport.return_value = self._build_transport_with_reply(responses.inquiry_failed)
url = reverse('wfrs-api-acct-inquiry')
data = {
'account_number': '9999999999999991'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['account_number'], ['DEMO INVALID ACCOUNT'])
|
<commit_before><commit_msg>Add tests for inquiry API<commit_after>
|
from rest_framework import status
from rest_framework.reverse import reverse
from wellsfargo.tests.base import BaseTest
from wellsfargo.tests import responses
import mock
class CreditLineInquiryTest(BaseTest):
@mock.patch('soap.get_transport')
def test_inquiry_successful(self, get_transport):
get_transport.return_value = self._build_transport_with_reply(responses.inquiry_successful)
url = reverse('wfrs-api-acct-inquiry')
data = {
'account_number': '9999999999999991'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['account_number'], '9999999999999991')
self.assertEqual(response.data['status'], 'I0')
self.assertEqual(response.data['first_name'], 'John')
self.assertEqual(response.data['middle_initial'], 'Q')
self.assertEqual(response.data['last_name'], 'Smith')
self.assertEqual(response.data['phone_number'], '+15559998888')
self.assertEqual(response.data['address'], '123 First Street')
self.assertEqual(response.data['credit_limit'], '5000.00')
self.assertEqual(response.data['balance'], '0.00')
self.assertEqual(response.data['open_to_buy'], '5000.00')
self.assertEqual(response.data['last_payment_date'], None)
self.assertEqual(response.data['last_payment_amount'], '0.00')
self.assertEqual(response.data['payment_due_date'], None)
self.assertEqual(response.data['payment_due_amount'], '0.00')
@mock.patch('soap.get_transport')
def test_inquiry_failed(self, get_transport):
get_transport.return_value = self._build_transport_with_reply(responses.inquiry_failed)
url = reverse('wfrs-api-acct-inquiry')
data = {
'account_number': '9999999999999991'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['account_number'], ['DEMO INVALID ACCOUNT'])
|
Add tests for inquiry APIfrom rest_framework import status
from rest_framework.reverse import reverse
from wellsfargo.tests.base import BaseTest
from wellsfargo.tests import responses
import mock
class CreditLineInquiryTest(BaseTest):
@mock.patch('soap.get_transport')
def test_inquiry_successful(self, get_transport):
get_transport.return_value = self._build_transport_with_reply(responses.inquiry_successful)
url = reverse('wfrs-api-acct-inquiry')
data = {
'account_number': '9999999999999991'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['account_number'], '9999999999999991')
self.assertEqual(response.data['status'], 'I0')
self.assertEqual(response.data['first_name'], 'John')
self.assertEqual(response.data['middle_initial'], 'Q')
self.assertEqual(response.data['last_name'], 'Smith')
self.assertEqual(response.data['phone_number'], '+15559998888')
self.assertEqual(response.data['address'], '123 First Street')
self.assertEqual(response.data['credit_limit'], '5000.00')
self.assertEqual(response.data['balance'], '0.00')
self.assertEqual(response.data['open_to_buy'], '5000.00')
self.assertEqual(response.data['last_payment_date'], None)
self.assertEqual(response.data['last_payment_amount'], '0.00')
self.assertEqual(response.data['payment_due_date'], None)
self.assertEqual(response.data['payment_due_amount'], '0.00')
@mock.patch('soap.get_transport')
def test_inquiry_failed(self, get_transport):
get_transport.return_value = self._build_transport_with_reply(responses.inquiry_failed)
url = reverse('wfrs-api-acct-inquiry')
data = {
'account_number': '9999999999999991'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['account_number'], ['DEMO INVALID ACCOUNT'])
|
<commit_before><commit_msg>Add tests for inquiry API<commit_after>from rest_framework import status
from rest_framework.reverse import reverse
from wellsfargo.tests.base import BaseTest
from wellsfargo.tests import responses
import mock
class CreditLineInquiryTest(BaseTest):
@mock.patch('soap.get_transport')
def test_inquiry_successful(self, get_transport):
get_transport.return_value = self._build_transport_with_reply(responses.inquiry_successful)
url = reverse('wfrs-api-acct-inquiry')
data = {
'account_number': '9999999999999991'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['account_number'], '9999999999999991')
self.assertEqual(response.data['status'], 'I0')
self.assertEqual(response.data['first_name'], 'John')
self.assertEqual(response.data['middle_initial'], 'Q')
self.assertEqual(response.data['last_name'], 'Smith')
self.assertEqual(response.data['phone_number'], '+15559998888')
self.assertEqual(response.data['address'], '123 First Street')
self.assertEqual(response.data['credit_limit'], '5000.00')
self.assertEqual(response.data['balance'], '0.00')
self.assertEqual(response.data['open_to_buy'], '5000.00')
self.assertEqual(response.data['last_payment_date'], None)
self.assertEqual(response.data['last_payment_amount'], '0.00')
self.assertEqual(response.data['payment_due_date'], None)
self.assertEqual(response.data['payment_due_amount'], '0.00')
@mock.patch('soap.get_transport')
def test_inquiry_failed(self, get_transport):
get_transport.return_value = self._build_transport_with_reply(responses.inquiry_failed)
url = reverse('wfrs-api-acct-inquiry')
data = {
'account_number': '9999999999999991'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['account_number'], ['DEMO INVALID ACCOUNT'])
|
|
f1af6f813c54198f09d73ee504af23b060d510a2
|
pycroft/model/alembic/versions/6f1a37baa574_hosts_and_interfaces.py
|
pycroft/model/alembic/versions/6f1a37baa574_hosts_and_interfaces.py
|
"""hosts and interfaces
Revision ID: 6f1a37baa574
Revises: cd588620e7d0
Create Date: 2018-09-17 15:38:56.401301
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import table, column, String
import pycroft
# revision identifiers, used by Alembic.
revision = '6f1a37baa574'
down_revision = 'cd588620e7d0'
branch_labels = None
depends_on = None
def upgrade():
property = table('property', column('name', String))
op.execute(property.update().where(property.c.name == op.inline_literal('user_mac_change'))
.values({'name':op.inline_literal('user_hosts_change')}))
def downgrade():
property = table('property', column('name', String))
op.execute(property.update().where(
property.c.name == op.inline_literal('user_hosts_change'))
.values({'name': op.inline_literal('user_mac_change')}))
|
Add alembic script for renaming 'user_mac_change' property
|
Add alembic script for renaming 'user_mac_change' property
|
Python
|
apache-2.0
|
agdsn/pycroft,agdsn/pycroft,agdsn/pycroft,agdsn/pycroft,agdsn/pycroft,lukasjuhrich/pycroft,lukasjuhrich/pycroft,lukasjuhrich/pycroft,lukasjuhrich/pycroft
|
Add alembic script for renaming 'user_mac_change' property
|
"""hosts and interfaces
Revision ID: 6f1a37baa574
Revises: cd588620e7d0
Create Date: 2018-09-17 15:38:56.401301
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import table, column, String
import pycroft
# revision identifiers, used by Alembic.
revision = '6f1a37baa574'
down_revision = 'cd588620e7d0'
branch_labels = None
depends_on = None
def upgrade():
property = table('property', column('name', String))
op.execute(property.update().where(property.c.name == op.inline_literal('user_mac_change'))
.values({'name':op.inline_literal('user_hosts_change')}))
def downgrade():
property = table('property', column('name', String))
op.execute(property.update().where(
property.c.name == op.inline_literal('user_hosts_change'))
.values({'name': op.inline_literal('user_mac_change')}))
|
<commit_before><commit_msg>Add alembic script for renaming 'user_mac_change' property<commit_after>
|
"""hosts and interfaces
Revision ID: 6f1a37baa574
Revises: cd588620e7d0
Create Date: 2018-09-17 15:38:56.401301
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import table, column, String
import pycroft
# revision identifiers, used by Alembic.
revision = '6f1a37baa574'
down_revision = 'cd588620e7d0'
branch_labels = None
depends_on = None
def upgrade():
property = table('property', column('name', String))
op.execute(property.update().where(property.c.name == op.inline_literal('user_mac_change'))
.values({'name':op.inline_literal('user_hosts_change')}))
def downgrade():
property = table('property', column('name', String))
op.execute(property.update().where(
property.c.name == op.inline_literal('user_hosts_change'))
.values({'name': op.inline_literal('user_mac_change')}))
|
Add alembic script for renaming 'user_mac_change' property"""hosts and interfaces
Revision ID: 6f1a37baa574
Revises: cd588620e7d0
Create Date: 2018-09-17 15:38:56.401301
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import table, column, String
import pycroft
# revision identifiers, used by Alembic.
revision = '6f1a37baa574'
down_revision = 'cd588620e7d0'
branch_labels = None
depends_on = None
def upgrade():
property = table('property', column('name', String))
op.execute(property.update().where(property.c.name == op.inline_literal('user_mac_change'))
.values({'name':op.inline_literal('user_hosts_change')}))
def downgrade():
property = table('property', column('name', String))
op.execute(property.update().where(
property.c.name == op.inline_literal('user_hosts_change'))
.values({'name': op.inline_literal('user_mac_change')}))
|
<commit_before><commit_msg>Add alembic script for renaming 'user_mac_change' property<commit_after>"""hosts and interfaces
Revision ID: 6f1a37baa574
Revises: cd588620e7d0
Create Date: 2018-09-17 15:38:56.401301
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import table, column, String
import pycroft
# revision identifiers, used by Alembic.
revision = '6f1a37baa574'
down_revision = 'cd588620e7d0'
branch_labels = None
depends_on = None
def upgrade():
property = table('property', column('name', String))
op.execute(property.update().where(property.c.name == op.inline_literal('user_mac_change'))
.values({'name':op.inline_literal('user_hosts_change')}))
def downgrade():
property = table('property', column('name', String))
op.execute(property.update().where(
property.c.name == op.inline_literal('user_hosts_change'))
.values({'name': op.inline_literal('user_mac_change')}))
|
|
89e8553502d5b849a013cda58087f57cba9f1de7
|
article/migrations/0012_alter_content_field_to_have_verbose_name.py
|
article/migrations/0012_alter_content_field_to_have_verbose_name.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('article', '0011_alter_content_and_modular_content_fields'),
]
operations = [
migrations.AlterField(
model_name='article',
name='content',
field=wagtail.wagtailcore.fields.RichTextField(verbose_name="Content - Deprecated. Use 'MODULAR CONTENT' instead.", blank=True),
),
]
|
Add migration for article content field verbose name introduction
|
Add migration for article content field verbose name introduction
|
Python
|
bsd-3-clause
|
PARINetwork/pari,PARINetwork/pari,PARINetwork/pari,PARINetwork/pari
|
Add migration for article content field verbose name introduction
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('article', '0011_alter_content_and_modular_content_fields'),
]
operations = [
migrations.AlterField(
model_name='article',
name='content',
field=wagtail.wagtailcore.fields.RichTextField(verbose_name="Content - Deprecated. Use 'MODULAR CONTENT' instead.", blank=True),
),
]
|
<commit_before><commit_msg>Add migration for article content field verbose name introduction<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('article', '0011_alter_content_and_modular_content_fields'),
]
operations = [
migrations.AlterField(
model_name='article',
name='content',
field=wagtail.wagtailcore.fields.RichTextField(verbose_name="Content - Deprecated. Use 'MODULAR CONTENT' instead.", blank=True),
),
]
|
Add migration for article content field verbose name introduction# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('article', '0011_alter_content_and_modular_content_fields'),
]
operations = [
migrations.AlterField(
model_name='article',
name='content',
field=wagtail.wagtailcore.fields.RichTextField(verbose_name="Content - Deprecated. Use 'MODULAR CONTENT' instead.", blank=True),
),
]
|
<commit_before><commit_msg>Add migration for article content field verbose name introduction<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('article', '0011_alter_content_and_modular_content_fields'),
]
operations = [
migrations.AlterField(
model_name='article',
name='content',
field=wagtail.wagtailcore.fields.RichTextField(verbose_name="Content - Deprecated. Use 'MODULAR CONTENT' instead.", blank=True),
),
]
|
|
f7d6e4c654414292783207be400f1769de7af892
|
Lib/glyphsLib/parser_test.py
|
Lib/glyphsLib/parser_test.py
|
# coding=UTF-8
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (print_function, division, absolute_import,
unicode_literals)
import collections
import unittest
from glyphsLib.parser import Parser
class ParserTest(unittest.TestCase):
def run_test(self, text, expected):
parser = Parser()
self.assertEqual(parser.parse(text), collections.OrderedDict(expected))
def test_parse(self):
self.run_test(
'{myval=1; mylist=(1,2,3);}',
[('myval', '1'), ('mylist', ['1', '2', '3'])])
def test_trim_value(self):
self.run_test(
'{mystr="a\\"s\\077d\\U2019f";}',
[('mystr', 'a"s?d’f')])
def test_trailing_content(self):
with self.assertRaises(ValueError):
self.run_test(
'{myval=1;}trailing',
[('myval', '1')])
def test_unexpected_content(self):
with self.assertRaises(ValueError):
self.run_test(
'{myval=@unexpected;}',
[('myval', '@unexpected')])
def test_with_utf8(self):
self.run_test(
'{mystr="Don’t crash";}',
[('mystr', 'Don’t crash')])
if __name__ == '__main__':
unittest.main()
|
Add some simple parser tests
|
Add some simple parser tests
|
Python
|
apache-2.0
|
googlei18n/glyphsLib,googlefonts/glyphsLib
|
Add some simple parser tests
|
# coding=UTF-8
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (print_function, division, absolute_import,
unicode_literals)
import collections
import unittest
from glyphsLib.parser import Parser
class ParserTest(unittest.TestCase):
def run_test(self, text, expected):
parser = Parser()
self.assertEqual(parser.parse(text), collections.OrderedDict(expected))
def test_parse(self):
self.run_test(
'{myval=1; mylist=(1,2,3);}',
[('myval', '1'), ('mylist', ['1', '2', '3'])])
def test_trim_value(self):
self.run_test(
'{mystr="a\\"s\\077d\\U2019f";}',
[('mystr', 'a"s?d’f')])
def test_trailing_content(self):
with self.assertRaises(ValueError):
self.run_test(
'{myval=1;}trailing',
[('myval', '1')])
def test_unexpected_content(self):
with self.assertRaises(ValueError):
self.run_test(
'{myval=@unexpected;}',
[('myval', '@unexpected')])
def test_with_utf8(self):
self.run_test(
'{mystr="Don’t crash";}',
[('mystr', 'Don’t crash')])
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add some simple parser tests<commit_after>
|
# coding=UTF-8
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (print_function, division, absolute_import,
unicode_literals)
import collections
import unittest
from glyphsLib.parser import Parser
class ParserTest(unittest.TestCase):
def run_test(self, text, expected):
parser = Parser()
self.assertEqual(parser.parse(text), collections.OrderedDict(expected))
def test_parse(self):
self.run_test(
'{myval=1; mylist=(1,2,3);}',
[('myval', '1'), ('mylist', ['1', '2', '3'])])
def test_trim_value(self):
self.run_test(
'{mystr="a\\"s\\077d\\U2019f";}',
[('mystr', 'a"s?d’f')])
def test_trailing_content(self):
with self.assertRaises(ValueError):
self.run_test(
'{myval=1;}trailing',
[('myval', '1')])
def test_unexpected_content(self):
with self.assertRaises(ValueError):
self.run_test(
'{myval=@unexpected;}',
[('myval', '@unexpected')])
def test_with_utf8(self):
self.run_test(
'{mystr="Don’t crash";}',
[('mystr', 'Don’t crash')])
if __name__ == '__main__':
unittest.main()
|
Add some simple parser tests# coding=UTF-8
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (print_function, division, absolute_import,
unicode_literals)
import collections
import unittest
from glyphsLib.parser import Parser
class ParserTest(unittest.TestCase):
def run_test(self, text, expected):
parser = Parser()
self.assertEqual(parser.parse(text), collections.OrderedDict(expected))
def test_parse(self):
self.run_test(
'{myval=1; mylist=(1,2,3);}',
[('myval', '1'), ('mylist', ['1', '2', '3'])])
def test_trim_value(self):
self.run_test(
'{mystr="a\\"s\\077d\\U2019f";}',
[('mystr', 'a"s?d’f')])
def test_trailing_content(self):
with self.assertRaises(ValueError):
self.run_test(
'{myval=1;}trailing',
[('myval', '1')])
def test_unexpected_content(self):
with self.assertRaises(ValueError):
self.run_test(
'{myval=@unexpected;}',
[('myval', '@unexpected')])
def test_with_utf8(self):
self.run_test(
'{mystr="Don’t crash";}',
[('mystr', 'Don’t crash')])
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add some simple parser tests<commit_after># coding=UTF-8
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (print_function, division, absolute_import,
unicode_literals)
import collections
import unittest
from glyphsLib.parser import Parser
class ParserTest(unittest.TestCase):
def run_test(self, text, expected):
parser = Parser()
self.assertEqual(parser.parse(text), collections.OrderedDict(expected))
def test_parse(self):
self.run_test(
'{myval=1; mylist=(1,2,3);}',
[('myval', '1'), ('mylist', ['1', '2', '3'])])
def test_trim_value(self):
self.run_test(
'{mystr="a\\"s\\077d\\U2019f";}',
[('mystr', 'a"s?d’f')])
def test_trailing_content(self):
with self.assertRaises(ValueError):
self.run_test(
'{myval=1;}trailing',
[('myval', '1')])
def test_unexpected_content(self):
with self.assertRaises(ValueError):
self.run_test(
'{myval=@unexpected;}',
[('myval', '@unexpected')])
def test_with_utf8(self):
self.run_test(
'{mystr="Don’t crash";}',
[('mystr', 'Don’t crash')])
if __name__ == '__main__':
unittest.main()
|
|
983ec0045f34f480059e5b2736ee5c8f5e84e700
|
symposion/speakers/migrations/0005_fix_migrations_accidentally_rolling_back_changes.py
|
symposion/speakers/migrations/0005_fix_migrations_accidentally_rolling_back_changes.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-01-25 04:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('symposion_speakers', '0004_make_fields_optional'),
]
operations = [
migrations.AlterField(
model_name='speaker',
name='created',
field=models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='Created'),
),
]
|
Fix migration issue by creating new migration.
|
Fix migration issue by creating new migration.
Both migration #3 and migration #4 in symposion.speakers have not been
applied upstream. Because of this, migration #4 accidentally reverted
the changes made in migration #3.
Because there are currently deployed conference sites with this set of
incorrect migrations, it is necessary to create a new migration to fix
the issue instead of just amending migration #4.
|
Python
|
bsd-3-clause
|
pydata/symposion,pydata/symposion
|
Fix migration issue by creating new migration.
Both migration #3 and migration #4 in symposion.speakers have not been
applied upstream. Because of this, migration #4 accidentally reverted
the changes made in migration #3.
Because there are currently deployed conference sites with this set of
incorrect migrations, it is necessary to create a new migration to fix
the issue instead of just amending migration #4.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-01-25 04:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('symposion_speakers', '0004_make_fields_optional'),
]
operations = [
migrations.AlterField(
model_name='speaker',
name='created',
field=models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='Created'),
),
]
|
<commit_before><commit_msg>Fix migration issue by creating new migration.
Both migration #3 and migration #4 in symposion.speakers have not been
applied upstream. Because of this, migration #4 accidentally reverted
the changes made in migration #3.
Because there are currently deployed conference sites with this set of
incorrect migrations, it is necessary to create a new migration to fix
the issue instead of just amending migration #4.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-01-25 04:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('symposion_speakers', '0004_make_fields_optional'),
]
operations = [
migrations.AlterField(
model_name='speaker',
name='created',
field=models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='Created'),
),
]
|
Fix migration issue by creating new migration.
Both migration #3 and migration #4 in symposion.speakers have not been
applied upstream. Because of this, migration #4 accidentally reverted
the changes made in migration #3.
Because there are currently deployed conference sites with this set of
incorrect migrations, it is necessary to create a new migration to fix
the issue instead of just amending migration #4.# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-01-25 04:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('symposion_speakers', '0004_make_fields_optional'),
]
operations = [
migrations.AlterField(
model_name='speaker',
name='created',
field=models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='Created'),
),
]
|
<commit_before><commit_msg>Fix migration issue by creating new migration.
Both migration #3 and migration #4 in symposion.speakers have not been
applied upstream. Because of this, migration #4 accidentally reverted
the changes made in migration #3.
Because there are currently deployed conference sites with this set of
incorrect migrations, it is necessary to create a new migration to fix
the issue instead of just amending migration #4.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-01-25 04:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('symposion_speakers', '0004_make_fields_optional'),
]
operations = [
migrations.AlterField(
model_name='speaker',
name='created',
field=models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='Created'),
),
]
|
|
5f15f86f7c85887d17bc900aac8af30546c96096
|
rtc_tools/metrics_plotter.py
|
rtc_tools/metrics_plotter.py
|
#!/usr/bin/env python
# Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Plots metrics from stdin.
Expected format:
PLOTTABLE_DATA: <json data>
Where json data has the following format:
{
"graph_name": "<graph name>",
"trace_name": "<test suite name>",
"units": "<units>",
"mean": <mean value>,
"std": <standard deviation value>,
"samples": [
{ "time": <sample time in us>, "value": <sample value> },
...
]
}
"""
import fileinput
import json
import matplotlib.pyplot as plt
LINE_PREFIX = 'PLOTTABLE_DATA: '
GRAPH_NAME = 'graph_name'
TRACE_NAME = 'trace_name'
UNITS = 'units'
MICROSECONDS_IN_SECOND = 1e6
def main():
metrics = []
for line in fileinput.input():
line = line.strip()
if line.startswith(LINE_PREFIX):
line = line.replace(LINE_PREFIX, '')
metrics.append(json.loads(line))
else:
print line
for metric in metrics:
figure = plt.figure()
figure.canvas.set_window_title(metric[TRACE_NAME])
x_values = []
y_values = []
start_x = None
for sample in metric['samples']:
if start_x is None:
start_x = sample['time']
# Time is us, we want to show it in seconds.
x_values.append((sample['time'] - start_x) / MICROSECONDS_IN_SECOND)
y_values.append(sample['value'])
plt.ylabel('%s (%s)' % (metric[GRAPH_NAME], metric[UNITS]))
plt.xlabel('time (ms)')
plt.title(metric[GRAPH_NAME])
plt.plot(x_values, y_values)
plt.show()
if __name__ == '__main__':
main()
|
Add plotter script to plot internal test's stats
|
Add plotter script to plot internal test's stats
Bug: webrtc:10138
Change-Id: I2b9d55559cf6a123914e5a597a5bf6ea6e2aa4d7
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/152721
Commit-Queue: Artem Titov <1e27363ccc366a7ff0fdde48204d68d5ebfeccc5@webrtc.org>
Reviewed-by: Patrik Höglund <b486136ee68458b09c92c86c39f451c87d6ef6a1@webrtc.org>
Reviewed-by: Ilya Nikolaevskiy <76788ac3efd229e9d156135d9ceb7fef65c45da4@webrtc.org>
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#29177}
|
Python
|
bsd-3-clause
|
ShiftMediaProject/libilbc,ShiftMediaProject/libilbc,TimothyGu/libilbc,TimothyGu/libilbc,ShiftMediaProject/libilbc,ShiftMediaProject/libilbc,TimothyGu/libilbc,TimothyGu/libilbc,TimothyGu/libilbc,ShiftMediaProject/libilbc
|
Add plotter script to plot internal test's stats
Bug: webrtc:10138
Change-Id: I2b9d55559cf6a123914e5a597a5bf6ea6e2aa4d7
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/152721
Commit-Queue: Artem Titov <1e27363ccc366a7ff0fdde48204d68d5ebfeccc5@webrtc.org>
Reviewed-by: Patrik Höglund <b486136ee68458b09c92c86c39f451c87d6ef6a1@webrtc.org>
Reviewed-by: Ilya Nikolaevskiy <76788ac3efd229e9d156135d9ceb7fef65c45da4@webrtc.org>
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#29177}
|
#!/usr/bin/env python
# Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Plots metrics from stdin.
Expected format:
PLOTTABLE_DATA: <json data>
Where json data has the following format:
{
"graph_name": "<graph name>",
"trace_name": "<test suite name>",
"units": "<units>",
"mean": <mean value>,
"std": <standard deviation value>,
"samples": [
{ "time": <sample time in us>, "value": <sample value> },
...
]
}
"""
import fileinput
import json
import matplotlib.pyplot as plt
LINE_PREFIX = 'PLOTTABLE_DATA: '
GRAPH_NAME = 'graph_name'
TRACE_NAME = 'trace_name'
UNITS = 'units'
MICROSECONDS_IN_SECOND = 1e6
def main():
metrics = []
for line in fileinput.input():
line = line.strip()
if line.startswith(LINE_PREFIX):
line = line.replace(LINE_PREFIX, '')
metrics.append(json.loads(line))
else:
print line
for metric in metrics:
figure = plt.figure()
figure.canvas.set_window_title(metric[TRACE_NAME])
x_values = []
y_values = []
start_x = None
for sample in metric['samples']:
if start_x is None:
start_x = sample['time']
# Time is us, we want to show it in seconds.
x_values.append((sample['time'] - start_x) / MICROSECONDS_IN_SECOND)
y_values.append(sample['value'])
plt.ylabel('%s (%s)' % (metric[GRAPH_NAME], metric[UNITS]))
plt.xlabel('time (ms)')
plt.title(metric[GRAPH_NAME])
plt.plot(x_values, y_values)
plt.show()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add plotter script to plot internal test's stats
Bug: webrtc:10138
Change-Id: I2b9d55559cf6a123914e5a597a5bf6ea6e2aa4d7
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/152721
Commit-Queue: Artem Titov <1e27363ccc366a7ff0fdde48204d68d5ebfeccc5@webrtc.org>
Reviewed-by: Patrik Höglund <b486136ee68458b09c92c86c39f451c87d6ef6a1@webrtc.org>
Reviewed-by: Ilya Nikolaevskiy <76788ac3efd229e9d156135d9ceb7fef65c45da4@webrtc.org>
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#29177}<commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Plots metrics from stdin.
Expected format:
PLOTTABLE_DATA: <json data>
Where json data has the following format:
{
"graph_name": "<graph name>",
"trace_name": "<test suite name>",
"units": "<units>",
"mean": <mean value>,
"std": <standard deviation value>,
"samples": [
{ "time": <sample time in us>, "value": <sample value> },
...
]
}
"""
import fileinput
import json
import matplotlib.pyplot as plt
LINE_PREFIX = 'PLOTTABLE_DATA: '
GRAPH_NAME = 'graph_name'
TRACE_NAME = 'trace_name'
UNITS = 'units'
MICROSECONDS_IN_SECOND = 1e6
def main():
metrics = []
for line in fileinput.input():
line = line.strip()
if line.startswith(LINE_PREFIX):
line = line.replace(LINE_PREFIX, '')
metrics.append(json.loads(line))
else:
print line
for metric in metrics:
figure = plt.figure()
figure.canvas.set_window_title(metric[TRACE_NAME])
x_values = []
y_values = []
start_x = None
for sample in metric['samples']:
if start_x is None:
start_x = sample['time']
# Time is us, we want to show it in seconds.
x_values.append((sample['time'] - start_x) / MICROSECONDS_IN_SECOND)
y_values.append(sample['value'])
plt.ylabel('%s (%s)' % (metric[GRAPH_NAME], metric[UNITS]))
plt.xlabel('time (ms)')
plt.title(metric[GRAPH_NAME])
plt.plot(x_values, y_values)
plt.show()
if __name__ == '__main__':
main()
|
Add plotter script to plot internal test's stats
Bug: webrtc:10138
Change-Id: I2b9d55559cf6a123914e5a597a5bf6ea6e2aa4d7
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/152721
Commit-Queue: Artem Titov <1e27363ccc366a7ff0fdde48204d68d5ebfeccc5@webrtc.org>
Reviewed-by: Patrik Höglund <b486136ee68458b09c92c86c39f451c87d6ef6a1@webrtc.org>
Reviewed-by: Ilya Nikolaevskiy <76788ac3efd229e9d156135d9ceb7fef65c45da4@webrtc.org>
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#29177}#!/usr/bin/env python
# Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Plots metrics from stdin.
Expected format:
PLOTTABLE_DATA: <json data>
Where json data has the following format:
{
"graph_name": "<graph name>",
"trace_name": "<test suite name>",
"units": "<units>",
"mean": <mean value>,
"std": <standard deviation value>,
"samples": [
{ "time": <sample time in us>, "value": <sample value> },
...
]
}
"""
import fileinput
import json
import matplotlib.pyplot as plt
LINE_PREFIX = 'PLOTTABLE_DATA: '
GRAPH_NAME = 'graph_name'
TRACE_NAME = 'trace_name'
UNITS = 'units'
MICROSECONDS_IN_SECOND = 1e6
def main():
metrics = []
for line in fileinput.input():
line = line.strip()
if line.startswith(LINE_PREFIX):
line = line.replace(LINE_PREFIX, '')
metrics.append(json.loads(line))
else:
print line
for metric in metrics:
figure = plt.figure()
figure.canvas.set_window_title(metric[TRACE_NAME])
x_values = []
y_values = []
start_x = None
for sample in metric['samples']:
if start_x is None:
start_x = sample['time']
# Time is us, we want to show it in seconds.
x_values.append((sample['time'] - start_x) / MICROSECONDS_IN_SECOND)
y_values.append(sample['value'])
plt.ylabel('%s (%s)' % (metric[GRAPH_NAME], metric[UNITS]))
plt.xlabel('time (ms)')
plt.title(metric[GRAPH_NAME])
plt.plot(x_values, y_values)
plt.show()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add plotter script to plot internal test's stats
Bug: webrtc:10138
Change-Id: I2b9d55559cf6a123914e5a597a5bf6ea6e2aa4d7
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/152721
Commit-Queue: Artem Titov <1e27363ccc366a7ff0fdde48204d68d5ebfeccc5@webrtc.org>
Reviewed-by: Patrik Höglund <b486136ee68458b09c92c86c39f451c87d6ef6a1@webrtc.org>
Reviewed-by: Ilya Nikolaevskiy <76788ac3efd229e9d156135d9ceb7fef65c45da4@webrtc.org>
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#29177}<commit_after>#!/usr/bin/env python
# Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Plots metrics from stdin.
Expected format:
PLOTTABLE_DATA: <json data>
Where json data has the following format:
{
"graph_name": "<graph name>",
"trace_name": "<test suite name>",
"units": "<units>",
"mean": <mean value>,
"std": <standard deviation value>,
"samples": [
{ "time": <sample time in us>, "value": <sample value> },
...
]
}
"""
import fileinput
import json
import matplotlib.pyplot as plt
LINE_PREFIX = 'PLOTTABLE_DATA: '
GRAPH_NAME = 'graph_name'
TRACE_NAME = 'trace_name'
UNITS = 'units'
MICROSECONDS_IN_SECOND = 1e6
def main():
metrics = []
for line in fileinput.input():
line = line.strip()
if line.startswith(LINE_PREFIX):
line = line.replace(LINE_PREFIX, '')
metrics.append(json.loads(line))
else:
print line
for metric in metrics:
figure = plt.figure()
figure.canvas.set_window_title(metric[TRACE_NAME])
x_values = []
y_values = []
start_x = None
for sample in metric['samples']:
if start_x is None:
start_x = sample['time']
# Time is us, we want to show it in seconds.
x_values.append((sample['time'] - start_x) / MICROSECONDS_IN_SECOND)
y_values.append(sample['value'])
plt.ylabel('%s (%s)' % (metric[GRAPH_NAME], metric[UNITS]))
    plt.xlabel('time (seconds)')
plt.title(metric[GRAPH_NAME])
plt.plot(x_values, y_values)
plt.show()
if __name__ == '__main__':
main()
|
|
f76a7100deb3e60507ed431feb5678ca97459f4b
|
lib/php_crud_api_transform.py
|
lib/php_crud_api_transform.py
|
def php_crud_api_transform(tables):
def get_objects(tables, table_name, where_index=None, match_value=None):
objects = []
for record in tables[table_name]['records']:
if where_index == None or (record[where_index] == match_value):
object = {}
columns = tables[table_name]['columns']
for column in columns:
index = columns.index(column)
object[column] = record[index]
for relation, reltable in tables.items():
for key, target in reltable.get('relations', {}).items():
if target == table_name + '.' + column:
relcols = reltable['columns']
column_indices = {value: relcols.index(value) for value in relcols}
object[relation] = get_objects(
tables, relation, column_indices[key], record[index])
objects.append(object)
return objects
tree = {}
for name, table in tables.items():
if not 'relations' in table:
tree[name] = get_objects(tables, name)
if 'results' in table:
tree['_results'] = table['results']
return tree
if __name__ == "__main__":
input = {"posts": {"columns": ["id","user_id","category_id","content"],"records": [[1,1,1,"blogstarted"]]},"post_tags": {"relations": {"post_id": "posts.id"},"columns": ["id","post_id","tag_id"],"records": [[1,1,1],[2,1,2]]},"categories": {"relations": {"id": "posts.category_id"},"columns": ["id","name"],"records": [[1,"anouncement"]]},"tags": {"relations": {"id": "post_tags.tag_id"},"columns": ["id","name"],"records": [[1,"funny"],[2,"important"]]},"comments": {"relations": {"post_id": "posts.id"},"columns": ["id","post_id","message"],"records": [[1,1,"great"],[2,1,"fantastic"]]}}
output = {"posts": [{"id": 1,"post_tags": [{"id": 1,"post_id": 1,"tag_id": 1,"tags": [{"id": 1,"name": "funny"}]},{"id": 2,"post_id": 1,"tag_id": 2,"tags": [{"id": 2,"name": "important"}]}],"comments": [{"id": 1,"post_id": 1,"message": "great"},{"id": 2,"post_id": 1,"message": "fantastic"}],"user_id": 1,"category_id": 1,"categories": [{"id": 1,"name": "anouncement"}],"content": "blogstarted"}]}
print(php_crud_api_transform(input) == output)
|
Transform algorithm in python 3
|
Transform algorithm in python 3
|
Python
|
mit
|
mevdschee/mysql-crud-api,mvdriel/php-crud-api,mevdschee/mysql-crud-api,mvdriel/php-crud-api,mevdschee/php-crud-api,mevdschee/php-crud-api
|
Transform algorithm in python 3
|
def php_crud_api_transform(tables):
def get_objects(tables, table_name, where_index=None, match_value=None):
objects = []
for record in tables[table_name]['records']:
if where_index == None or (record[where_index] == match_value):
object = {}
columns = tables[table_name]['columns']
for column in columns:
index = columns.index(column)
object[column] = record[index]
for relation, reltable in tables.items():
for key, target in reltable.get('relations', {}).items():
if target == table_name + '.' + column:
relcols = reltable['columns']
column_indices = {value: relcols.index(value) for value in relcols}
object[relation] = get_objects(
tables, relation, column_indices[key], record[index])
objects.append(object)
return objects
tree = {}
for name, table in tables.items():
if not 'relations' in table:
tree[name] = get_objects(tables, name)
if 'results' in table:
tree['_results'] = table['results']
return tree
if __name__ == "__main__":
input = {"posts": {"columns": ["id","user_id","category_id","content"],"records": [[1,1,1,"blogstarted"]]},"post_tags": {"relations": {"post_id": "posts.id"},"columns": ["id","post_id","tag_id"],"records": [[1,1,1],[2,1,2]]},"categories": {"relations": {"id": "posts.category_id"},"columns": ["id","name"],"records": [[1,"anouncement"]]},"tags": {"relations": {"id": "post_tags.tag_id"},"columns": ["id","name"],"records": [[1,"funny"],[2,"important"]]},"comments": {"relations": {"post_id": "posts.id"},"columns": ["id","post_id","message"],"records": [[1,1,"great"],[2,1,"fantastic"]]}}
output = {"posts": [{"id": 1,"post_tags": [{"id": 1,"post_id": 1,"tag_id": 1,"tags": [{"id": 1,"name": "funny"}]},{"id": 2,"post_id": 1,"tag_id": 2,"tags": [{"id": 2,"name": "important"}]}],"comments": [{"id": 1,"post_id": 1,"message": "great"},{"id": 2,"post_id": 1,"message": "fantastic"}],"user_id": 1,"category_id": 1,"categories": [{"id": 1,"name": "anouncement"}],"content": "blogstarted"}]}
print(php_crud_api_transform(input) == output)
|
<commit_before><commit_msg>Transform algorithm in python 3<commit_after>
|
def php_crud_api_transform(tables):
def get_objects(tables, table_name, where_index=None, match_value=None):
objects = []
for record in tables[table_name]['records']:
if where_index == None or (record[where_index] == match_value):
object = {}
columns = tables[table_name]['columns']
for column in columns:
index = columns.index(column)
object[column] = record[index]
for relation, reltable in tables.items():
for key, target in reltable.get('relations', {}).items():
if target == table_name + '.' + column:
relcols = reltable['columns']
column_indices = {value: relcols.index(value) for value in relcols}
object[relation] = get_objects(
tables, relation, column_indices[key], record[index])
objects.append(object)
return objects
tree = {}
for name, table in tables.items():
if not 'relations' in table:
tree[name] = get_objects(tables, name)
if 'results' in table:
tree['_results'] = table['results']
return tree
if __name__ == "__main__":
input = {"posts": {"columns": ["id","user_id","category_id","content"],"records": [[1,1,1,"blogstarted"]]},"post_tags": {"relations": {"post_id": "posts.id"},"columns": ["id","post_id","tag_id"],"records": [[1,1,1],[2,1,2]]},"categories": {"relations": {"id": "posts.category_id"},"columns": ["id","name"],"records": [[1,"anouncement"]]},"tags": {"relations": {"id": "post_tags.tag_id"},"columns": ["id","name"],"records": [[1,"funny"],[2,"important"]]},"comments": {"relations": {"post_id": "posts.id"},"columns": ["id","post_id","message"],"records": [[1,1,"great"],[2,1,"fantastic"]]}}
output = {"posts": [{"id": 1,"post_tags": [{"id": 1,"post_id": 1,"tag_id": 1,"tags": [{"id": 1,"name": "funny"}]},{"id": 2,"post_id": 1,"tag_id": 2,"tags": [{"id": 2,"name": "important"}]}],"comments": [{"id": 1,"post_id": 1,"message": "great"},{"id": 2,"post_id": 1,"message": "fantastic"}],"user_id": 1,"category_id": 1,"categories": [{"id": 1,"name": "anouncement"}],"content": "blogstarted"}]}
print(php_crud_api_transform(input) == output)
|
Transform algorithm in python 3def php_crud_api_transform(tables):
def get_objects(tables, table_name, where_index=None, match_value=None):
objects = []
for record in tables[table_name]['records']:
if where_index == None or (record[where_index] == match_value):
object = {}
columns = tables[table_name]['columns']
for column in columns:
index = columns.index(column)
object[column] = record[index]
for relation, reltable in tables.items():
for key, target in reltable.get('relations', {}).items():
if target == table_name + '.' + column:
relcols = reltable['columns']
column_indices = {value: relcols.index(value) for value in relcols}
object[relation] = get_objects(
tables, relation, column_indices[key], record[index])
objects.append(object)
return objects
tree = {}
for name, table in tables.items():
if not 'relations' in table:
tree[name] = get_objects(tables, name)
if 'results' in table:
tree['_results'] = table['results']
return tree
if __name__ == "__main__":
input = {"posts": {"columns": ["id","user_id","category_id","content"],"records": [[1,1,1,"blogstarted"]]},"post_tags": {"relations": {"post_id": "posts.id"},"columns": ["id","post_id","tag_id"],"records": [[1,1,1],[2,1,2]]},"categories": {"relations": {"id": "posts.category_id"},"columns": ["id","name"],"records": [[1,"anouncement"]]},"tags": {"relations": {"id": "post_tags.tag_id"},"columns": ["id","name"],"records": [[1,"funny"],[2,"important"]]},"comments": {"relations": {"post_id": "posts.id"},"columns": ["id","post_id","message"],"records": [[1,1,"great"],[2,1,"fantastic"]]}}
output = {"posts": [{"id": 1,"post_tags": [{"id": 1,"post_id": 1,"tag_id": 1,"tags": [{"id": 1,"name": "funny"}]},{"id": 2,"post_id": 1,"tag_id": 2,"tags": [{"id": 2,"name": "important"}]}],"comments": [{"id": 1,"post_id": 1,"message": "great"},{"id": 2,"post_id": 1,"message": "fantastic"}],"user_id": 1,"category_id": 1,"categories": [{"id": 1,"name": "anouncement"}],"content": "blogstarted"}]}
print(php_crud_api_transform(input) == output)
|
<commit_before><commit_msg>Transform algorithm in python 3<commit_after>def php_crud_api_transform(tables):
def get_objects(tables, table_name, where_index=None, match_value=None):
objects = []
for record in tables[table_name]['records']:
if where_index == None or (record[where_index] == match_value):
object = {}
columns = tables[table_name]['columns']
for column in columns:
index = columns.index(column)
object[column] = record[index]
for relation, reltable in tables.items():
for key, target in reltable.get('relations', {}).items():
if target == table_name + '.' + column:
relcols = reltable['columns']
column_indices = {value: relcols.index(value) for value in relcols}
object[relation] = get_objects(
tables, relation, column_indices[key], record[index])
objects.append(object)
return objects
tree = {}
for name, table in tables.items():
if not 'relations' in table:
tree[name] = get_objects(tables, name)
if 'results' in table:
tree['_results'] = table['results']
return tree
if __name__ == "__main__":
input = {"posts": {"columns": ["id","user_id","category_id","content"],"records": [[1,1,1,"blogstarted"]]},"post_tags": {"relations": {"post_id": "posts.id"},"columns": ["id","post_id","tag_id"],"records": [[1,1,1],[2,1,2]]},"categories": {"relations": {"id": "posts.category_id"},"columns": ["id","name"],"records": [[1,"anouncement"]]},"tags": {"relations": {"id": "post_tags.tag_id"},"columns": ["id","name"],"records": [[1,"funny"],[2,"important"]]},"comments": {"relations": {"post_id": "posts.id"},"columns": ["id","post_id","message"],"records": [[1,1,"great"],[2,1,"fantastic"]]}}
output = {"posts": [{"id": 1,"post_tags": [{"id": 1,"post_id": 1,"tag_id": 1,"tags": [{"id": 1,"name": "funny"}]},{"id": 2,"post_id": 1,"tag_id": 2,"tags": [{"id": 2,"name": "important"}]}],"comments": [{"id": 1,"post_id": 1,"message": "great"},{"id": 2,"post_id": 1,"message": "fantastic"}],"user_id": 1,"category_id": 1,"categories": [{"id": 1,"name": "anouncement"}],"content": "blogstarted"}]}
print(php_crud_api_transform(input) == output)
|
|
e9674f88660e14ce48239771b76310044fc37090
|
erpnext/patches/v7_0/remove_old_earning_deduction_doctypes.py
|
erpnext/patches/v7_0/remove_old_earning_deduction_doctypes.py
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
if frappe.db.exists("DocType", "Salary Component"):
for dt in ("Salary Structure Earning", "Salary Structure Deduction", "Salary Slip Earning",
"Salary Slip Deduction", "Earning Type", "Deduction Type"):
frappe.delete_doc("DocType", dt)
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
if frappe.db.exists("DocType", "Salary Component"):
for dt in ("Salary Structure Earning", "Salary Structure Deduction", "Salary Slip Earning",
"Salary Slip Deduction", "Earning Type", "Deduction Type"):
frappe.delete_doc("DocType", dt)
for d in frappe.db.sql("""select name from `tabCustom Field`
where dt in ('Salary Detail', 'Salary Component')"""):
frappe.get_doc("Custom Field", d[0]).save()
|
Create columns for custom fields in new table Salary Detail and Component
|
Create columns for custom fields in new table Salary Detail and Component
|
Python
|
agpl-3.0
|
Aptitudetech/ERPNext,njmube/erpnext,geekroot/erpnext,geekroot/erpnext,gsnbng/erpnext,gsnbng/erpnext,geekroot/erpnext,geekroot/erpnext,indictranstech/erpnext,gsnbng/erpnext,indictranstech/erpnext,indictranstech/erpnext,njmube/erpnext,njmube/erpnext,indictranstech/erpnext,gsnbng/erpnext,njmube/erpnext
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
if frappe.db.exists("DocType", "Salary Component"):
for dt in ("Salary Structure Earning", "Salary Structure Deduction", "Salary Slip Earning",
"Salary Slip Deduction", "Earning Type", "Deduction Type"):
frappe.delete_doc("DocType", dt)
Create columns for custom fields in new table Salary Detail and Component
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
if frappe.db.exists("DocType", "Salary Component"):
for dt in ("Salary Structure Earning", "Salary Structure Deduction", "Salary Slip Earning",
"Salary Slip Deduction", "Earning Type", "Deduction Type"):
frappe.delete_doc("DocType", dt)
for d in frappe.db.sql("""select name from `tabCustom Field`
where dt in ('Salary Detail', 'Salary Component')"""):
frappe.get_doc("Custom Field", d[0]).save()
|
<commit_before># Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
if frappe.db.exists("DocType", "Salary Component"):
for dt in ("Salary Structure Earning", "Salary Structure Deduction", "Salary Slip Earning",
"Salary Slip Deduction", "Earning Type", "Deduction Type"):
frappe.delete_doc("DocType", dt)
<commit_msg>Create columns for custom fields in new table Salary Detail and Component<commit_after>
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
if frappe.db.exists("DocType", "Salary Component"):
for dt in ("Salary Structure Earning", "Salary Structure Deduction", "Salary Slip Earning",
"Salary Slip Deduction", "Earning Type", "Deduction Type"):
frappe.delete_doc("DocType", dt)
for d in frappe.db.sql("""select name from `tabCustom Field`
where dt in ('Salary Detail', 'Salary Component')"""):
frappe.get_doc("Custom Field", d[0]).save()
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
if frappe.db.exists("DocType", "Salary Component"):
for dt in ("Salary Structure Earning", "Salary Structure Deduction", "Salary Slip Earning",
"Salary Slip Deduction", "Earning Type", "Deduction Type"):
frappe.delete_doc("DocType", dt)
Create columns for custom fields in new table Salary Detail and Component# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
if frappe.db.exists("DocType", "Salary Component"):
for dt in ("Salary Structure Earning", "Salary Structure Deduction", "Salary Slip Earning",
"Salary Slip Deduction", "Earning Type", "Deduction Type"):
frappe.delete_doc("DocType", dt)
for d in frappe.db.sql("""select name from `tabCustom Field`
where dt in ('Salary Detail', 'Salary Component')"""):
frappe.get_doc("Custom Field", d[0]).save()
|
<commit_before># Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
if frappe.db.exists("DocType", "Salary Component"):
for dt in ("Salary Structure Earning", "Salary Structure Deduction", "Salary Slip Earning",
"Salary Slip Deduction", "Earning Type", "Deduction Type"):
frappe.delete_doc("DocType", dt)
<commit_msg>Create columns for custom fields in new table Salary Detail and Component<commit_after># Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
if frappe.db.exists("DocType", "Salary Component"):
for dt in ("Salary Structure Earning", "Salary Structure Deduction", "Salary Slip Earning",
"Salary Slip Deduction", "Earning Type", "Deduction Type"):
frappe.delete_doc("DocType", dt)
for d in frappe.db.sql("""select name from `tabCustom Field`
where dt in ('Salary Detail', 'Salary Component')"""):
frappe.get_doc("Custom Field", d[0]).save()
|
0f04271d90646ef696401c702d2478527848ac88
|
djangorest_alchemy/routers.py
|
djangorest_alchemy/routers.py
|
from rest_framework.routers import DefaultRouter
from rest_framework.routers import Route
class ReadOnlyRouter(DefaultRouter):
"""
A router for read-only APIs, which USES trailing slashes.
"""
routes = [
Route(url=r'^{prefix}{trailing_slash}$',
mapping={'get': 'list'},
name='{basename}-list',
initkwargs={'suffix': 'List'}),
Route(url=r'^{prefix}/{lookup}{trailing_slash}$',
mapping={'get': 'retrieve'},
name='{basename}-detail',
initkwargs={'suffix': 'Detail'})
]
|
Add router for read-only APIs
|
Add router for read-only APIs
|
Python
|
mit
|
dealertrack/djangorest-alchemy,pombredanne/djangorest-alchemy
|
Add router for read-only APIs
|
from rest_framework.routers import DefaultRouter
from rest_framework.routers import Route
class ReadOnlyRouter(DefaultRouter):
"""
A router for read-only APIs, which USES trailing slashes.
"""
routes = [
Route(url=r'^{prefix}{trailing_slash}$',
mapping={'get': 'list'},
name='{basename}-list',
initkwargs={'suffix': 'List'}),
Route(url=r'^{prefix}/{lookup}{trailing_slash}$',
mapping={'get': 'retrieve'},
name='{basename}-detail',
initkwargs={'suffix': 'Detail'})
]
|
<commit_before><commit_msg>Add router for read-only APIs<commit_after>
|
from rest_framework.routers import DefaultRouter
from rest_framework.routers import Route
class ReadOnlyRouter(DefaultRouter):
"""
A router for read-only APIs, which USES trailing slashes.
"""
routes = [
Route(url=r'^{prefix}{trailing_slash}$',
mapping={'get': 'list'},
name='{basename}-list',
initkwargs={'suffix': 'List'}),
Route(url=r'^{prefix}/{lookup}{trailing_slash}$',
mapping={'get': 'retrieve'},
name='{basename}-detail',
initkwargs={'suffix': 'Detail'})
]
|
Add router for read-only APIsfrom rest_framework.routers import DefaultRouter
from rest_framework.routers import Route
class ReadOnlyRouter(DefaultRouter):
"""
A router for read-only APIs, which USES trailing slashes.
"""
routes = [
Route(url=r'^{prefix}{trailing_slash}$',
mapping={'get': 'list'},
name='{basename}-list',
initkwargs={'suffix': 'List'}),
Route(url=r'^{prefix}/{lookup}{trailing_slash}$',
mapping={'get': 'retrieve'},
name='{basename}-detail',
initkwargs={'suffix': 'Detail'})
]
|
<commit_before><commit_msg>Add router for read-only APIs<commit_after>from rest_framework.routers import DefaultRouter
from rest_framework.routers import Route
class ReadOnlyRouter(DefaultRouter):
"""
A router for read-only APIs, which USES trailing slashes.
"""
routes = [
Route(url=r'^{prefix}{trailing_slash}$',
mapping={'get': 'list'},
name='{basename}-list',
initkwargs={'suffix': 'List'}),
Route(url=r'^{prefix}/{lookup}{trailing_slash}$',
mapping={'get': 'retrieve'},
name='{basename}-detail',
initkwargs={'suffix': 'Detail'})
]
|
|
eace3feab39e7dc44e83680147e6efdaff4ee4d7
|
scripts/ecs_clean_cluster.py
|
scripts/ecs_clean_cluster.py
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
Remove unused Docker containers and images from an ECS cluster.
Usage:
ecs_clean_cluster.py --key=<PRIV_KEY> --cluster=<CLUSTER_NAME>
ecs_clean_cluster.py -h | --help
Options:
--cluster=<CLUSTER_NAME> Name of the ECS cluster to clean.
--key=<PRIV_KEY> Path to the SSH key for accessing EC2 instances.
"""
import subprocess
import boto3
import docopt
def get_ec2_dns_names(cluster):
"""
Generates the public DNS names of instances in the cluster.
"""
ecs = boto3.client('ecs')
    resp = ecs.list_container_instances(cluster=cluster)
    arns = resp['containerInstanceArns']
    resp = ecs.describe_container_instances(cluster=cluster, containerInstances=arns)
instance_ids = [e['ec2InstanceId'] for e in resp['containerInstances']]
ec2 = boto3.client('ec2')
resp = ec2.describe_instances(InstanceIds=instance_ids)
for r in resp['Reservations']:
for i in r['Instances']:
yield i['PublicDnsName']
def main():
args = docopt.docopt(__doc__)
for name in get_ec2_dns_names(args['--cluster']):
print(f'*** {name}')
proc = subprocess.Popen([
'ssh', '-i', args['--key'],
f'core@{name}', 'docker rm $(docker ps -a -q)'
])
proc.communicate()
proc = subprocess.Popen([
'ssh', '-i', args['--key'],
f'core@{name}', 'docker rmi $(docker images -q)'
])
proc.communicate()
if __name__ == '__main__':
main()
|
Add a script for cleaning our ECS clusters
|
Add a script for cleaning our ECS clusters
|
Python
|
mit
|
wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api
|
Add a script for cleaning our ECS clusters
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
Remove unused Docker containers and images from an ECS cluster.
Usage:
ecs_clean_cluster.py --key=<PRIV_KEY> --cluster=<CLUSTER_NAME>
ecs_clean_cluster.py -h | --help
Options:
--cluster=<CLUSTER_NAME> Name of the ECS cluster to clean.
--key=<PRIV_KEY> Path to the SSH key for accessing EC2 instances.
"""
import subprocess
import boto3
import docopt
def get_ec2_dns_names(cluster):
"""
Generates the public DNS names of instances in the cluster.
"""
ecs = boto3.client('ecs')
    resp = ecs.list_container_instances(cluster=cluster)
    arns = resp['containerInstanceArns']
    resp = ecs.describe_container_instances(cluster=cluster, containerInstances=arns)
instance_ids = [e['ec2InstanceId'] for e in resp['containerInstances']]
ec2 = boto3.client('ec2')
resp = ec2.describe_instances(InstanceIds=instance_ids)
for r in resp['Reservations']:
for i in r['Instances']:
yield i['PublicDnsName']
def main():
args = docopt.docopt(__doc__)
for name in get_ec2_dns_names(args['--cluster']):
print(f'*** {name}')
proc = subprocess.Popen([
'ssh', '-i', args['--key'],
f'core@{name}', 'docker rm $(docker ps -a -q)'
])
proc.communicate()
proc = subprocess.Popen([
'ssh', '-i', args['--key'],
f'core@{name}', 'docker rmi $(docker images -q)'
])
proc.communicate()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script for cleaning our ECS clusters<commit_after>
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
Remove unused Docker containers and images from an ECS cluster.
Usage:
ecs_clean_cluster.py --key=<PRIV_KEY> --cluster=<CLUSTER_NAME>
ecs_clean_cluster.py -h | --help
Options:
--cluster=<CLUSTER_NAME> Name of the ECS cluster to clean.
--key=<PRIV_KEY> Path to the SSH key for accessing EC2 instances.
"""
import subprocess
import boto3
import docopt
def get_ec2_dns_names(cluster):
"""
Generates the public DNS names of instances in the cluster.
"""
ecs = boto3.client('ecs')
    resp = ecs.list_container_instances(cluster=cluster)
    arns = resp['containerInstanceArns']
    resp = ecs.describe_container_instances(cluster=cluster, containerInstances=arns)
instance_ids = [e['ec2InstanceId'] for e in resp['containerInstances']]
ec2 = boto3.client('ec2')
resp = ec2.describe_instances(InstanceIds=instance_ids)
for r in resp['Reservations']:
for i in r['Instances']:
yield i['PublicDnsName']
def main():
args = docopt.docopt(__doc__)
for name in get_ec2_dns_names(args['--cluster']):
print(f'*** {name}')
proc = subprocess.Popen([
'ssh', '-i', args['--key'],
f'core@{name}', 'docker rm $(docker ps -a -q)'
])
proc.communicate()
proc = subprocess.Popen([
'ssh', '-i', args['--key'],
f'core@{name}', 'docker rmi $(docker images -q)'
])
proc.communicate()
if __name__ == '__main__':
main()
|
Add a script for cleaning our ECS clusters#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
Remove unused Docker containers and images from an ECS cluster.
Usage:
ecs_clean_cluster.py --key=<PRIV_KEY> --cluster=<CLUSTER_NAME>
ecs_clean_cluster.py -h | --help
Options:
--cluster=<CLUSTER_NAME> Name of the ECS cluster to clean.
--key=<PRIV_KEY> Path to the SSH key for accessing EC2 instances.
"""
import subprocess
import boto3
import docopt
def get_ec2_dns_names(cluster):
"""
Generates the public DNS names of instances in the cluster.
"""
ecs = boto3.client('ecs')
    resp = ecs.list_container_instances(cluster=cluster)
    arns = resp['containerInstanceArns']
    resp = ecs.describe_container_instances(cluster=cluster, containerInstances=arns)
instance_ids = [e['ec2InstanceId'] for e in resp['containerInstances']]
ec2 = boto3.client('ec2')
resp = ec2.describe_instances(InstanceIds=instance_ids)
for r in resp['Reservations']:
for i in r['Instances']:
yield i['PublicDnsName']
def main():
args = docopt.docopt(__doc__)
for name in get_ec2_dns_names(args['--cluster']):
print(f'*** {name}')
proc = subprocess.Popen([
'ssh', '-i', args['--key'],
f'core@{name}', 'docker rm $(docker ps -a -q)'
])
proc.communicate()
proc = subprocess.Popen([
'ssh', '-i', args['--key'],
f'core@{name}', 'docker rmi $(docker images -q)'
])
proc.communicate()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script for cleaning our ECS clusters<commit_after>#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
Remove unused Docker containers and images from an ECS cluster.
Usage:
ecs_clean_cluster.py --key=<PRIV_KEY> --cluster=<CLUSTER_NAME>
ecs_clean_cluster.py -h | --help
Options:
--cluster=<CLUSTER_NAME> Name of the ECS cluster to clean.
--key=<PRIV_KEY> Path to the SSH key for accessing EC2 instances.
"""
import subprocess
import boto3
import docopt
def get_ec2_dns_names(cluster):
"""
Generates the public DNS names of instances in the cluster.
"""
ecs = boto3.client('ecs')
    resp = ecs.list_container_instances(cluster=cluster)
    arns = resp['containerInstanceArns']
    resp = ecs.describe_container_instances(cluster=cluster, containerInstances=arns)
instance_ids = [e['ec2InstanceId'] for e in resp['containerInstances']]
ec2 = boto3.client('ec2')
resp = ec2.describe_instances(InstanceIds=instance_ids)
for r in resp['Reservations']:
for i in r['Instances']:
yield i['PublicDnsName']
def main():
args = docopt.docopt(__doc__)
for name in get_ec2_dns_names(args['--cluster']):
print(f'*** {name}')
proc = subprocess.Popen([
'ssh', '-i', args['--key'],
f'core@{name}', 'docker rm $(docker ps -a -q)'
])
proc.communicate()
proc = subprocess.Popen([
'ssh', '-i', args['--key'],
f'core@{name}', 'docker rmi $(docker images -q)'
])
proc.communicate()
if __name__ == '__main__':
main()
|
|
d32aa1f54cd1224de506564089805576cb3ce286
|
json2csv.py
|
json2csv.py
|
import json
def main():
input_json = json.load(open("photo_id_to_business_id.json"))
# print the header of output csv file
print 'photo_id,business_id,label'
# for each entry in input json file print one csv row
for i in xrange(len(input_json)):
photo_id = input_json[i]['photo_id']
business_id = input_json[i]['business_id']
label = input_json[i]['label']
print photo_id + ',' + business_id + ',' + label
if __name__ == "__main__":
main()
|
Add conversion from json to csv format
|
Add conversion from json to csv format
|
Python
|
mit
|
aysent/yelp-photo-explorer
|
Add conversion from json to csv format
|
import json
def main():
input_json = json.load(open("photo_id_to_business_id.json"))
# print the header of output csv file
print 'photo_id,business_id,label'
# for each entry in input json file print one csv row
for i in xrange(len(input_json)):
photo_id = input_json[i]['photo_id']
business_id = input_json[i]['business_id']
label = input_json[i]['label']
print photo_id + ',' + business_id + ',' + label
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add conversion from json to csv format<commit_after>
|
import json
def main():
input_json = json.load(open("photo_id_to_business_id.json"))
# print the header of output csv file
print 'photo_id,business_id,label'
# for each entry in input json file print one csv row
for i in xrange(len(input_json)):
photo_id = input_json[i]['photo_id']
business_id = input_json[i]['business_id']
label = input_json[i]['label']
print photo_id + ',' + business_id + ',' + label
if __name__ == "__main__":
main()
|
Add conversion from json to csv formatimport json
def main():
input_json = json.load(open("photo_id_to_business_id.json"))
# print the header of output csv file
print 'photo_id,business_id,label'
# for each entry in input json file print one csv row
for i in xrange(len(input_json)):
photo_id = input_json[i]['photo_id']
business_id = input_json[i]['business_id']
label = input_json[i]['label']
print photo_id + ',' + business_id + ',' + label
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add conversion from json to csv format<commit_after>import json
def main():
input_json = json.load(open("photo_id_to_business_id.json"))
# print the header of output csv file
print 'photo_id,business_id,label'
# for each entry in input json file print one csv row
for i in xrange(len(input_json)):
photo_id = input_json[i]['photo_id']
business_id = input_json[i]['business_id']
label = input_json[i]['label']
print photo_id + ',' + business_id + ',' + label
if __name__ == "__main__":
main()
|
|
be946d43536b06f6a6786b2fc2e379571ee30bfd
|
scorecard/tests/indicators/test_income_adjustments.py
|
scorecard/tests/indicators/test_income_adjustments.py
|
from django.test import SimpleTestCase
from ...profile_data.indicators import (
IncomeAdjustments,
)
from collections import defaultdict
class MockAPIData:
references = defaultdict(lambda: "foobar")
def __init__(self, results, years):
self.results = results
self.years = years
class RevenueSourcesTests(SimpleTestCase):
maxDiff = None
def test_v1(self):
"""
- local and government are summed correctly
- total is calculated correctly
- percentages are calculated correctly
- latest audit year is used, other years ignored
"""
api_data = MockAPIData(
{
"revenue_budget_actual_v1": [
{
"item.code": "1300",
"amount.sum": 200,
"financial_year_end.year": 2050,
"amount_type.code": "ORGB",
},
{
"item.code": "1300",
"amount.sum": 210,
"financial_year_end.year": 2050,
"amount_type.code": "ADJB",
},
{
"item.code": "1300",
"amount.sum": 220,
"financial_year_end.year": 2050,
"amount_type.code": "AUDA",
},
],
"revenue_budget_actual_v2": [],
},
[2050, 2049, 2048, 2047]
)
expected = {
2050: [
{
"item": "Fines",
"amount": 10,
"comparison": "Original to adjusted budget",
"percent_changed": 5
},
{
"item": "Fines",
"amount": 20,
"comparison": "Original budget to audited outcome",
"percent_changed": 10
},
]
}
actual = IncomeAdjustments.get_muni_specifics(api_data)
self.assertEqual(expected, actual)
|
Add working test for minimal income adjustment functionality
|
Add working test for minimal income adjustment functionality
|
Python
|
mit
|
Code4SA/municipal-data,Code4SA/municipal-data,Code4SA/municipal-data,Code4SA/municipal-data
|
Add working test for minimal income adjustment functionality
|
from django.test import SimpleTestCase
from ...profile_data.indicators import (
IncomeAdjustments,
)
from collections import defaultdict
class MockAPIData:
references = defaultdict(lambda: "foobar")
def __init__(self, results, years):
self.results = results
self.years = years
class RevenueSourcesTests(SimpleTestCase):
maxDiff = None
def test_v1(self):
"""
- local and government are summed correctly
- total is calculated correctly
- percentages are calculated correctly
- latest audit year is used, other years ignored
"""
api_data = MockAPIData(
{
"revenue_budget_actual_v1": [
{
"item.code": "1300",
"amount.sum": 200,
"financial_year_end.year": 2050,
"amount_type.code": "ORGB",
},
{
"item.code": "1300",
"amount.sum": 210,
"financial_year_end.year": 2050,
"amount_type.code": "ADJB",
},
{
"item.code": "1300",
"amount.sum": 220,
"financial_year_end.year": 2050,
"amount_type.code": "AUDA",
},
],
"revenue_budget_actual_v2": [],
},
[2050, 2049, 2048, 2047]
)
expected = {
2050: [
{
"item": "Fines",
"amount": 10,
"comparison": "Original to adjusted budget",
"percent_changed": 5
},
{
"item": "Fines",
"amount": 20,
"comparison": "Original budget to audited outcome",
"percent_changed": 10
},
]
}
actual = IncomeAdjustments.get_muni_specifics(api_data)
self.assertEqual(expected, actual)
|
<commit_before><commit_msg>Add working test for minimal income adjustment functionality<commit_after>
|
from django.test import SimpleTestCase
from ...profile_data.indicators import (
IncomeAdjustments,
)
from collections import defaultdict
class MockAPIData:
references = defaultdict(lambda: "foobar")
def __init__(self, results, years):
self.results = results
self.years = years
class RevenueSourcesTests(SimpleTestCase):
maxDiff = None
def test_v1(self):
"""
- local and government are summed correctly
- total is calculated correctly
- percentages are calculated correctly
- latest audit year is used, other years ignored
"""
api_data = MockAPIData(
{
"revenue_budget_actual_v1": [
{
"item.code": "1300",
"amount.sum": 200,
"financial_year_end.year": 2050,
"amount_type.code": "ORGB",
},
{
"item.code": "1300",
"amount.sum": 210,
"financial_year_end.year": 2050,
"amount_type.code": "ADJB",
},
{
"item.code": "1300",
"amount.sum": 220,
"financial_year_end.year": 2050,
"amount_type.code": "AUDA",
},
],
"revenue_budget_actual_v2": [],
},
[2050, 2049, 2048, 2047]
)
expected = {
2050: [
{
"item": "Fines",
"amount": 10,
"comparison": "Original to adjusted budget",
"percent_changed": 5
},
{
"item": "Fines",
"amount": 20,
"comparison": "Original budget to audited outcome",
"percent_changed": 10
},
]
}
actual = IncomeAdjustments.get_muni_specifics(api_data)
self.assertEqual(expected, actual)
|
Add working test for minimal income adjustment functionalityfrom django.test import SimpleTestCase
from ...profile_data.indicators import (
IncomeAdjustments,
)
from collections import defaultdict
class MockAPIData:
references = defaultdict(lambda: "foobar")
def __init__(self, results, years):
self.results = results
self.years = years
class RevenueSourcesTests(SimpleTestCase):
maxDiff = None
def test_v1(self):
"""
- local and government are summed correctly
- total is calculated correctly
- percentages are calculated correctly
- latest audit year is used, other years ignored
"""
api_data = MockAPIData(
{
"revenue_budget_actual_v1": [
{
"item.code": "1300",
"amount.sum": 200,
"financial_year_end.year": 2050,
"amount_type.code": "ORGB",
},
{
"item.code": "1300",
"amount.sum": 210,
"financial_year_end.year": 2050,
"amount_type.code": "ADJB",
},
{
"item.code": "1300",
"amount.sum": 220,
"financial_year_end.year": 2050,
"amount_type.code": "AUDA",
},
],
"revenue_budget_actual_v2": [],
},
[2050, 2049, 2048, 2047]
)
expected = {
2050: [
{
"item": "Fines",
"amount": 10,
"comparison": "Original to adjusted budget",
"percent_changed": 5
},
{
"item": "Fines",
"amount": 20,
"comparison": "Original budget to audited outcome",
"percent_changed": 10
},
]
}
actual = IncomeAdjustments.get_muni_specifics(api_data)
self.assertEqual(expected, actual)
|
<commit_before><commit_msg>Add working test for minimal income adjustment functionality<commit_after>from django.test import SimpleTestCase
from ...profile_data.indicators import (
IncomeAdjustments,
)
from collections import defaultdict
class MockAPIData:
references = defaultdict(lambda: "foobar")
def __init__(self, results, years):
self.results = results
self.years = years
class RevenueSourcesTests(SimpleTestCase):
maxDiff = None
def test_v1(self):
"""
- local and government are summed correctly
- total is calculated correctly
- percentages are calculated correctly
- latest audit year is used, other years ignored
"""
api_data = MockAPIData(
{
"revenue_budget_actual_v1": [
{
"item.code": "1300",
"amount.sum": 200,
"financial_year_end.year": 2050,
"amount_type.code": "ORGB",
},
{
"item.code": "1300",
"amount.sum": 210,
"financial_year_end.year": 2050,
"amount_type.code": "ADJB",
},
{
"item.code": "1300",
"amount.sum": 220,
"financial_year_end.year": 2050,
"amount_type.code": "AUDA",
},
],
"revenue_budget_actual_v2": [],
},
[2050, 2049, 2048, 2047]
)
expected = {
2050: [
{
"item": "Fines",
"amount": 10,
"comparison": "Original to adjusted budget",
"percent_changed": 5
},
{
"item": "Fines",
"amount": 20,
"comparison": "Original budget to audited outcome",
"percent_changed": 10
},
]
}
actual = IncomeAdjustments.get_muni_specifics(api_data)
self.assertEqual(expected, actual)
|
|
95d719c79999f65f373830127763f79678425f46
|
gala-training-crossval-sub.py
|
gala-training-crossval-sub.py
|
# IPython log file
from gala import classify
datas = []
labels = []
import numpy as np
list(map(np.shape, labels))
for i in range(3, 4):
data, label = classify.load_training_data_from_disk('training-data-%i.h5' % i, names=['data', 'labels'])
datas.append(data)
labels.append(label[:, 0])
X0 = np.concatenate(datas, axis=0)
y0 = np.concatenate(labels)
idx = np.random.choice(len(y0), size=3000, replace=False)
X, y = X0[idx], y0[idx]
param_dist = {'n_estimators': [20, 100, 200, 500],
'max_depth': [3, 5, 20, None],
'max_features': ['auto', 5, 10, 20],
'bootstrap': [True, False],
'criterion': ['gini', 'entropy']}
from sklearn import grid_search as gs
from time import time
from sklearn import ensemble
ensemble.RandomForestClassifier().get_params().keys()
rf = ensemble.RandomForestClassifier()
random_search = gs.GridSearchCV(rf, param_grid=param_dist, refit=False,
verbose=2, n_jobs=12)
start=time(); random_search.fit(X, y); stop=time()
|
Add script to test RF performance on gala data
|
Add script to test RF performance on gala data
|
Python
|
bsd-3-clause
|
jni/gala-scripts
|
Add script to test RF performance on gala data
|
# IPython log file
from gala import classify
datas = []
labels = []
import numpy as np
list(map(np.shape, labels))
for i in range(3, 4):
data, label = classify.load_training_data_from_disk('training-data-%i.h5' % i, names=['data', 'labels'])
datas.append(data)
labels.append(label[:, 0])
X0 = np.concatenate(datas, axis=0)
y0 = np.concatenate(labels)
idx = np.random.choice(len(y0), size=3000, replace=False)
X, y = X0[idx], y0[idx]
param_dist = {'n_estimators': [20, 100, 200, 500],
'max_depth': [3, 5, 20, None],
'max_features': ['auto', 5, 10, 20],
'bootstrap': [True, False],
'criterion': ['gini', 'entropy']}
from sklearn import grid_search as gs
from time import time
from sklearn import ensemble
ensemble.RandomForestClassifier().get_params().keys()
rf = ensemble.RandomForestClassifier()
random_search = gs.GridSearchCV(rf, param_grid=param_dist, refit=False,
verbose=2, n_jobs=12)
start=time(); random_search.fit(X, y); stop=time()
|
<commit_before><commit_msg>Add script to test RF performance on gala data<commit_after>
|
# IPython log file
from gala import classify
datas = []
labels = []
import numpy as np
list(map(np.shape, labels))
for i in range(3, 4):
data, label = classify.load_training_data_from_disk('training-data-%i.h5' % i, names=['data', 'labels'])
datas.append(data)
labels.append(label[:, 0])
X0 = np.concatenate(datas, axis=0)
y0 = np.concatenate(labels)
idx = np.random.choice(len(y0), size=3000, replace=False)
X, y = X0[idx], y0[idx]
param_dist = {'n_estimators': [20, 100, 200, 500],
'max_depth': [3, 5, 20, None],
'max_features': ['auto', 5, 10, 20],
'bootstrap': [True, False],
'criterion': ['gini', 'entropy']}
from sklearn import grid_search as gs
from time import time
from sklearn import ensemble
ensemble.RandomForestClassifier().get_params().keys()
rf = ensemble.RandomForestClassifier()
random_search = gs.GridSearchCV(rf, param_grid=param_dist, refit=False,
verbose=2, n_jobs=12)
start=time(); random_search.fit(X, y); stop=time()
|
Add script to test RF performance on gala data# IPython log file
from gala import classify
datas = []
labels = []
import numpy as np
list(map(np.shape, labels))
for i in range(3, 4):
data, label = classify.load_training_data_from_disk('training-data-%i.h5' % i, names=['data', 'labels'])
datas.append(data)
labels.append(label[:, 0])
X0 = np.concatenate(datas, axis=0)
y0 = np.concatenate(labels)
idx = np.random.choice(len(y0), size=3000, replace=False)
X, y = X0[idx], y0[idx]
param_dist = {'n_estimators': [20, 100, 200, 500],
'max_depth': [3, 5, 20, None],
'max_features': ['auto', 5, 10, 20],
'bootstrap': [True, False],
'criterion': ['gini', 'entropy']}
from sklearn import grid_search as gs
from time import time
from sklearn import ensemble
ensemble.RandomForestClassifier().get_params().keys()
rf = ensemble.RandomForestClassifier()
random_search = gs.GridSearchCV(rf, param_grid=param_dist, refit=False,
verbose=2, n_jobs=12)
start=time(); random_search.fit(X, y); stop=time()
|
<commit_before><commit_msg>Add script to test RF performance on gala data<commit_after># IPython log file
from gala import classify
datas = []
labels = []
import numpy as np
list(map(np.shape, labels))
for i in range(3, 4):
data, label = classify.load_training_data_from_disk('training-data-%i.h5' % i, names=['data', 'labels'])
datas.append(data)
labels.append(label[:, 0])
X0 = np.concatenate(datas, axis=0)
y0 = np.concatenate(labels)
idx = np.random.choice(len(y0), size=3000, replace=False)
X, y = X0[idx], y0[idx]
param_dist = {'n_estimators': [20, 100, 200, 500],
'max_depth': [3, 5, 20, None],
'max_features': ['auto', 5, 10, 20],
'bootstrap': [True, False],
'criterion': ['gini', 'entropy']}
from sklearn import grid_search as gs
from time import time
from sklearn import ensemble
ensemble.RandomForestClassifier().get_params().keys()
rf = ensemble.RandomForestClassifier()
random_search = gs.GridSearchCV(rf, param_grid=param_dist, refit=False,
verbose=2, n_jobs=12)
start=time(); random_search.fit(X, y); stop=time()
|
|
c6a98ccdb32e24c8958bf5979b1aa084ba04792a
|
tests/vtc-from-controlset.py
|
tests/vtc-from-controlset.py
|
#!/usr/bin/python
#
# Generate a varnishtest script to validate the preclassified
# U-A strings in the control set.
#
import sys
HEADER="""varnishtest "automatic test of control set"
server s1 {
rxreq
txresp
} -start
varnish v1 -vcl+backend {
include "${projectdir}/../devicedetect.vcl";
sub vcl_recv { call devicedetect; }
sub vcl_deliver { set resp.http.X-UA-Device = req.http.X-UA-Device; }
} -start
client c1 {
"""
TAILER="""
}
client c1 -run
"""
def main():
print HEADER
for line in open("../controlset.txt").readlines():
if line.startswith("#"): continue
line = line.strip()
if len(line) == 0: continue
classid, uastring = line.split("\t", 1)
#print >>sys.stderr, classid, uastring
print "\ttxreq -hdr \"User-Agent: %s\"" % uastring
print "\trxresp"
print "\texpect resp.http.X-UA-Device == \"%s\"" % classid
print "\n" # for readability
print TAILER
if __name__ == "__main__":
main()
|
Create test case from control set
|
Create test case from control set
|
Python
|
bsd-2-clause
|
varnish/varnish-devicedetect,wikp/varnish-devicedetect,varnish/varnish-devicedetect,kevinquinnyo/varnish-devicedetect,wikp/varnish-devicedetect,kevinquinnyo/varnish-devicedetect
|
Create test case from control set
|
#!/usr/bin/python
#
# Generate a varnishtest script to validate the preclassified
# U-A strings in the control set.
#
import sys
HEADER="""varnishtest "automatic test of control set"
server s1 {
rxreq
txresp
} -start
varnish v1 -vcl+backend {
include "${projectdir}/../devicedetect.vcl";
sub vcl_recv { call devicedetect; }
sub vcl_deliver { set resp.http.X-UA-Device = req.http.X-UA-Device; }
} -start
client c1 {
"""
TAILER="""
}
client c1 -run
"""
def main():
print HEADER
for line in open("../controlset.txt").readlines():
if line.startswith("#"): continue
line = line.strip()
if len(line) == 0: continue
classid, uastring = line.split("\t", 1)
#print >>sys.stderr, classid, uastring
print "\ttxreq -hdr \"User-Agent: %s\"" % uastring
print "\trxresp"
print "\texpect resp.http.X-UA-Device == \"%s\"" % classid
print "\n" # for readability
print TAILER
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Create test case from control set<commit_after>
|
#!/usr/bin/python
#
# Generate a varnishtest script to validate the preclassified
# U-A strings in the control set.
#
import sys
HEADER="""varnishtest "automatic test of control set"
server s1 {
rxreq
txresp
} -start
varnish v1 -vcl+backend {
include "${projectdir}/../devicedetect.vcl";
sub vcl_recv { call devicedetect; }
sub vcl_deliver { set resp.http.X-UA-Device = req.http.X-UA-Device; }
} -start
client c1 {
"""
TAILER="""
}
client c1 -run
"""
def main():
print HEADER
for line in open("../controlset.txt").readlines():
if line.startswith("#"): continue
line = line.strip()
if len(line) == 0: continue
classid, uastring = line.split("\t", 1)
#print >>sys.stderr, classid, uastring
print "\ttxreq -hdr \"User-Agent: %s\"" % uastring
print "\trxresp"
print "\texpect resp.http.X-UA-Device == \"%s\"" % classid
print "\n" # for readability
print TAILER
if __name__ == "__main__":
main()
|
Create test case from control set#!/usr/bin/python
#
# Generate a varnishtest script to validate the preclassified
# U-A strings in the control set.
#
import sys
HEADER="""varnishtest "automatic test of control set"
server s1 {
rxreq
txresp
} -start
varnish v1 -vcl+backend {
include "${projectdir}/../devicedetect.vcl";
sub vcl_recv { call devicedetect; }
sub vcl_deliver { set resp.http.X-UA-Device = req.http.X-UA-Device; }
} -start
client c1 {
"""
TAILER="""
}
client c1 -run
"""
def main():
print HEADER
for line in open("../controlset.txt").readlines():
if line.startswith("#"): continue
line = line.strip()
if len(line) == 0: continue
classid, uastring = line.split("\t", 1)
#print >>sys.stderr, classid, uastring
print "\ttxreq -hdr \"User-Agent: %s\"" % uastring
print "\trxresp"
print "\texpect resp.http.X-UA-Device == \"%s\"" % classid
print "\n" # for readability
print TAILER
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Create test case from control set<commit_after>#!/usr/bin/python
#
# Generate a varnishtest script to validate the preclassified
# U-A strings in the control set.
#
import sys
HEADER="""varnishtest "automatic test of control set"
server s1 {
rxreq
txresp
} -start
varnish v1 -vcl+backend {
include "${projectdir}/../devicedetect.vcl";
sub vcl_recv { call devicedetect; }
sub vcl_deliver { set resp.http.X-UA-Device = req.http.X-UA-Device; }
} -start
client c1 {
"""
TAILER="""
}
client c1 -run
"""
def main():
print HEADER
for line in open("../controlset.txt").readlines():
if line.startswith("#"): continue
line = line.strip()
if len(line) == 0: continue
classid, uastring = line.split("\t", 1)
#print >>sys.stderr, classid, uastring
print "\ttxreq -hdr \"User-Agent: %s\"" % uastring
print "\trxresp"
print "\texpect resp.http.X-UA-Device == \"%s\"" % classid
print "\n" # for readability
print TAILER
if __name__ == "__main__":
main()
|
|
86b04012c3e6e9c0606ac314da6f74a51d5d7cc3
|
word_bucket.py
|
word_bucket.py
|
#!/usr/bin/env python
import random
import sys
"""
Takes a string containing {sections} to be replaced. Replaces them until no
braces are left in the string.
Optionally takes an argument to use as seed, for assigning results to things.
Argument can be any string, most likely a name.
"""
#string = '{title} of {adj} {discipline}'
string = '{artifact_type} of {group}'
if len(sys.argv) > 1:
random.seed(sys.argv[1])
adjectives = [
'Inner', 'Outer', 'Middle', 'Upper', 'Lower',
'Northern', 'Southern', 'Eastern', 'Western',
'Recent', 'Ancient', 'Future',
'Applied', 'Theoretical', 'Imaginary', 'Impossible',
'Friendly', 'Savage', 'Vindictive',
'Invisible', 'Indefinite', 'Inadvisable', 'Oblique', 'Extreme', 'Battle', 'Infected',
]
nouns = [
'Bandits', 'Bikers', 'Raiders', 'Guardians', 'Legion', 'Orcs',
'Savages', 'Wizards', 'Clan', 'Zombies', 'Merchants', 'Army', 'Guild',
'Followers of {adj} {discipline}',
]
languages = [
'English', 'Spanish', 'Russian', 'German', 'French'
]
disciplines = [
'Communications', 'Science', 'Bibliomancy', 'Astronomy', 'Horticulture',
'Geography', 'Magic',
'Computer {discipline}',
'{language}',
]
titles = [
'Chair', 'Dean', 'Professor', 'Lecturer',
'{title_adj} {title}',
]
title_adjs = [
'Senior', 'Junior', 'Assistant',
]
artifact_types = [
'Tablet', 'Device', 'Scrolls', 'Remains', 'Casket', 'Ark', 'Journals',
'Totem', 'Icon', 'Idol',
]
groups = [
'the {adj} {thing}',
'the Order of {adj} {thing}',
]
while '{' in string:
values = dict(
discipline = random.choice(disciplines),
language = random.choice(languages),
adj = random.choice(adjectives),
title = random.choice(titles),
title_adj = random.choice(title_adjs),
artifact_type = random.choice(artifact_types),
thing = random.choice(nouns),
group = random.choice(groups),
)
string = string.format(**values)
print(string)
|
Save my musings to git.
|
Save my musings to git.
|
Python
|
apache-2.0
|
Qalthos/wordbuckets
|
Save my musings to git.
|
#!/usr/bin/env python
import random
import sys
"""
Takes a string containing {sections} to be replaced. Replaces them until no
braces are left in the string.
Optionally takes an argument to use as seed, for assigning results to things.
Argument can be any string, most likely a name.
"""
#string = '{title} of {adj} {discipline}'
string = '{artifact_type} of {group}'
if len(sys.argv) > 1:
random.seed(sys.argv[1])
adjectives = [
'Inner', 'Outer', 'Middle', 'Upper', 'Lower',
'Northern', 'Southern', 'Eastern', 'Western',
'Recent', 'Ancient', 'Future',
'Applied', 'Theoretical', 'Imaginary', 'Impossible',
'Friendly', 'Savage', 'Vindictive',
'Invisible', 'Indefinite', 'Inadvisable', 'Oblique', 'Extreme', 'Battle', 'Infected',
]
nouns = [
'Bandits', 'Bikers', 'Raiders', 'Guardians', 'Legion', 'Orcs',
'Savages', 'Wizards', 'Clan', 'Zombies', 'Merchants', 'Army', 'Guild',
'Followers of {adj} {discipline}',
]
languages = [
'English', 'Spanish', 'Russian', 'German', 'French'
]
disciplines = [
'Communications', 'Science', 'Bibliomancy', 'Astronomy', 'Horticulture',
'Geography', 'Magic',
'Computer {discipline}',
'{language}',
]
titles = [
'Chair', 'Dean', 'Professor', 'Lecturer',
'{title_adj} {title}',
]
title_adjs = [
'Senior', 'Junior', 'Assistant',
]
artifact_types = [
'Tablet', 'Device', 'Scrolls', 'Remains', 'Casket', 'Ark', 'Journals',
'Totem', 'Icon', 'Idol',
]
groups = [
'the {adj} {thing}',
'the Order of {adj} {thing}',
]
while '{' in string:
values = dict(
discipline = random.choice(disciplines),
language = random.choice(languages),
adj = random.choice(adjectives),
title = random.choice(titles),
title_adj = random.choice(title_adjs),
artifact_type = random.choice(artifact_types),
thing = random.choice(nouns),
group = random.choice(groups),
)
string = string.format(**values)
print(string)
|
<commit_before><commit_msg>Save my musings to git.<commit_after>
|
#!/usr/bin/env python
import random
import sys
"""
Takes a string containing {sections} to be replaced. Replaces them until no
braces are left in the string.
Optionally takes an argument to use as seed, for assigning results to things.
Argument can be any string, most likely a name.
"""
#string = '{title} of {adj} {discipline}'
string = '{artifact_type} of {group}'
if len(sys.argv) > 1:
random.seed(sys.argv[1])
adjectives = [
'Inner', 'Outer', 'Middle', 'Upper', 'Lower',
'Northern', 'Southern', 'Eastern', 'Western',
'Recent', 'Ancient', 'Future',
'Applied', 'Theoretical', 'Imaginary', 'Impossible',
'Friendly', 'Savage', 'Vindictive',
'Invisible', 'Indefinite', 'Inadvisable', 'Oblique', 'Extreme', 'Battle', 'Infected',
]
nouns = [
'Bandits', 'Bikers', 'Raiders', 'Guardians', 'Legion', 'Orcs',
'Savages', 'Wizards', 'Clan', 'Zombies', 'Merchants', 'Army', 'Guild',
'Followers of {adj} {discipline}',
]
languages = [
'English', 'Spanish', 'Russian', 'German', 'French'
]
disciplines = [
'Communications', 'Science', 'Bibliomancy', 'Astronomy', 'Horticulture',
'Geography', 'Magic',
'Computer {discipline}',
'{language}',
]
titles = [
'Chair', 'Dean', 'Professor', 'Lecturer',
'{title_adj} {title}',
]
title_adjs = [
'Senior', 'Junior', 'Assistant',
]
artifact_types = [
'Tablet', 'Device', 'Scrolls', 'Remains', 'Casket', 'Ark', 'Journals',
'Totem', 'Icon', 'Idol',
]
groups = [
'the {adj} {thing}',
'the Order of {adj} {thing}',
]
while '{' in string:
values = dict(
discipline = random.choice(disciplines),
language = random.choice(languages),
adj = random.choice(adjectives),
title = random.choice(titles),
title_adj = random.choice(title_adjs),
artifact_type = random.choice(artifact_types),
thing = random.choice(nouns),
group = random.choice(groups),
)
string = string.format(**values)
print(string)
|
Save my musings to git.#!/usr/bin/env python
import random
import sys
"""
Takes a string containing {sections} to be replaced. Replaces them until no
braces are left in the string.
Optionally takes an argument to use as seed, for assigning results to things.
Argument can be any string, most likely a name.
"""
#string = '{title} of {adj} {discipline}'
string = '{artifact_type} of {group}'
if len(sys.argv) > 1:
random.seed(sys.argv[1])
adjectives = [
'Inner', 'Outer', 'Middle', 'Upper', 'Lower',
'Northern', 'Southern', 'Eastern', 'Western',
'Recent', 'Ancient', 'Future',
'Applied', 'Theoretical', 'Imaginary', 'Impossible',
'Friendly', 'Savage', 'Vindictive',
'Invisible', 'Indefinite', 'Inadvisable', 'Oblique', 'Extreme', 'Battle', 'Infected',
]
nouns = [
'Bandits', 'Bikers', 'Raiders', 'Guardians', 'Legion', 'Orcs',
'Savages', 'Wizards', 'Clan', 'Zombies', 'Merchants', 'Army', 'Guild',
'Followers of {adj} {discipline}',
]
languages = [
'English', 'Spanish', 'Russian', 'German', 'French'
]
disciplines = [
'Communications', 'Science', 'Bibliomancy', 'Astronomy', 'Horticulture',
'Geography', 'Magic',
'Computer {discipline}',
'{language}',
]
titles = [
'Chair', 'Dean', 'Professor', 'Lecturer',
'{title_adj} {title}',
]
title_adjs = [
'Senior', 'Junior', 'Assistant',
]
artifact_types = [
'Tablet', 'Device', 'Scrolls', 'Remains', 'Casket', 'Ark', 'Journals',
'Totem', 'Icon', 'Idol',
]
groups = [
'the {adj} {thing}',
'the Order of {adj} {thing}',
]
while '{' in string:
values = dict(
discipline = random.choice(disciplines),
language = random.choice(languages),
adj = random.choice(adjectives),
title = random.choice(titles),
title_adj = random.choice(title_adjs),
artifact_type = random.choice(artifact_types),
thing = random.choice(nouns),
group = random.choice(groups),
)
string = string.format(**values)
print(string)
|
<commit_before><commit_msg>Save my musings to git.<commit_after>#!/usr/bin/env python
import random
import sys
"""
Takes a string containing {sections} to be replaced. Replaces them until no
braces are left in the string.
Optionally takes an argument to use as seed, for assigning results to things.
Argument can be any string, most likely a name.
"""
#string = '{title} of {adj} {discipline}'
string = '{artifact_type} of {group}'
if len(sys.argv) > 1:
random.seed(sys.argv[1])
adjectives = [
'Inner', 'Outer', 'Middle', 'Upper', 'Lower',
'Northern', 'Southern', 'Eastern', 'Western',
'Recent', 'Ancient', 'Future',
'Applied', 'Theoretical', 'Imaginary', 'Impossible',
'Friendly', 'Savage', 'Vindictive',
'Invisible', 'Indefinite', 'Inadvisable', 'Oblique', 'Extreme', 'Battle', 'Infected',
]
nouns = [
'Bandits', 'Bikers', 'Raiders', 'Guardians', 'Legion', 'Orcs',
'Savages', 'Wizards', 'Clan', 'Zombies', 'Merchants', 'Army', 'Guild',
'Followers of {adj} {discipline}',
]
languages = [
'English', 'Spanish', 'Russian', 'German', 'French'
]
disciplines = [
'Communications', 'Science', 'Bibliomancy', 'Astronomy', 'Horticulture',
'Geography', 'Magic',
'Computer {discipline}',
'{language}',
]
titles = [
'Chair', 'Dean', 'Professor', 'Lecturer',
'{title_adj} {title}',
]
title_adjs = [
'Senior', 'Junior', 'Assistant',
]
artifact_types = [
'Tablet', 'Device', 'Scrolls', 'Remains', 'Casket', 'Ark', 'Journals',
'Totem', 'Icon', 'Idol',
]
groups = [
'the {adj} {thing}',
'the Order of {adj} {thing}',
]
while '{' in string:
values = dict(
discipline = random.choice(disciplines),
language = random.choice(languages),
adj = random.choice(adjectives),
title = random.choice(titles),
title_adj = random.choice(title_adjs),
artifact_type = random.choice(artifact_types),
thing = random.choice(nouns),
group = random.choice(groups),
)
string = string.format(**values)
print(string)
|
|
89a427994c754fc63d01d7172e77f849039a356a
|
corehq/apps/domain/management/commands/migrate_domain_countries.py
|
corehq/apps/domain/management/commands/migrate_domain_countries.py
|
from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
#Special cases
country_lookup["USA"] = country_lookup["united states"]
country_lookup["California"] = country_lookup["united states"]
country_lookup["Wales"] = country_lookup["united kingdom"]
for domain in Domain.get_all():
try:
if isinstance(domain.deployment.country, basestring):
if ',' in domain.deployment.country:
countries = domain.deployment.country.split(',')
elif ' and ' in domain.deployment.country:
countries = domain.deployment.country.split(' and ')
else:
countries = [domain.deployment.country]
abbr = []
for country in countries:
country = country.strip().lower()
if country in country_lookup.keys():
abbr.append(country_lookup[country])
domain.deployment.countries = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s", e
|
from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
#Special cases
country_lookup["USA"] = country_lookup["united states"]
country_lookup["California"] = country_lookup["united states"]
country_lookup["Wales"] = country_lookup["united kingdom"]
for domain in Domain.get_all():
if domain.deployment._doc.get('countries', None):
continue
try:
country = None
if domain.deployment._doc.get('country', None):
country = domain.deployment._doc['country']
elif domain._doc.get('country', None):
country = domain._doc['country']
if country:
if ',' in country:
countries = country.split(',')
elif ' and ' in country:
countries = country.split(' and ')
else:
countries = [country]
abbr = []
for country in countries:
country = country.strip().lower()
if country in country_lookup.keys():
abbr.append(country_lookup[country])
domain.deployment.countries = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s" % e
|
Change migration to fallback to old country
|
Change migration to fallback to old country
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq
|
from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
#Special cases
country_lookup["USA"] = country_lookup["united states"]
country_lookup["California"] = country_lookup["united states"]
country_lookup["Wales"] = country_lookup["united kingdom"]
for domain in Domain.get_all():
try:
if isinstance(domain.deployment.country, basestring):
if ',' in domain.deployment.country:
countries = domain.deployment.country.split(',')
elif ' and ' in domain.deployment.country:
countries = domain.deployment.country.split(' and ')
else:
countries = [domain.deployment.country]
abbr = []
for country in countries:
country = country.strip().lower()
if country in country_lookup.keys():
abbr.append(country_lookup[country])
domain.deployment.countries = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s", e
Change migration to fallback to old country
|
from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
#Special cases
country_lookup["USA"] = country_lookup["united states"]
country_lookup["California"] = country_lookup["united states"]
country_lookup["Wales"] = country_lookup["united kingdom"]
for domain in Domain.get_all():
if domain.deployment._doc.get('countries', None):
continue
try:
country = None
if domain.deployment._doc.get('country', None):
country = domain.deployment._doc['country']
elif domain._doc.get('country', None):
country = domain._doc['country']
if country:
if ',' in country:
countries = country.split(',')
elif ' and ' in country:
countries = country.split(' and ')
else:
countries = [country]
abbr = []
for country in countries:
country = country.strip().lower()
if country in country_lookup.keys():
abbr.append(country_lookup[country])
domain.deployment.countries = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s" % e
|
<commit_before>from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
#Special cases
country_lookup["USA"] = country_lookup["united states"]
country_lookup["California"] = country_lookup["united states"]
country_lookup["Wales"] = country_lookup["united kingdom"]
for domain in Domain.get_all():
try:
if isinstance(domain.deployment.country, basestring):
if ',' in domain.deployment.country:
countries = domain.deployment.country.split(',')
elif ' and ' in domain.deployment.country:
countries = domain.deployment.country.split(' and ')
else:
countries = [domain.deployment.country]
abbr = []
for country in countries:
country = country.strip().lower()
if country in country_lookup.keys():
abbr.append(country_lookup[country])
domain.deployment.countries = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s", e
<commit_msg>Change migration to fallback to old country<commit_after>
|
from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
#Special cases
country_lookup["USA"] = country_lookup["united states"]
country_lookup["California"] = country_lookup["united states"]
country_lookup["Wales"] = country_lookup["united kingdom"]
for domain in Domain.get_all():
if domain.deployment._doc.get('countries', None):
continue
try:
country = None
if domain.deployment._doc.get('country', None):
country = domain.deployment._doc['country']
elif domain._doc.get('country', None):
country = domain._doc['country']
if country:
if ',' in country:
countries = country.split(',')
elif ' and ' in country:
countries = country.split(' and ')
else:
countries = [country]
abbr = []
for country in countries:
country = country.strip().lower()
if country in country_lookup.keys():
abbr.append(country_lookup[country])
domain.deployment.countries = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s" % e
|
from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
#Special cases
country_lookup["USA"] = country_lookup["united states"]
country_lookup["California"] = country_lookup["united states"]
country_lookup["Wales"] = country_lookup["united kingdom"]
for domain in Domain.get_all():
try:
if isinstance(domain.deployment.country, basestring):
if ',' in domain.deployment.country:
countries = domain.deployment.country.split(',')
elif ' and ' in domain.deployment.country:
countries = domain.deployment.country.split(' and ')
else:
countries = [domain.deployment.country]
abbr = []
for country in countries:
country = country.strip().lower()
if country in country_lookup.keys():
abbr.append(country_lookup[country])
domain.deployment.countries = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s", e
Change migration to fallback to old country
from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
#Special cases
country_lookup["USA"] = country_lookup["united states"]
country_lookup["California"] = country_lookup["united states"]
country_lookup["Wales"] = country_lookup["united kingdom"]
for domain in Domain.get_all():
if domain.deployment._doc.get('countries', None):
continue
try:
country = None
if domain.deployment._doc.get('country', None):
country = domain.deployment._doc['country']
elif domain._doc.get('country', None):
country = domain._doc['country']
if country:
if ',' in country:
countries = country.split(',')
elif ' and ' in country:
countries = country.split(' and ')
else:
countries = [country]
abbr = []
for country in countries:
country = country.strip().lower()
if country in country_lookup.keys():
abbr.append(country_lookup[country])
domain.deployment.countries = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s" % e
|
<commit_before>from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
#Special cases
country_lookup["USA"] = country_lookup["united states"]
country_lookup["California"] = country_lookup["united states"]
country_lookup["Wales"] = country_lookup["united kingdom"]
for domain in Domain.get_all():
try:
if isinstance(domain.deployment.country, basestring):
if ',' in domain.deployment.country:
countries = domain.deployment.country.split(',')
elif ' and ' in domain.deployment.country:
countries = domain.deployment.country.split(' and ')
else:
countries = [domain.deployment.country]
abbr = []
for country in countries:
country = country.strip().lower()
if country in country_lookup.keys():
abbr.append(country_lookup[country])
domain.deployment.countries = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s", e
<commit_msg>Change migration to fallback to old country<commit_after>from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
#Special cases
country_lookup["USA"] = country_lookup["united states"]
country_lookup["California"] = country_lookup["united states"]
country_lookup["Wales"] = country_lookup["united kingdom"]
for domain in Domain.get_all():
if domain.deployment._doc.get('countries', None):
continue
try:
country = None
if domain.deployment._doc.get('country', None):
country = domain.deployment._doc['country']
elif domain._doc.get('country', None):
country = domain._doc['country']
if country:
if ',' in country:
countries = country.split(',')
elif ' and ' in country:
countries = country.split(' and ')
else:
countries = [country]
abbr = []
for country in countries:
country = country.strip().lower()
if country in country_lookup.keys():
abbr.append(country_lookup[country])
domain.deployment.countries = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s" % e
|
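The heart of the migration above is the free-text parsing: split the legacy country string on ',' or ' and ', normalise each piece, and map it through the django_countries name-to-code lookup. A standalone sketch of that step, using a hypothetical four-entry lookup in place of the full COUNTRIES tuple:

# Hypothetical subset of django_countries' (code, name) pairs
COUNTRIES = (('US', 'United States'), ('GB', 'United Kingdom'), ('IN', 'India'), ('NP', 'Nepal'))
country_lookup = {name.lower(): code for code, name in COUNTRIES}

def parse_countries(raw):
    # Same splitting rules as the management command above
    if ',' in raw:
        parts = raw.split(',')
    elif ' and ' in raw:
        parts = raw.split(' and ')
    else:
        parts = [raw]
    return [country_lookup[p.strip().lower()] for p in parts if p.strip().lower() in country_lookup]

print(parse_countries('India and Nepal'))        # ['IN', 'NP']
print(parse_countries('United Kingdom, India'))  # ['GB', 'IN']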
1a856fa1894a3818a8f9311cef8c469a50fed48a
|
tests/test_selenium.py
|
tests/test_selenium.py
|
"""
Setting up functional testing for layout web tool.
"""
import unittest
import urllib
from flask import Flask
from flask_testing import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
class TestBase(LiveServerTestCase):
def create_app(self):
app = Flask(__name__)
app.config['TESTING'] = True
# Default port is 5000
app.config['LIVESERVER_PORT'] = 8943
# Default timeout is 5 seconds
app.config['LIVESERVER_TIMEOUT'] = 10
return app
def setUp(self):
opt = Options()
opt.add_argument("headless")
# FIXME: we may want to keep a policy so as to fix seccomp blocking
# the browser without disabling sandboxing altogether.
opt.add_argument("no-sandbox")
self.driver = webdriver.Chrome(options=opt)
def test_start(self):
driver = self.driver
driver.get('https://in-toto.engineering.nyu.edu/')
start = driver.find_element_by_xpath('/html/body/div[3]/div/div/a')
start.click()
self.driver.quit()
if __name__ == '__main__':
unittest.main()
|
Add functional testing using Selenium Webdriver
|
Add functional testing using Selenium Webdriver
Setting up the skeleton and initial tests for unit testing of front-end of layout-web-tool
|
Python
|
mit
|
in-toto/layout-web-tool,in-toto/layout-web-tool,in-toto/layout-web-tool
|
Add functional testing using Selenium Webdriver
Setting up the skeleton and initial tests for unit testing of front-end of layout-web-tool
|
"""
Setting up functional testing for layout web tool.
"""
import unittest
import urllib
from flask import Flask
from flask_testing import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
class TestBase(LiveServerTestCase):
def create_app(self):
app = Flask(__name__)
app.config['TESTING'] = True
# Default port is 5000
app.config['LIVESERVER_PORT'] = 8943
# Default timeout is 5 seconds
app.config['LIVESERVER_TIMEOUT'] = 10
return app
def setUp(self):
opt = Options()
opt.add_argument("headless")
# FIXME: we may want to keep a policy so as to fix seccomp blocking
# the browser without disabling sandboxing altogether.
opt.add_argument("no-sandbox")
self.driver = webdriver.Chrome(options=opt)
def test_start(self):
driver = self.driver
driver.get('https://in-toto.engineering.nyu.edu/')
start = driver.find_element_by_xpath('/html/body/div[3]/div/div/a')
start.click()
self.driver.quit()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add functional testing using Selenium Webdriver
Setting up the skeleton and initial tests for unit testing of front-end of layout-web-tool<commit_after>
|
"""
Setting up functional testing for layout web tool.
"""
import unittest
import urllib
from flask import Flask
from flask_testing import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
class TestBase(LiveServerTestCase):
def create_app(self):
app = Flask(__name__)
app.config['TESTING'] = True
# Default port is 5000
app.config['LIVESERVER_PORT'] = 8943
# Default timeout is 5 seconds
app.config['LIVESERVER_TIMEOUT'] = 10
return app
def setUp(self):
opt = Options()
opt.add_argument("headless")
# FIXME: we may want to keep a policy so as to fix seccomp blocking
# the browser without disabling sandboxing altogether.
opt.add_argument("no-sandbox")
self.driver = webdriver.Chrome(options=opt)
def test_start(self):
driver = self.driver
driver.get('https://in-toto.engineering.nyu.edu/')
start = driver.find_element_by_xpath('/html/body/div[3]/div/div/a')
start.click()
self.driver.quit()
if __name__ == '__main__':
unittest.main()
|
Add functional testing using Selenium Webdriver
Setting up the skeleton and initial tests for unit testing of front-end of layout-web-tool
"""
Setting up functional testing for layout web tool.
"""
import unittest
import urllib
from flask import Flask
from flask_testing import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
class TestBase(LiveServerTestCase):
def create_app(self):
app = Flask(__name__)
app.config['TESTING'] = True
# Default port is 5000
app.config['LIVESERVER_PORT'] = 8943
# Default timeout is 5 seconds
app.config['LIVESERVER_TIMEOUT'] = 10
return app
def setUp(self):
opt = Options()
opt.add_argument("headless")
# FIXME: we may want to keep a policy so as to fix seccomp blocking
# the browser without disabling sandboxing altogether.
opt.add_argument("no-sandbox")
self.driver = webdriver.Chrome(options=opt)
def test_start(self):
driver = self.driver
driver.get('https://in-toto.engineering.nyu.edu/')
start = driver.find_element_by_xpath('/html/body/div[3]/div/div/a')
start.click()
self.driver.quit()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add functional testing using Selenium Webdriver
Setting up the skeleton and initial tests for unit testing of front-end of layout-web-tool<commit_after>"""
Setting up functional testing for layout web tool.
"""
import unittest
import urllib
from flask import Flask
from flask_testing import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
class TestBase(LiveServerTestCase):
def create_app(self):
app = Flask(__name__)
app.config['TESTING'] = True
# Default port is 5000
app.config['LIVESERVER_PORT'] = 8943
# Default timeout is 5 seconds
app.config['LIVESERVER_TIMEOUT'] = 10
return app
def setUp(self):
opt = Options()
opt.add_argument("headless")
# FIXME: we may want to keep a policy so as to fix seccomp blocking
# the browser without disabling sandboxing altogether.
opt.add_argument("no-sandbox")
self.driver = webdriver.Chrome(options=opt)
def test_start(self):
driver = self.driver
driver.get('https://in-toto.engineering.nyu.edu/')
start = driver.find_element_by_xpath('/html/body/div[3]/div/div/a')
start.click()
self.driver.quit()
if __name__ == '__main__':
unittest.main()
|
|
11c22561bd0475f9b58befd8bb47068c7c3a652a
|
api/players/management/commands/update_all_player_mmrs.py
|
api/players/management/commands/update_all_player_mmrs.py
|
import time
from datetime import timedelta
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.utils import timezone
from players.models import Player
class Command(BaseCommand):
def handle(self, *args, **options):
start_date = timezone.now() - timedelta(days=7)
players = Player.objects.filter(Q(mmr_last_updated__isnull=True) | Q(mmr_last_updated__lt=start_date))
self.stdout.write('Updating MMR for {} players.'.format(players.count()))
for player in players:
player.update_mmr()
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Finished updating all player MMRs.'))
|
Add management command to update all player MMRs
|
Add management command to update all player MMRs
|
Python
|
apache-2.0
|
prattl/teamfinder,prattl/teamfinder,prattl/teamfinder,prattl/teamfinder
|
Add management command to update all player MMRs
|
import time
from datetime import timedelta
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.utils import timezone
from players.models import Player
class Command(BaseCommand):
def handle(self, *args, **options):
start_date = timezone.now() - timedelta(days=7)
players = Player.objects.filter(Q(mmr_last_updated__isnull=True) | Q(mmr_last_updated__lt=start_date))
self.stdout.write('Updating MMR for {} players.'.format(players.count()))
for player in players:
player.update_mmr()
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Finished updating all player MMRs.'))
|
<commit_before><commit_msg>Add management command to update all player MMRs<commit_after>
|
import time
from datetime import timedelta
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.utils import timezone
from players.models import Player
class Command(BaseCommand):
def handle(self, *args, **options):
start_date = timezone.now() - timedelta(days=7)
players = Player.objects.filter(Q(mmr_last_updated__isnull=True) | Q(mmr_last_updated__lt=start_date))
self.stdout.write('Updating MMR for {} players.'.format(players.count()))
for player in players:
player.update_mmr()
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Finished updating all player MMRs.'))
|
Add management command to update all player MMRs
import time
from datetime import timedelta
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.utils import timezone
from players.models import Player
class Command(BaseCommand):
def handle(self, *args, **options):
start_date = timezone.now() - timedelta(days=7)
players = Player.objects.filter(Q(mmr_last_updated__isnull=True) | Q(mmr_last_updated__lt=start_date))
self.stdout.write('Updating MMR for {} players.'.format(players.count()))
for player in players:
player.update_mmr()
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Finished updating all player MMRs.'))
|
<commit_before><commit_msg>Add management command to update all player MMRs<commit_after>import time
from datetime import timedelta
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.utils import timezone
from players.models import Player
class Command(BaseCommand):
def handle(self, *args, **options):
start_date = timezone.now() - timedelta(days=7)
players = Player.objects.filter(Q(mmr_last_updated__isnull=True) | Q(mmr_last_updated__lt=start_date))
self.stdout.write('Updating MMR for {} players.'.format(players.count()))
for player in players:
player.update_mmr()
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Finished updating all player MMRs.'))
|
|
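A command like this is normally run by a scheduler rather than by hand. A sketch of invoking it programmatically, assuming a standard Django project; the settings module path is hypothetical:

import os
import django
from django.core.management import call_command

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'teamfinder.settings')  # hypothetical settings path
django.setup()
call_command('update_all_player_mmrs')  # the command name comes from the module filename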
f5501cee5b1e7410a5d19522d14f1c9e49ad9d96
|
tests/test_sorting_and_searching/test_max_subarray.py
|
tests/test_sorting_and_searching/test_max_subarray.py
|
import unittest
from aids.sorting_and_searching.max_subarray import max_subarray
class MaxSubArrayTestCase(unittest.TestCase):
'''
Unit tests for max_subarray
'''
def setUp(self):
pass
def test_all_positive(self):
pass
def test_all_negative(self):
pass
def test_positive_and_negative(self):
pass
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
Add unit tests for max_subarray
|
Add unit tests for max_subarray
|
Python
|
mit
|
ueg1990/aids
|
Add unit tests for max_subarray
|
import unittest
from aids.sorting_and_searching.max_subarray import max_subarray
class MaxSubArrayTestCase(unittest.TestCase):
'''
Unit tests for max_subarray
'''
def setUp(self):
pass
def test_all_positive(self):
pass
def test_all_negative(self):
pass
def test_positive_and_negative(self):
pass
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit tests for max_subarray<commit_after>
|
import unittest
from aids.sorting_and_searching.max_subarray import max_subarray
class MaxSubArrayTestCase(unittest.TestCase):
'''
Unit tests for max_subarray
'''
def setUp(self):
pass
def test_all_positive(self):
pass
def test_all_negative(self):
pass
def test_positive_and_negative(self):
pass
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
Add unit tests for max_subarray
import unittest
from aids.sorting_and_searching.max_subarray import max_subarray
class MaxSubArrayTestCase(unittest.TestCase):
'''
Unit tests for max_subarray
'''
def setUp(self):
pass
def test_all_positive(self):
pass
def test_all_negative(self):
pass
def test_positive_and_negative(self):
pass
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit tests for max_subarray<commit_after>import unittest
from aids.sorting_and_searching.max_subarray import max_subarray
class MaxSubArrayTestCase(unittest.TestCase):
'''
Unit tests for max_subarray
'''
def setUp(self):
pass
def test_all_positive(self):
pass
def test_all_negative(self):
pass
def test_positive_and_negative(self):
pass
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
|
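The three empty test cases above (all positive, all negative, mixed) correspond to the standard behaviour of a maximum-subarray routine. The implementation under test is not shown in this record, so the following is only a sketch of the usual Kadane approach and of the assertions such tests would typically make:

def max_subarray(nums):
    # Kadane's algorithm: track the best sum ending at the current index
    best = current = nums[0]
    for x in nums[1:]:
        current = max(x, current + x)
        best = max(best, current)
    return best

assert max_subarray([1, 2, 3]) == 6            # all positive: the whole array
assert max_subarray([-3, -1, -2]) == -1        # all negative: the least negative element
assert max_subarray([2, -5, 3, 4, -1]) == 7    # mixed signs: the slice [3, 4]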
1d77b6645e28f1e614502f6bd9bb5458383ecdcf
|
jacquard/tests/test_config.py
|
jacquard/tests/test_config.py
|
import io
import sys
import tempfile
import textwrap
from jacquard.config import load_config
CONFIG_FILE = """
[storage]
engine = dummy
url = dummy
[directory]
engine = dummy
[test_section]
test_key = test_value
"""
def load_test_config(extra=''):
f = io.StringIO(CONFIG_FILE + textwrap.dedent(extra))
return load_config(f)
def test_load_config_smoke():
load_test_config()
def test_load_config_from_file():
with tempfile.NamedTemporaryFile('w') as f:
f.write(CONFIG_FILE)
f.flush()
load_config(f.name)
def test_config_creates_storage_engine():
config = load_test_config()
with config.storage.transaction() as store:
store['bees'] = 'pony'
with config.storage.transaction() as store:
assert store['bees'] == 'pony'
def test_config_creates_directory():
config = load_test_config()
assert list(config.directory.all_users()) == []
def test_config_can_iterate_over_sections():
config = load_test_config()
assert set(config) == {'storage', 'directory', 'test_section', 'DEFAULT'}
def test_config_can_query_subsections():
config = load_test_config()
assert config['test_section']['test_key'] == 'test_value'
def test_config_can_test_section_inclusion():
config = load_test_config()
assert 'test_section' in config
assert 'test_section2' not in config
def test_config_section_len():
config = load_test_config()
assert len(config) == 4
def test_adds_extra_elements_to_path():
try:
sys.path.remove('/gravity')
except ValueError:
pass
load_test_config("""
[paths]
a_path = /gravity
""")
assert '/gravity' in sys.path
|
Add tests for config loading
|
Add tests for config loading
|
Python
|
mit
|
prophile/jacquard,prophile/jacquard
|
Add tests for config loading
|
import io
import sys
import tempfile
import textwrap
from jacquard.config import load_config
CONFIG_FILE = """
[storage]
engine = dummy
url = dummy
[directory]
engine = dummy
[test_section]
test_key = test_value
"""
def load_test_config(extra=''):
f = io.StringIO(CONFIG_FILE + textwrap.dedent(extra))
return load_config(f)
def test_load_config_smoke():
load_test_config()
def test_load_config_from_file():
with tempfile.NamedTemporaryFile('w') as f:
f.write(CONFIG_FILE)
f.flush()
load_config(f.name)
def test_config_creates_storage_engine():
config = load_test_config()
with config.storage.transaction() as store:
store['bees'] = 'pony'
with config.storage.transaction() as store:
assert store['bees'] == 'pony'
def test_config_creates_directory():
config = load_test_config()
assert list(config.directory.all_users()) == []
def test_config_can_iterate_over_sections():
config = load_test_config()
assert set(config) == {'storage', 'directory', 'test_section', 'DEFAULT'}
def test_config_can_query_subsections():
config = load_test_config()
assert config['test_section']['test_key'] == 'test_value'
def test_config_can_test_section_inclusion():
config = load_test_config()
assert 'test_section' in config
assert 'test_section2' not in config
def test_config_section_len():
config = load_test_config()
assert len(config) == 4
def test_adds_extra_elements_to_path():
try:
sys.path.remove('/gravity')
except ValueError:
pass
load_test_config("""
[paths]
a_path = /gravity
""")
assert '/gravity' in sys.path
|
<commit_before><commit_msg>Add tests for config loading<commit_after>
|
import io
import sys
import tempfile
import textwrap
from jacquard.config import load_config
CONFIG_FILE = """
[storage]
engine = dummy
url = dummy
[directory]
engine = dummy
[test_section]
test_key = test_value
"""
def load_test_config(extra=''):
f = io.StringIO(CONFIG_FILE + textwrap.dedent(extra))
return load_config(f)
def test_load_config_smoke():
load_test_config()
def test_load_config_from_file():
with tempfile.NamedTemporaryFile('w') as f:
f.write(CONFIG_FILE)
f.flush()
load_config(f.name)
def test_config_creates_storage_engine():
config = load_test_config()
with config.storage.transaction() as store:
store['bees'] = 'pony'
with config.storage.transaction() as store:
assert store['bees'] == 'pony'
def test_config_creates_directory():
config = load_test_config()
assert list(config.directory.all_users()) == []
def test_config_can_iterate_over_sections():
config = load_test_config()
assert set(config) == {'storage', 'directory', 'test_section', 'DEFAULT'}
def test_config_can_query_subsections():
config = load_test_config()
assert config['test_section']['test_key'] == 'test_value'
def test_config_can_test_section_inclusion():
config = load_test_config()
assert 'test_section' in config
assert 'test_section2' not in config
def test_config_section_len():
config = load_test_config()
assert len(config) == 4
def test_adds_extra_elements_to_path():
try:
sys.path.remove('/gravity')
except ValueError:
pass
load_test_config("""
[paths]
a_path = /gravity
""")
assert '/gravity' in sys.path
|
Add tests for config loading
import io
import sys
import tempfile
import textwrap
from jacquard.config import load_config
CONFIG_FILE = """
[storage]
engine = dummy
url = dummy
[directory]
engine = dummy
[test_section]
test_key = test_value
"""
def load_test_config(extra=''):
f = io.StringIO(CONFIG_FILE + textwrap.dedent(extra))
return load_config(f)
def test_load_config_smoke():
load_test_config()
def test_load_config_from_file():
with tempfile.NamedTemporaryFile('w') as f:
f.write(CONFIG_FILE)
f.flush()
load_config(f.name)
def test_config_creates_storage_engine():
config = load_test_config()
with config.storage.transaction() as store:
store['bees'] = 'pony'
with config.storage.transaction() as store:
assert store['bees'] == 'pony'
def test_config_creates_directory():
config = load_test_config()
assert list(config.directory.all_users()) == []
def test_config_can_iterate_over_sections():
config = load_test_config()
assert set(config) == {'storage', 'directory', 'test_section', 'DEFAULT'}
def test_config_can_query_subsections():
config = load_test_config()
assert config['test_section']['test_key'] == 'test_value'
def test_config_can_test_section_inclusion():
config = load_test_config()
assert 'test_section' in config
assert 'test_section2' not in config
def test_config_section_len():
config = load_test_config()
assert len(config) == 4
def test_adds_extra_elements_to_path():
try:
sys.path.remove('/gravity')
except ValueError:
pass
load_test_config("""
[paths]
a_path = /gravity
""")
assert '/gravity' in sys.path
|
<commit_before><commit_msg>Add tests for config loading<commit_after>import io
import sys
import tempfile
import textwrap
from jacquard.config import load_config
CONFIG_FILE = """
[storage]
engine = dummy
url = dummy
[directory]
engine = dummy
[test_section]
test_key = test_value
"""
def load_test_config(extra=''):
f = io.StringIO(CONFIG_FILE + textwrap.dedent(extra))
return load_config(f)
def test_load_config_smoke():
load_test_config()
def test_load_config_from_file():
with tempfile.NamedTemporaryFile('w') as f:
f.write(CONFIG_FILE)
f.flush()
load_config(f.name)
def test_config_creates_storage_engine():
config = load_test_config()
with config.storage.transaction() as store:
store['bees'] = 'pony'
with config.storage.transaction() as store:
assert store['bees'] == 'pony'
def test_config_creates_directory():
config = load_test_config()
assert list(config.directory.all_users()) == []
def test_config_can_iterate_over_sections():
config = load_test_config()
assert set(config) == {'storage', 'directory', 'test_section', 'DEFAULT'}
def test_config_can_query_subsections():
config = load_test_config()
assert config['test_section']['test_key'] == 'test_value'
def test_config_can_test_section_inclusion():
config = load_test_config()
assert 'test_section' in config
assert 'test_section2' not in config
def test_config_section_len():
config = load_test_config()
assert len(config) == 4
def test_adds_extra_elements_to_path():
try:
sys.path.remove('/gravity')
except ValueError:
pass
load_test_config("""
[paths]
a_path = /gravity
""")
assert '/gravity' in sys.path
|
|
966548e142023eaa1326344c62dbce535095f53a
|
osf/migrations/0042_add_registration_registered_date_desc_index.py
|
osf/migrations/0042_add_registration_registered_date_desc_index.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-09 20:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0041_auto_20170308_1932'),
]
operations = [
migrations.RunSQL(
[
'CREATE INDEX osf_abstractnode_registered_date_index ON public.osf_abstractnode (registered_date DESC);',
"CREATE INDEX osf_abstractnode_registration_pub_del_type_index ON public.osf_abstractnode (is_public, is_deleted, type) WHERE is_public=TRUE and is_deleted=FALSE and type = 'osf.registration';"
"CREATE INDEX osf_abstractnode_node_pub_del_type_index ON public.osf_abstractnode (is_public, is_deleted, type) WHERE is_public=TRUE and is_deleted=FALSE and type = 'osf.node';"
"CREATE INDEX osf_abstractnode_collection_pub_del_type_index ON public.osf_abstractnode (is_public, is_deleted, type) WHERE is_public=TRUE and is_deleted=FALSE and type = 'osf.collection';"
],
[
'DROP INDEX public.osf_abstractnode_registered_date_index RESTRICT;'
'DROP INDEX public.osf_abstractnode_registration_pub_del_type_index RESTRICT;'
'DROP INDEX public.osf_abstractnode_node_pub_del_type_index RESTRICT;'
'DROP INDEX public.osf_abstractnode_collection_pub_del_type_index RESTRICT;'
]
)
]
|
Add indexes to make the things faster.
|
Add indexes to make the things faster.
|
Python
|
apache-2.0
|
Johnetordoff/osf.io,sloria/osf.io,baylee-d/osf.io,binoculars/osf.io,brianjgeiger/osf.io,baylee-d/osf.io,acshi/osf.io,leb2dg/osf.io,TomBaxter/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,felliott/osf.io,mattclark/osf.io,felliott/osf.io,laurenrevere/osf.io,icereval/osf.io,pattisdr/osf.io,cslzchen/osf.io,CenterForOpenScience/osf.io,leb2dg/osf.io,hmoco/osf.io,caseyrollins/osf.io,Johnetordoff/osf.io,acshi/osf.io,adlius/osf.io,icereval/osf.io,TomBaxter/osf.io,aaxelb/osf.io,chrisseto/osf.io,adlius/osf.io,mfraezz/osf.io,saradbowman/osf.io,sloria/osf.io,Johnetordoff/osf.io,felliott/osf.io,leb2dg/osf.io,acshi/osf.io,binoculars/osf.io,sloria/osf.io,baylee-d/osf.io,hmoco/osf.io,crcresearch/osf.io,Nesiehr/osf.io,brianjgeiger/osf.io,mattclark/osf.io,caneruguz/osf.io,brianjgeiger/osf.io,caseyrollins/osf.io,erinspace/osf.io,HalcyonChimera/osf.io,binoculars/osf.io,aaxelb/osf.io,felliott/osf.io,cwisecarver/osf.io,HalcyonChimera/osf.io,acshi/osf.io,cwisecarver/osf.io,chrisseto/osf.io,icereval/osf.io,adlius/osf.io,TomBaxter/osf.io,chennan47/osf.io,laurenrevere/osf.io,aaxelb/osf.io,mfraezz/osf.io,Nesiehr/osf.io,crcresearch/osf.io,caneruguz/osf.io,acshi/osf.io,mattclark/osf.io,HalcyonChimera/osf.io,aaxelb/osf.io,saradbowman/osf.io,hmoco/osf.io,caneruguz/osf.io,adlius/osf.io,caseyrollins/osf.io,cslzchen/osf.io,erinspace/osf.io,chrisseto/osf.io,Nesiehr/osf.io,crcresearch/osf.io,mfraezz/osf.io,mfraezz/osf.io,caneruguz/osf.io,CenterForOpenScience/osf.io,chennan47/osf.io,chrisseto/osf.io,cwisecarver/osf.io,laurenrevere/osf.io,cslzchen/osf.io,pattisdr/osf.io,CenterForOpenScience/osf.io,Nesiehr/osf.io,HalcyonChimera/osf.io,chennan47/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,cwisecarver/osf.io,erinspace/osf.io,hmoco/osf.io,leb2dg/osf.io,pattisdr/osf.io
|
Add indexes to make the things faster.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-09 20:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0041_auto_20170308_1932'),
]
operations = [
migrations.RunSQL(
[
'CREATE INDEX osf_abstractnode_registered_date_index ON public.osf_abstractnode (registered_date DESC);',
"CREATE INDEX osf_abstractnode_registration_pub_del_type_index ON public.osf_abstractnode (is_public, is_deleted, type) WHERE is_public=TRUE and is_deleted=FALSE and type = 'osf.registration';"
"CREATE INDEX osf_abstractnode_node_pub_del_type_index ON public.osf_abstractnode (is_public, is_deleted, type) WHERE is_public=TRUE and is_deleted=FALSE and type = 'osf.node';"
"CREATE INDEX osf_abstractnode_collection_pub_del_type_index ON public.osf_abstractnode (is_public, is_deleted, type) WHERE is_public=TRUE and is_deleted=FALSE and type = 'osf.collection';"
],
[
'DROP INDEX public.osf_abstractnode_registered_date_index RESTRICT;'
'DROP INDEX public.osf_abstractnode_registration_pub_del_type_index RESTRICT;'
'DROP INDEX public.osf_abstractnode_node_pub_del_type_index RESTRICT;'
'DROP INDEX public.osf_abstractnode_collection_pub_del_type_index RESTRICT;'
]
)
]
|
<commit_before><commit_msg>Add indexes to make the things faster.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-09 20:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0041_auto_20170308_1932'),
]
operations = [
migrations.RunSQL(
[
'CREATE INDEX osf_abstractnode_registered_date_index ON public.osf_abstractnode (registered_date DESC);',
"CREATE INDEX osf_abstractnode_registration_pub_del_type_index ON public.osf_abstractnode (is_public, is_deleted, type) WHERE is_public=TRUE and is_deleted=FALSE and type = 'osf.registration';"
"CREATE INDEX osf_abstractnode_node_pub_del_type_index ON public.osf_abstractnode (is_public, is_deleted, type) WHERE is_public=TRUE and is_deleted=FALSE and type = 'osf.node';"
"CREATE INDEX osf_abstractnode_collection_pub_del_type_index ON public.osf_abstractnode (is_public, is_deleted, type) WHERE is_public=TRUE and is_deleted=FALSE and type = 'osf.collection';"
],
[
'DROP INDEX public.osf_abstractnode_registered_date_index RESTRICT;'
'DROP INDEX public.osf_abstractnode_registration_pub_del_type_index RESTRICT;'
'DROP INDEX public.osf_abstractnode_node_pub_del_type_index RESTRICT;'
'DROP INDEX public.osf_abstractnode_collection_pub_del_type_index RESTRICT;'
]
)
]
|
Add indexes to make the things faster.
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-09 20:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0041_auto_20170308_1932'),
]
operations = [
migrations.RunSQL(
[
'CREATE INDEX osf_abstractnode_registered_date_index ON public.osf_abstractnode (registered_date DESC);',
"CREATE INDEX osf_abstractnode_registration_pub_del_type_index ON public.osf_abstractnode (is_public, is_deleted, type) WHERE is_public=TRUE and is_deleted=FALSE and type = 'osf.registration';"
"CREATE INDEX osf_abstractnode_node_pub_del_type_index ON public.osf_abstractnode (is_public, is_deleted, type) WHERE is_public=TRUE and is_deleted=FALSE and type = 'osf.node';"
"CREATE INDEX osf_abstractnode_collection_pub_del_type_index ON public.osf_abstractnode (is_public, is_deleted, type) WHERE is_public=TRUE and is_deleted=FALSE and type = 'osf.collection';"
],
[
'DROP INDEX public.osf_abstractnode_registered_date_index RESTRICT;'
'DROP INDEX public.osf_abstractnode_registration_pub_del_type_index RESTRICT;'
'DROP INDEX public.osf_abstractnode_node_pub_del_type_index RESTRICT;'
'DROP INDEX public.osf_abstractnode_collection_pub_del_type_index RESTRICT;'
]
)
]
|
<commit_before><commit_msg>Add indexes to make the things faster.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-09 20:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0041_auto_20170308_1932'),
]
operations = [
migrations.RunSQL(
[
'CREATE INDEX osf_abstractnode_registered_date_index ON public.osf_abstractnode (registered_date DESC);',
"CREATE INDEX osf_abstractnode_registration_pub_del_type_index ON public.osf_abstractnode (is_public, is_deleted, type) WHERE is_public=TRUE and is_deleted=FALSE and type = 'osf.registration';"
"CREATE INDEX osf_abstractnode_node_pub_del_type_index ON public.osf_abstractnode (is_public, is_deleted, type) WHERE is_public=TRUE and is_deleted=FALSE and type = 'osf.node';"
"CREATE INDEX osf_abstractnode_collection_pub_del_type_index ON public.osf_abstractnode (is_public, is_deleted, type) WHERE is_public=TRUE and is_deleted=FALSE and type = 'osf.collection';"
],
[
'DROP INDEX public.osf_abstractnode_registered_date_index RESTRICT;'
'DROP INDEX public.osf_abstractnode_registration_pub_del_type_index RESTRICT;'
'DROP INDEX public.osf_abstractnode_node_pub_del_type_index RESTRICT;'
'DROP INDEX public.osf_abstractnode_collection_pub_del_type_index RESTRICT;'
]
)
]
|
|
791e079bd31184f9ad6cd845bda29a49ffc85d5d
|
tests/grammar_term-nonterm_test/NonterminalsInvalidTest.py
|
tests/grammar_term-nonterm_test/NonterminalsInvalidTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy.RawGrammar import RawGrammar
class NonterminalsInvalidTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Add file for invalid nonterminals tests
|
Add file for invalid nonterminals tests
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add file for invalid nonterminals tests
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy.RawGrammar import RawGrammar
class NonterminalsInvalidTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file for invalid nonterminals tests<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy.RawGrammar import RawGrammar
class NonterminalsInvalidTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Add file for invalid nonterminals tests
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy.RawGrammar import RawGrammar
class NonterminalsInvalidTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file for invalid nonterminals tests<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy.RawGrammar import RawGrammar
class NonterminalsInvalidTest(TestCase):
pass
if __name__ == '__main__':
main()
|
|
aa0a8d851f51b62e6fb5ee32c1fa95e70230b2ae
|
ilastik/core/classifiers/classifierRandomForestVariableImportance.py
|
ilastik/core/classifiers/classifierRandomForestVariableImportance.py
|
from classifierBase import *
import h5py
class ClassifierRandomForestVariableImportance(ClassifierBase):
#human readable information
name = "RandomForest classifier with variable importance"
description = "Basic RandomForest classifier with computation of variable importance"
author = "HCI, University of Heidelberg"
homepage = "http://hci.iwr.uni-heidelberg.de"
#minimum required isotropic context
#0 means pixel based classification
#-1 means whole dataset
minContext = 0
treeCount = 10
def __init__(self, treeCount = 10):
ClassifierBase.__init__(self)
self.treeCount = treeCount
self.oob = 0
self.variableImportance = numpy.zeros( (1, ) )
def train(self, features, labels, isInteractive):
if features.shape[0] != labels.shape[0]:
print " 3, 2 ,1 ... BOOOM!! #features != # labels"
if not labels.dtype == numpy.uint32:
labels = labels.astype(numpy.uint32)
if not features.dtype == numpy.float32:
features = features.astype(numpy.float32)
if labels.ndim == 1:
labels.shape = labels.shape + (1,)
self.unique_vals = numpy.unique(labels)
# Have to set this because the new rf doesn't set mtry properly by default
mtry = max(1,int(numpy.sqrt(features.shape[1]))+1)
self.RF = vigra.learning.RandomForest(treeCount=self.treeCount)
if isInteractive:
self.oob = self.RF.learnRF(features, labels)
self.variableImportance = numpy.zeros( (1, ) )
else:
self.oob, self.variableImportance = self.RF.learnRFWithFeatureSelection(features, labels)
def predict(self, features):
#3d: check that only 1D data arrives here
if self.RF is not None and features is not None:
if not features.dtype == numpy.float32:
features = numpy.array(features, dtype=numpy.float32)
return self.RF.predictProbabilities(features)
else:
return None
def serialize(self, fileName, pathInFile):
# cannot serialize into group because can not pass h5py handle to vigra yet
# works only with new RF version
tmp = self.RF.writeHDF5(fileName, pathInFile, True)
f = h5py.File(fileName, 'r+')
f.create_dataset(pathInFile+'/Variable importance', data=self.variableImportance)
f.create_dataset(pathInFile+'/OOB', data=self.oob)
f.close()
return tmp
@classmethod
def deserialize(cls, fileName, pathInFile):
classifier = cls()
classifier.RF = vigra.learning.RandomForest(fileName, pathInFile)
classifier.treeCount = classifier.RF.treeCount
return classifier
|
Add variable selection to RF
|
Add variable selection to RF
|
Python
|
bsd-2-clause
|
ilastik/ilastik-0.5,ilastik/ilastik-0.5,ilastik/ilastik-0.5
|
Add variable selection to RF
|
from classifierBase import *
import h5py
class ClassifierRandomForestVariableImportance(ClassifierBase):
#human readable information
name = "RandomForest classifier with variable importance"
description = "Basic RandomForest classifier with computation of variable importance"
author = "HCI, University of Heidelberg"
homepage = "http://hci.iwr.uni-heidelberg.de"
#minimum required isotropic context
#0 means pixel based classification
#-1 means whole dataset
minContext = 0
treeCount = 10
def __init__(self, treeCount = 10):
ClassifierBase.__init__(self)
self.treeCount = treeCount
self.oob = 0
self.variableImportance = numpy.zeros( (1, ) )
def train(self, features, labels, isInteractive):
if features.shape[0] != labels.shape[0]:
print " 3, 2 ,1 ... BOOOM!! #features != # labels"
if not labels.dtype == numpy.uint32:
labels = labels.astype(numpy.uint32)
if not features.dtype == numpy.float32:
features = features.astype(numpy.float32)
if labels.ndim == 1:
labels.shape = labels.shape + (1,)
self.unique_vals = numpy.unique(labels)
# Have to set this because the new rf doesn't set mtry properly by default
mtry = max(1,int(numpy.sqrt(features.shape[1]))+1)
self.RF = vigra.learning.RandomForest(treeCount=self.treeCount)
if isInteractive:
self.oob = self.RF.learnRF(features, labels)
self.variableImportance = numpy.zeros( (1, ) )
else:
self.oob, self.variableImportance = self.RF.learnRFWithFeatureSelection(features, labels)
def predict(self, features):
#3d: check that only 1D data arrives here
if self.RF is not None and features is not None:
if not features.dtype == numpy.float32:
features = numpy.array(features, dtype=numpy.float32)
return self.RF.predictProbabilities(features)
else:
return None
def serialize(self, fileName, pathInFile):
# cannot serialize into group because can not pass h5py handle to vigra yet
# works only with new RF version
tmp = self.RF.writeHDF5(fileName, pathInFile, True)
f = h5py.File(fileName, 'r+')
f.create_dataset(pathInFile+'/Variable importance', data=self.variableImportance)
f.create_dataset(pathInFile+'/OOB', data=self.oob)
f.close()
return tmp
@classmethod
def deserialize(cls, fileName, pathInFile):
classifier = cls()
classifier.RF = vigra.learning.RandomForest(fileName, pathInFile)
classifier.treeCount = classifier.RF.treeCount
return classifier
|
<commit_before><commit_msg>Add variable selection to RF<commit_after>
|
from classifierBase import *
import h5py
class ClassifierRandomForestVariableImportance(ClassifierBase):
#human readable information
name = "RandomForest classifier with variable importance"
description = "Basic RandomForest classifier with computation of variable importance"
author = "HCI, University of Heidelberg"
homepage = "http://hci.iwr.uni-heidelberg.de"
#minimum required isotropic context
#0 means pixel based classification
#-1 means whole dataset
minContext = 0
treeCount = 10
def __init__(self, treeCount = 10):
ClassifierBase.__init__(self)
self.treeCount = treeCount
self.oob = 0
self.variableImportance = numpy.zeros( (1, ) )
def train(self, features, labels, isInteractive):
if features.shape[0] != labels.shape[0]:
print " 3, 2 ,1 ... BOOOM!! #features != # labels"
if not labels.dtype == numpy.uint32:
labels = labels.astype(numpy.uint32)
if not features.dtype == numpy.float32:
features = features.astype(numpy.float32)
if labels.ndim == 1:
labels.shape = labels.shape + (1,)
self.unique_vals = numpy.unique(labels)
# Have to set this because the new rf doesn't set mtry properly by default
mtry = max(1,int(numpy.sqrt(features.shape[1]))+1)
self.RF = vigra.learning.RandomForest(treeCount=self.treeCount)
if isInteractive:
self.oob = self.RF.learnRF(features, labels)
self.variableImportance = numpy.zeros( (1, ) )
else:
self.oob, self.variableImportance = self.RF.learnRFWithFeatureSelection(features, labels)
def predict(self, features):
#3d: check that only 1D data arrives here
if self.RF is not None and features is not None:
if not features.dtype == numpy.float32:
features = numpy.array(features, dtype=numpy.float32)
return self.RF.predictProbabilities(features)
else:
return None
def serialize(self, fileName, pathInFile):
# cannot serialize into group because can not pass h5py handle to vigra yet
# works only with new RF version
tmp = self.RF.writeHDF5(fileName, pathInFile, True)
f = h5py.File(fileName, 'r+')
f.create_dataset(pathInFile+'/Variable importance', data=self.variableImportance)
f.create_dataset(pathInFile+'/OOB', data=self.oob)
f.close()
return tmp
@classmethod
def deserialize(cls, fileName, pathInFile):
classifier = cls()
classifier.RF = vigra.learning.RandomForest(fileName, pathInFile)
classifier.treeCount = classifier.RF.treeCount
return classifier
|
Add variable selection to RF
from classifierBase import *
import h5py
class ClassifierRandomForestVariableImportance(ClassifierBase):
#human readable information
name = "RandomForest classifier with variable importance"
description = "Basic RandomForest classifier with computation of variable importance"
author = "HCI, University of Heidelberg"
homepage = "http://hci.iwr.uni-heidelberg.de"
#minimum required isotropic context
#0 means pixel based classification
#-1 means whole dataset
minContext = 0
treeCount = 10
def __init__(self, treeCount = 10):
ClassifierBase.__init__(self)
self.treeCount = treeCount
self.oob = 0
self.variableImportance = numpy.zeros( (1, ) )
def train(self, features, labels, isInteractive):
if features.shape[0] != labels.shape[0]:
print " 3, 2 ,1 ... BOOOM!! #features != # labels"
if not labels.dtype == numpy.uint32:
labels = labels.astype(numpy.uint32)
if not features.dtype == numpy.float32:
features = features.astype(numpy.float32)
if labels.ndim == 1:
labels.shape = labels.shape + (1,)
self.unique_vals = numpy.unique(labels)
# Have to set this because the new rf doesn't set mtry properly by default
mtry = max(1,int(numpy.sqrt(features.shape[1]))+1)
self.RF = vigra.learning.RandomForest(treeCount=self.treeCount)
if isInteractive:
self.oob = self.RF.learnRF(features, labels)
self.variableImportance = numpy.zeros( (1, ) )
else:
self.oob, self.variableImportance = self.RF.learnRFWithFeatureSelection(features, labels)
def predict(self, features):
#3d: check that only 1D data arrives here
if self.RF is not None and features is not None:
if not features.dtype == numpy.float32:
features = numpy.array(features, dtype=numpy.float32)
return self.RF.predictProbabilities(features)
else:
return None
def serialize(self, fileName, pathInFile):
# cannot serialize into group because can not pass h5py handle to vigra yet
# works only with new RF version
tmp = self.RF.writeHDF5(fileName, pathInFile, True)
f = h5py.File(fileName, 'r+')
f.create_dataset(pathInFile+'/Variable importance', data=self.variableImportance)
f.create_dataset(pathInFile+'/OOB', data=self.oob)
f.close()
return tmp
@classmethod
def deserialize(cls, fileName, pathInFile):
classifier = cls()
classifier.RF = vigra.learning.RandomForest(fileName, pathInFile)
classifier.treeCount = classifier.RF.treeCount
return classifier
|
<commit_before><commit_msg>Add variable selection to RF<commit_after>from classifierBase import *
import h5py
class ClassifierRandomForestVariableImportance(ClassifierBase):
#human readable information
name = "RandomForest classifier with variable importance"
description = "Basic RandomForest classifier with computation of variable importance"
author = "HCI, University of Heidelberg"
homepage = "http://hci.iwr.uni-heidelberg.de"
#minimum required isotropic context
#0 means pixel based classification
#-1 means whole dataset
minContext = 0
treeCount = 10
def __init__(self, treeCount = 10):
ClassifierBase.__init__(self)
self.treeCount = treeCount
self.oob = 0
self.variableImportance = numpy.zeros( (1, ) )
def train(self, features, labels, isInteractive):
if features.shape[0] != labels.shape[0]:
print " 3, 2 ,1 ... BOOOM!! #features != # labels"
if not labels.dtype == numpy.uint32:
labels = labels.astype(numpy.uint32)
if not features.dtype == numpy.float32:
features = features.astype(numpy.float32)
if labels.ndim == 1:
labels.shape = labels.shape + (1,)
self.unique_vals = numpy.unique(labels)
# Have to set this because the new rf doesn't set mtry properly by default
mtry = max(1,int(numpy.sqrt(features.shape[1]))+1)
self.RF = vigra.learning.RandomForest(treeCount=self.treeCount)
if isInteractive:
self.oob = self.RF.learnRF(features, labels)
self.variableImportance = numpy.zeros( (1, ) )
else:
self.oob, self.variableImportance = self.RF.learnRFWithFeatureSelection(features, labels)
def predict(self, features):
#3d: check that only 1D data arrives here
if self.RF is not None and features is not None:
if not features.dtype == numpy.float32:
features = numpy.array(features, dtype=numpy.float32)
return self.RF.predictProbabilities(features)
else:
return None
def serialize(self, fileName, pathInFile):
# cannot serialize into group because can not pass h5py handle to vigra yet
# works only with new RF version
tmp = self.RF.writeHDF5(fileName, pathInFile, True)
f = h5py.File(fileName, 'r+')
f.create_dataset(pathInFile+'/Variable importance', data=self.variableImportance)
f.create_dataset(pathInFile+'/OOB', data=self.oob)
f.close()
return tmp
@classmethod
def deserialize(cls, fileName, pathInFile):
classifier = cls()
classifier.RF = vigra.learning.RandomForest(fileName, pathInFile)
classifier.treeCount = classifier.RF.treeCount
return classifier
|
|
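When trained non-interactively, the classifier above stores two extra outputs alongside the forest: an out-of-bag (OOB) error estimate and a per-feature variable importance vector. As an analogous illustration of what those quantities mean — using scikit-learn on toy data, not the vigra API from the record — consider:

import numpy as np
from sklearn.ensemble import RandomForestClassifier

X = np.random.rand(200, 5).astype(np.float32)                      # toy feature matrix
y = (X[:, 0] + 0.1 * np.random.rand(200) > 0.5).astype(np.uint32)  # labels driven mostly by feature 0

rf = RandomForestClassifier(n_estimators=50, oob_score=True).fit(X, y)
print(1.0 - rf.oob_score_)       # out-of-bag error, comparable in spirit to self.oob
print(rf.feature_importances_)   # per-feature importances, comparable to self.variableImportance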
bb0009a7d36b52b92ac988048ed72992c074b20e
|
tests/test_find.py
|
tests/test_find.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import unittest
from click.testing import CliRunner
import mock
from tldr import cli
class TestFind(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
with mock.patch('click.prompt', side_effect=['/tmp/tldr', 'linux']):
runner = CliRunner()
result = runner.invoke(cli.init)
def test_find_tldr_in_common(self):
result = self.runner.invoke(cli.find, ['tldr'])
assert result.output == (
'\n Simplified man pages\n\n- get typical usages of a command '
'(hint: this is how you got here!)\n\n tldr {{command}}\n'
)
def test_find_tcpflow_in_linux(self):
result = self.runner.invoke(cli.find, ['tcpflow'])
assert result.output == (
'\n Capture TCP traffic for debugging and analysis\n\n- Show all '
'data on the given interface and port\n\n tcpflow -c -i {{eth0}} '
'port {{80}}\n'
)
def test_can_not_find_something(self):
result = self.runner.invoke(cli.find, ['yoooooooooooooo'])
assert result.output == (
"Sorry, we don't support command: yoooooooooooooo right now.\nYou "
"can file an issue or send a PR on github:\n https://github.com"
"/tldr-pages/tldr\n"
)
def test_find_command_do_not_support_your_platform(self):
result = self.runner.invoke(cli.find, ['airport'])
assert result.output == (
'Sorry, command airport is not supported on your platform.\nYou '
'can file an issue or send a PR on github:\n https://github.'
'com/tldr-pages/tldr\n'
)
|
Add test for `tldr find {{command}}`
|
Add test for `tldr find {{command}}`
|
Python
|
mit
|
lord63/tldr.py
|
Add test for `tldr find {{command}}`
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import unittest
from click.testing import CliRunner
import mock
from tldr import cli
class TestFind(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
with mock.patch('click.prompt', side_effect=['/tmp/tldr', 'linux']):
runner = CliRunner()
result = runner.invoke(cli.init)
def test_find_tldr_in_common(self):
result = self.runner.invoke(cli.find, ['tldr'])
assert result.output == (
'\n Simplified man pages\n\n- get typical usages of a command '
'(hint: this is how you got here!)\n\n tldr {{command}}\n'
)
def test_find_tcpflow_in_linux(self):
result = self.runner.invoke(cli.find, ['tcpflow'])
assert result.output == (
'\n Capture TCP traffic for debugging and analysis\n\n- Show all '
'data on the given interface and port\n\n tcpflow -c -i {{eth0}} '
'port {{80}}\n'
)
def test_can_not_find_something(self):
result = self.runner.invoke(cli.find, ['yoooooooooooooo'])
assert result.output == (
"Sorry, we don't support command: yoooooooooooooo right now.\nYou "
"can file an issue or send a PR on github:\n https://github.com"
"/tldr-pages/tldr\n"
)
def test_find_command_do_not_support_your_platform(self):
result = self.runner.invoke(cli.find, ['airport'])
assert result.output == (
'Sorry, command airport is not supported on your platform.\nYou '
'can file an issue or send a PR on github:\n https://github.'
'com/tldr-pages/tldr\n'
)
|
<commit_before><commit_msg>Add test for `tldr find {{command}}`<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import unittest
from click.testing import CliRunner
import mock
from tldr import cli
class TestFind(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
with mock.patch('click.prompt', side_effect=['/tmp/tldr', 'linux']):
runner = CliRunner()
result = runner.invoke(cli.init)
def test_find_tldr_in_common(self):
result = self.runner.invoke(cli.find, ['tldr'])
assert result.output == (
'\n Simplified man pages\n\n- get typical usages of a command '
'(hint: this is how you got here!)\n\n tldr {{command}}\n'
)
def test_find_tcpflow_in_linux(self):
result = self.runner.invoke(cli.find, ['tcpflow'])
assert result.output == (
'\n Capture TCP traffic for debugging and analysis\n\n- Show all '
'data on the given interface and port\n\n tcpflow -c -i {{eth0}} '
'port {{80}}\n'
)
def test_can_not_find_something(self):
result = self.runner.invoke(cli.find, ['yoooooooooooooo'])
assert result.output == (
"Sorry, we don't support command: yoooooooooooooo right now.\nYou "
"can file an issue or send a PR on github:\n https://github.com"
"/tldr-pages/tldr\n"
)
def test_find_command_do_not_support_your_platform(self):
result = self.runner.invoke(cli.find, ['airport'])
assert result.output == (
'Sorry, command airport is not supported on your platform.\nYou '
'can file an issue or send a PR on github:\n https://github.'
'com/tldr-pages/tldr\n'
)
|
Add test for `tldr find {{command}}`#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import unittest
from click.testing import CliRunner
import mock
from tldr import cli
class TestFind(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
with mock.patch('click.prompt', side_effect=['/tmp/tldr', 'linux']):
runner = CliRunner()
result = runner.invoke(cli.init)
def test_find_tldr_in_common(self):
result = self.runner.invoke(cli.find, ['tldr'])
assert result.output == (
'\n Simplified man pages\n\n- get typical usages of a command '
'(hint: this is how you got here!)\n\n tldr {{command}}\n'
)
def test_find_tcpflow_in_linux(self):
result = self.runner.invoke(cli.find, ['tcpflow'])
assert result.output == (
'\n Capture TCP traffic for debugging and analysis\n\n- Show all '
'data on the given interface and port\n\n tcpflow -c -i {{eth0}} '
'port {{80}}\n'
)
def test_can_not_find_something(self):
result = self.runner.invoke(cli.find, ['yoooooooooooooo'])
assert result.output == (
"Sorry, we don't support command: yoooooooooooooo right now.\nYou "
"can file an issue or send a PR on github:\n https://github.com"
"/tldr-pages/tldr\n"
)
def test_find_command_do_not_support_your_platform(self):
result = self.runner.invoke(cli.find, ['airport'])
assert result.output == (
'Sorry, command airport is not supported on your platform.\nYou '
'can file an issue or send a PR on github:\n https://github.'
'com/tldr-pages/tldr\n'
)
|
<commit_before><commit_msg>Add test for `tldr find {{command}}`<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import unittest
from click.testing import CliRunner
import mock
from tldr import cli
class TestFind(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
with mock.patch('click.prompt', side_effect=['/tmp/tldr', 'linux']):
runner = CliRunner()
result = runner.invoke(cli.init)
def test_find_tldr_in_common(self):
result = self.runner.invoke(cli.find, ['tldr'])
assert result.output == (
'\n Simplified man pages\n\n- get typical usages of a command '
'(hint: this is how you got here!)\n\n tldr {{command}}\n'
)
def test_find_tcpflow_in_linux(self):
result = self.runner.invoke(cli.find, ['tcpflow'])
assert result.output == (
'\n Capture TCP traffic for debugging and analysis\n\n- Show all '
'data on the given interface and port\n\n tcpflow -c -i {{eth0}} '
'port {{80}}\n'
)
def test_can_not_find_something(self):
result = self.runner.invoke(cli.find, ['yoooooooooooooo'])
assert result.output == (
"Sorry, we don't support command: yoooooooooooooo right now.\nYou "
"can file an issue or send a PR on github:\n https://github.com"
"/tldr-pages/tldr\n"
)
def test_find_command_do_not_support_your_platform(self):
result = self.runner.invoke(cli.find, ['airport'])
assert result.output == (
'Sorry, command airport is not supported on your platform.\nYou '
'can file an issue or send a PR on github:\n https://github.'
'com/tldr-pages/tldr\n'
)
|
|
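As an aside on the setUp above: mock.patch with a side_effect list hands successive return values to click.prompt, which is how cli.init is answered non-interactively. A standalone sketch of that behaviour, assuming click and mock are installed; the prompt texts are made up.
import mock
with mock.patch('click.prompt', side_effect=['/tmp/tldr', 'linux']) as fake_prompt:
    repo_dir = fake_prompt('where to keep the pages')   # -> '/tmp/tldr'
    platform = fake_prompt('your platform')             # -> 'linux'
assert (repo_dir, platform) == ('/tmp/tldr', 'linux')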
ffe0ce0c7ba2511ecc59a3a7f356ced8570c17f4
|
lambdas/stop_running_tasks.py
|
lambdas/stop_running_tasks.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Stop all the instances of a running task.
Our applications load config from an S3 bucket at startup. When the S3 config
changes, this Lambda is triggered. We need to stop any running instances
of the application, then the ECS scheduler will restart them and they'll
pick up their new config.
"""
import re
import boto3
def identify_cluster_by_app_name(ecs, app_name):
"""
Given the name of one of our applications (e.g. api, calm_adapter),
return the ARN of the cluster the task runs on.
"""
for cluster in ecs.list_clusters()['clusterArns']:
for serviceArn in ecs.list_services(cluster=cluster)['serviceArns']:
_, serviceName = serviceArn.split('/')
if serviceName == app_name:
return cluster
def stop_running_tasks(app_name):
"""
Given the name of one of our applications (e.g. api, calm_adapter),
stop all the running instances of this application.
"""
ecs = boto3.client('ecs')
cluster = identify_cluster_by_app_name(ecs=ecs, app_name=app_name)
taskArns = ecs.list_tasks(
cluster=cluster,
serviceName=app_name
)['taskArns']
for task in taskArns:
ecs.stop_task(
cluster=cluster,
task=task,
reason='Restarting to pick up new configuration'
)
def main(event, _):
print('Received event: %r' % event)
records = event['Records']
assert len(records) == 1
changed_object_key = records[0]['s3']['object']['key']
match = re.match(r'^config/prod/(?P<app>[a-z_]+)\.ini', changed_object_key)
assert match is not None
app_name = match.group('app')
print('Stopping tasks for %s' % app_name)
stop_running_tasks(app_name=app_name)
|
Add a Python script that stops all running instances of a task
|
Add a Python script that stops all running instances of a task
|
Python
|
mit
|
wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api
|
Add a Python script that stops all running instances of a task
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Stop all the instances of a running task.
Our applications load config from an S3 bucket at startup. When the S3 config
changes, this Lambda is triggered. We need to stop any running instances
of the application, then the ECS scheduler will restart them and they'll
pick up their new config.
"""
import re
import boto3
def identify_cluster_by_app_name(ecs, app_name):
"""
Given the name of one of our applications (e.g. api, calm_adapter),
return the ARN of the cluster the task runs on.
"""
for cluster in ecs.list_clusters()['clusterArns']:
for serviceArn in ecs.list_services(cluster=cluster)['serviceArns']:
_, serviceName = serviceArn.split('/')
if serviceName == app_name:
return cluster
def stop_running_tasks(app_name):
"""
Given the name of one of our applications (e.g. api, calm_adapter),
stop all the running instances of this application.
"""
ecs = boto3.client('ecs')
cluster = identify_cluster_by_app_name(ecs=ecs, app_name=app_name)
taskArns = ecs.list_tasks(
cluster=cluster,
serviceName=app_name
)['taskArns']
for task in taskArns:
ecs.stop_task(
cluster=cluster,
task=task,
reason='Restarting to pick up new configuration'
)
def main(event, _):
print('Received event: %r' % event)
records = event['Records']
assert len(records) == 1
changed_object_key = records[0]['s3']['object']['key']
match = re.match(r'^config/prod/(?P<app>[a-z_]+)\.ini', changed_object_key)
assert match is not None
app_name = match.group('app')
print('Stopping tasks for %s' % app_name)
stop_running_tasks(app_name=app_name)
|
<commit_before><commit_msg>Add a Python script that stops all running instances of a task<commit_after>
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Stop all the instances of a running task.
Our applications load config from an S3 bucket at startup. When the S3 config
changes, this Lambda is triggered. We need to stop any running instances
of the application, then the ECS scheduler will restart them and they'll
pick up their new config.
"""
import re
import boto3
def identify_cluster_by_app_name(ecs, app_name):
"""
Given the name of one of our applications (e.g. api, calm_adapter),
return the ARN of the cluster the task runs on.
"""
for cluster in ecs.list_clusters()['clusterArns']:
for serviceArn in ecs.list_services(cluster=cluster)['serviceArns']:
_, serviceName = serviceArn.split('/')
if serviceName == app_name:
return cluster
def stop_running_tasks(app_name):
"""
Given the name of one of our applications (e.g. api, calm_adapter),
stop all the running instances of this application.
"""
ecs = boto3.client('ecs')
cluster = identify_cluster_by_app_name(ecs=ecs, app_name=app_name)
taskArns = ecs.list_tasks(
cluster=cluster,
serviceName=app_name
)['taskArns']
for task in taskArns:
ecs.stop_task(
cluster=cluster,
task=task,
reason='Restarting to pick up new configuration'
)
def main(event, _):
print('Received event: %r' % event)
records = event['Records']
assert len(records) == 1
changed_object_key = records[0]['s3']['object']['key']
match = re.match(r'^config/prod/(?P<app>[a-z_]+)\.ini', changed_object_key)
assert match is not None
app_name = match.group('app')
print('Stopping tasks for %s' % app_name)
stop_running_tasks(app_name=app_name)
|
Add a Python script that stops all running instances of a task#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Stop all the instances of a running task.
Our applications load config from an S3 bucket at startup. When the S3 config
changes, this Lambda is triggered. We need to stop any running instances
of the application, then the ECS scheduler will restart them and they'll
pick up their new config.
"""
import re
import boto3
def identify_cluster_by_app_name(ecs, app_name):
"""
Given the name of one of our applications (e.g. api, calm_adapter),
return the ARN of the cluster the task runs on.
"""
for cluster in ecs.list_clusters()['clusterArns']:
for serviceArn in ecs.list_services(cluster=cluster)['serviceArns']:
_, serviceName = serviceArn.split('/')
if serviceName == app_name:
return cluster
def stop_running_tasks(app_name):
"""
Given the name of one of our applications (e.g. api, calm_adapter),
stop all the running instances of this application.
"""
ecs = boto3.client('ecs')
cluster = identify_cluster_by_app_name(ecs=ecs, app_name=app_name)
taskArns = ecs.list_tasks(
cluster=cluster,
serviceName=app_name
)['taskArns']
for task in taskArns:
ecs.stop_task(
cluster=cluster,
task=task,
reason='Restarting to pick up new configuration'
)
def main(event, _):
print('Received event: %r' % event)
records = event['Records']
assert len(records) == 1
changed_object_key = records[0]['s3']['object']['key']
match = re.match(r'^config/prod/(?P<app>[a-z_]+)\.ini', changed_object_key)
assert match is not None
app_name = match.group('app')
print('Stopping tasks for %s' % app_name)
stop_running_tasks(app_name=app_name)
|
<commit_before><commit_msg>Add a Python script that stops all running instances of a task<commit_after>#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Stop all the instances of a running task.
Our applications load config from an S3 bucket at startup. When the S3 config
changes, this Lambda is triggered. We need to stop any running instances
of the application, then the ECS scheduler will restart them and they'll
pick up their new config.
"""
import re
import boto3
def identify_cluster_by_app_name(ecs, app_name):
"""
Given the name of one of our applications (e.g. api, calm_adapter),
return the ARN of the cluster the task runs on.
"""
for cluster in ecs.list_clusters()['clusterArns']:
for serviceArn in ecs.list_services(cluster=cluster)['serviceArns']:
_, serviceName = serviceArn.split('/')
if serviceName == app_name:
return cluster
def stop_running_tasks(app_name):
"""
Given the name of one of our applications (e.g. api, calm_adapter),
stop all the running instances of this application.
"""
ecs = boto3.client('ecs')
cluster = identify_cluster_by_app_name(ecs=ecs, app_name=app_name)
taskArns = ecs.list_tasks(
cluster=cluster,
serviceName=app_name
)['taskArns']
for task in taskArns:
ecs.stop_task(
cluster=cluster,
task=task,
reason='Restarting to pick up new configuration'
)
def main(event, _):
print('Received event: %r' % event)
records = event['Records']
assert len(records) == 1
changed_object_key = records[0]['s3']['object']['key']
match = re.match(r'^config/prod/(?P<app>[a-z_]+)\.ini', changed_object_key)
assert match is not None
app_name = match.group('app')
print('Stopping tasks for %s' % app_name)
stop_running_tasks(app_name=app_name)
|
|
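For illustration, the shape of the S3 notification event that the main handler above expects; the key is a made-up example and the snippet only exercises the regex, not ECS.
import re
sample_event = {'Records': [{'s3': {'object': {'key': 'config/prod/calm_adapter.ini'}}}]}
key = sample_event['Records'][0]['s3']['object']['key']
match = re.match(r'^config/prod/(?P<app>[a-z_]+)\.ini', key)
assert match and match.group('app') == 'calm_adapter'
# main(sample_event, None) would go on to call stop_running_tasks('calm_adapter').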
228ff64938268e454eb4c66db3ceaf63a4692272
|
l10n_it_split_payment/migrations/14.0.1.0.0/pre-move_split_amount.py
|
l10n_it_split_payment/migrations/14.0.1.0.0/pre-move_split_amount.py
|
# Copyright 2021 Simone Rubino - Agile Business Group
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openupgradelib import openupgrade
@openupgrade.migrate()
def migrate(env, version):
openupgrade.add_fields(
env,
[("amount_sp", "account.move", False, "float", False, "l10n_it_split_payment")],
)
openupgrade.logged_query(
env.cr,
"""
update account_move
set
amount_sp = inv.amount_sp
from account_invoice inv
where
account_move.id = inv.move_id;
""",
)
|
Move amount_sp to account_move Otherwise the value is recomputed and the following error might occur: Impossibile aggiungere/modificare registrazioni antecedenti o pari alla data di chiusura 24/09/2020.
|
Move amount_sp to account_move
Otherwise the value is recomputed and the following error might occur:
Impossibile aggiungere/modificare registrazioni antecedenti o pari alla data di chiusura 24/09/2020.
|
Python
|
agpl-3.0
|
OCA/l10n-italy,OCA/l10n-italy,OCA/l10n-italy,dcorio/l10n-italy,dcorio/l10n-italy,dcorio/l10n-italy
|
Move amount_sp to account_move
Otherwise the value is recomputed and the following error might occur:
Impossibile aggiungere/modificare registrazioni antecedenti o pari alla data di chiusura 24/09/2020.
|
# Copyright 2021 Simone Rubino - Agile Business Group
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openupgradelib import openupgrade
@openupgrade.migrate()
def migrate(env, version):
openupgrade.add_fields(
env,
[("amount_sp", "account.move", False, "float", False, "l10n_it_split_payment")],
)
openupgrade.logged_query(
env.cr,
"""
update account_move
set
amount_sp = inv.amount_sp
from account_invoice inv
where
account_move.id = inv.move_id;
""",
)
|
<commit_before><commit_msg>Move amount_sp to account_move
Otherwise the value is recomputed and the following error might occur:
Impossibile aggiungere/modificare registrazioni antecedenti o pari alla data di chiusura 24/09/2020.<commit_after>
|
# Copyright 2021 Simone Rubino - Agile Business Group
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openupgradelib import openupgrade
@openupgrade.migrate()
def migrate(env, version):
openupgrade.add_fields(
env,
[("amount_sp", "account.move", False, "float", False, "l10n_it_split_payment")],
)
openupgrade.logged_query(
env.cr,
"""
update account_move
set
amount_sp = inv.amount_sp
from account_invoice inv
where
account_move.id = inv.move_id;
""",
)
|
Move amount_sp to account_move
Otherwise the value is recomputed and the following error might occur:
Impossibile aggiungere/modificare registrazioni antecedenti o pari alla data di chiusura 24/09/2020.# Copyright 2021 Simone Rubino - Agile Business Group
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openupgradelib import openupgrade
@openupgrade.migrate()
def migrate(env, version):
openupgrade.add_fields(
env,
[("amount_sp", "account.move", False, "float", False, "l10n_it_split_payment")],
)
openupgrade.logged_query(
env.cr,
"""
update account_move
set
amount_sp = inv.amount_sp
from account_invoice inv
where
account_move.id = inv.move_id;
""",
)
|
<commit_before><commit_msg>Move amount_sp to account_move
Otherwise the value is recomputed and the following error might occur:
Impossibile aggiungere/modificare registrazioni antecedenti o pari alla data di chiusura 24/09/2020.<commit_after># Copyright 2021 Simone Rubino - Agile Business Group
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openupgradelib import openupgrade
@openupgrade.migrate()
def migrate(env, version):
openupgrade.add_fields(
env,
[("amount_sp", "account.move", False, "float", False, "l10n_it_split_payment")],
)
openupgrade.logged_query(
env.cr,
"""
update account_move
set
amount_sp = inv.amount_sp
from account_invoice inv
where
account_move.id = inv.move_id;
""",
)
|
|
1c496de7ce5aeb9e7d322290d8250aa73d32e6cf
|
museum_site/migrations/0061_file_license.py
|
museum_site/migrations/0061_file_license.py
|
# Generated by Django 3.2.4 on 2021-06-11 21:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('museum_site', '0060_auto_20210611_0425'),
]
operations = [
migrations.AddField(
model_name='file',
name='license',
field=models.CharField(default='Unknown', max_length=150),
),
]
|
Add license field to files
|
Add license field to files
|
Python
|
mit
|
DrDos0016/z2,DrDos0016/z2,DrDos0016/z2
|
Add license field to files
|
# Generated by Django 3.2.4 on 2021-06-11 21:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('museum_site', '0060_auto_20210611_0425'),
]
operations = [
migrations.AddField(
model_name='file',
name='license',
field=models.CharField(default='Unknown', max_length=150),
),
]
|
<commit_before><commit_msg>Add license field to files<commit_after>
|
# Generated by Django 3.2.4 on 2021-06-11 21:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('museum_site', '0060_auto_20210611_0425'),
]
operations = [
migrations.AddField(
model_name='file',
name='license',
field=models.CharField(default='Unknown', max_length=150),
),
]
|
Add license field to files# Generated by Django 3.2.4 on 2021-06-11 21:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('museum_site', '0060_auto_20210611_0425'),
]
operations = [
migrations.AddField(
model_name='file',
name='license',
field=models.CharField(default='Unknown', max_length=150),
),
]
|
<commit_before><commit_msg>Add license field to files<commit_after># Generated by Django 3.2.4 on 2021-06-11 21:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('museum_site', '0060_auto_20210611_0425'),
]
operations = [
migrations.AddField(
model_name='file',
name='license',
field=models.CharField(default='Unknown', max_length=150),
),
]
|
|
5eea40913683cde09b097cb3069cc6fd85809f23
|
lintcode/Easy/109_Triangle.py
|
lintcode/Easy/109_Triangle.py
|
import sys
class Solution:
"""
@param triangle: a list of lists of integers.
@return: An integer, minimum path sum.
"""
def minimumTotal(self, triangle):
# write your code here
res = triangle[0]
for i in range(1, len(triangle)):
for j in range(len(triangle[i]) - 1, -1, -1):
left = triangle[i][j] + (res[j] if j < len(res) else sys.maxint)
right = triangle[i][j] + (res[j - 1] if j - 1 >= 0 else sys.maxint)
if (j >= len(res)):
res.append(min(left, right))
else:
res[j] = min(left, right)
return min(res)
|
Add solution to lintcode question 109
|
Add solution to lintcode question 109
|
Python
|
mit
|
Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode
|
Add solution to lintcode question 109
|
import sys
class Solution:
"""
@param triangle: a list of lists of integers.
@return: An integer, minimum path sum.
"""
def minimumTotal(self, triangle):
# write your code here
res = triangle[0]
for i in range(1, len(triangle)):
for j in range(len(triangle[i]) - 1, -1, -1):
left = triangle[i][j] + (res[j] if j < len(res) else sys.maxint)
right = triangle[i][j] + (res[j - 1] if j - 1 >= 0 else sys.maxint)
if (j >= len(res)):
res.append(min(left, right))
else:
res[j] = min(left, right)
return min(res)
|
<commit_before><commit_msg>Add solution to lintcode question 109<commit_after>
|
import sys
class Solution:
"""
@param triangle: a list of lists of integers.
@return: An integer, minimum path sum.
"""
def minimumTotal(self, triangle):
# write your code here
res = triangle[0]
for i in range(1, len(triangle)):
for j in range(len(triangle[i]) - 1, -1, -1):
left = triangle[i][j] + (res[j] if j < len(res) else sys.maxint)
right = triangle[i][j] + (res[j - 1] if j - 1 >= 0 else sys.maxint)
if (j >= len(res)):
res.append(min(left, right))
else:
res[j] = min(left, right)
return min(res)
|
Add solution to lintcode question 109import sys
class Solution:
"""
@param triangle: a list of lists of integers.
@return: An integer, minimum path sum.
"""
def minimumTotal(self, triangle):
# write your code here
res = triangle[0]
for i in range(1, len(triangle)):
for j in range(len(triangle[i]) - 1, -1, -1):
left = triangle[i][j] + (res[j] if j < len(res) else sys.maxint)
right = triangle[i][j] + (res[j - 1] if j - 1 >= 0 else sys.maxint)
if (j >= len(res)):
res.append(min(left, right))
else:
res[j] = min(left, right)
return min(res)
|
<commit_before><commit_msg>Add solution to lintcode question 109<commit_after>import sys
class Solution:
"""
@param triangle: a list of lists of integers.
@return: An integer, minimum path sum.
"""
def minimumTotal(self, triangle):
# write your code here
res = triangle[0]
for i in range(1, len(triangle)):
for j in range(len(triangle[i]) - 1, -1, -1):
left = triangle[i][j] + (res[j] if j < len(res) else sys.maxint)
right = triangle[i][j] + (res[j - 1] if j - 1 >= 0 else sys.maxint)
if (j >= len(res)):
res.append(min(left, right))
else:
res[j] = min(left, right)
return min(res)
|
|
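To make the rolling-array DP above concrete, here is an equivalent bottom-up sketch (illustrative only, not the submitted solution) run on the classic example triangle:
def minimum_total_bottom_up(triangle):
    dp = list(triangle[-1])  # start from the last row
    for i in range(len(triangle) - 2, -1, -1):
        for j in range(len(triangle[i])):
            dp[j] = triangle[i][j] + min(dp[j], dp[j + 1])  # cheaper of the two children
    return dp[0]
print(minimum_total_bottom_up([[2], [3, 4], [6, 5, 7], [4, 1, 8, 3]]))  # 11, via 2 + 3 + 5 + 1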
d1cafed7ecb9e3fd92a1a7f405bd6832086ff30c
|
test/integration/ggrc/notifications/test_assignable_notifications.py
|
test/integration/ggrc/notifications/test_assignable_notifications.py
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from freezegun import freeze_time
from datetime import datetime
from mock import patch
from ggrc.models import Notification
from ggrc.models import Person
from ggrc.models import Request
from integration.ggrc import converters
from integration.ggrc.models import factories
from integration.ggrc.generator import ObjectGenerator
class TestAssignableNotification(converters.TestCase):
""" This class contains simple one time workflow tests that are not
in the gsheet test grid
"""
def setUp(self):
converters.TestCase.setUp(self)
self.client.get("/login")
self._fix_notification_init()
def _fix_notification_init(self):
"""Fix Notification object init function
    This is a fix needed for a correct created_at field when using freezegun. By
    default the created_at field is left empty and filled by the database, which
    uses system time and not the fake date set by the freezegun plugin. This fix
    makes sure that an object created in a freeze_time block has all dates set with
the correct date and time.
"""
def init_decorator(init):
def new_init(self, *args, **kwargs):
init(self, *args, **kwargs)
if hasattr(self, "created_at"):
self.created_at = datetime.now()
return new_init
Notification.__init__ = init_decorator(Notification.__init__)
def _get_unsent_notifications(self):
return Notification.query.filter(Notification.sent_at.is_(None))
@patch("ggrc.notifications.common.send_email")
def test_request_without_verifiers(self, mock_mail):
with freeze_time("2015-04-01"):
self.import_file("request_full_no_warnings.csv")
self.assertEqual(self._get_unsent_notifications().count(), 12)
self.client.get("/_notifications/send_todays_digest")
self.assertEqual(self._get_unsent_notifications().count(), 0)
|
Add basic tests for request notifications
|
Add basic tests for request notifications
Test that imported requests get entries in the notifications table.
|
Python
|
apache-2.0
|
VinnieJohns/ggrc-core,prasannav7/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,prasannav7/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,j0gurt/ggrc-core,selahssea/ggrc-core,kr41/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,prasannav7/ggrc-core,j0gurt/ggrc-core,selahssea/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,prasannav7/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core
|
Add basic tests for request notifications
Test that imported requests get entries in the notifications table.
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from freezegun import freeze_time
from datetime import datetime
from mock import patch
from ggrc.models import Notification
from ggrc.models import Person
from ggrc.models import Request
from integration.ggrc import converters
from integration.ggrc.models import factories
from integration.ggrc.generator import ObjectGenerator
class TestAssignableNotification(converters.TestCase):
""" This class contains simple one time workflow tests that are not
in the gsheet test grid
"""
def setUp(self):
converters.TestCase.setUp(self)
self.client.get("/login")
self._fix_notification_init()
def _fix_notification_init(self):
"""Fix Notification object init function
    This is a fix needed for a correct created_at field when using freezegun. By
    default the created_at field is left empty and filled by the database, which
    uses system time and not the fake date set by the freezegun plugin. This fix
    makes sure that an object created in a freeze_time block has all dates set with
the correct date and time.
"""
def init_decorator(init):
def new_init(self, *args, **kwargs):
init(self, *args, **kwargs)
if hasattr(self, "created_at"):
self.created_at = datetime.now()
return new_init
Notification.__init__ = init_decorator(Notification.__init__)
def _get_unsent_notifications(self):
return Notification.query.filter(Notification.sent_at.is_(None))
@patch("ggrc.notifications.common.send_email")
def test_request_without_verifiers(self, mock_mail):
with freeze_time("2015-04-01"):
self.import_file("request_full_no_warnings.csv")
self.assertEqual(self._get_unsent_notifications().count(), 12)
self.client.get("/_notifications/send_todays_digest")
self.assertEqual(self._get_unsent_notifications().count(), 0)
|
<commit_before><commit_msg>Add basic tests for request notifications
Test that imported requests get entries in the notifications table.<commit_after>
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from freezegun import freeze_time
from datetime import datetime
from mock import patch
from ggrc.models import Notification
from ggrc.models import Person
from ggrc.models import Request
from integration.ggrc import converters
from integration.ggrc.models import factories
from integration.ggrc.generator import ObjectGenerator
class TestAssignableNotification(converters.TestCase):
""" This class contains simple one time workflow tests that are not
in the gsheet test grid
"""
def setUp(self):
converters.TestCase.setUp(self)
self.client.get("/login")
self._fix_notification_init()
def _fix_notification_init(self):
"""Fix Notification object init function
    This is a fix needed for a correct created_at field when using freezegun. By
    default the created_at field is left empty and filled by the database, which
    uses system time and not the fake date set by the freezegun plugin. This fix
    makes sure that an object created in a freeze_time block has all dates set with
the correct date and time.
"""
def init_decorator(init):
def new_init(self, *args, **kwargs):
init(self, *args, **kwargs)
if hasattr(self, "created_at"):
self.created_at = datetime.now()
return new_init
Notification.__init__ = init_decorator(Notification.__init__)
def _get_unsent_notifications(self):
return Notification.query.filter(Notification.sent_at.is_(None))
@patch("ggrc.notifications.common.send_email")
def test_request_without_verifiers(self, mock_mail):
with freeze_time("2015-04-01"):
self.import_file("request_full_no_warnings.csv")
self.assertEqual(self._get_unsent_notifications().count(), 12)
self.client.get("/_notifications/send_todays_digest")
self.assertEqual(self._get_unsent_notifications().count(), 0)
|
Add basic tests for request notifications
Test that imported requests get entries in the notifications table.# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from freezegun import freeze_time
from datetime import datetime
from mock import patch
from ggrc.models import Notification
from ggrc.models import Person
from ggrc.models import Request
from integration.ggrc import converters
from integration.ggrc.models import factories
from integration.ggrc.generator import ObjectGenerator
class TestAssignableNotification(converters.TestCase):
""" This class contains simple one time workflow tests that are not
in the gsheet test grid
"""
def setUp(self):
converters.TestCase.setUp(self)
self.client.get("/login")
self._fix_notification_init()
def _fix_notification_init(self):
"""Fix Notification object init function
    This is a fix needed for a correct created_at field when using freezegun. By
    default the created_at field is left empty and filled by the database, which
    uses system time and not the fake date set by the freezegun plugin. This fix
    makes sure that an object created in a freeze_time block has all dates set with
the correct date and time.
"""
def init_decorator(init):
def new_init(self, *args, **kwargs):
init(self, *args, **kwargs)
if hasattr(self, "created_at"):
self.created_at = datetime.now()
return new_init
Notification.__init__ = init_decorator(Notification.__init__)
def _get_unsent_notifications(self):
return Notification.query.filter(Notification.sent_at.is_(None))
@patch("ggrc.notifications.common.send_email")
def test_request_without_verifiers(self, mock_mail):
with freeze_time("2015-04-01"):
self.import_file("request_full_no_warnings.csv")
self.assertEqual(self._get_unsent_notifications().count(), 12)
self.client.get("/_notifications/send_todays_digest")
self.assertEqual(self._get_unsent_notifications().count(), 0)
|
<commit_before><commit_msg>Add basic tests for request notifications
Test that imported requests get entries in the notifications table.<commit_after># Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from freezegun import freeze_time
from datetime import datetime
from mock import patch
from ggrc.models import Notification
from ggrc.models import Person
from ggrc.models import Request
from integration.ggrc import converters
from integration.ggrc.models import factories
from integration.ggrc.generator import ObjectGenerator
class TestAssignableNotification(converters.TestCase):
""" This class contains simple one time workflow tests that are not
in the gsheet test grid
"""
def setUp(self):
converters.TestCase.setUp(self)
self.client.get("/login")
self._fix_notification_init()
def _fix_notification_init(self):
"""Fix Notification object init function
    This is a fix needed for a correct created_at field when using freezegun. By
    default the created_at field is left empty and filled by the database, which
    uses system time and not the fake date set by the freezegun plugin. This fix
    makes sure that an object created in a freeze_time block has all dates set with
the correct date and time.
"""
def init_decorator(init):
def new_init(self, *args, **kwargs):
init(self, *args, **kwargs)
if hasattr(self, "created_at"):
self.created_at = datetime.now()
return new_init
Notification.__init__ = init_decorator(Notification.__init__)
def _get_unsent_notifications(self):
return Notification.query.filter(Notification.sent_at.is_(None))
@patch("ggrc.notifications.common.send_email")
def test_request_without_verifiers(self, mock_mail):
with freeze_time("2015-04-01"):
self.import_file("request_full_no_warnings.csv")
self.assertEqual(self._get_unsent_notifications().count(), 12)
self.client.get("/_notifications/send_todays_digest")
self.assertEqual(self._get_unsent_notifications().count(), 0)
|
|
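A short aside on why the __init__ patch above is needed, assuming freezegun is installed: datetime.now() is frozen inside the block, but a value filled in later by the database server is not.
from datetime import datetime
from freezegun import freeze_time
with freeze_time("2015-04-01"):
    assert datetime.now() == datetime(2015, 4, 1)  # Python-side clock is frozen
# A created_at column left to a database-side default would still get real wall-clock
# time, which is why the patched __init__ sets created_at explicitly.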
c1ebc02200e7b934ae6f1f5a40dc6c513b495a99
|
monitor.py
|
monitor.py
|
import re
from subprocess import Popen, PIPE, STDOUT
from sys import stdout
p = Popen(('stdbuf', '-oL', 'udevadm', 'monitor', '-k'), stdout=PIPE,
stderr=STDOUT, bufsize=1)
c = re.compile(r'KERNEL\[[^]]*\]\s*add\s*(?P<dev_path>\S*)\s*\(block\)')
for line in iter(p.stdout.readline, b''):
m = c.match(line)
if m:
dev_path = m.groupdict()['dev_path']
print dev_path
|
Print inserted block devices' paths
|
Print inserted block devices' paths
|
Python
|
mit
|
drkitty/arise
|
Print inserted block devices' paths
|
import re
from subprocess import Popen, PIPE, STDOUT
from sys import stdout
p = Popen(('stdbuf', '-oL', 'udevadm', 'monitor', '-k'), stdout=PIPE,
stderr=STDOUT, bufsize=1)
c = re.compile(r'KERNEL\[[^]]*\]\s*add\s*(?P<dev_path>\S*)\s*\(block\)')
for line in iter(p.stdout.readline, b''):
m = c.match(line)
if m:
dev_path = m.groupdict()['dev_path']
print dev_path
|
<commit_before><commit_msg>Print inserted block devices' paths<commit_after>
|
import re
from subprocess import Popen, PIPE, STDOUT
from sys import stdout
p = Popen(('stdbuf', '-oL', 'udevadm', 'monitor', '-k'), stdout=PIPE,
stderr=STDOUT, bufsize=1)
c = re.compile(r'KERNEL\[[^]]*\]\s*add\s*(?P<dev_path>\S*)\s*\(block\)')
for line in iter(p.stdout.readline, b''):
m = c.match(line)
if m:
dev_path = m.groupdict()['dev_path']
print dev_path
|
Print inserted block devices' pathsimport re
from subprocess import Popen, PIPE, STDOUT
from sys import stdout
p = Popen(('stdbuf', '-oL', 'udevadm', 'monitor', '-k'), stdout=PIPE,
stderr=STDOUT, bufsize=1)
c = re.compile(r'KERNEL\[[^]]*\]\s*add\s*(?P<dev_path>\S*)\s*\(block\)')
for line in iter(p.stdout.readline, b''):
m = c.match(line)
if m:
dev_path = m.groupdict()['dev_path']
print dev_path
|
<commit_before><commit_msg>Print inserted block devices' paths<commit_after>import re
from subprocess import Popen, PIPE, STDOUT
from sys import stdout
p = Popen(('stdbuf', '-oL', 'udevadm', 'monitor', '-k'), stdout=PIPE,
stderr=STDOUT, bufsize=1)
c = re.compile(r'KERNEL\[[^]]*\]\s*add\s*(?P<dev_path>\S*)\s*\(block\)')
for line in iter(p.stdout.readline, b''):
m = c.match(line)
if m:
dev_path = m.groupdict()['dev_path']
print dev_path
|
|
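For reference, a sketch of the kind of udevadm monitor line the regex above is meant to match; the device path is made up.
import re
c = re.compile(r'KERNEL\[[^]]*\]\s*add\s*(?P<dev_path>\S*)\s*\(block\)')
sample = 'KERNEL[1234.567890] add /devices/pci0000:00/0000:00:14.0/usb1/1-2/block/sdb/sdb1 (block)'
m = c.match(sample)
assert m and m.groupdict()['dev_path'].endswith('/sdb1')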
ac41939561575f60ed028101d9bf76bef8705829
|
parliament/bills/vote_urls.py
|
parliament/bills/vote_urls.py
|
from django.conf.urls import *
urlpatterns = patterns('parliament.bills.views',
url(r'^$', 'votes_for_session', name='votes'),
(r'^(?:session/)?(?P<session_id>\d+-\d)/$', 'votes_for_session'),
url(r'^(?P<session_id>\d+-\d)/(?P<number>\d+)/$', 'vote', name='vote'),
(r'^(?P<vote_id>\d+)/$', 'vote_pk_redirect'),
url(r'^ballots/$', 'ballots', name='vote_ballots'),
)
|
Add new urls file for votes
|
Add new urls file for votes
|
Python
|
agpl-3.0
|
litui/openparliament,litui/openparliament,rhymeswithcycle/openparliament,twhyte/openparliament,twhyte/openparliament,rhymeswithcycle/openparliament,rhymeswithcycle/openparliament,twhyte/openparliament,litui/openparliament
|
Add new urls file for votes
|
from django.conf.urls import *
urlpatterns = patterns('parliament.bills.views',
url(r'^$', 'votes_for_session', name='votes'),
(r'^(?:session/)?(?P<session_id>\d+-\d)/$', 'votes_for_session'),
url(r'^(?P<session_id>\d+-\d)/(?P<number>\d+)/$', 'vote', name='vote'),
(r'^(?P<vote_id>\d+)/$', 'vote_pk_redirect'),
url(r'^ballots/$', 'ballots', name='vote_ballots'),
)
|
<commit_before><commit_msg>Add new urls file for votes<commit_after>
|
from django.conf.urls import *
urlpatterns = patterns('parliament.bills.views',
url(r'^$', 'votes_for_session', name='votes'),
(r'^(?:session/)?(?P<session_id>\d+-\d)/$', 'votes_for_session'),
url(r'^(?P<session_id>\d+-\d)/(?P<number>\d+)/$', 'vote', name='vote'),
(r'^(?P<vote_id>\d+)/$', 'vote_pk_redirect'),
url(r'^ballots/$', 'ballots', name='vote_ballots'),
)
|
Add new urls file for votesfrom django.conf.urls import *
urlpatterns = patterns('parliament.bills.views',
url(r'^$', 'votes_for_session', name='votes'),
(r'^(?:session/)?(?P<session_id>\d+-\d)/$', 'votes_for_session'),
url(r'^(?P<session_id>\d+-\d)/(?P<number>\d+)/$', 'vote', name='vote'),
(r'^(?P<vote_id>\d+)/$', 'vote_pk_redirect'),
url(r'^ballots/$', 'ballots', name='vote_ballots'),
)
|
<commit_before><commit_msg>Add new urls file for votes<commit_after>from django.conf.urls import *
urlpatterns = patterns('parliament.bills.views',
url(r'^$', 'votes_for_session', name='votes'),
(r'^(?:session/)?(?P<session_id>\d+-\d)/$', 'votes_for_session'),
url(r'^(?P<session_id>\d+-\d)/(?P<number>\d+)/$', 'vote', name='vote'),
(r'^(?P<vote_id>\d+)/$', 'vote_pk_redirect'),
url(r'^ballots/$', 'ballots', name='vote_ballots'),
)
|
|
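As a quick illustration of what the vote pattern above accepts, applied outside Django to a made-up path:
import re
vote_re = re.compile(r'^(?P<session_id>\d+-\d)/(?P<number>\d+)/$')
m = vote_re.match('41-2/573/')
assert m and m.groupdict() == {'session_id': '41-2', 'number': '573'}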
37bc3da6fe5bcd8847d2fadc54100c18ab5b7730
|
prolog/interpreter/test/test_callable_arg_interface.py
|
prolog/interpreter/test/test_callable_arg_interface.py
|
from prolog.interpreter.parsing import parse_file, TermBuilder
from prolog.interpreter.term import Atom, Number, Term
import py
def parse(inp):
t = parse_file(inp)
builder = TermBuilder()
return builder.build(t)
atom = parse('a.')[0]
term = parse('t(a, b, c, d, f).')[0]
def test_atom_get_signature():
r = atom.get_prolog_signature()
r.name == '/'
r._args[0] == Atom('a')
r._args[1] == Number(0)
def test_atom_get_arguments():
assert atom.arguments() == []
def test_atom_arguemtn_count():
assert atom.argument_count() == 0
def test_atom_get_argument_at():
assert py.test.raises(IndexError, 'atom.argument_at(0)')
def test_term_get_signature():
r = term.get_prolog_signature()
print r
assert r.name == '/'
assert r._args[0].name == 't'
assert r._args[1].num == 5
def test_term_get_arguments():
t = term.arguments()
assert isinstance(t, list)
assert len(t) == 5
def test_term_get_argument_out_of_range():
py.test.raises(IndexError, 'term.argument_at(5)')
def test_term_get_argument_in_range():
t = term.argument_at(2)
assert t.name == 'c'
def test_term_argument_count():
assert term.argument_count() == 5
|
Refactor translated main to work with new continuations and some minor changes for the translation to work
|
Refactor translated main to work with new continuations and some minor changes for the translation to work
|
Python
|
mit
|
cosmoharrigan/pyrolog
|
Refactor translated main to work with new continuations and some minor changes for the translation to work
|
from prolog.interpreter.parsing import parse_file, TermBuilder
from prolog.interpreter.term import Atom, Number, Term
import py
def parse(inp):
t = parse_file(inp)
builder = TermBuilder()
return builder.build(t)
atom = parse('a.')[0]
term = parse('t(a, b, c, d, f).')[0]
def test_atom_get_signature():
r = atom.get_prolog_signature()
r.name == '/'
r._args[0] == Atom('a')
r._args[1] == Number(0)
def test_atom_get_arguments():
assert atom.arguments() == []
def test_atom_arguemtn_count():
assert atom.argument_count() == 0
def test_atom_get_argument_at():
assert py.test.raises(IndexError, 'atom.argument_at(0)')
def test_term_get_signature():
r = term.get_prolog_signature()
print r
assert r.name == '/'
assert r._args[0].name == 't'
assert r._args[1].num == 5
def test_term_get_arguments():
t = term.arguments()
assert isinstance(t, list)
assert len(t) == 5
def test_term_get_argument_out_of_range():
py.test.raises(IndexError, 'term.argument_at(5)')
def test_term_get_argument_in_range():
t = term.argument_at(2)
assert t.name == 'c'
def test_term_argument_count():
assert term.argument_count() == 5
|
<commit_before><commit_msg>Refactor translated main to work with new continuations and some minor changes for the translation to work<commit_after>
|
from prolog.interpreter.parsing import parse_file, TermBuilder
from prolog.interpreter.term import Atom, Number, Term
import py
def parse(inp):
t = parse_file(inp)
builder = TermBuilder()
return builder.build(t)
atom = parse('a.')[0]
term = parse('t(a, b, c, d, f).')[0]
def test_atom_get_signature():
r = atom.get_prolog_signature()
r.name == '/'
r._args[0] == Atom('a')
r._args[1] == Number(0)
def test_atom_get_arguments():
assert atom.arguments() == []
def test_atom_arguemtn_count():
assert atom.argument_count() == 0
def test_atom_get_argument_at():
assert py.test.raises(IndexError, 'atom.argument_at(0)')
def test_term_get_signature():
r = term.get_prolog_signature()
print r
assert r.name == '/'
assert r._args[0].name == 't'
assert r._args[1].num == 5
def test_term_get_arguments():
t = term.arguments()
assert isinstance(t, list)
assert len(t) == 5
def test_term_get_argument_out_of_range():
py.test.raises(IndexError, 'term.argument_at(5)')
def test_term_get_argument_in_range():
t = term.argument_at(2)
assert t.name == 'c'
def test_term_argument_count():
assert term.argument_count() == 5
|
Refactor translated main to work with new continuations and some minor changes for the translation to workfrom prolog.interpreter.parsing import parse_file, TermBuilder
from prolog.interpreter.term import Atom, Number, Term
import py
def parse(inp):
t = parse_file(inp)
builder = TermBuilder()
return builder.build(t)
atom = parse('a.')[0]
term = parse('t(a, b, c, d, f).')[0]
def test_atom_get_signature():
r = atom.get_prolog_signature()
r.name == '/'
r._args[0] == Atom('a')
r._args[1] == Number(0)
def test_atom_get_arguments():
assert atom.arguments() == []
def test_atom_arguemtn_count():
assert atom.argument_count() == 0
def test_atom_get_argument_at():
assert py.test.raises(IndexError, 'atom.argument_at(0)')
def test_term_get_signature():
r = term.get_prolog_signature()
print r
assert r.name == '/'
assert r._args[0].name == 't'
assert r._args[1].num == 5
def test_term_get_arguments():
t = term.arguments()
assert isinstance(t, list)
assert len(t) == 5
def test_term_get_argument_out_of_range():
py.test.raises(IndexError, 'term.argument_at(5)')
def test_term_get_argument_in_range():
t = term.argument_at(2)
assert t.name == 'c'
def test_term_argument_count():
assert term.argument_count() == 5
|
<commit_before><commit_msg>Refactor translated main to work with new continuations and some minor changes for the translation to work<commit_after>from prolog.interpreter.parsing import parse_file, TermBuilder
from prolog.interpreter.term import Atom, Number, Term
import py
def parse(inp):
t = parse_file(inp)
builder = TermBuilder()
return builder.build(t)
atom = parse('a.')[0]
term = parse('t(a, b, c, d, f).')[0]
def test_atom_get_signature():
r = atom.get_prolog_signature()
r.name == '/'
r._args[0] == Atom('a')
r._args[1] == Number(0)
def test_atom_get_arguments():
assert atom.arguments() == []
def test_atom_arguemtn_count():
assert atom.argument_count() == 0
def test_atom_get_argument_at():
assert py.test.raises(IndexError, 'atom.argument_at(0)')
def test_term_get_signature():
r = term.get_prolog_signature()
print r
assert r.name == '/'
assert r._args[0].name == 't'
assert r._args[1].num == 5
def test_term_get_arguments():
t = term.arguments()
assert isinstance(t, list)
assert len(t) == 5
def test_term_get_argument_out_of_range():
py.test.raises(IndexError, 'term.argument_at(5)')
def test_term_get_argument_in_range():
t = term.argument_at(2)
assert t.name == 'c'
def test_term_argument_count():
assert term.argument_count() == 5
|
|
d1ad98f58ea6a30269221a0d98c7c6cf544e90ec
|
get_images.py
|
get_images.py
|
from __future__ import with_statement
import urllib2, os, cStringIO
from bs4 import BeautifulSoup
from collections import defaultdict
DATA_DIR = 'data/'
IMG_DIR = 'data/images/'
DATA = 'data/testdata'
INTO_YOU = 'into you'
NOT_INTO_YOU = 'not into you'
VERDICT_STILL_OUT = 'verdict still out'
verdict_map = {"He's into you": INTO_YOU, "He's not into you": NOT_INTO_YOU,
"Verdict is still out": VERDICT_STILL_OUT}
def mk_file_if_ne (fname):
if not os.path.exists(fname):
os.makedirs(fname)
if __name__ == '__main__':
# make data dirs if needed
mk_file_if_ne(DATA_DIR)
mk_file_if_ne(IMG_DIR)
# get data
with open(DATA) as f:
raw = f.read()
# get all pics and votings
soup = BeautifulSoup(raw)
for div in soup.findAll('div', {'class': 'post-content'}):
img_src = None
print "COW"
for match in div.findAll('div', {'class': 'field-text-image'}):
img_src = match.img['src']
print img_src
break
countmap = defaultdict(lambda: 0)
for match in div.findAll('div', {'class': 'the-verdict'}):
for li in match.div.ul.findAll('li'):
count_type = li.a.text
count = int(li.text.replace(li.a.text, ""))
countmap[verdict_map[count_type]] = count
print count
print countmap
if img_src != None:
break
|
Add sketch of script for getting images and verdict data from site
|
Add sketch of script for getting images and verdict data from site
|
Python
|
mit
|
hausdorff/i-like-you,hausdorff/i-like-you
|
Add sketch of script for getting images and verdict data from site
|
from __future__ import with_statement
import urllib2, os, cStringIO
from bs4 import BeautifulSoup
from collections import defaultdict
DATA_DIR = 'data/'
IMG_DIR = 'data/images/'
DATA = 'data/testdata'
INTO_YOU = 'into you'
NOT_INTO_YOU = 'not into you'
VERDICT_STILL_OUT = 'verdict still out'
verdict_map = {"He's into you": INTO_YOU, "He's not into you": NOT_INTO_YOU,
"Verdict is still out": VERDICT_STILL_OUT}
def mk_file_if_ne (fname):
if not os.path.exists(fname):
os.makedirs(fname)
if __name__ == '__main__':
# make data dirs if needed
mk_file_if_ne(DATA_DIR)
mk_file_if_ne(IMG_DIR)
# get data
with open(DATA) as f:
raw = f.read()
# get all pics and votings
soup = BeautifulSoup(raw)
for div in soup.findAll('div', {'class': 'post-content'}):
img_src = None
print "COW"
for match in div.findAll('div', {'class': 'field-text-image'}):
img_src = match.img['src']
print img_src
break
countmap = defaultdict(lambda: 0)
for match in div.findAll('div', {'class': 'the-verdict'}):
for li in match.div.ul.findAll('li'):
count_type = li.a.text
count = int(li.text.replace(li.a.text, ""))
countmap[verdict_map[count_type]] = count
print count
print countmap
if img_src != None:
break
|
<commit_before><commit_msg>Add sketch of script for getting images and verdict data from site<commit_after>
|
from __future__ import with_statement
import urllib2, os, cStringIO
from bs4 import BeautifulSoup
from collections import defaultdict
DATA_DIR = 'data/'
IMG_DIR = 'data/images/'
DATA = 'data/testdata'
INTO_YOU = 'into you'
NOT_INTO_YOU = 'not into you'
VERDICT_STILL_OUT = 'verdict still out'
verdict_map = {"He's into you": INTO_YOU, "He's not into you": NOT_INTO_YOU,
"Verdict is still out": VERDICT_STILL_OUT}
def mk_file_if_ne (fname):
if not os.path.exists(fname):
os.makedirs(fname)
if __name__ == '__main__':
# make data dirs if needed
mk_file_if_ne(DATA_DIR)
mk_file_if_ne(IMG_DIR)
# get data
with open(DATA) as f:
raw = f.read()
# get all pics and votings
soup = BeautifulSoup(raw)
for div in soup.findAll('div', {'class': 'post-content'}):
img_src = None
print "COW"
for match in div.findAll('div', {'class': 'field-text-image'}):
img_src = match.img['src']
print img_src
break
countmap = defaultdict(lambda: 0)
for match in div.findAll('div', {'class': 'the-verdict'}):
for li in match.div.ul.findAll('li'):
count_type = li.a.text
count = int(li.text.replace(li.a.text, ""))
countmap[verdict_map[count_type]] = count
print count
print countmap
if img_src != None:
break
|
Add sketch of script for getting images and verdict data from sitefrom __future__ import with_statement
import urllib2, os, cStringIO
from bs4 import BeautifulSoup
from collections import defaultdict
DATA_DIR = 'data/'
IMG_DIR = 'data/images/'
DATA = 'data/testdata'
INTO_YOU = 'into you'
NOT_INTO_YOU = 'not into you'
VERDICT_STILL_OUT = 'verdict still out'
verdict_map = {"He's into you": INTO_YOU, "He's not into you": NOT_INTO_YOU,
"Verdict is still out": VERDICT_STILL_OUT}
def mk_file_if_ne (fname):
if not os.path.exists(fname):
os.makedirs(fname)
if __name__ == '__main__':
# make data dirs if needed
mk_file_if_ne(DATA_DIR)
mk_file_if_ne(IMG_DIR)
# get data
with open(DATA) as f:
raw = f.read()
# get all pics and votings
soup = BeautifulSoup(raw)
for div in soup.findAll('div', {'class': 'post-content'}):
img_src = None
print "COW"
for match in div.findAll('div', {'class': 'field-text-image'}):
img_src = match.img['src']
print img_src
break
countmap = defaultdict(lambda: 0)
for match in div.findAll('div', {'class': 'the-verdict'}):
for li in match.div.ul.findAll('li'):
count_type = li.a.text
count = int(li.text.replace(li.a.text, ""))
countmap[verdict_map[count_type]] = count
print count
print countmap
if img_src != None:
break
|
<commit_before><commit_msg>Add sketch of script for getting images and verdict data from site<commit_after>from __future__ import with_statement
import urllib2, os, cStringIO
from bs4 import BeautifulSoup
from collections import defaultdict
DATA_DIR = 'data/'
IMG_DIR = 'data/images/'
DATA = 'data/testdata'
INTO_YOU = 'into you'
NOT_INTO_YOU = 'not into you'
VERDICT_STILL_OUT = 'verdict still out'
verdict_map = {"He's into you": INTO_YOU, "He's not into you": NOT_INTO_YOU,
"Verdict is still out": VERDICT_STILL_OUT}
def mk_file_if_ne (fname):
if not os.path.exists(fname):
os.makedirs(fname)
if __name__ == '__main__':
# make data dirs if needed
mk_file_if_ne(DATA_DIR)
mk_file_if_ne(IMG_DIR)
# get data
with open(DATA) as f:
raw = f.read()
# get all pics and votings
soup = BeautifulSoup(raw)
for div in soup.findAll('div', {'class': 'post-content'}):
img_src = None
print "COW"
for match in div.findAll('div', {'class': 'field-text-image'}):
img_src = match.img['src']
print img_src
break
countmap = defaultdict(lambda: 0)
for match in div.findAll('div', {'class': 'the-verdict'}):
for li in match.div.ul.findAll('li'):
count_type = li.a.text
count = int(li.text.replace(li.a.text, ""))
countmap[verdict_map[count_type]] = count
print count
print countmap
if img_src != None:
break
|
|
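For illustration, a tiny self-contained run of the two BeautifulSoup lookups the scraper above relies on; the HTML is a made-up stand-in for the real page.
from bs4 import BeautifulSoup
html = ('<div class="post-content">'
        '<div class="field-text-image"><img src="http://example.com/pic.jpg"/></div>'
        '<div class="the-verdict"><div><ul>'
        "<li><a>He's into you</a> 12</li>"
        "<li><a>He's not into you</a> 3</li>"
        '</ul></div></div></div>')
soup = BeautifulSoup(html, 'html.parser')
post = soup.findAll('div', {'class': 'post-content'})[0]
img_src = post.findAll('div', {'class': 'field-text-image'})[0].img['src']
counts = {}
for li in post.findAll('div', {'class': 'the-verdict'})[0].div.ul.findAll('li'):
    counts[li.a.text] = int(li.text.replace(li.a.text, ''))
print(img_src, counts)  # http://example.com/pic.jpg plus the two vote counts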
a7bdab93801161224922c1324ecfdd16a4fe892f
|
registrations/test_models.py
|
registrations/test_models.py
|
from django.test import TestCase
from registrations.models import Registration
class RegistrationTests(TestCase):
def test_registration_status_external_id(self):
"""
If there is an external ID set, then the returned ID should be the
external ID, otherwise it should be the model's ID.
"""
reg = Registration(external_id='test-external', data={})
self.assertEqual(reg.status['registration_id'], 'test-external')
reg = Registration(data={})
self.assertEqual(reg.status['registration_id'], str(reg.id))
def test_registration_status_succeeded(self):
"""
If validated=True, then the status should be succeeded
"""
reg = Registration(validated=True)
self.assertEqual(reg.status['status'], 'succeeded')
def test_registration_status_validation_failed(self):
"""
If validated=False, and there are invalid_fields in the data, then
the status should be validation_failed, and the error should be the
invalid fields
"""
invalid_fields = {
'test-field': 'Test reason',
}
reg = Registration(data={'invalid_fields': invalid_fields})
self.assertEqual(reg.status['status'], 'validation_failed')
self.assertEqual(reg.status['error'], invalid_fields)
def test_registration_status_failed(self):
"""
If validated=False, and there is error_data in the data, then the
status should be failed, and the error should be the error data
"""
error_data = {
'test-error': 'error-data',
}
reg = Registration(data={'error_data': error_data})
self.assertEqual(reg.status['status'], 'failed')
self.assertEqual(reg.status['error'], error_data)
def test_registration_status_processing(self):
"""
If validated=False, but there is no error data in data, then the status
should be processing
"""
reg = Registration(data={})
self.assertEqual(reg.status['status'], 'processing')
|
Add tests for the registration status
|
Add tests for the registration status
|
Python
|
bsd-3-clause
|
praekeltfoundation/ndoh-hub,praekeltfoundation/ndoh-hub,praekeltfoundation/ndoh-hub
|
Add tests for the registration status
|
from django.test import TestCase
from registrations.models import Registration
class RegistrationTests(TestCase):
def test_registration_status_external_id(self):
"""
If there is an external ID set, then the returned ID should be the
external ID, otherwise it should be the model's ID.
"""
reg = Registration(external_id='test-external', data={})
self.assertEqual(reg.status['registration_id'], 'test-external')
reg = Registration(data={})
self.assertEqual(reg.status['registration_id'], str(reg.id))
def test_registration_status_succeeded(self):
"""
If validated=True, then the status should be succeeded
"""
reg = Registration(validated=True)
self.assertEqual(reg.status['status'], 'succeeded')
def test_registration_status_validation_failed(self):
"""
If validated=False, and there are invalid_fields in the data, then
the status should be validation_failed, and the error should be the
invalid fields
"""
invalid_fields = {
'test-field': 'Test reason',
}
reg = Registration(data={'invalid_fields': invalid_fields})
self.assertEqual(reg.status['status'], 'validation_failed')
self.assertEqual(reg.status['error'], invalid_fields)
def test_registration_status_failed(self):
"""
If validated=False, and there is error_data in the data, then the
status should be failed, and the error should be the error data
"""
error_data = {
'test-error': 'error-data',
}
reg = Registration(data={'error_data': error_data})
self.assertEqual(reg.status['status'], 'failed')
self.assertEqual(reg.status['error'], error_data)
def test_registration_status_processing(self):
"""
If validated=False, but there is no error data in data, then the status
should be processing
"""
reg = Registration(data={})
self.assertEqual(reg.status['status'], 'processing')
|
<commit_before><commit_msg>Add tests for the registration status<commit_after>
|
from django.test import TestCase
from registrations.models import Registration
class RegistrationTests(TestCase):
def test_registration_status_external_id(self):
"""
If there is an external ID set, then the returned ID should be the
external ID, otherwise it should be the model's ID.
"""
reg = Registration(external_id='test-external', data={})
self.assertEqual(reg.status['registration_id'], 'test-external')
reg = Registration(data={})
self.assertEqual(reg.status['registration_id'], str(reg.id))
def test_registration_status_succeeded(self):
"""
If validated=True, then the status should be succeeded
"""
reg = Registration(validated=True)
self.assertEqual(reg.status['status'], 'succeeded')
def test_registration_status_validation_failed(self):
"""
If validated=False, and there are invalid_fields in the data, then
the status should be validation_failed, and the error should be the
invalid fields
"""
invalid_fields = {
'test-field': 'Test reason',
}
reg = Registration(data={'invalid_fields': invalid_fields})
self.assertEqual(reg.status['status'], 'validation_failed')
self.assertEqual(reg.status['error'], invalid_fields)
def test_registration_status_failed(self):
"""
If validated=False, and there is error_data in the data, then the
status should be failed, and the error should be the error data
"""
error_data = {
'test-error': 'error-data',
}
reg = Registration(data={'error_data': error_data})
self.assertEqual(reg.status['status'], 'failed')
self.assertEqual(reg.status['error'], error_data)
def test_registration_status_processing(self):
"""
If validated=False, but there is no error data in data, then the status
should be processing
"""
reg = Registration(data={})
self.assertEqual(reg.status['status'], 'processing')
|
Add tests for the registration status
from django.test import TestCase
from registrations.models import Registration
class RegistrationTests(TestCase):
def test_registration_status_external_id(self):
"""
If there is an external ID set, then the returned ID should be the
external ID, otherwise it should be the model's ID.
"""
reg = Registration(external_id='test-external', data={})
self.assertEqual(reg.status['registration_id'], 'test-external')
reg = Registration(data={})
self.assertEqual(reg.status['registration_id'], str(reg.id))
def test_registration_status_succeeded(self):
"""
If validated=True, then the status should be succeeded
"""
reg = Registration(validated=True)
self.assertEqual(reg.status['status'], 'succeeded')
def test_registration_status_validation_failed(self):
"""
If validated=False, and there are invalid_fields in the data, then
the status should be validation_failed, and the error should be the
invalid fields
"""
invalid_fields = {
'test-field': 'Test reason',
}
reg = Registration(data={'invalid_fields': invalid_fields})
self.assertEqual(reg.status['status'], 'validation_failed')
self.assertEqual(reg.status['error'], invalid_fields)
def test_registration_status_failed(self):
"""
If validated=False, and there is error_data in the data, then the
status should be failed, and the error should be the error data
"""
error_data = {
'test-error': 'error-data',
}
reg = Registration(data={'error_data': error_data})
self.assertEqual(reg.status['status'], 'failed')
self.assertEqual(reg.status['error'], error_data)
def test_registration_status_processing(self):
"""
If validated=False, but there is no error data in data, then the status
should be processing
"""
reg = Registration(data={})
self.assertEqual(reg.status['status'], 'processing')
|
<commit_before><commit_msg>Add tests for the registration status<commit_after>from django.test import TestCase
from registrations.models import Registration
class RegistrationTests(TestCase):
def test_registration_status_external_id(self):
"""
If there is an external ID set, then the returned ID should be the
external ID, otherwise it should be the model's ID.
"""
reg = Registration(external_id='test-external', data={})
self.assertEqual(reg.status['registration_id'], 'test-external')
reg = Registration(data={})
self.assertEqual(reg.status['registration_id'], str(reg.id))
def test_registration_status_succeeded(self):
"""
If validated=True, then the status should be succeeded
"""
reg = Registration(validated=True)
self.assertEqual(reg.status['status'], 'succeeded')
def test_registration_status_validation_failed(self):
"""
If validated=False, and there are invalid_fields in the data, then
the status should be validation_failed, and the error should be the
invalid fields
"""
invalid_fields = {
'test-field': 'Test reason',
}
reg = Registration(data={'invalid_fields': invalid_fields})
self.assertEqual(reg.status['status'], 'validation_failed')
self.assertEqual(reg.status['error'], invalid_fields)
def test_registration_status_failed(self):
"""
If validated=False, and there is error_data in the data, then the
status should be failed, and the error should be the error data
"""
error_data = {
'test-error': 'error-data',
}
reg = Registration(data={'error_data': error_data})
self.assertEqual(reg.status['status'], 'failed')
self.assertEqual(reg.status['error'], error_data)
def test_registration_status_processing(self):
"""
If validated=False, but there is no error data in data, then the status
should be processing
"""
reg = Registration(data={})
self.assertEqual(reg.status['status'], 'processing')
|
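The tests above pin down the behaviour of Registration.status without showing the property itself. A hedged sketch of a model satisfying those assertions follows; the field definitions are guesses (JSONField assumes Django 3.1 or later) and the real registrations/models.py may be organised differently:

# Sketch of the status property implied by the tests; not the actual model.
from django.db import models

class Registration(models.Model):
    external_id = models.CharField(max_length=255, null=True, blank=True)  # assumed definition
    validated = models.BooleanField(default=False)                         # assumed definition
    data = models.JSONField(default=dict)                                  # assumed definition

    @property
    def status(self):
        if self.validated:
            state, error = 'succeeded', None
        elif 'invalid_fields' in self.data:
            state, error = 'validation_failed', self.data['invalid_fields']
        elif 'error_data' in self.data:
            state, error = 'failed', self.data['error_data']
        else:
            state, error = 'processing', None
        return {
            'registration_id': self.external_id or str(self.id),
            'status': state,
            'error': error,
        }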
|
8baac989b56dfaf24a0fc3cbb77ed0d842604acc
|
tools/compare_snack_formants.py
|
tools/compare_snack_formants.py
|
# Script to compare Snack formant methods on Windows
# There is a known discrepancy between Snack formants calculated using
# the full Snack Tcl library ('tcl' method) and the Windows standalone
# executable ('exe' method)
import sys
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from opensauce.snack import snack_raw_formants, sformant_names
wav_dir = 'test/data/sound-files'
# Find all .wav files in test/data directory
wav_files = glob.glob(os.path.join(wav_dir, '*.wav'))
for wav_file in wav_files:
print('Processing wav file {}'.format(wav_file))
# Generate raw Snack formant samples
# Use VoiceSauce default parameter values
estimates_raw_tcl = snack_raw_formants(wav_file, 'tcl', frame_shift=1, window_size=25, pre_emphasis=0.96, lpc_order=12, tcl_shell_cmd = 'tclsh')
estimates_raw_exe = snack_raw_formants(wav_file, 'exe', frame_shift=1, window_size=25, pre_emphasis=0.96, lpc_order=12)
fig = plt.figure(figsize=(8,8))
fig.suptitle('Snack formants - ' + os.path.basename(wav_file))
for i, n in enumerate(sformant_names):
ax = plt.subplot(4,2,i+1)
ax.plot(estimates_raw_exe[n], 'r.', estimates_raw_tcl[n], 'bo', markersize=1)
#ax.plot(estimates_raw_tcl[n], 'bo', estimates_raw_exe[n], 'r.', markersize=1)
ax.set_xticklabels([])
plt.title(n)
plt.savefig(os.path.splitext(os.path.basename(wav_file))[0] + '.pdf')
plt.savefig(os.path.splitext(os.path.basename(wav_file))[0] + '.png')
|
Add script for comparing Snack formant methods on Windows
|
Add script for comparing Snack formant methods on Windows
This is for making the comparison Snack formant plots in
https://github.com/voicesauce/opensauce-python/issues/27
|
Python
|
apache-2.0
|
voicesauce/opensauce-python,voicesauce/opensauce-python,voicesauce/opensauce-python
|
Add script for comparing Snack formant methods on Windows
This is for making the comparison Snack formant plots in
https://github.com/voicesauce/opensauce-python/issues/27
|
# Script to compare Snack formant methods on Windows
# There is a known discrepancy between Snack formants calculated using
# the full Snack Tcl library ('tcl' method) and the Windows standalone
# executable ('exe' method)
import sys
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from opensauce.snack import snack_raw_formants, sformant_names
wav_dir = 'test/data/sound-files'
# Find all .wav files in test/data directory
wav_files = glob.glob(os.path.join(wav_dir, '*.wav'))
for wav_file in wav_files:
print('Processing wav file {}'.format(wav_file))
# Generate raw Snack formant samples
# Use VoiceSauce default parameter values
estimates_raw_tcl = snack_raw_formants(wav_file, 'tcl', frame_shift=1, window_size=25, pre_emphasis=0.96, lpc_order=12, tcl_shell_cmd = 'tclsh')
estimates_raw_exe = snack_raw_formants(wav_file, 'exe', frame_shift=1, window_size=25, pre_emphasis=0.96, lpc_order=12)
fig = plt.figure(figsize=(8,8))
fig.suptitle('Snack formants - ' + os.path.basename(wav_file))
for i, n in enumerate(sformant_names):
ax = plt.subplot(4,2,i+1)
ax.plot(estimates_raw_exe[n], 'r.', estimates_raw_tcl[n], 'bo', markersize=1)
#ax.plot(estimates_raw_tcl[n], 'bo', estimates_raw_exe[n], 'r.', markersize=1)
ax.set_xticklabels([])
plt.title(n)
plt.savefig(os.path.splitext(os.path.basename(wav_file))[0] + '.pdf')
plt.savefig(os.path.splitext(os.path.basename(wav_file))[0] + '.png')
|
<commit_before><commit_msg>Add script for comparing Snack formant methods on Windows
This is for making the comparison Snack formant plots in
https://github.com/voicesauce/opensauce-python/issues/27<commit_after>
|
# Script to compare Snack formant methods on Windows
# There is a known discrepancy between Snack formants calculated using
# the full Snack Tcl library ('tcl' method) and the Windows standalone
# executable ('exe' method)
import sys
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from opensauce.snack import snack_raw_formants, sformant_names
wav_dir = 'test/data/sound-files'
# Find all .wav files in test/data directory
wav_files = glob.glob(os.path.join(wav_dir, '*.wav'))
for wav_file in wav_files:
print('Processing wav file {}'.format(wav_file))
# Generate raw Snack formant samples
# Use VoiceSauce default parameter values
estimates_raw_tcl = snack_raw_formants(wav_file, 'tcl', frame_shift=1, window_size=25, pre_emphasis=0.96, lpc_order=12, tcl_shell_cmd = 'tclsh')
estimates_raw_exe = snack_raw_formants(wav_file, 'exe', frame_shift=1, window_size=25, pre_emphasis=0.96, lpc_order=12)
fig = plt.figure(figsize=(8,8))
fig.suptitle('Snack formants - ' + os.path.basename(wav_file))
for i, n in enumerate(sformant_names):
ax = plt.subplot(4,2,i+1)
ax.plot(estimates_raw_exe[n], 'r.', estimates_raw_tcl[n], 'bo', markersize=1)
#ax.plot(estimates_raw_tcl[n], 'bo', estimates_raw_exe[n], 'r.', markersize=1)
ax.set_xticklabels([])
plt.title(n)
plt.savefig(os.path.splitext(os.path.basename(wav_file))[0] + '.pdf')
plt.savefig(os.path.splitext(os.path.basename(wav_file))[0] + '.png')
|
Add script for comparing Snack formant methods on Windows
This is for making the comparison Snack formant plots in
https://github.com/voicesauce/opensauce-python/issues/27
# Script to compare Snack formant methods on Windows
# There is a known discrepancy between Snack formants calculated using
# the full Snack Tcl library ('tcl' method) and the Windows standalone
# executable ('exe' method)
import sys
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from opensauce.snack import snack_raw_formants, sformant_names
wav_dir = 'test/data/sound-files'
# Find all .wav files in test/data directory
wav_files = glob.glob(os.path.join(wav_dir, '*.wav'))
for wav_file in wav_files:
print('Processing wav file {}'.format(wav_file))
# Generate raw Snack formant samples
# Use VoiceSauce default parameter values
estimates_raw_tcl = snack_raw_formants(wav_file, 'tcl', frame_shift=1, window_size=25, pre_emphasis=0.96, lpc_order=12, tcl_shell_cmd = 'tclsh')
estimates_raw_exe = snack_raw_formants(wav_file, 'exe', frame_shift=1, window_size=25, pre_emphasis=0.96, lpc_order=12)
fig = plt.figure(figsize=(8,8))
fig.suptitle('Snack formants - ' + os.path.basename(wav_file))
for i, n in enumerate(sformant_names):
ax = plt.subplot(4,2,i+1)
ax.plot(estimates_raw_exe[n], 'r.', estimates_raw_tcl[n], 'bo', markersize=1)
#ax.plot(estimates_raw_tcl[n], 'bo', estimates_raw_exe[n], 'r.', markersize=1)
ax.set_xticklabels([])
plt.title(n)
plt.savefig(os.path.splitext(os.path.basename(wav_file))[0] + '.pdf')
plt.savefig(os.path.splitext(os.path.basename(wav_file))[0] + '.png')
|
<commit_before><commit_msg>Add script for comparing Snack formant methods on Windows
This is for making the comparison Snack formant plots in
https://github.com/voicesauce/opensauce-python/issues/27<commit_after># Script to compare Snack formant methods on Windows
# There is a known discrepancy between Snack formants calculated using
# the full Snack Tcl library ('tcl' method) and the Windows standalone
# executable ('exe' method)
import sys
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from opensauce.snack import snack_raw_formants, sformant_names
wav_dir = 'test/data/sound-files'
# Find all .wav files in test/data directory
wav_files = glob.glob(os.path.join(wav_dir, '*.wav'))
for wav_file in wav_files:
print('Processing wav file {}'.format(wav_file))
# Generate raw Snack formant samples
# Use VoiceSauce default parameter values
estimates_raw_tcl = snack_raw_formants(wav_file, 'tcl', frame_shift=1, window_size=25, pre_emphasis=0.96, lpc_order=12, tcl_shell_cmd = 'tclsh')
estimates_raw_exe = snack_raw_formants(wav_file, 'exe', frame_shift=1, window_size=25, pre_emphasis=0.96, lpc_order=12)
fig = plt.figure(figsize=(8,8))
fig.suptitle('Snack formants - ' + os.path.basename(wav_file))
for i, n in enumerate(sformant_names):
ax = plt.subplot(4,2,i+1)
ax.plot(estimates_raw_exe[n], 'r.', estimates_raw_tcl[n], 'bo', markersize=1)
#ax.plot(estimates_raw_tcl[n], 'bo', estimates_raw_exe[n], 'r.', markersize=1)
ax.set_xticklabels([])
plt.title(n)
plt.savefig(os.path.splitext(os.path.basename(wav_file))[0] + '.pdf')
plt.savefig(os.path.splitext(os.path.basename(wav_file))[0] + '.png')
|
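The script above only plots the 'tcl' and 'exe' estimates side by side; a numeric summary makes the known discrepancy easier to quantify. A sketch that could sit at the end of the same loop, reusing estimates_raw_tcl, estimates_raw_exe and sformant_names from above:

# Sketch: per-track discrepancy summary between the two Snack methods.
import numpy as np

def summarise_difference(est_a, est_b, names):
    for n in names:
        a = np.asarray(est_a[n], dtype=float)
        b = np.asarray(est_b[n], dtype=float)
        m = min(len(a), len(b))  # tracks may differ in length by a frame or two
        diff = np.abs(a[:m] - b[:m])
        print('{}: max |diff| = {:.3f}, mean |diff| = {:.3f}'.format(n, diff.max(), diff.mean()))

# usage inside the loop: summarise_difference(estimates_raw_tcl, estimates_raw_exe, sformant_names)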
|
2b22bf1db53a1fa514afcb4361cbc162908416c6
|
alembic/versions/a8ad6e7fadd6_reset_current_typeemote_quest.py
|
alembic/versions/a8ad6e7fadd6_reset_current_typeemote_quest.py
|
"""Reset current typeemote quest
Revision ID: a8ad6e7fadd6
Revises: 5f746af0a82d
Create Date: 2019-06-09 11:04:34.385778
"""
# revision identifiers, used by Alembic.
from pajbot.managers.redis import RedisManager
from pajbot.streamhelper import StreamHelper
revision = "a8ad6e7fadd6"
down_revision = "5f746af0a82d"
branch_labels = None
depends_on = None
streamer = StreamHelper.get_streamer()
def upgrade():
with RedisManager.pipeline_context() as redis:
redis.delete("{streamer}:current_quest_emote".format(streamer=streamer))
redis.delete("{streamer}:current_quest".format(streamer=streamer))
redis.delete("{streamer}:current_quest_progress".format(streamer=streamer))
redis.delete("{streamer}:quests:finished".format(streamer=streamer))
def downgrade():
with RedisManager.pipeline_context() as redis:
redis.delete("{streamer}:current_quest_emote".format(streamer=streamer))
redis.delete("{streamer}:current_quest".format(streamer=streamer))
redis.delete("{streamer}:current_quest_progress".format(streamer=streamer))
redis.delete("{streamer}:quests:finished".format(streamer=streamer))
|
Add alembic revision to reset current typeemote quest
|
Add alembic revision to reset current typeemote quest
|
Python
|
mit
|
pajlada/pajbot,pajlada/tyggbot,pajlada/tyggbot,pajlada/tyggbot,pajlada/pajbot,pajlada/pajbot,pajlada/tyggbot,pajlada/pajbot
|
Add alembic revision to reset current typeemote quest
|
"""Reset current typeemote quest
Revision ID: a8ad6e7fadd6
Revises: 5f746af0a82d
Create Date: 2019-06-09 11:04:34.385778
"""
# revision identifiers, used by Alembic.
from pajbot.managers.redis import RedisManager
from pajbot.streamhelper import StreamHelper
revision = "a8ad6e7fadd6"
down_revision = "5f746af0a82d"
branch_labels = None
depends_on = None
streamer = StreamHelper.get_streamer()
def upgrade():
with RedisManager.pipeline_context() as redis:
redis.delete("{streamer}:current_quest_emote".format(streamer=streamer))
redis.delete("{streamer}:current_quest".format(streamer=streamer))
redis.delete("{streamer}:current_quest_progress".format(streamer=streamer))
redis.delete("{streamer}:quests:finished".format(streamer=streamer))
def downgrade():
with RedisManager.pipeline_context() as redis:
redis.delete("{streamer}:current_quest_emote".format(streamer=streamer))
redis.delete("{streamer}:current_quest".format(streamer=streamer))
redis.delete("{streamer}:current_quest_progress".format(streamer=streamer))
redis.delete("{streamer}:quests:finished".format(streamer=streamer))
|
<commit_before><commit_msg>Add alembic revision to reset current typeemote quest<commit_after>
|
"""Reset current typeemote quest
Revision ID: a8ad6e7fadd6
Revises: 5f746af0a82d
Create Date: 2019-06-09 11:04:34.385778
"""
# revision identifiers, used by Alembic.
from pajbot.managers.redis import RedisManager
from pajbot.streamhelper import StreamHelper
revision = "a8ad6e7fadd6"
down_revision = "5f746af0a82d"
branch_labels = None
depends_on = None
streamer = StreamHelper.get_streamer()
def upgrade():
with RedisManager.pipeline_context() as redis:
redis.delete("{streamer}:current_quest_emote".format(streamer=streamer))
redis.delete("{streamer}:current_quest".format(streamer=streamer))
redis.delete("{streamer}:current_quest_progress".format(streamer=streamer))
redis.delete("{streamer}:quests:finished".format(streamer=streamer))
def downgrade():
with RedisManager.pipeline_context() as redis:
redis.delete("{streamer}:current_quest_emote".format(streamer=streamer))
redis.delete("{streamer}:current_quest".format(streamer=streamer))
redis.delete("{streamer}:current_quest_progress".format(streamer=streamer))
redis.delete("{streamer}:quests:finished".format(streamer=streamer))
|
Add alembic revision to reset current typeemote quest
"""Reset current typeemote quest
Revision ID: a8ad6e7fadd6
Revises: 5f746af0a82d
Create Date: 2019-06-09 11:04:34.385778
"""
# revision identifiers, used by Alembic.
from pajbot.managers.redis import RedisManager
from pajbot.streamhelper import StreamHelper
revision = "a8ad6e7fadd6"
down_revision = "5f746af0a82d"
branch_labels = None
depends_on = None
streamer = StreamHelper.get_streamer()
def upgrade():
with RedisManager.pipeline_context() as redis:
redis.delete("{streamer}:current_quest_emote".format(streamer=streamer))
redis.delete("{streamer}:current_quest".format(streamer=streamer))
redis.delete("{streamer}:current_quest_progress".format(streamer=streamer))
redis.delete("{streamer}:quests:finished".format(streamer=streamer))
def downgrade():
with RedisManager.pipeline_context() as redis:
redis.delete("{streamer}:current_quest_emote".format(streamer=streamer))
redis.delete("{streamer}:current_quest".format(streamer=streamer))
redis.delete("{streamer}:current_quest_progress".format(streamer=streamer))
redis.delete("{streamer}:quests:finished".format(streamer=streamer))
|
<commit_before><commit_msg>Add alembic revision to reset current typeemote quest<commit_after>"""Reset current typeemote quest
Revision ID: a8ad6e7fadd6
Revises: 5f746af0a82d
Create Date: 2019-06-09 11:04:34.385778
"""
# revision identifiers, used by Alembic.
from pajbot.managers.redis import RedisManager
from pajbot.streamhelper import StreamHelper
revision = "a8ad6e7fadd6"
down_revision = "5f746af0a82d"
branch_labels = None
depends_on = None
streamer = StreamHelper.get_streamer()
def upgrade():
with RedisManager.pipeline_context() as redis:
redis.delete("{streamer}:current_quest_emote".format(streamer=streamer))
redis.delete("{streamer}:current_quest".format(streamer=streamer))
redis.delete("{streamer}:current_quest_progress".format(streamer=streamer))
redis.delete("{streamer}:quests:finished".format(streamer=streamer))
def downgrade():
with RedisManager.pipeline_context() as redis:
redis.delete("{streamer}:current_quest_emote".format(streamer=streamer))
redis.delete("{streamer}:current_quest".format(streamer=streamer))
redis.delete("{streamer}:current_quest_progress".format(streamer=streamer))
redis.delete("{streamer}:quests:finished".format(streamer=streamer))
|
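upgrade() and downgrade() in the revision above delete the same four Redis keys, so a shared helper keeps the two sides identical if more quest keys are ever added. A sketch using the same RedisManager pipeline and streamer value already imported in the revision:

# Sketch: factor the duplicated deletes into one helper.
QUEST_KEYS = (
    'current_quest_emote',
    'current_quest',
    'current_quest_progress',
    'quests:finished',
)

def _clear_quest_keys():
    with RedisManager.pipeline_context() as redis:
        for key in QUEST_KEYS:
            redis.delete('{streamer}:{key}'.format(streamer=streamer, key=key))

def upgrade():
    _clear_quest_keys()

def downgrade():
    _clear_quest_keys()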
|
7d281b5139faf889de6e05c22e9ddd6c1ed7c05f
|
sara_flexbe_states/src/sara_flexbe_states/WonderlandClearPeoples.py
|
sara_flexbe_states/src/sara_flexbe_states/WonderlandClearPeoples.py
|
#!/usr/bin/env python
# encoding=utf8
import requests
from flexbe_core import EventState
from rospy import logerr, loginfo
"""
Created on 09/06/2018
@author: Lucas Maurice
"""
class WonderlandClearPeoples(EventState):
'''
Reset the people list in Wonderland. To use with care !
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandClearPeoples, self).__init__(outcomes=['done', 'error'])
self.url = "http://wonderland:8000/api/clear-people/"
def execute(self, userdata):
# try the request
try:
response = requests.delete(self.url)
loginfo(response)
except requests.exceptions.RequestException as e:
logerr(e)
return 'error'
return 'done'
|
Create a state for reset peoples in wonderland.
|
Create a state for reset peoples in wonderland.
|
Python
|
bsd-3-clause
|
WalkingMachine/sara_behaviors,WalkingMachine/sara_behaviors
|
Create a state for reset peoples in wonderland.
|
#!/usr/bin/env python
# encoding=utf8
import requests
from flexbe_core import EventState
from rospy import logerr, loginfo
"""
Created on 09/06/2018
@author: Lucas Maurice
"""
class WonderlandClearPeoples(EventState):
'''
Reset the people list in Wonderland. To use with care !
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandClearPeoples, self).__init__(outcomes=['done', 'error'])
self.url = "http://wonderland:8000/api/clear-people/"
def execute(self, userdata):
# try the request
try:
response = requests.delete(self.url)
loginfo(response)
except requests.exceptions.RequestException as e:
logerr(e)
return 'error'
return 'done'
|
<commit_before><commit_msg>Create a state for reset peoples in wonderland.<commit_after>
|
#!/usr/bin/env python
# encoding=utf8
import requests
from flexbe_core import EventState
from rospy import logerr, loginfo
"""
Created on 09/06/2018
@author: Lucas Maurice
"""
class WonderlandClearPeoples(EventState):
'''
Reset the people list in Wonderland. To use with care !
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandClearPeoples, self).__init__(outcomes=['done', 'error'])
self.url = "http://wonderland:8000/api/clear-people/"
def execute(self, userdata):
# try the request
try:
response = requests.delete(self.url)
loginfo(response)
except requests.exceptions.RequestException as e:
logerr(e)
return 'error'
return 'done'
|
Create a state for reset peoples in wonderland.
#!/usr/bin/env python
# encoding=utf8
import requests
from flexbe_core import EventState
from rospy import logerr, loginfo
"""
Created on 09/06/2018
@author: Lucas Maurice
"""
class WonderlandClearPeoples(EventState):
'''
Reset the people list in Wonderland. To use with care !
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandClearPeoples, self).__init__(outcomes=['done', 'error'])
self.url = "http://wonderland:8000/api/clear-people/"
def execute(self, userdata):
# try the request
try:
response = requests.delete(self.url)
loginfo(response)
except requests.exceptions.RequestException as e:
logerr(e)
return 'error'
return 'done'
|
<commit_before><commit_msg>Create a state for reset peoples in wonderland.<commit_after>#!/usr/bin/env python
# encoding=utf8
import requests
from flexbe_core import EventState
from rospy import logerr, loginfo
"""
Created on 09/06/2018
@author: Lucas Maurice
"""
class WonderlandClearPeoples(EventState):
'''
Reset the people list in Wonderland. To use with care !
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandClearPeoples, self).__init__(outcomes=['done', 'error'])
self.url = "http://wonderland:8000/api/clear-people/"
def execute(self, userdata):
# try the request
try:
response = requests.delete(self.url)
loginfo(response)
except requests.exceptions.RequestException as e:
logerr(e)
return 'error'
return 'done'
|
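requests.delete() in execute() above is issued without a timeout, so an unresponsive Wonderland service would block the behaviour indefinitely. A hedged variant of execute() with an explicit timeout (the 5-second value is an assumption, not taken from the original):

# Sketch: same flow as execute() above, plus a timeout; RequestException also covers Timeout.
def execute(self, userdata):
    try:
        response = requests.delete(self.url, timeout=5.0)
        loginfo(response)
    except requests.exceptions.RequestException as e:
        logerr(e)
        return 'error'
    return 'done'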
|
d20a30fce8d9025629688f8c3a8d65a6747ef713
|
crawler/management/commands/compare_my_similar_with_google_similar_per_app.py
|
crawler/management/commands/compare_my_similar_with_google_similar_per_app.py
|
import logging.config
from operator import or_
from django.core.management.base import BaseCommand
from crawler.models import *
logger = logging.getLogger('crawler.command')
class Command(BaseCommand):
help = 'Generate comparison between google similar app and ours per app'
def handle(self, *args, **options):
apps = self.get_my_similar()
result_dict = dict()
for app in apps:
count = 0
similar_apps = SimilarApp.objects.filter(source_package=app).all()
total = len(similar_apps)
for similar_app in similar_apps:
if self.is_compatible_with_google(similar_app):
count = count + 1
similar_apps = SimilarApp.objects.filter(similar_package=app).all()
total = total + len(similar_apps)
for similar_app in similar_apps:
if self.is_compatible_with_google(similar_app):
count = count + 1
percentage = count / total
if percentage not in result_dict:
result_count = 0
else:
result_count = result_dict[percentage]
result_dict[percentage] = result_count + 1
admin_file = open('comparison_per_app.csv', 'w')
admin_file.write('{};{}\n'.format('Percentage of Compatibility', 'Apps in this situation'))
for key in result_dict:
admin_file.write('{};{}\n'.format(key, result_dict[key]))
admin_file.close()
self.stdout.write(self.style.SUCCESS('Finished'.format()))
@staticmethod
def is_compatible_with_google(similar_app):
count = GoogleSimilarApp.objects.filter(
source_package=similar_app.source_package,
similar_package=similar_app.similar_package
).count()
if count > 0:
return True
count = GoogleSimilarApp.objects.filter(
source_package=similar_app.similar_package,
similar_package=similar_app.source_package
).count()
if count > 0:
return True
return False
@staticmethod
def get_my_similar():
apps = SimilarApp.objects.order_by().values_list('source_package', flat=True).distinct()
similar_apps = SimilarApp.objects.order_by().values_list('similar_package', flat=True).distinct()
app_set = set(apps)
similar_set = set(similar_apps)
merged_set = reduce(or_, [app_set, similar_set])
return merged_set
|
Create command to compare my similar with google similar per app
|
Create command to compare my similar with google similar per app
|
Python
|
apache-2.0
|
bkosawa/admin-recommendation
|
Create command to compare my similar with google similar per app
|
import logging.config
from operator import or_
from django.core.management.base import BaseCommand
from crawler.models import *
logger = logging.getLogger('crawler.command')
class Command(BaseCommand):
help = 'Generate comparison between google similar app and ours per app'
def handle(self, *args, **options):
apps = self.get_my_similar()
result_dict = dict()
for app in apps:
count = 0
similar_apps = SimilarApp.objects.filter(source_package=app).all()
total = len(similar_apps)
for similar_app in similar_apps:
if self.is_compatible_with_google(similar_app):
count = count + 1
similar_apps = SimilarApp.objects.filter(similar_package=app).all()
total = total + len(similar_apps)
for similar_app in similar_apps:
if self.is_compatible_with_google(similar_app):
count = count + 1
percentage = count / total
if percentage not in result_dict:
result_count = 0
else:
result_count = result_dict[percentage]
result_dict[percentage] = result_count + 1
admin_file = open('comparison_per_app.csv', 'w')
admin_file.write('{};{}\n'.format('Percentage of Compatibility', 'Apps in this situation'))
for key in result_dict:
admin_file.write('{};{}\n'.format(key, result_dict[key]))
admin_file.close()
self.stdout.write(self.style.SUCCESS('Finished'.format()))
@staticmethod
def is_compatible_with_google(similar_app):
count = GoogleSimilarApp.objects.filter(
source_package=similar_app.source_package,
similar_package=similar_app.similar_package
).count()
if count > 0:
return True
count = GoogleSimilarApp.objects.filter(
source_package=similar_app.similar_package,
similar_package=similar_app.source_package
).count()
if count > 0:
return True
return False
@staticmethod
def get_my_similar():
apps = SimilarApp.objects.order_by().values_list('source_package', flat=True).distinct()
similar_apps = SimilarApp.objects.order_by().values_list('similar_package', flat=True).distinct()
app_set = set(apps)
similar_set = set(similar_apps)
merged_set = reduce(or_, [app_set, similar_set])
return merged_set
|
<commit_before><commit_msg>Create command to compare my similar with google similar per app<commit_after>
|
import logging.config
from operator import or_
from django.core.management.base import BaseCommand
from crawler.models import *
logger = logging.getLogger('crawler.command')
class Command(BaseCommand):
help = 'Generate comparison between google similar app and ours per app'
def handle(self, *args, **options):
apps = self.get_my_similar()
result_dict = dict()
for app in apps:
count = 0
similar_apps = SimilarApp.objects.filter(source_package=app).all()
total = len(similar_apps)
for similar_app in similar_apps:
if self.is_compatible_with_google(similar_app):
count = count + 1
similar_apps = SimilarApp.objects.filter(similar_package=app).all()
total = total + len(similar_apps)
for similar_app in similar_apps:
if self.is_compatible_with_google(similar_app):
count = count + 1
percentage = count / total
if percentage not in result_dict:
result_count = 0
else:
result_count = result_dict[percentage]
result_dict[percentage] = result_count + 1
admin_file = open('comparison_per_app.csv', 'w')
admin_file.write('{};{}\n'.format('Percentage of Compatibility', 'Apps in this situation'))
for key in result_dict:
admin_file.write('{};{}\n'.format(key, result_dict[key]))
admin_file.close()
self.stdout.write(self.style.SUCCESS('Finished'.format()))
@staticmethod
def is_compatible_with_google(similar_app):
count = GoogleSimilarApp.objects.filter(
source_package=similar_app.source_package,
similar_package=similar_app.similar_package
).count()
if count > 0:
return True
count = GoogleSimilarApp.objects.filter(
source_package=similar_app.similar_package,
similar_package=similar_app.source_package
).count()
if count > 0:
return True
return False
@staticmethod
def get_my_similar():
apps = SimilarApp.objects.order_by().values_list('source_package', flat=True).distinct()
similar_apps = SimilarApp.objects.order_by().values_list('similar_package', flat=True).distinct()
app_set = set(apps)
similar_set = set(similar_apps)
merged_set = reduce(or_, [app_set, similar_set])
return merged_set
|
Create command to compare my similar with google similar per app
import logging.config
from operator import or_
from django.core.management.base import BaseCommand
from crawler.models import *
logger = logging.getLogger('crawler.command')
class Command(BaseCommand):
help = 'Generate comparison between google similar app and ours per app'
def handle(self, *args, **options):
apps = self.get_my_similar()
result_dict = dict()
for app in apps:
count = 0
similar_apps = SimilarApp.objects.filter(source_package=app).all()
total = len(similar_apps)
for similar_app in similar_apps:
if self.is_compatible_with_google(similar_app):
count = count + 1
similar_apps = SimilarApp.objects.filter(similar_package=app).all()
total = total + len(similar_apps)
for similar_app in similar_apps:
if self.is_compatible_with_google(similar_app):
count = count + 1
percentage = count / total
if percentage not in result_dict:
result_count = 0
else:
result_count = result_dict[percentage]
result_dict[percentage] = result_count + 1
admin_file = open('comparison_per_app.csv', 'w')
admin_file.write('{};{}\n'.format('Percentage of Compatibility', 'Apps in this situation'))
for key in result_dict:
admin_file.write('{};{}\n'.format(key, result_dict[key]))
admin_file.close()
self.stdout.write(self.style.SUCCESS('Finished'.format()))
@staticmethod
def is_compatible_with_google(similar_app):
count = GoogleSimilarApp.objects.filter(
source_package=similar_app.source_package,
similar_package=similar_app.similar_package
).count()
if count > 0:
return True
count = GoogleSimilarApp.objects.filter(
source_package=similar_app.similar_package,
similar_package=similar_app.source_package
).count()
if count > 0:
return True
return False
@staticmethod
def get_my_similar():
apps = SimilarApp.objects.order_by().values_list('source_package', flat=True).distinct()
similar_apps = SimilarApp.objects.order_by().values_list('similar_package', flat=True).distinct()
app_set = set(apps)
similar_set = set(similar_apps)
merged_set = reduce(or_, [app_set, similar_set])
return merged_set
|
<commit_before><commit_msg>Create command to compare my similar with google similar per app<commit_after>import logging.config
from operator import or_
from django.core.management.base import BaseCommand
from crawler.models import *
logger = logging.getLogger('crawler.command')
class Command(BaseCommand):
help = 'Generate comparison between google similar app and ours per app'
def handle(self, *args, **options):
apps = self.get_my_similar()
result_dict = dict()
for app in apps:
count = 0
similar_apps = SimilarApp.objects.filter(source_package=app).all()
total = len(similar_apps)
for similar_app in similar_apps:
if self.is_compatible_with_google(similar_app):
count = count + 1
similar_apps = SimilarApp.objects.filter(similar_package=app).all()
total = total + len(similar_apps)
for similar_app in similar_apps:
if self.is_compatible_with_google(similar_app):
count = count + 1
percentage = count / total
if percentage not in result_dict:
result_count = 0
else:
result_count = result_dict[percentage]
result_dict[percentage] = result_count + 1
admin_file = open('comparison_per_app.csv', 'w')
admin_file.write('{};{}\n'.format('Percentage of Compatibility', 'Apps in this situation'))
for key in result_dict:
admin_file.write('{};{}\n'.format(key, result_dict[key]))
admin_file.close()
self.stdout.write(self.style.SUCCESS('Finished'.format()))
@staticmethod
def is_compatible_with_google(similar_app):
count = GoogleSimilarApp.objects.filter(
source_package=similar_app.source_package,
similar_package=similar_app.similar_package
).count()
if count > 0:
return True
count = GoogleSimilarApp.objects.filter(
source_package=similar_app.similar_package,
similar_package=similar_app.source_package
).count()
if count > 0:
return True
return False
@staticmethod
def get_my_similar():
apps = SimilarApp.objects.order_by().values_list('source_package', flat=True).distinct()
similar_apps = SimilarApp.objects.order_by().values_list('similar_package', flat=True).distinct()
app_set = set(apps)
similar_set = set(similar_apps)
merged_set = reduce(or_, [app_set, similar_set])
return merged_set
|
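Two details of the command above are worth noting: reduce is only a builtin on Python 2 (on Python 3 it would need from functools import reduce, unless the star import happens to provide it), and count / total can raise ZeroDivisionError for an app with no similar entries, as well as being integer division on Python 2 without from __future__ import division. A hedged sketch of both spots, assuming the same SimilarApp model import used by the command:

# Sketch: Python-3-friendly variants of two spots in the command above.
def get_my_similar():
    apps = SimilarApp.objects.order_by().values_list('source_package', flat=True).distinct()
    similar_apps = SimilarApp.objects.order_by().values_list('similar_package', flat=True).distinct()
    return set(apps) | set(similar_apps)  # set union replaces reduce(or_, ...)

def percentage_compatible(count, total):
    # guards the count / total step in handle() and keeps it a float
    return float(count) / total if total else 0.0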
|
c81fcc0a3604c59cbebcbc9618129b1f6b6007c7
|
example-scripts/voctolight/voctolight_async_io.py
|
example-scripts/voctolight/voctolight_async_io.py
|
import asyncio
from enum import Enum
class Connection(object):
def __init__(self, interpreter):
self.interpreter = interpreter
self.loop = asyncio.get_event_loop()
def __del__(self):
self.loop.close()
def schedule(self, message):
self.loop.create_task(self.connection_future(message, self.interpreter.handler))
def set_host(self, host, port = '9999'):
self.host = host
self.port = port
### FIXME This logic is wrong. we must send requests, and independently wait for
### answers. Otherwise we will never receive answers to requests that we haven't
### asked for.
@asyncio.coroutine
def connection_future(connection, message, handler):
reader, writer = yield from asyncio.open_connection(connection.host,
connection.port,
loop=connection.loop)
print('Sent: %r' % message)
writer.write(message.encode())
writer.write('\n'.encode())
data = yield from reader.readline()
handler(message, data.decode().rstrip('\n'))
writer.close()
### FIXME Duplicate from videomix.py
class CompositeModes(Enum):
fullscreen = 0
side_by_side_equal = 1
side_by_side_preview = 2
picture_in_picture = 3
class Interpreter(object):
def __init__(self, actor):
self.actor = actor
self.a_or_b = False
self.composite_mode = CompositeModes.fullscreen
actor.reset_led()
def compute_state(self):
if self.composite_mode == CompositeModes.fullscreen:
actor.enable_tally(self.a_or_b and self.primary)
else:
actor.enable_tally(self.a_or_b)
def handler(self, message, response):
print("got " + response + " for " + message)
words = response.split()
signal = words[0]
args = words[1:]
self.__getattribute__("handle_"+signal)(args)
interpreter.compute_state()
def handle_video_status(self, cams):
### FIXME DO NOT HARDCODE CAM NAME, READ FROM CONFIG!
if "cam2" in cams:
self.a_or_b = True
else:
self.a_or_b = False
self.primary = (cams[0] == "cam2")
def handle_composite_mode(self, mode):
self.composite_mode = mode
class FakeLedActor:
def __init__(self):
pass
def reset_led(self):
print("LED has been reset to off")
def enable_tally(self, enable):
if enable == True:
print("tally on!")
else:
print("tally off!")
if __name__ == "__main__":
actor = FakeLedActor()
interpreter = Interpreter(actor)
conn = Connection(interpreter)
conn.set_host("10.73.23.3")
conn.schedule("get_video")
conn.schedule("get_composite_mode")
conn.loop.run_forever()
conn.wait_closed()
|
Add prelim. async io version
|
Add prelim. async io version
|
Python
|
mit
|
voc/voctomix,voc/voctomix,h01ger/voctomix,h01ger/voctomix
|
Add prelim. async io version
|
import asyncio
from enum import Enum
class Connection(object):
def __init__(self, interpreter):
self.interpreter = interpreter
self.loop = asyncio.get_event_loop()
def __del__(self):
self.loop.close()
def schedule(self, message):
self.loop.create_task(self.connection_future(message, self.interpreter.handler))
def set_host(self, host, port = '9999'):
self.host = host
self.port = port
### FIXME This logic is wrong. we must send requests, and independently wait for
### answers. Otherwise we will never receive answers to requests that we haven't
### asked for.
@asyncio.coroutine
def connection_future(connection, message, handler):
reader, writer = yield from asyncio.open_connection(connection.host,
connection.port,
loop=connection.loop)
print('Sent: %r' % message)
writer.write(message.encode())
writer.write('\n'.encode())
data = yield from reader.readline()
handler(message, data.decode().rstrip('\n'))
writer.close()
### FIXME Duplicate from videomix.py
class CompositeModes(Enum):
fullscreen = 0
side_by_side_equal = 1
side_by_side_preview = 2
picture_in_picture = 3
class Interpreter(object):
def __init__(self, actor):
self.actor = actor
self.a_or_b = False
self.composite_mode = CompositeModes.fullscreen
actor.reset_led()
def compute_state(self):
if self.composite_mode == CompositeModes.fullscreen:
actor.enable_tally(self.a_or_b and self.primary)
else:
actor.enable_tally(self.a_or_b)
def handler(self, message, response):
print("got " + response + " for " + message)
words = response.split()
signal = words[0]
args = words[1:]
self.__getattribute__("handle_"+signal)(args)
interpreter.compute_state()
def handle_video_status(self, cams):
### FIXME DO NOT HARDCODE CAM NAME, READ FROM CONFIG!
if "cam2" in cams:
self.a_or_b = True
else:
self.a_or_b = False
self.primary = (cams[0] == "cam2")
def handle_composite_mode(self, mode):
self.composite_mode = mode
class FakeLedActor:
def __init__(self):
pass
def reset_led(self):
print("LED has been reset to off")
def enable_tally(self, enable):
if enable == True:
print("tally on!")
else:
print("tally off!")
if __name__ == "__main__":
actor = FakeLedActor()
interpreter = Interpreter(actor)
conn = Connection(interpreter)
conn.set_host("10.73.23.3")
conn.schedule("get_video")
conn.schedule("get_composite_mode")
conn.loop.run_forever()
conn.wait_closed()
|
<commit_before><commit_msg>Add prelim. async io version<commit_after>
|
import asyncio
from enum import Enum
class Connection(object):
def __init__(self, interpreter):
self.interpreter = interpreter
self.loop = asyncio.get_event_loop()
def __del__(self):
self.loop.close()
def schedule(self, message):
self.loop.create_task(self.connection_future(message, self.interpreter.handler))
def set_host(self, host, port = '9999'):
self.host = host
self.port = port
### FIXME This logic is wrong. we must send requests, and independently wait for
### answers. Otherwise we will never receive answers to requests that we haven't
### asked for.
@asyncio.coroutine
def connection_future(connection, message, handler):
reader, writer = yield from asyncio.open_connection(connection.host,
connection.port,
loop=connection.loop)
print('Sent: %r' % message)
writer.write(message.encode())
writer.write('\n'.encode())
data = yield from reader.readline()
handler(message, data.decode().rstrip('\n'))
writer.close()
### FIXME Duplicate from videomix.py
class CompositeModes(Enum):
fullscreen = 0
side_by_side_equal = 1
side_by_side_preview = 2
picture_in_picture = 3
class Interpreter(object):
def __init__(self, actor):
self.actor = actor
self.a_or_b = False
self.composite_mode = CompositeModes.fullscreen
actor.reset_led()
def compute_state(self):
if self.composite_mode == CompositeModes.fullscreen:
actor.enable_tally(self.a_or_b and self.primary)
else:
actor.enable_tally(self.a_or_b)
def handler(self, message, response):
print("got " + response + " for " + message)
words = response.split()
signal = words[0]
args = words[1:]
self.__getattribute__("handle_"+signal)(args)
interpreter.compute_state()
def handle_video_status(self, cams):
### FIXME DO NOT HARDCODE CAM NAME, READ FROM CONFIG!
if "cam2" in cams:
self.a_or_b = True
else:
self.a_or_b = False
self.primary = (cams[0] == "cam2")
def handle_composite_mode(self, mode):
self.composite_mode = mode
class FakeLedActor:
def __init__(self):
pass
def reset_led(self):
print("LED has been reset to off")
def enable_tally(self, enable):
if enable == True:
print("tally on!")
else:
print("tally off!")
if __name__ == "__main__":
actor = FakeLedActor()
interpreter = Interpreter(actor)
conn = Connection(interpreter)
conn.set_host("10.73.23.3")
conn.schedule("get_video")
conn.schedule("get_composite_mode")
conn.loop.run_forever()
conn.wait_closed()
|
Add prelim. async io version
import asyncio
from enum import Enum
class Connection(object):
def __init__(self, interpreter):
self.interpreter = interpreter
self.loop = asyncio.get_event_loop()
def __del__(self):
self.loop.close()
def schedule(self, message):
self.loop.create_task(self.connection_future(message, self.interpreter.handler))
def set_host(self, host, port = '9999'):
self.host = host
self.port = port
### FIXME This logic is wrong. we must send requests, and independently wait for
### answers. Otherwise we will never receive answers to requests that we haven't
### asked for.
@asyncio.coroutine
def connection_future(connection, message, handler):
reader, writer = yield from asyncio.open_connection(connection.host,
connection.port,
loop=connection.loop)
print('Sent: %r' % message)
writer.write(message.encode())
writer.write('\n'.encode())
data = yield from reader.readline()
handler(message, data.decode().rstrip('\n'))
writer.close()
### FIXME Duplicate from videomix.py
class CompositeModes(Enum):
fullscreen = 0
side_by_side_equal = 1
side_by_side_preview = 2
picture_in_picture = 3
class Interpreter(object):
def __init__(self, actor):
self.actor = actor
self.a_or_b = False
self.composite_mode = CompositeModes.fullscreen
actor.reset_led()
def compute_state(self):
if self.composite_mode == CompositeModes.fullscreen:
actor.enable_tally(self.a_or_b and self.primary)
else:
actor.enable_tally(self.a_or_b)
def handler(self, message, response):
print("got " + response + " for " + message)
words = response.split()
signal = words[0]
args = words[1:]
self.__getattribute__("handle_"+signal)(args)
interpreter.compute_state()
def handle_video_status(self, cams):
### FIXME DO NOT HARDCODE CAM NAME, READ FROM CONFIG!
if "cam2" in cams:
self.a_or_b = True
else:
self.a_or_b = False
self.primary = (cams[0] == "cam2")
def handle_composite_mode(self, mode):
self.composite_mode = mode
class FakeLedActor:
def __init__(self):
pass
def reset_led(self):
print("LED has been reset to off")
def enable_tally(self, enable):
if enable == True:
print("tally on!")
else:
print("tally off!")
if __name__ == "__main__":
actor = FakeLedActor()
interpreter = Interpreter(actor)
conn = Connection(interpreter)
conn.set_host("10.73.23.3")
conn.schedule("get_video")
conn.schedule("get_composite_mode")
conn.loop.run_forever()
conn.wait_closed()
|
<commit_before><commit_msg>Add prelim. async io version<commit_after>import asyncio
from enum import Enum
class Connection(object):
def __init__(self, interpreter):
self.interpreter = interpreter
self.loop = asyncio.get_event_loop()
def __del__(self):
self.loop.close()
def schedule(self, message):
self.loop.create_task(self.connection_future(message, self.interpreter.handler))
def set_host(self, host, port = '9999'):
self.host = host
self.port = port
### FIXME This logic is wrong. we must send requests, and independently wait for
### answers. Otherwise we will never receive answers to requests that we haven't
### asked for.
@asyncio.coroutine
def connection_future(connection, message, handler):
reader, writer = yield from asyncio.open_connection(connection.host,
connection.port,
loop=connection.loop)
print('Sent: %r' % message)
writer.write(message.encode())
writer.write('\n'.encode())
data = yield from reader.readline()
handler(message, data.decode().rstrip('\n'))
writer.close()
### FIXME Duplicate from videomix.py
class CompositeModes(Enum):
fullscreen = 0
side_by_side_equal = 1
side_by_side_preview = 2
picture_in_picture = 3
class Interpreter(object):
def __init__(self, actor):
self.actor = actor
self.a_or_b = False
self.composite_mode = CompositeModes.fullscreen
actor.reset_led()
def compute_state(self):
if self.composite_mode == CompositeModes.fullscreen:
actor.enable_tally(self.a_or_b and self.primary)
else:
actor.enable_tally(self.a_or_b)
def handler(self, message, response):
print("got " + response + " for " + message)
words = response.split()
signal = words[0]
args = words[1:]
self.__getattribute__("handle_"+signal)(args)
interpreter.compute_state()
def handle_video_status(self, cams):
### FIXME DO NOT HARDCODE CAM NAME, READ FROM CONFIG!
if "cam2" in cams:
self.a_or_b = True
else:
self.a_or_b = False
self.primary = (cams[0] == "cam2")
def handle_composite_mode(self, mode):
self.composite_mode = mode
class FakeLedActor:
def __init__(self):
pass
def reset_led(self):
print("LED has been reset to off")
def enable_tally(self, enable):
if enable == True:
print("tally on!")
else:
print("tally off!")
if __name__ == "__main__":
actor = FakeLedActor()
interpreter = Interpreter(actor)
conn = Connection(interpreter)
conn.set_host("10.73.23.3")
conn.schedule("get_video")
conn.schedule("get_composite_mode")
conn.loop.run_forever()
conn.wait_closed()
|
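Besides the FIXMEs already flagged in the script, compute_state() and handler() reach for the module-level actor and interpreter names rather than self.actor and self.compute_state(), so the Interpreter only works when built in __main__ exactly as shown. A hedged sketch of those two methods written against the instance:

# Sketch: same logic as compute_state()/handler() above, using instance attributes.
def compute_state(self):
    if self.composite_mode == CompositeModes.fullscreen:
        self.actor.enable_tally(self.a_or_b and self.primary)
    else:
        self.actor.enable_tally(self.a_or_b)

def handler(self, message, response):
    print("got " + response + " for " + message)
    words = response.split()
    signal, args = words[0], words[1:]
    getattr(self, "handle_" + signal)(args)
    self.compute_state()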
|
a570c3a079cf7a6d3aa96b898b54b3b92a923c77
|
homedisplay/info_transportation/migrations/0012_remove_line_only_show_next.py
|
homedisplay/info_transportation/migrations/0012_remove_line_only_show_next.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('info_transportation', '0011_line_icon'),
]
operations = [
migrations.RemoveField(
model_name='line',
name='only_show_next',
),
]
|
Remove unused field from Line model
|
Remove unused field from Line model
|
Python
|
bsd-3-clause
|
ojarva/home-info-display,ojarva/home-info-display,ojarva/home-info-display,ojarva/home-info-display
|
Remove unused field from Line model
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('info_transportation', '0011_line_icon'),
]
operations = [
migrations.RemoveField(
model_name='line',
name='only_show_next',
),
]
|
<commit_before><commit_msg>Remove unused field from Line model<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('info_transportation', '0011_line_icon'),
]
operations = [
migrations.RemoveField(
model_name='line',
name='only_show_next',
),
]
|
Remove unused field from Line model
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('info_transportation', '0011_line_icon'),
]
operations = [
migrations.RemoveField(
model_name='line',
name='only_show_next',
),
]
|
<commit_before><commit_msg>Remove unused field from Line model<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('info_transportation', '0011_line_icon'),
]
operations = [
migrations.RemoveField(
model_name='line',
name='only_show_next',
),
]
|
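The migration above simply drops the column; reversing it re-creates the field from the historical model state, but the discarded values are not recovered. A sketch of applying and rolling back the revision programmatically, assuming a configured Django project (equivalent to the usual manage.py migrate calls):

# Sketch: apply and reverse the migration above from Python.
from django.core.management import call_command

call_command('migrate', 'info_transportation', '0012')  # apply: drops only_show_next
call_command('migrate', 'info_transportation', '0011')  # reverse: re-adds the field, old data is gone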
|
bba016305982967610ee7bd8e08bd45a176dbc7e
|
alpha-vantage/alphavantage.py
|
alpha-vantage/alphavantage.py
|
try:
# Python 3 import
from urllib.request import urlopen
except ImportError:
# Python 2.* import
from urllib2 import urlopen
from simplejson import loads
class AlphaVantage:
"""
This class is in charge of creating a python interface between the Alpha
Vantage restful API and your python application
"""
def __init__(self, key=None):
self.key = key
def _data_request(self):
url = "http://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&apikey={}".format(self.key)
response = urlopen(url)
json_response = loads(response.read())
print(json_response)
if __name__ == '__main__':
av = AlphaVantage(key='486U')
av._data_request()
|
Create data request function to get json from the API
|
feat: Create data request function to get json from the API
|
Python
|
mit
|
RomelTorres/alpha_vantage
|
feat: Create data request function to get json from the API
|
try:
# Python 3 import
from urllib.request import urlopen
except ImportError:
# Python 2.* import
from urllib2 import urlopen
from simplejson import loads
class AlphaVantage:
"""
This class is in charge of creating a python interface between the Alpha
Vantage restful API and your python application
"""
def __init__(self, key=None):
self.key = key
def _data_request(self):
url = "http://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&apikey={}".format(self.key)
response = urlopen(url)
json_response = loads(response.read())
print(json_response)
if __name__ == '__main__':
av = AlphaVantage(key='486U')
av._data_request()
|
<commit_before><commit_msg>feat: Create data request function to get json from the API<commit_after>
|
try:
# Python 3 import
from urllib.request import urlopen
except ImportError:
# Python 2.* import
from urllib2 import urlopen
from simplejson import loads
class AlphaVantage:
"""
This class is in charge of creating a python interface between the Alpha
Vantage restful API and your python application
"""
def __init__(self, key=None):
self.key = key
def _data_request(self):
url = "http://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&apikey={}".format(self.key)
response = urlopen(url)
json_response = loads(response.read())
print(json_response)
if __name__ == '__main__':
av = AlphaVantage(key='486U')
av._data_request()
|
feat: Create data request function to get json from the API
try:
# Python 3 import
from urllib.request import urlopen
except ImportError:
# Python 2.* import
from urllib2 import urlopen
from simplejson import loads
class AlphaVantage:
"""
This class is in charge of creating a python interface between the Alpha
Vantage restful API and your python application
"""
def __init__(self, key=None):
self.key = key
def _data_request(self):
url = "http://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&apikey={}".format(self.key)
response = urlopen(url)
json_response = loads(response.read())
print(json_response)
if __name__ == '__main__':
av = AlphaVantage(key='486U')
av._data_request()
|
<commit_before><commit_msg>feat: Create data request function to get json from the API<commit_after>try:
# Python 3 import
from urllib.request import urlopen
except ImportError:
# Python 2.* import
from urllib2 import urlopen
from simplejson import loads
class AlphaVantage:
"""
This class is in charge of creating a python interface between the Alpha
Vantage restful API and your python application
"""
def __init__(self, key=None):
self.key = key
def _data_request(self):
url = "http://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&apikey={}".format(self.key)
response = urlopen(url)
json_response = loads(response.read())
print(json_response)
if __name__ == '__main__':
av = AlphaVantage(key='486U')
av._data_request()
|
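The URL in _data_request() above concatenates every query parameter by hand next to the API key (the key in __main__ is evidently a placeholder). A sketch of the same request with the query string built via urlencode, keeping the Python 2/3 import dance used by the original module; the parameter names mirror the ones already in the hard-coded URL:

# Sketch: same request as _data_request(), with an explicit query string.
try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode        # Python 2

def _data_request(self, symbol='MSFT', function='TIME_SERIES_INTRADAY', interval='1min'):
    params = urlencode({
        'function': function,
        'symbol': symbol,
        'interval': interval,
        'apikey': self.key,
    })
    response = urlopen('http://www.alphavantage.co/query?' + params)
    return loads(response.read())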
|
ae36703539f3e36a56db6a4f1c082ddbe25a28c4
|
regulations/templatetags/to_list.py
|
regulations/templatetags/to_list.py
|
"""Particularly when displaying bullets, it can be immensely helpful to
construct a list _within_ the template. Django's templates don't provide this
out of the box, so we need a new template tag. Suggestion from:
http://stackoverflow.com/a/34407158"""
from django import template
register = template.Library()
@register.assignment_tag
def to_list(*args):
return args
|
Add macro for creating a list
|
Add macro for creating a list
This also allows us to pass a list in as a variable to a template, which, in
turn, allows us to make things like a re-usable landing page search template
for ATF
|
Python
|
cc0-1.0
|
tadhg-ohiggins/regulations-site,eregs/regulations-site,tadhg-ohiggins/regulations-site,18F/regulations-site,tadhg-ohiggins/regulations-site,eregs/regulations-site,18F/regulations-site,eregs/regulations-site,tadhg-ohiggins/regulations-site,18F/regulations-site,18F/regulations-site,eregs/regulations-site
|
Add macro for creating a list
This also allows us to pass a list in as a variable to a template, which, in
turn, allows us to make things like a re-usable landing page search template
for ATF
|
"""Particularly when displaying bullets, it can be immensely helpful to
construct a list _within_ the template. Django's templates don't provide this
out of the box, so we need a new template tag. Suggestion from:
http://stackoverflow.com/a/34407158"""
from django import template
register = template.Library()
@register.assignment_tag
def to_list(*args):
return args
|
<commit_before><commit_msg>Add macro for creating a list
This also allows us to pass a list in as a variable to a template, which, in
turn, allows us to make things like a re-usable landing page search template
for ATF<commit_after>
|
"""Particularly when displaying bullets, it can be immensely helpful to
construct a list _within_ the template. Django's templates don't provide this
out of the box, so we need a new template tag. Suggestion from:
http://stackoverflow.com/a/34407158"""
from django import template
register = template.Library()
@register.assignment_tag
def to_list(*args):
return args
|
Add macro for creating a list
This also allows us to pass a list in as a variable to a template, which, in
turn, allows us to make things like a re-usable landing page search template
for ATF
"""Particularly when displaying bullets, it can be immensely helpful to
construct a list _within_ the template. Django's templates don't provide this
out of the box, so we need a new template tag. Suggestion from:
http://stackoverflow.com/a/34407158"""
from django import template
register = template.Library()
@register.assignment_tag
def to_list(*args):
return args
|
<commit_before><commit_msg>Add macro for creating a list
This also allows us to pass a list in as a variable to a template, which, in
turn, allows us to make things like a re-usable landing page search template
for ATF<commit_after>"""Particularly when displaying bullets, it can be immensely helpful to
construct a list _within_ the template. Django's templates don't provide this
out of the box, so we need a new template tag. Suggestion from:
http://stackoverflow.com/a/34407158"""
from django import template
register = template.Library()
@register.assignment_tag
def to_list(*args):
return args
|
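A small usage sketch for the tag above, assuming a configured Django environment and that the library loads under its file name, to_list; note that newer Django releases deprecate assignment_tag in favour of simple_tag, which supports the same "as var" form:

# Sketch: building a list inside a template with the to_list tag.
from django.template import Context, Template

tpl = Template(
    "{% load to_list %}"
    "{% to_list 'landing' 'search' 'help' as pages %}"
    "{% for page in pages %}<li>{{ page }}</li>{% endfor %}"
)
print(tpl.render(Context()))  # <li>landing</li><li>search</li><li>help</li>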
|
6932aa2079f0696100cfc304921e947d020f9e15
|
librisxl-tools/scripts/lddb_json_shape.py
|
librisxl-tools/scripts/lddb_json_shape.py
|
from __future__ import print_function, unicode_literals
import json
import sys
MAX_STATS = 20
def compute_shape(node, index):
if len(node) == 1 and '@id' in node:
count_value('@id', node['@id'], index)
return
rtype = node.get('@type')
shape = index.setdefault(rtype, {})
for k, vs in node.items():
if not isinstance(vs, list):
vs = [vs] # Ignoring dict/list difference for now
for v in vs:
if isinstance(v, dict):
subindex = shape.setdefault(k, {})
compute_shape(v, subindex)
else:
count_value(k, v, shape)
def count_value(k, v, shape):
stats = shape.setdefault(k, {})
if isinstance(stats, dict):
if len(stats) < MAX_STATS:
stats[v] = stats.setdefault(v, 0) + 1
else:
shape[k] = sum(stats.values()) + 1
else:
shape[k] = stats + 1
if __name__ == '__main__':
index = {}
for i, l in enumerate(sys.stdin):
if not l.rstrip():
continue
l = l.replace(b'\\\\"', b'\\"')
try:
data = json.loads(l)
thing = data['@graph'][1]
thing['meta'] = data['@graph'][0]
compute_shape(thing, index)
except ValueError as e:
print("ERROR at", i, "in data:", file=sys.stderr)
print(l, file=sys.stderr)
print(e, file=sys.stderr)
print(json.dumps(index, indent=2))
|
Add script for computing a JSON-LD shape from data
|
Add script for computing a JSON-LD shape from data
Measures frequency of used properties and classes and creates a shape
representing the shape of a selection of descriptions in LDDB.
|
Python
|
apache-2.0
|
libris/librisxl,libris/librisxl,libris/librisxl
|
Add script for computing a JSON-LD shape from data
Measures frequency of used properties and classes and creates a shape
representing the shape of a selection of descriptions in LDDB.
|
from __future__ import print_function, unicode_literals
import json
import sys
MAX_STATS = 20
def compute_shape(node, index):
if len(node) == 1 and '@id' in node:
count_value('@id', node['@id'], index)
return
rtype = node.get('@type')
shape = index.setdefault(rtype, {})
for k, vs in node.items():
if not isinstance(vs, list):
vs = [vs] # Ignoring dict/list difference for now
for v in vs:
if isinstance(v, dict):
subindex = shape.setdefault(k, {})
compute_shape(v, subindex)
else:
count_value(k, v, shape)
def count_value(k, v, shape):
stats = shape.setdefault(k, {})
if isinstance(stats, dict):
if len(stats) < MAX_STATS:
stats[v] = stats.setdefault(v, 0) + 1
else:
shape[k] = sum(stats.values()) + 1
else:
shape[k] = stats + 1
if __name__ == '__main__':
index = {}
for i, l in enumerate(sys.stdin):
if not l.rstrip():
continue
l = l.replace(b'\\\\"', b'\\"')
try:
data = json.loads(l)
thing = data['@graph'][1]
thing['meta'] = data['@graph'][0]
compute_shape(thing, index)
except ValueError as e:
print("ERROR at", i, "in data:", file=sys.stderr)
print(l, file=sys.stderr)
print(e, file=sys.stderr)
print(json.dumps(index, indent=2))
|
<commit_before><commit_msg>Add script for computing a JSON-LD shape from data
Measures frequency of used properties and classes and creates a shape
representing the shape of a selection of descriptions in LDDB.<commit_after>
|
from __future__ import print_function, unicode_literals
import json
import sys
MAX_STATS = 20
def compute_shape(node, index):
if len(node) == 1 and '@id' in node:
count_value('@id', node['@id'], index)
return
rtype = node.get('@type')
shape = index.setdefault(rtype, {})
for k, vs in node.items():
if not isinstance(vs, list):
vs = [vs] # Ignoring dict/list difference for now
for v in vs:
if isinstance(v, dict):
subindex = shape.setdefault(k, {})
compute_shape(v, subindex)
else:
count_value(k, v, shape)
def count_value(k, v, shape):
stats = shape.setdefault(k, {})
if isinstance(stats, dict):
if len(stats) < MAX_STATS:
stats[v] = stats.setdefault(v, 0) + 1
else:
shape[k] = sum(stats.values()) + 1
else:
shape[k] = stats + 1
if __name__ == '__main__':
index = {}
for i, l in enumerate(sys.stdin):
if not l.rstrip():
continue
l = l.replace(b'\\\\"', b'\\"')
try:
data = json.loads(l)
thing = data['@graph'][1]
thing['meta'] = data['@graph'][0]
compute_shape(thing, index)
except ValueError as e:
print("ERROR at", i, "in data:", file=sys.stderr)
print(l, file=sys.stderr)
print(e, file=sys.stderr)
print(json.dumps(index, indent=2))
|
Add script for computing a JSON-LD shape from data
Measures frequency of used properties and classes and creates a shape
representing the shape of a selection of descriptions in LDDB.from __future__ import print_function, unicode_literals
import json
import sys
MAX_STATS = 20
def compute_shape(node, index):
if len(node) == 1 and '@id' in node:
count_value('@id', node['@id'], index)
return
rtype = node.get('@type')
shape = index.setdefault(rtype, {})
for k, vs in node.items():
if not isinstance(vs, list):
vs = [vs] # Ignoring dict/list difference for now
for v in vs:
if isinstance(v, dict):
subindex = shape.setdefault(k, {})
compute_shape(v, subindex)
else:
count_value(k, v, shape)
def count_value(k, v, shape):
stats = shape.setdefault(k, {})
if isinstance(stats, dict):
if len(stats) < MAX_STATS:
stats[v] = stats.setdefault(v, 0) + 1
else:
shape[k] = sum(stats.values()) + 1
else:
shape[k] = stats + 1
if __name__ == '__main__':
index = {}
for i, l in enumerate(sys.stdin):
if not l.rstrip():
continue
l = l.replace(b'\\\\"', b'\\"')
try:
data = json.loads(l)
thing = data['@graph'][1]
thing['meta'] = data['@graph'][0]
compute_shape(thing, index)
except ValueError as e:
print("ERROR at", i, "in data:", file=sys.stderr)
print(l, file=sys.stderr)
print(e, file=sys.stderr)
print(json.dumps(index, indent=2))
|
<commit_before><commit_msg>Add script for computing a JSON-LD shape from data
Measures frequency of used properties and classes and creates a shape
representing the shape of a selection of descriptions in LDDB.<commit_after>from __future__ import print_function, unicode_literals
import json
import sys
MAX_STATS = 20
def compute_shape(node, index):
if len(node) == 1 and '@id' in node:
count_value('@id', node['@id'], index)
return
rtype = node.get('@type')
shape = index.setdefault(rtype, {})
for k, vs in node.items():
if not isinstance(vs, list):
vs = [vs] # Ignoring dict/list difference for now
for v in vs:
if isinstance(v, dict):
subindex = shape.setdefault(k, {})
compute_shape(v, subindex)
else:
count_value(k, v, shape)
def count_value(k, v, shape):
stats = shape.setdefault(k, {})
if isinstance(stats, dict):
if len(stats) < MAX_STATS:
stats[v] = stats.setdefault(v, 0) + 1
else:
shape[k] = sum(stats.values()) + 1
else:
shape[k] = stats + 1
if __name__ == '__main__':
index = {}
for i, l in enumerate(sys.stdin):
if not l.rstrip():
continue
l = l.replace(b'\\\\"', b'\\"')
try:
data = json.loads(l)
thing = data['@graph'][1]
thing['meta'] = data['@graph'][0]
compute_shape(thing, index)
except ValueError as e:
print("ERROR at", i, "in data:", file=sys.stderr)
print(l, file=sys.stderr)
print(e, file=sys.stderr)
print(json.dumps(index, indent=2))
|
|
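A hedged usage sketch for the shape script in the record above: it expects newline-delimited JSON on stdin, and the core routine can also be exercised directly if the lines below are appended to the script. The dump file name and the synthetic record are invented for illustration.

# Assumed shell invocation (file name is illustrative):
#   zcat lddb-dump.ndjson.gz | python lddb_json_shape.py > shape.json
# Feeding one synthetic record through compute_shape (relies on the functions above):
import json
index = {}
record = {"@graph": [{"@type": "Record"}, {"@type": "Instance", "title": "Example"}]}
thing = record["@graph"][1]
thing["meta"] = record["@graph"][0]
compute_shape(thing, index)
print(json.dumps(index, indent=2))
# -> {"Instance": {"@type": {"Instance": 1}, "title": {"Example": 1},
#                  "meta": {"Record": {"@type": {"Record": 1}}}}}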
93e18fe289235c1aa5f14dffbd14a29befc79d54
|
zerver/migrations/0371_invalid_characters_in_topics.py
|
zerver/migrations/0371_invalid_characters_in_topics.py
|
import unicodedata
from django.db import connection, migrations, models
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
def fix_topics(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Message = apps.get_model("zerver", "Message")
BATCH_SIZE = 10000
messages_updated = 0
lower_bound = 0
max_id = Message.objects.aggregate(models.Max("id"))["id__max"]
if max_id is None:
# Nothing to do if there are no messages.
return
print("")
while lower_bound < max_id:
print(f"Processed {lower_bound} / {max_id}")
with connection.cursor() as cursor:
cursor.execute(
"SELECT DISTINCT subject FROM zerver_message WHERE id > %s AND id <= %s",
[lower_bound, lower_bound + BATCH_SIZE],
)
results = cursor.fetchall()
topics = [r[0] for r in results]
for topic in topics:
fixed_topic = "".join(
[
character
for character in topic
if unicodedata.category(character) not in ["Cc", "Cs", "Cn"]
]
)
if fixed_topic == topic:
continue
# We don't want empty topics for stream messages, so we
# use (no topic) if the above clean-up leaves us with an empty string.
if fixed_topic == "":
fixed_topic = "(no topic)"
cursor.execute(
"UPDATE zerver_message SET subject = %s WHERE subject = %s AND id > %s AND id <= %s",
[fixed_topic, topic, lower_bound, lower_bound + BATCH_SIZE],
)
messages_updated += cursor.rowcount
lower_bound += BATCH_SIZE
if messages_updated > 0:
print(f"Fixed invalid topics for {messages_updated} messages.")
class Migration(migrations.Migration):
atomic = False
dependencies = [
("zerver", "0370_realm_enable_spectator_access"),
]
operations = [
migrations.RunPython(fix_topics, reverse_code=migrations.RunPython.noop),
]
|
Remove disallowed characters from topics.
|
migrations: Remove disallowed characters from topics.
Following b3c58f454f0dce8a88c696305945bac41f9786bc, we want to clean up
old topics that may contain the disallowed characters. The Message table
is large, so we go in batches, making sure we limit topic fetches and
UPDATE query to no more than BATCH_SIZE Message rows per query.
|
Python
|
apache-2.0
|
zulip/zulip,andersk/zulip,zulip/zulip,rht/zulip,eeshangarg/zulip,zulip/zulip,kou/zulip,kou/zulip,eeshangarg/zulip,eeshangarg/zulip,kou/zulip,rht/zulip,eeshangarg/zulip,zulip/zulip,kou/zulip,rht/zulip,andersk/zulip,rht/zulip,rht/zulip,andersk/zulip,eeshangarg/zulip,kou/zulip,andersk/zulip,kou/zulip,rht/zulip,eeshangarg/zulip,zulip/zulip,zulip/zulip,eeshangarg/zulip,rht/zulip,andersk/zulip,zulip/zulip,andersk/zulip,andersk/zulip,kou/zulip
|
migrations: Remove disallowed characters from topics.
Following b3c58f454f0dce8a88c696305945bac41f9786bc, we want to clean up
old topics that may contain the disallowed characters. The Message table
is large, so we go in batches, making sure we limit topic fetches and
UPDATE query to no more than BATCH_SIZE Message rows per query.
|
import unicodedata
from django.db import connection, migrations, models
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
def fix_topics(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Message = apps.get_model("zerver", "Message")
BATCH_SIZE = 10000
messages_updated = 0
lower_bound = 0
max_id = Message.objects.aggregate(models.Max("id"))["id__max"]
if max_id is None:
# Nothing to do if there are no messages.
return
print("")
while lower_bound < max_id:
print(f"Processed {lower_bound} / {max_id}")
with connection.cursor() as cursor:
cursor.execute(
"SELECT DISTINCT subject FROM zerver_message WHERE id > %s AND id <= %s",
[lower_bound, lower_bound + BATCH_SIZE],
)
results = cursor.fetchall()
topics = [r[0] for r in results]
for topic in topics:
fixed_topic = "".join(
[
character
for character in topic
if unicodedata.category(character) not in ["Cc", "Cs", "Cn"]
]
)
if fixed_topic == topic:
continue
# We don't want empty topics for stream messages, so we
# use (no topic) if the above clean-up leaves us with an empty string.
if fixed_topic == "":
fixed_topic = "(no topic)"
cursor.execute(
"UPDATE zerver_message SET subject = %s WHERE subject = %s AND id > %s AND id <= %s",
[fixed_topic, topic, lower_bound, lower_bound + BATCH_SIZE],
)
messages_updated += cursor.rowcount
lower_bound += BATCH_SIZE
if messages_updated > 0:
print(f"Fixed invalid topics for {messages_updated} messages.")
class Migration(migrations.Migration):
atomic = False
dependencies = [
("zerver", "0370_realm_enable_spectator_access"),
]
operations = [
migrations.RunPython(fix_topics, reverse_code=migrations.RunPython.noop),
]
|
<commit_before><commit_msg>migrations: Remove disallowed characters from topics.
Following b3c58f454f0dce8a88c696305945bac41f9786bc, we want to clean up
old topics that may contain the disallowed characters. The Message table
is large, so we go in batches, making sure we limit topic fetches and
UPDATE query to no more than BATCH_SIZE Message rows per query.<commit_after>
|
import unicodedata
from django.db import connection, migrations, models
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
def fix_topics(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Message = apps.get_model("zerver", "Message")
BATCH_SIZE = 10000
messages_updated = 0
lower_bound = 0
max_id = Message.objects.aggregate(models.Max("id"))["id__max"]
if max_id is None:
# Nothing to do if there are no messages.
return
print("")
while lower_bound < max_id:
print(f"Processed {lower_bound} / {max_id}")
with connection.cursor() as cursor:
cursor.execute(
"SELECT DISTINCT subject FROM zerver_message WHERE id > %s AND id <= %s",
[lower_bound, lower_bound + BATCH_SIZE],
)
results = cursor.fetchall()
topics = [r[0] for r in results]
for topic in topics:
fixed_topic = "".join(
[
character
for character in topic
if unicodedata.category(character) not in ["Cc", "Cs", "Cn"]
]
)
if fixed_topic == topic:
continue
# We don't want empty topics for stream messages, so we
# use (no topic) if the above clean-up leaves us with an empty string.
if fixed_topic == "":
fixed_topic = "(no topic)"
cursor.execute(
"UPDATE zerver_message SET subject = %s WHERE subject = %s AND id > %s AND id <= %s",
[fixed_topic, topic, lower_bound, lower_bound + BATCH_SIZE],
)
messages_updated += cursor.rowcount
lower_bound += BATCH_SIZE
if messages_updated > 0:
print(f"Fixed invalid topics for {messages_updated} messages.")
class Migration(migrations.Migration):
atomic = False
dependencies = [
("zerver", "0370_realm_enable_spectator_access"),
]
operations = [
migrations.RunPython(fix_topics, reverse_code=migrations.RunPython.noop),
]
|
migrations: Remove disallowed characters from topics.
Following b3c58f454f0dce8a88c696305945bac41f9786bc, we want to clean up
old topics that may contain the disallowed characters. The Message table
is large, so we go in batches, making sure we limit topic fetches and
UPDATE query to no more than BATCH_SIZE Message rows per query.import unicodedata
from django.db import connection, migrations, models
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
def fix_topics(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Message = apps.get_model("zerver", "Message")
BATCH_SIZE = 10000
messages_updated = 0
lower_bound = 0
max_id = Message.objects.aggregate(models.Max("id"))["id__max"]
if max_id is None:
# Nothing to do if there are no messages.
return
print("")
while lower_bound < max_id:
print(f"Processed {lower_bound} / {max_id}")
with connection.cursor() as cursor:
cursor.execute(
"SELECT DISTINCT subject FROM zerver_message WHERE id > %s AND id <= %s",
[lower_bound, lower_bound + BATCH_SIZE],
)
results = cursor.fetchall()
topics = [r[0] for r in results]
for topic in topics:
fixed_topic = "".join(
[
character
for character in topic
if unicodedata.category(character) not in ["Cc", "Cs", "Cn"]
]
)
if fixed_topic == topic:
continue
# We don't want empty topics for stream messages, so we
# use (no topic) if the above clean-up leaves us with an empty string.
if fixed_topic == "":
fixed_topic = "(no topic)"
cursor.execute(
"UPDATE zerver_message SET subject = %s WHERE subject = %s AND id > %s AND id <= %s",
[fixed_topic, topic, lower_bound, lower_bound + BATCH_SIZE],
)
messages_updated += cursor.rowcount
lower_bound += BATCH_SIZE
if messages_updated > 0:
print(f"Fixed invalid topics for {messages_updated} messages.")
class Migration(migrations.Migration):
atomic = False
dependencies = [
("zerver", "0370_realm_enable_spectator_access"),
]
operations = [
migrations.RunPython(fix_topics, reverse_code=migrations.RunPython.noop),
]
|
<commit_before><commit_msg>migrations: Remove disallowed characters from topics.
Following b3c58f454f0dce8a88c696305945bac41f9786bc, we want to clean up
old topics that may contain the disallowed characters. The Message table
is large, so we go in batches, making sure we limit topic fetches and
UPDATE query to no more than BATCH_SIZE Message rows per query.<commit_after>import unicodedata
from django.db import connection, migrations, models
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
def fix_topics(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Message = apps.get_model("zerver", "Message")
BATCH_SIZE = 10000
messages_updated = 0
lower_bound = 0
max_id = Message.objects.aggregate(models.Max("id"))["id__max"]
if max_id is None:
# Nothing to do if there are no messages.
return
print("")
while lower_bound < max_id:
print(f"Processed {lower_bound} / {max_id}")
with connection.cursor() as cursor:
cursor.execute(
"SELECT DISTINCT subject FROM zerver_message WHERE id > %s AND id <= %s",
[lower_bound, lower_bound + BATCH_SIZE],
)
results = cursor.fetchall()
topics = [r[0] for r in results]
for topic in topics:
fixed_topic = "".join(
[
character
for character in topic
if unicodedata.category(character) not in ["Cc", "Cs", "Cn"]
]
)
if fixed_topic == topic:
continue
# We don't want empty topics for stream messages, so we
# use (no topic) if the above clean-up leaves us with an empty string.
if fixed_topic == "":
fixed_topic = "(no topic)"
cursor.execute(
"UPDATE zerver_message SET subject = %s WHERE subject = %s AND id > %s AND id <= %s",
[fixed_topic, topic, lower_bound, lower_bound + BATCH_SIZE],
)
messages_updated += cursor.rowcount
lower_bound += BATCH_SIZE
if messages_updated > 0:
print(f"Fixed invalid topics for {messages_updated} messages.")
class Migration(migrations.Migration):
atomic = False
dependencies = [
("zerver", "0370_realm_enable_spectator_access"),
]
operations = [
migrations.RunPython(fix_topics, reverse_code=migrations.RunPython.noop),
]
|
|
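The filtering rule at the heart of the migration above can be exercised on its own; the sample topic below is invented and contains Cc control characters, which fall in the disallowed Unicode categories.

import unicodedata

topic = "weekly sync\x00\x07"   # invented sample containing Cc control characters
fixed_topic = "".join(
    character
    for character in topic
    if unicodedata.category(character) not in ["Cc", "Cs", "Cn"]
)
print(fixed_topic or "(no topic)")   # -> weekly sync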
b8259412ecca837345597209c1aa46872c45cbf3
|
samples/admin.py
|
samples/admin.py
|
from django.contrib import admin
from .models import (
Patient, PatientRegister,
FluVaccine,
Sample, CollectionType,
Symptom, ObservedSymptom
)
class FluVaccineInline(admin.StackedInline):
model = FluVaccine
extra = 1
class SampleInline(admin.StackedInline):
model = Sample
extra = 1
class ObservedSymptomInline(admin.StackedInline):
model = ObservedSymptom
extra = 2
class PatientRegisterAdmin(admin.ModelAdmin):
fieldsets = [
('Informações do Paciente', {'fields': ['patient']}),
('Dados institucionais', {'fields': ['id_gal_origin']}),
]
inlines = [
SampleInline,
FluVaccineInline,
ObservedSymptomInline,
]
admin.site.register(Patient)
admin.site.register(PatientRegister, PatientRegisterAdmin)
admin.site.register(Sample)
admin.site.register(CollectionType)
admin.site.register(Symptom)
|
from django.contrib import admin
from .models import (
Patient, PatientRegister,
FluVaccine,
Sample, CollectionType,
Symptom, ObservedSymptom
)
class FluVaccineInline(admin.StackedInline):
model = FluVaccine
extra = 1
class SampleInline(admin.StackedInline):
model = Sample
extra = 1
class ObservedSymptomInline(admin.StackedInline):
model = ObservedSymptom
extra = 1
class PatientRegisterAdmin(admin.ModelAdmin):
fieldsets = [
('Informações do Paciente', {'fields': ['patient']}),
('Dados institucionais', {'fields': ['id_gal_origin']}),
]
inlines = [
SampleInline,
FluVaccineInline,
ObservedSymptomInline,
]
admin.site.register(Patient)
admin.site.register(PatientRegister, PatientRegisterAdmin)
admin.site.register(Sample)
admin.site.register(CollectionType)
admin.site.register(Symptom)
|
Reduce number of extra fields for prettyness
|
:art: Reduce number of extra fields for prettyness
|
Python
|
mit
|
gems-uff/labsys,gems-uff/labsys,gems-uff/labsys,gcrsaldanha/fiocruz,gcrsaldanha/fiocruz
|
from django.contrib import admin
from .models import (
Patient, PatientRegister,
FluVaccine,
Sample, CollectionType,
Symptom, ObservedSymptom
)
class FluVaccineInline(admin.StackedInline):
model = FluVaccine
extra = 1
class SampleInline(admin.StackedInline):
model = Sample
extra = 1
class ObservedSymptomInline(admin.StackedInline):
model = ObservedSymptom
extra = 2
class PatientRegisterAdmin(admin.ModelAdmin):
fieldsets = [
('Informações do Paciente', {'fields': ['patient']}),
('Dados institucionais', {'fields': ['id_gal_origin']}),
]
inlines = [
SampleInline,
FluVaccineInline,
ObservedSymptomInline,
]
admin.site.register(Patient)
admin.site.register(PatientRegister, PatientRegisterAdmin)
admin.site.register(Sample)
admin.site.register(CollectionType)
admin.site.register(Symptom)
:art: Reduce number of extra fields for prettyness
|
from django.contrib import admin
from .models import (
Patient, PatientRegister,
FluVaccine,
Sample, CollectionType,
Symptom, ObservedSymptom
)
class FluVaccineInline(admin.StackedInline):
model = FluVaccine
extra = 1
class SampleInline(admin.StackedInline):
model = Sample
extra = 1
class ObservedSymptomInline(admin.StackedInline):
model = ObservedSymptom
extra = 1
class PatientRegisterAdmin(admin.ModelAdmin):
fieldsets = [
('Informações do Paciente', {'fields': ['patient']}),
('Dados institucionais', {'fields': ['id_gal_origin']}),
]
inlines = [
SampleInline,
FluVaccineInline,
ObservedSymptomInline,
]
admin.site.register(Patient)
admin.site.register(PatientRegister, PatientRegisterAdmin)
admin.site.register(Sample)
admin.site.register(CollectionType)
admin.site.register(Symptom)
|
<commit_before>from django.contrib import admin
from .models import (
Patient, PatientRegister,
FluVaccine,
Sample, CollectionType,
Symptom, ObservedSymptom
)
class FluVaccineInline(admin.StackedInline):
model = FluVaccine
extra = 1
class SampleInline(admin.StackedInline):
model = Sample
extra = 1
class ObservedSymptomInline(admin.StackedInline):
model = ObservedSymptom
extra = 2
class PatientRegisterAdmin(admin.ModelAdmin):
fieldsets = [
('Informações do Paciente', {'fields': ['patient']}),
('Dados institucionais', {'fields': ['id_gal_origin']}),
]
inlines = [
SampleInline,
FluVaccineInline,
ObservedSymptomInline,
]
admin.site.register(Patient)
admin.site.register(PatientRegister, PatientRegisterAdmin)
admin.site.register(Sample)
admin.site.register(CollectionType)
admin.site.register(Symptom)
<commit_msg>:art: Reduce number of extra fields for prettyness<commit_after>
|
from django.contrib import admin
from .models import (
Patient, PatientRegister,
FluVaccine,
Sample, CollectionType,
Symptom, ObservedSymptom
)
class FluVaccineInline(admin.StackedInline):
model = FluVaccine
extra = 1
class SampleInline(admin.StackedInline):
model = Sample
extra = 1
class ObservedSymptomInline(admin.StackedInline):
model = ObservedSymptom
extra = 1
class PatientRegisterAdmin(admin.ModelAdmin):
fieldsets = [
('Informações do Paciente', {'fields': ['patient']}),
('Dados institucionais', {'fields': ['id_gal_origin']}),
]
inlines = [
SampleInline,
FluVaccineInline,
ObservedSymptomInline,
]
admin.site.register(Patient)
admin.site.register(PatientRegister, PatientRegisterAdmin)
admin.site.register(Sample)
admin.site.register(CollectionType)
admin.site.register(Symptom)
|
from django.contrib import admin
from .models import (
Patient, PatientRegister,
FluVaccine,
Sample, CollectionType,
Symptom, ObservedSymptom
)
class FluVaccineInline(admin.StackedInline):
model = FluVaccine
extra = 1
class SampleInline(admin.StackedInline):
model = Sample
extra = 1
class ObservedSymptomInline(admin.StackedInline):
model = ObservedSymptom
extra = 2
class PatientRegisterAdmin(admin.ModelAdmin):
fieldsets = [
('Informações do Paciente', {'fields': ['patient']}),
('Dados institucionais', {'fields': ['id_gal_origin']}),
]
inlines = [
SampleInline,
FluVaccineInline,
ObservedSymptomInline,
]
admin.site.register(Patient)
admin.site.register(PatientRegister, PatientRegisterAdmin)
admin.site.register(Sample)
admin.site.register(CollectionType)
admin.site.register(Symptom)
:art: Reduce number of extra fields for prettynessfrom django.contrib import admin
from .models import (
Patient, PatientRegister,
FluVaccine,
Sample, CollectionType,
Symptom, ObservedSymptom
)
class FluVaccineInline(admin.StackedInline):
model = FluVaccine
extra = 1
class SampleInline(admin.StackedInline):
model = Sample
extra = 1
class ObservedSymptomInline(admin.StackedInline):
model = ObservedSymptom
extra = 1
class PatientRegisterAdmin(admin.ModelAdmin):
fieldsets = [
('Informações do Paciente', {'fields': ['patient']}),
('Dados institucionais', {'fields': ['id_gal_origin']}),
]
inlines = [
SampleInline,
FluVaccineInline,
ObservedSymptomInline,
]
admin.site.register(Patient)
admin.site.register(PatientRegister, PatientRegisterAdmin)
admin.site.register(Sample)
admin.site.register(CollectionType)
admin.site.register(Symptom)
|
<commit_before>from django.contrib import admin
from .models import (
Patient, PatientRegister,
FluVaccine,
Sample, CollectionType,
Symptom, ObservedSymptom
)
class FluVaccineInline(admin.StackedInline):
model = FluVaccine
extra = 1
class SampleInline(admin.StackedInline):
model = Sample
extra = 1
class ObservedSymptomInline(admin.StackedInline):
model = ObservedSymptom
extra = 2
class PatientRegisterAdmin(admin.ModelAdmin):
fieldsets = [
('Informações do Paciente', {'fields': ['patient']}),
('Dados institucionais', {'fields': ['id_gal_origin']}),
]
inlines = [
SampleInline,
FluVaccineInline,
ObservedSymptomInline,
]
admin.site.register(Patient)
admin.site.register(PatientRegister, PatientRegisterAdmin)
admin.site.register(Sample)
admin.site.register(CollectionType)
admin.site.register(Symptom)
<commit_msg>:art: Reduce number of extra fields for prettyness<commit_after>from django.contrib import admin
from .models import (
Patient, PatientRegister,
FluVaccine,
Sample, CollectionType,
Symptom, ObservedSymptom
)
class FluVaccineInline(admin.StackedInline):
model = FluVaccine
extra = 1
class SampleInline(admin.StackedInline):
model = Sample
extra = 1
class ObservedSymptomInline(admin.StackedInline):
model = ObservedSymptom
extra = 1
class PatientRegisterAdmin(admin.ModelAdmin):
fieldsets = [
('Informações do Paciente', {'fields': ['patient']}),
('Dados institucionais', {'fields': ['id_gal_origin']}),
]
inlines = [
SampleInline,
FluVaccineInline,
ObservedSymptomInline,
]
admin.site.register(Patient)
admin.site.register(PatientRegister, PatientRegisterAdmin)
admin.site.register(Sample)
admin.site.register(CollectionType)
admin.site.register(Symptom)
|
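The change in the record above only lowers the fixed number of blank inline forms. As a hedged aside, an inline can also compute that number per request via InlineModelAdmin.get_extra() in modern Django; the sketch below is an alternative pattern, not what the commit does, and assumes the imports from the admin module above.

class ObservedSymptomInline(admin.StackedInline):
    model = ObservedSymptom

    def get_extra(self, request, obj=None, **kwargs):
        # Show one blank symptom form when creating, none when editing an existing register.
        return 0 if obj else 1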
f85dfe27437bd5b9931dc01b9da8329b1102de4a
|
create_tiles.py
|
create_tiles.py
|
import subprocess
src_files = '/g/data/rs0/scenes/ARG25_V0.0/2015-04/LS7_ETM_NBAR_P54_GANBAR01-002_089_081_20150425/scene01/*.tif'
src_vrt = 'LS7_ETM_NBAR_P54_GANBAR01-002_089_081_20150425.vrt'
subprocess.call(['gdalbuildvrt', src_vrt, src_files], shell=True).wait()
target_src = 'EPSG:4326'
reprojected_vrt = 'LS7_ETM_NBAR_P54_GANBAR01-002_089_081_20150425.{}.vrt'.format(target_src.lower().replace(':', ''))
target_pixel_res = "0.00025"
subprocess.call(['gdalwarp',
'-t', target_src,
'-of', 'VRT',
'-tr', target_pixel_res, target_pixel_res,
src_vrt, reprojected_vrt]).wait()
target_dir = 'tiles/'
pixel_size = '4000'
tile_index = 'tile_grid.shp'
output_format = 'NetCDF'
create_options = 'FORMAT=NC4'
subprocess.call(['gdal_retile.py', '-v', '-targetDir', target_dir,
'-ps', pixel_size, pixel_size,
'-tileIndex', tile_index,
'-of', output_format,
'-co', create_options,
reprojected_vrt]).wait()
cfa_format = 'NETCDF4'
nc4_aggregate_name = 'cfa_aggregate.nc'
input_files = '*.nc'
subprocess.call(['cfa', '-f', cfa_format, '-o', nc4_aggregate_name, input_files], shell=True).wait()
#Pixel resolution (Fraction of a degree)
#-tr 0.00025 0.00025
#Force to nest within the grid definition
#-tap
#Nearest neighbour vs convolution. Depends on whether discrete values
#-r resampling_method
|
Add a script to reproject and tile an input dataset
|
Add a script to reproject and tile an input dataset
Very simple at this stage, simply run a series of external gdal
commands from a python script.
|
Python
|
bsd-3-clause
|
omad/datacube-experiments
|
Add a script to reproject and tile an input dataset
Very simple at this stage, simply run a series of external gdal
commands from a python script.
|
import subprocess
src_files = '/g/data/rs0/scenes/ARG25_V0.0/2015-04/LS7_ETM_NBAR_P54_GANBAR01-002_089_081_20150425/scene01/*.tif'
src_vrt = 'LS7_ETM_NBAR_P54_GANBAR01-002_089_081_20150425.vrt'
subprocess.call(['gdalbuildvrt', src_vrt, src_files], shell=True).wait()
target_src = 'EPSG:4326'
reprojected_vrt = 'LS7_ETM_NBAR_P54_GANBAR01-002_089_081_20150425.{}.vrt'.format(target_src.lower().replace(':', ''))
target_pixel_res = "0.00025"
subprocess.call(['gdalwarp',
'-t', target_src,
'-of', 'VRT',
'-tr', target_pixel_res, target_pixel_res,
src_vrt, reprojected_vrt]).wait()
target_dir = 'tiles/'
pixel_size = '4000'
tile_index = 'tile_grid.shp'
output_format = 'NetCDF'
create_options = 'FORMAT=NC4'
subprocess.call(['gdal_retile.py', '-v', '-targetDir', target_dir,
'-ps', pixel_size, pixel_size,
'-tileIndex', tile_index,
'-of', output_format,
'-co', create_options,
reprojected_vrt]).wait()
cfa_format = 'NETCDF4'
nc4_aggregate_name = 'cfa_aggregate.nc'
input_files = '*.nc'
subprocess.call(['cfa', '-f', cfa_format, '-o', nc4_aggregate_name, input_files], shell=True).wait()
#Pixel resolution (Fraction of a degree)
#-tr 0.00025 0.00025
#Force to nest within the grid definition
#-tap
#Nearest neighbour vs convolution. Depends on whether discrete values
#-r resampling_method
|
<commit_before><commit_msg>Add a script to reproject and tile an input dataset
Very simple at this stage, simply run a series of external gdal
commands from a python script.<commit_after>
|
import subprocess
src_files = '/g/data/rs0/scenes/ARG25_V0.0/2015-04/LS7_ETM_NBAR_P54_GANBAR01-002_089_081_20150425/scene01/*.tif'
src_vrt = 'LS7_ETM_NBAR_P54_GANBAR01-002_089_081_20150425.vrt'
subprocess.call(['gdalbuildvrt', src_vrt, src_files], shell=True).wait()
target_src = 'EPSG:4326'
reprojected_vrt = 'LS7_ETM_NBAR_P54_GANBAR01-002_089_081_20150425.{}.vrt'.format(target_src.lower().replace(':', ''))
target_pixel_res = "0.00025"
subprocess.call(['gdalwarp',
'-t', target_src,
'-of', 'VRT',
'-tr', target_pixel_res, target_pixel_res,
src_vrt, reprojected_vrt]).wait()
target_dir = 'tiles/'
pixel_size = '4000'
tile_index = 'tile_grid.shp'
output_format = 'NetCDF'
create_options = 'FORMAT=NC4'
subprocess.call(['gdal_retile.py', '-v', '-targetDir', target_dir,
'-ps', pixel_size, pixel_size,
'-tileIndex', tile_index,
'-of', output_format,
'-co', create_options,
reprojected_vrt]).wait()
cfa_format = 'NETCDF4'
nc4_aggregate_name = 'cfa_aggregate.nc'
input_files = '*.nc'
subprocess.call(['cfa', '-f', cfa_format, '-o', nc4_aggregate_name, input_files], shell=True).wait()
#Pixel resolution (Fraction of a degree)
#-tr 0.00025 0.00025
#Force to nest within the grid definition
#-tap
#Nearest neighbour vs convolution. Depends on whether discrete values
#-r resampling_method
|
Add a script to reproject and tile an input dataset
Very simple at this stage, simply run a series of external gdal
commands from a python script.
import subprocess
src_files = '/g/data/rs0/scenes/ARG25_V0.0/2015-04/LS7_ETM_NBAR_P54_GANBAR01-002_089_081_20150425/scene01/*.tif'
src_vrt = 'LS7_ETM_NBAR_P54_GANBAR01-002_089_081_20150425.vrt'
subprocess.call(['gdalbuildvrt', src_vrt, src_files], shell=True).wait()
target_src = 'EPSG:4326'
reprojected_vrt = 'LS7_ETM_NBAR_P54_GANBAR01-002_089_081_20150425.{}.vrt'.format(target_src.lower().replace(':', ''))
target_pixel_res = "0.00025"
subprocess.call(['gdalwarp',
'-t', target_src,
'-of', 'VRT',
'-tr', target_pixel_res, target_pixel_res,
src_vrt, reprojected_vrt]).wait()
target_dir = 'tiles/'
pixel_size = '4000'
tile_index = 'tile_grid.shp'
output_format = 'NetCDF'
create_options = 'FORMAT=NC4'
subprocess.call(['gdal_retile.py', '-v', '-targetDir', target_dir,
'-ps', pixel_size, pixel_size,
'-tileIndex', tile_index,
'-of', output_format,
'-co', create_options,
reprojected_vrt]).wait()
cfa_format = 'NETCDF4'
nc4_aggregate_name = 'cfa_aggregate.nc'
input_files = '*.nc'
subprocess.call(['cfa', '-f', cfa_format, '-o', nc4_aggregate_name, input_files], shell=True).wait()
#Pixel resolution (Fraction of a degree)
#-tr 0.00025 0.00025
#Force to nest within the grid definition
#-tap
#Nearest neighbour vs convolution. Depends on whether discrete values
#-r resampling_method
|
<commit_before><commit_msg>Add a script to reproject and tile an input dataset
Very simple at this stage, simply run a series of external gdal
commands from a python script.<commit_after>
import subprocess
src_files = '/g/data/rs0/scenes/ARG25_V0.0/2015-04/LS7_ETM_NBAR_P54_GANBAR01-002_089_081_20150425/scene01/*.tif'
src_vrt = 'LS7_ETM_NBAR_P54_GANBAR01-002_089_081_20150425.vrt'
subprocess.call(['gdalbuildvrt', src_vrt, src_files], shell=True).wait()
target_src = 'EPSG:4326'
reprojected_vrt = 'LS7_ETM_NBAR_P54_GANBAR01-002_089_081_20150425.{}.vrt'.format(target_src.lower().replace(':', ''))
target_pixel_res = "0.00025"
subprocess.call(['gdalwarp',
'-t', target_src,
'-of', 'VRT',
'-tr', target_pixel_res, target_pixel_res,
src_vrt, reprojected_vrt]).wait()
target_dir = 'tiles/'
pixel_size = '4000'
tile_index = 'tile_grid.shp'
output_format = 'NetCDF'
create_options = 'FORMAT=NC4'
subprocess.call(['gdal_retile.py', '-v', '-targetDir', target_dir,
'-ps', pixel_size, pixel_size,
'-tileIndex', tile_index,
'-of', output_format,
'-co', create_options,
reprojected_vrt]).wait()
cfa_format = 'NETCDF4'
nc4_aggregate_name = 'cfa_aggregate.nc'
input_files = '*.nc'
subprocess.call(['cfa', '-f', cfa_format, '-o', nc4_aggregate_name, input_files], shell=True).wait()
#Pixel resolution (Fraction of a degree)
#-tr 0.00025 0.00025
#Force to nest within the grid definition
#-tap
#Nearest neighbour vs convolution. Depends on whether discrete values
#-r resampling_method
|
|
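One caveat when adapting the script in the record above: subprocess.call() already blocks and returns an integer exit code, so chaining .wait() onto it raises AttributeError, and wildcards passed in an argument list are not expanded unless a shell does it. A hedged sketch of one way to run the same kind of commands; the paths and file names are illustrative.

import glob
import subprocess

src_files = glob.glob('scene01/*.tif')                             # expand the glob in Python
subprocess.check_call(['gdalbuildvrt', 'scene.vrt'] + src_files)   # blocks, raises on non-zero exit

proc = subprocess.Popen(['gdalinfo', 'scene.vrt'])                 # .wait() exists only on Popen objects
proc.wait()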
3ce13307cfaca4ed0e069ed1d1f61f4afd2dca85
|
greatbigcrane/job_queue/management/commands/job_processor.py
|
greatbigcrane/job_queue/management/commands/job_processor.py
|
import zmq
import time
import json
from django.core.management.base import NoArgsCommand
addr = 'tcp://127.0.0.1:5555'
class Command(NoArgsCommand):
help = "Run the 0MQ based job processor. Accepts jobs from the job server and processes them."
def handle(self, **options):
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect(addr)
while True:
socket.send("GET")
job = socket.recv()
if job == "EMPTY":
time.sleep(1)
continue
job = json.loads(job)
command = command_map[job['command']]
del job['command']
command(**job)
# Create the actual commands here and keep the command_map below up to date
def bootstrap(project_id):
'''Run the bootstrap process inside the given project's base directory.'''
print("bootstrapping %s" % project_id)
command_map = {
'BOOTSTRAP': bootstrap,
}
|
Move the basic job processor into a django management command as well.
|
Move the basic job processor into a django management command as well.
|
Python
|
apache-2.0
|
pnomolos/greatbigcrane,pnomolos/greatbigcrane
|
Move the basic job processor into a django management command as well.
|
import zmq
import time
import json
from django.core.management.base import NoArgsCommand
addr = 'tcp://127.0.0.1:5555'
class Command(NoArgsCommand):
help = "Run the 0MQ based job processor. Accepts jobs from the job server and processes them."
def handle(self, **options):
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect(addr)
while True:
socket.send("GET")
job = socket.recv()
if job == "EMPTY":
time.sleep(1)
continue
job = json.loads(job)
command = command_map[job['command']]
del job['command']
command(**job)
# Create the actual commands here and keep the command_map below up to date
def bootstrap(project_id):
'''Run the bootstrap process inside the given project's base directory.'''
print("bootstrapping %s" % project_id)
command_map = {
'BOOTSTRAP': bootstrap,
}
|
<commit_before><commit_msg>Move the basic job processor into a django management command as well.<commit_after>
|
import zmq
import time
import json
from django.core.management.base import NoArgsCommand
addr = 'tcp://127.0.0.1:5555'
class Command(NoArgsCommand):
help = "Run the 0MQ based job processor. Accepts jobs from the job server and processes them."
def handle(self, **options):
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect(addr)
while True:
socket.send("GET")
job = socket.recv()
if job == "EMPTY":
time.sleep(1)
continue
job = json.loads(job)
command = command_map[job['command']]
del job['command']
command(**job)
# Create the actual commands here and keep the command_map below up to date
def bootstrap(project_id):
'''Run the bootstrap process inside the given project's base directory.'''
print("bootstrapping %s" % project_id)
command_map = {
'BOOTSTRAP': bootstrap,
}
|
Move the basic job processor into a django management command as well.import zmq
import time
import json
from django.core.management.base import NoArgsCommand
addr = 'tcp://127.0.0.1:5555'
class Command(NoArgsCommand):
help = "Run the 0MQ based job processor. Accepts jobs from the job server and processes them."
def handle(self, **options):
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect(addr)
while True:
socket.send("GET")
job = socket.recv()
if job == "EMPTY":
time.sleep(1)
continue
job = json.loads(job)
command = command_map[job['command']]
del job['command']
command(**job)
# Create the actual commands here and keep the command_map below up to date
def bootstrap(project_id):
'''Run the bootstrap process inside the given project's base directory.'''
print("bootstrapping %s" % project_id)
command_map = {
'BOOTSTRAP': bootstrap,
}
|
<commit_before><commit_msg>Move the basic job processor into a django management command as well.<commit_after>import zmq
import time
import json
from django.core.management.base import NoArgsCommand
addr = 'tcp://127.0.0.1:5555'
class Command(NoArgsCommand):
help = "Run the 0MQ based job processor. Accepts jobs from the job server and processes them."
def handle(self, **options):
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect(addr)
while True:
socket.send("GET")
job = socket.recv()
if job == "EMPTY":
time.sleep(1)
continue
job = json.loads(job)
command = command_map[job['command']]
del job['command']
command(**job)
# Create the actual commands here and keep the command_map below up to date
def bootstrap(project_id):
'''Run the bootstrap process inside the given project's base directory.'''
print("bootstrapping %s" % project_id)
command_map = {
'BOOTSTRAP': bootstrap,
}
|
|
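The management command in the record above is only the REQ side of the conversation; for local experimentation something has to answer on tcp://127.0.0.1:5555. A minimal hedged sketch of a matching REP end with an invented queue entry follows; the project's real job server may look quite different.

import json
import zmq

context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind('tcp://127.0.0.1:5555')

pending = [{'command': 'BOOTSTRAP', 'project_id': 1}]   # invented example job
while True:
    socket.recv_string()                                # the processor sends "GET"
    if pending:
        socket.send_string(json.dumps(pending.pop(0)))
    else:
        socket.send_string("EMPTY")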
1b6b9d53d851918f7b12db96d4029e1ab0c0f21e
|
osf/migrations/0041_auto_20170308_1932.py
|
osf/migrations/0041_auto_20170308_1932.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-09 01:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0040_ensure_root_field'),
]
operations = [
migrations.AlterIndexTogether(
name='abstractnode',
index_together=set([('is_public', 'is_deleted', 'type')]),
),
]
|
Add migration for abstractnode index
|
Add migration for abstractnode index
|
Python
|
apache-2.0
|
mfraezz/osf.io,Nesiehr/osf.io,HalcyonChimera/osf.io,hmoco/osf.io,chennan47/osf.io,acshi/osf.io,mattclark/osf.io,saradbowman/osf.io,crcresearch/osf.io,crcresearch/osf.io,chrisseto/osf.io,adlius/osf.io,sloria/osf.io,felliott/osf.io,mfraezz/osf.io,aaxelb/osf.io,acshi/osf.io,CenterForOpenScience/osf.io,Nesiehr/osf.io,caneruguz/osf.io,Johnetordoff/osf.io,hmoco/osf.io,caseyrollins/osf.io,hmoco/osf.io,HalcyonChimera/osf.io,chennan47/osf.io,aaxelb/osf.io,sloria/osf.io,adlius/osf.io,TomBaxter/osf.io,Johnetordoff/osf.io,leb2dg/osf.io,icereval/osf.io,adlius/osf.io,mattclark/osf.io,mattclark/osf.io,chrisseto/osf.io,erinspace/osf.io,TomBaxter/osf.io,caneruguz/osf.io,baylee-d/osf.io,CenterForOpenScience/osf.io,aaxelb/osf.io,felliott/osf.io,acshi/osf.io,acshi/osf.io,HalcyonChimera/osf.io,adlius/osf.io,caneruguz/osf.io,laurenrevere/osf.io,Johnetordoff/osf.io,cslzchen/osf.io,chrisseto/osf.io,Nesiehr/osf.io,CenterForOpenScience/osf.io,Nesiehr/osf.io,chrisseto/osf.io,binoculars/osf.io,pattisdr/osf.io,felliott/osf.io,binoculars/osf.io,Johnetordoff/osf.io,binoculars/osf.io,erinspace/osf.io,brianjgeiger/osf.io,brianjgeiger/osf.io,icereval/osf.io,caseyrollins/osf.io,laurenrevere/osf.io,cslzchen/osf.io,crcresearch/osf.io,cslzchen/osf.io,sloria/osf.io,erinspace/osf.io,cwisecarver/osf.io,mfraezz/osf.io,HalcyonChimera/osf.io,saradbowman/osf.io,pattisdr/osf.io,baylee-d/osf.io,icereval/osf.io,mfraezz/osf.io,cwisecarver/osf.io,leb2dg/osf.io,chennan47/osf.io,CenterForOpenScience/osf.io,TomBaxter/osf.io,laurenrevere/osf.io,felliott/osf.io,cslzchen/osf.io,caseyrollins/osf.io,brianjgeiger/osf.io,leb2dg/osf.io,hmoco/osf.io,cwisecarver/osf.io,brianjgeiger/osf.io,baylee-d/osf.io,aaxelb/osf.io,leb2dg/osf.io,pattisdr/osf.io,cwisecarver/osf.io,acshi/osf.io,caneruguz/osf.io
|
Add migration for abstractnode index
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-09 01:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0040_ensure_root_field'),
]
operations = [
migrations.AlterIndexTogether(
name='abstractnode',
index_together=set([('is_public', 'is_deleted', 'type')]),
),
]
|
<commit_before><commit_msg>Add migration for abstractnode index<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-09 01:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0040_ensure_root_field'),
]
operations = [
migrations.AlterIndexTogether(
name='abstractnode',
index_together=set([('is_public', 'is_deleted', 'type')]),
),
]
|
Add migration for abstractnode index# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-09 01:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0040_ensure_root_field'),
]
operations = [
migrations.AlterIndexTogether(
name='abstractnode',
index_together=set([('is_public', 'is_deleted', 'type')]),
),
]
|
<commit_before><commit_msg>Add migration for abstractnode index<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-09 01:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0040_ensure_root_field'),
]
operations = [
migrations.AlterIndexTogether(
name='abstractnode',
index_together=set([('is_public', 'is_deleted', 'type')]),
),
]
|
|
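For context, a composite index such as the one added above only pays off for queries that filter on the same leading columns; a hedged illustration of the kind of queryset it is presumably meant to serve, with invented field values:

# Hypothetical queryset; the AbstractNode import and the 'type' value are assumptions.
AbstractNode.objects.filter(is_public=True, is_deleted=False, type='osf.node')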
65de21cddbd215f8744122a4fa7f10a8ce5e5fa1
|
ideas/migrations/0001_initial.py
|
ideas/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-22 03:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Idea',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('description', models.TextField()),
('votes', models.IntegerField(default=0)),
],
),
]
|
Add new file for idea migrations
|
Add new file for idea migrations
|
Python
|
mit
|
neosergio/vote_hackatrix_backend
|
Add new file for idea migrations
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-22 03:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Idea',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('description', models.TextField()),
('votes', models.IntegerField(default=0)),
],
),
]
|
<commit_before><commit_msg>Add new file for idea migrations<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-22 03:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Idea',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('description', models.TextField()),
('votes', models.IntegerField(default=0)),
],
),
]
|
Add new file for idea migrations# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-22 03:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Idea',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('description', models.TextField()),
('votes', models.IntegerField(default=0)),
],
),
]
|
<commit_before><commit_msg>Add new file for idea migrations<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-22 03:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Idea',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('description', models.TextField()),
('votes', models.IntegerField(default=0)),
],
),
]
|
|
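Since the record above is an auto-generated initial migration, the model it encodes can be read back out of it; a sketch of the corresponding models.py, reconstructed from the field definitions in the migration:

from django.db import models

class Idea(models.Model):
    name = models.CharField(max_length=200, unique=True)
    description = models.TextField()
    votes = models.IntegerField(default=0)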
f7bcbafb0b1668c843474fd3a0b4dfd26f230d2a
|
tools/perf/benchmarks/blink_perf.py
|
tools/perf/benchmarks/blink_perf.py
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry import test
from telemetry.core import util
from measurements import blink_perf
class BlinkPerfAll(test.Test):
tag = 'all'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests')
return blink_perf.CreatePageSetFromPath(path)
class BlinkPerfAnimation(test.Test):
tag = 'animation'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests', 'Animation')
return blink_perf.CreatePageSetFromPath(path)
class BlinkPerfWebAnimations(test.Test):
tag = 'web_animations'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests', 'Animation')
return blink_perf.CreatePageSetFromPath(path)
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--enable-web-animations-css')
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry import test
from telemetry.core import util
from measurements import blink_perf
class BlinkPerfAll(test.Test):
tag = 'all'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests')
return blink_perf.CreatePageSetFromPath(path)
class BlinkPerfAnimation(test.Test):
tag = 'animation'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests', 'Animation')
return blink_perf.CreatePageSetFromPath(path)
class BlinkPerfWebAnimations(test.Test):
tag = 'web_animations'
test = blink_perf.BlinkPerfMeasurement
enabled = False # crbug.com/320042
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests', 'Animation')
return blink_perf.CreatePageSetFromPath(path)
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--enable-web-animations-css')
|
Disable web animations benchmark. It is crashing.
|
Disable web animations benchmark. It is crashing.
BUG=320042
NOTRY=True
TBR=dtu@chromium.org
Review URL: https://codereview.chromium.org/70233018
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@235452 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
Chilledheart/chromium,hgl888/chromium-crosswalk-efl,anirudhSK/chromium,Just-D/chromium-1,PeterWangIntel/chromium-crosswalk,anirudhSK/chromium,mohamed--abdel-maksoud/chromium.src,littlstar/chromium.src,krieger-od/nwjs_chromium.src,ChromiumWebApps/chromium,ChromiumWebApps/chromium,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,Fireblend/chromium-crosswalk,anirudhSK/chromium,axinging/chromium-crosswalk,ChromiumWebApps/chromium,Jonekee/chromium.src,ltilve/chromium,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,ondra-novak/chromium.src,bright-sparks/chromium-spacewalk,krieger-od/nwjs_chromium.src,dednal/chromium.src,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,Just-D/chromium-1,Just-D/chromium-1,littlstar/chromium.src,dushu1203/chromium.src,Jonekee/chromium.src,Jonekee/chromium.src,crosswalk-project/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,jaruba/chromium.src,anirudhSK/chromium,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,dednal/chromium.src,dednal/chromium.src,ondra-novak/chromium.src,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,Jonekee/chromium.src,dushu1203/chromium.src,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,Pluto-tv/chromium-crosswalk,dednal/chromium.src,ChromiumWebApps/chromium,patrickm/chromium.src,Chilledheart/chromium,crosswalk-project/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,TheTypoMaster/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,dednal/chromium.src,dednal/chromium.src,markYoungH/chromium.src,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,bright-sparks/chromium-spacewalk,Pluto-tv/chromium-crosswalk,jaruba/chromium.src,jaruba/chromium.src,anirudhSK/chromium,hgl888/chromium-crosswalk-efl,patrickm/chromium.src,anirudhSK/chromium,axinging/chromium-crosswalk,ltilve/chromium,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,markYoungH/chromium.src,patrickm/chromium.src,mohamed--abdel-maksoud/chromium.src,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,dushu1203/chromium.src,dednal/chromium.src,ondra-novak/chromium.src,anirudhSK/chromium,TheTypoMaster/chromium-crosswalk,dednal/chromium.src,dednal/chromium.src,fujunwei/chromium-crosswalk,Fireblend/chromium-crosswalk,dushu1203/chromium.src,dushu1203/chromium.src,ltilve/chromium,littlstar/chromium.src,fujunwei/chromium-crosswalk,markYoungH/chromium.src,ltilve/chromium,anirudhSK/chromium,M4sse/chromium.src,bright-sparks/chromium-spacewalk,anirudhSK/chromium,Chilledheart/chromium,patrickm/chromium.src,Pluto-tv/chromium-crosswalk,jaruba/chromium.src,ltilve/chromium,krieger-od/nwjs_chromium.src,Just-D/chromium-1,markYoungH/chromium.src,anirudhSK/chromium,anirudhSK/chromium,Pluto-tv/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,
krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,ondra-novak/chromium.src,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,ChromiumWebApps/chromium,M4sse/chromium.src,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,dednal/chromium.src,patrickm/chromium.src,mohamed--abdel-maksoud/chromium.src,krieger-od/nwjs_chromium.src,Just-D/chromium-1,ChromiumWebApps/chromium,Jonekee/chromium.src,Jonekee/chromium.src,hgl888/chromium-crosswalk,M4sse/chromium.src,Jonekee/chromium.src,dushu1203/chromium.src,littlstar/chromium.src,hgl888/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,axinging/chromium-crosswalk,krieger-od/nwjs_chromium.src,TheTypoMaster/chromium-crosswalk,littlstar/chromium.src,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,patrickm/chromium.src,littlstar/chromium.src,TheTypoMaster/chromium-crosswalk,Fireblend/chromium-crosswalk,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,jaruba/chromium.src,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,patrickm/chromium.src,M4sse/chromium.src,Chilledheart/chromium,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,Just-D/chromium-1,Jonekee/chromium.src,chuan9/chromium-crosswalk,Jonekee/chromium.src,hgl888/chromium-crosswalk,ChromiumWebApps/chromium,bright-sparks/chromium-spacewalk,Chilledheart/chromium,Chilledheart/chromium,fujunwei/chromium-crosswalk,ondra-novak/chromium.src,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,fujunwei/chromium-crosswalk,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,Jonekee/chromium.src,markYoungH/chromium.src,Fireblend/chromium-crosswalk,anirudhSK/chromium,patrickm/chromium.src,fujunwei/chromium-crosswalk,ltilve/chromium,hgl888/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,ltilve/chromium,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,markYoungH/chromium.src,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,Fireblend/chromium-crosswalk,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,ltilve/chromium,Chilledheart/chromium,ondra-novak/chromium.src,Just-D/chromium-1,jaruba/chromium.src,dednal/chromium.src,jaruba/chromium.src,ChromiumWebApps/chromium,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,markYoungH/chromium.src,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk,M4sse/chromium.src,Pluto-tv/chromium-crosswalk,fujunwei/chromium-crosswalk,markYoungH/chromium.src,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,chuan9/chromium-crosswalk,Just-D/chromium-1,ltilve/chromium,patrickm/chromium.src,ChromiumWebApps/chromium,dushu1203/chromium.src,krieger-od/nwjs_chromium.src
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry import test
from telemetry.core import util
from measurements import blink_perf
class BlinkPerfAll(test.Test):
tag = 'all'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests')
return blink_perf.CreatePageSetFromPath(path)
class BlinkPerfAnimation(test.Test):
tag = 'animation'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests', 'Animation')
return blink_perf.CreatePageSetFromPath(path)
class BlinkPerfWebAnimations(test.Test):
tag = 'web_animations'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests', 'Animation')
return blink_perf.CreatePageSetFromPath(path)
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--enable-web-animations-css')
Disable web animations benchmark. It is crashing.
BUG=320042
NOTRY=True
TBR=dtu@chromium.org
Review URL: https://codereview.chromium.org/70233018
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@235452 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry import test
from telemetry.core import util
from measurements import blink_perf
class BlinkPerfAll(test.Test):
tag = 'all'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests')
return blink_perf.CreatePageSetFromPath(path)
class BlinkPerfAnimation(test.Test):
tag = 'animation'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests', 'Animation')
return blink_perf.CreatePageSetFromPath(path)
class BlinkPerfWebAnimations(test.Test):
tag = 'web_animations'
test = blink_perf.BlinkPerfMeasurement
enabled = False # crbug.com/320042
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests', 'Animation')
return blink_perf.CreatePageSetFromPath(path)
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--enable-web-animations-css')
|
<commit_before># Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry import test
from telemetry.core import util
from measurements import blink_perf
class BlinkPerfAll(test.Test):
tag = 'all'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests')
return blink_perf.CreatePageSetFromPath(path)
class BlinkPerfAnimation(test.Test):
tag = 'animation'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests', 'Animation')
return blink_perf.CreatePageSetFromPath(path)
class BlinkPerfWebAnimations(test.Test):
tag = 'web_animations'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests', 'Animation')
return blink_perf.CreatePageSetFromPath(path)
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--enable-web-animations-css')
<commit_msg>Disable web animations benchmark. It is crashing.
BUG=320042
NOTRY=True
TBR=dtu@chromium.org
Review URL: https://codereview.chromium.org/70233018
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@235452 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry import test
from telemetry.core import util
from measurements import blink_perf
class BlinkPerfAll(test.Test):
tag = 'all'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests')
return blink_perf.CreatePageSetFromPath(path)
class BlinkPerfAnimation(test.Test):
tag = 'animation'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests', 'Animation')
return blink_perf.CreatePageSetFromPath(path)
class BlinkPerfWebAnimations(test.Test):
tag = 'web_animations'
test = blink_perf.BlinkPerfMeasurement
enabled = False # crbug.com/320042
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests', 'Animation')
return blink_perf.CreatePageSetFromPath(path)
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--enable-web-animations-css')
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry import test
from telemetry.core import util
from measurements import blink_perf
class BlinkPerfAll(test.Test):
tag = 'all'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests')
return blink_perf.CreatePageSetFromPath(path)
class BlinkPerfAnimation(test.Test):
tag = 'animation'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests', 'Animation')
return blink_perf.CreatePageSetFromPath(path)
class BlinkPerfWebAnimations(test.Test):
tag = 'web_animations'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests', 'Animation')
return blink_perf.CreatePageSetFromPath(path)
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--enable-web-animations-css')
Disable web animations benchmark. It is crashing.
BUG=320042
NOTRY=True
TBR=dtu@chromium.org
Review URL: https://codereview.chromium.org/70233018
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@235452 0039d316-1c4b-4281-b951-d872f2087c98# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry import test
from telemetry.core import util
from measurements import blink_perf
class BlinkPerfAll(test.Test):
tag = 'all'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests')
return blink_perf.CreatePageSetFromPath(path)
class BlinkPerfAnimation(test.Test):
tag = 'animation'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests', 'Animation')
return blink_perf.CreatePageSetFromPath(path)
class BlinkPerfWebAnimations(test.Test):
tag = 'web_animations'
test = blink_perf.BlinkPerfMeasurement
enabled = False # crbug.com/320042
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests', 'Animation')
return blink_perf.CreatePageSetFromPath(path)
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--enable-web-animations-css')
|
<commit_before># Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry import test
from telemetry.core import util
from measurements import blink_perf
class BlinkPerfAll(test.Test):
tag = 'all'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests')
return blink_perf.CreatePageSetFromPath(path)
class BlinkPerfAnimation(test.Test):
tag = 'animation'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests', 'Animation')
return blink_perf.CreatePageSetFromPath(path)
class BlinkPerfWebAnimations(test.Test):
tag = 'web_animations'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests', 'Animation')
return blink_perf.CreatePageSetFromPath(path)
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--enable-web-animations-css')
<commit_msg>Disable web animations benchmark. It is crashing.
BUG=320042
NOTRY=True
TBR=dtu@chromium.org
Review URL: https://codereview.chromium.org/70233018
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@235452 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry import test
from telemetry.core import util
from measurements import blink_perf
class BlinkPerfAll(test.Test):
tag = 'all'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests')
return blink_perf.CreatePageSetFromPath(path)
class BlinkPerfAnimation(test.Test):
tag = 'animation'
test = blink_perf.BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests', 'Animation')
return blink_perf.CreatePageSetFromPath(path)
class BlinkPerfWebAnimations(test.Test):
tag = 'web_animations'
test = blink_perf.BlinkPerfMeasurement
enabled = False # crbug.com/320042
def CreatePageSet(self, options):
path = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests', 'Animation')
return blink_perf.CreatePageSetFromPath(path)
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--enable-web-animations-css')
|
8db1da8cb9e7ace878d0a8f041ec0d466b1419ef
|
glacier_test.py
|
glacier_test.py
|
#!/usr/bin/env python
# Copyright (c) 2013 Robie Basak
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import sys
import unittest
from mock import Mock
import glacier
class TestCase(unittest.TestCase):
def init_app(self, args):
self.connection = Mock()
self.cache = Mock()
self.app = glacier.App(
args=args,
connection=self.connection,
cache=self.cache)
def run_app(self, args):
self.init_app(args)
self.app.main()
def test_stdin_upload(self):
self.run_app(['archive', 'upload', 'vault_name', '-'])
self.connection.get_vault.assert_called_once_with('vault_name')
vault = self.connection.get_vault.return_value
vault.create_archive_from_file.assert_called_once_with(
file_obj=sys.stdin, description='<stdin>')
|
Add test for upload with '-'
|
Add test for upload with '-'
|
Python
|
mit
|
basak/glacier-cli,mhubig/glacier-cli,basak/glacier-cli,mhubig/glacier-cli
|
Add test for upload with '-'
|
#!/usr/bin/env python
# Copyright (c) 2013 Robie Basak
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import sys
import unittest
from mock import Mock
import glacier
class TestCase(unittest.TestCase):
def init_app(self, args):
self.connection = Mock()
self.cache = Mock()
self.app = glacier.App(
args=args,
connection=self.connection,
cache=self.cache)
def run_app(self, args):
self.init_app(args)
self.app.main()
def test_stdin_upload(self):
self.run_app(['archive', 'upload', 'vault_name', '-'])
self.connection.get_vault.assert_called_once_with('vault_name')
vault = self.connection.get_vault.return_value
vault.create_archive_from_file.assert_called_once_with(
file_obj=sys.stdin, description='<stdin>')
|
<commit_before><commit_msg>Add test for upload with '-'<commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2013 Robie Basak
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import sys
import unittest
from mock import Mock
import glacier
class TestCase(unittest.TestCase):
def init_app(self, args):
self.connection = Mock()
self.cache = Mock()
self.app = glacier.App(
args=args,
connection=self.connection,
cache=self.cache)
def run_app(self, args):
self.init_app(args)
self.app.main()
def test_stdin_upload(self):
self.run_app(['archive', 'upload', 'vault_name', '-'])
self.connection.get_vault.assert_called_once_with('vault_name')
vault = self.connection.get_vault.return_value
vault.create_archive_from_file.assert_called_once_with(
file_obj=sys.stdin, description='<stdin>')
|
Add test for upload with '-'#!/usr/bin/env python
# Copyright (c) 2013 Robie Basak
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import sys
import unittest
from mock import Mock
import glacier
class TestCase(unittest.TestCase):
def init_app(self, args):
self.connection = Mock()
self.cache = Mock()
self.app = glacier.App(
args=args,
connection=self.connection,
cache=self.cache)
def run_app(self, args):
self.init_app(args)
self.app.main()
def test_stdin_upload(self):
self.run_app(['archive', 'upload', 'vault_name', '-'])
self.connection.get_vault.assert_called_once_with('vault_name')
vault = self.connection.get_vault.return_value
vault.create_archive_from_file.assert_called_once_with(
file_obj=sys.stdin, description='<stdin>')
|
<commit_before><commit_msg>Add test for upload with '-'<commit_after>#!/usr/bin/env python
# Copyright (c) 2013 Robie Basak
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import sys
import unittest
from mock import Mock
import glacier
class TestCase(unittest.TestCase):
def init_app(self, args):
self.connection = Mock()
self.cache = Mock()
self.app = glacier.App(
args=args,
connection=self.connection,
cache=self.cache)
def run_app(self, args):
self.init_app(args)
self.app.main()
def test_stdin_upload(self):
self.run_app(['archive', 'upload', 'vault_name', '-'])
self.connection.get_vault.assert_called_once_with('vault_name')
vault = self.connection.get_vault.return_value
vault.create_archive_from_file.assert_called_once_with(
file_obj=sys.stdin, description='<stdin>')
|
|
d885ae5d2a2444e70a8b141ae8742d4fd0bd1cb7
|
run_faults.py
|
run_faults.py
|
#!/usr/bin/env python
import os
import sys
import glob
import clawpack.clawutil.tests as clawtests
class FaultTest(clawtests.Test):
def __init__(self, deformation_file):
super(FaultTest, self).__init__()
self.type = "compsys"
self.name = "guerrero_gap"
self.prefix = os.path.basename(deformation_file).split('.')[0]
self.deformation_file = os.path.abspath(deformation_file)
self.executable = 'xgeoclaw'
# Data objects
import setrun
self.rundata = setrun.setrun()
# Add deformation file
self.rundata.dtopo_data.dtopofiles = []
self.rundata.dtopo_data.dtopofiles.append([1,5,5,self.deformation_file])
def __str__(self):
output = super(FaultTest, self).__str__()
output += "\n Deformation File: %s" % self.deformation_file
return output
if __name__ == '__main__':
if len(sys.argv) > 1:
deformation_files = sys.argv[1:]
else:
deformation_files = glob.glob('./bathy/rot_gap*.xyzt')
tests = []
for deformation_file in deformation_files:
tests.append(FaultTest(deformation_file))
controller = clawtests.TestController(tests)
print controller
controller.tar = True
controller.run()
|
Add batch processing of runs
|
Add batch processing of runs
|
Python
|
mit
|
mandli/compsyn-geoclaw
|
Add batch processing of runs
|
#!/usr/bin/env python
import os
import sys
import glob
import clawpack.clawutil.tests as clawtests
class FaultTest(clawtests.Test):
def __init__(self, deformation_file):
super(FaultTest, self).__init__()
self.type = "compsys"
self.name = "guerrero_gap"
self.prefix = os.path.basename(deformation_file).split('.')[0]
self.deformation_file = os.path.abspath(deformation_file)
self.executable = 'xgeoclaw'
# Data objects
import setrun
self.rundata = setrun.setrun()
# Add deformation file
self.rundata.dtopo_data.dtopofiles = []
self.rundata.dtopo_data.dtopofiles.append([1,5,5,self.deformation_file])
def __str__(self):
output = super(FaultTest, self).__str__()
output += "\n Deformation File: %s" % self.deformation_file
return output
if __name__ == '__main__':
if len(sys.argv) > 1:
deformation_files = sys.argv[1:]
else:
deformation_files = glob.glob('./bathy/rot_gap*.xyzt')
tests = []
for deformation_file in deformation_files:
tests.append(FaultTest(deformation_file))
controller = clawtests.TestController(tests)
print controller
controller.tar = True
controller.run()
|
<commit_before><commit_msg>Add batch processing of runs<commit_after>
|
#!/usr/bin/env python
import os
import sys
import glob
import clawpack.clawutil.tests as clawtests
class FaultTest(clawtests.Test):
def __init__(self, deformation_file):
super(FaultTest, self).__init__()
self.type = "compsys"
self.name = "guerrero_gap"
self.prefix = os.path.basename(deformation_file).split('.')[0]
self.deformation_file = os.path.abspath(deformation_file)
self.executable = 'xgeoclaw'
# Data objects
import setrun
self.rundata = setrun.setrun()
# Add deformation file
self.rundata.dtopo_data.dtopofiles = []
self.rundata.dtopo_data.dtopofiles.append([1,5,5,self.deformation_file])
def __str__(self):
output = super(FaultTest, self).__str__()
output += "\n Deformation File: %s" % self.deformation_file
return output
if __name__ == '__main__':
if len(sys.argv) > 1:
deformation_files = sys.argv[1:]
else:
deformation_files = glob.glob('./bathy/rot_gap*.xyzt')
tests = []
for deformation_file in deformation_files:
tests.append(FaultTest(deformation_file))
controller = clawtests.TestController(tests)
print controller
controller.tar = True
controller.run()
|
Add batch processing of runs#!/usr/bin/env python
import os
import sys
import glob
import clawpack.clawutil.tests as clawtests
class FaultTest(clawtests.Test):
def __init__(self, deformation_file):
super(FaultTest, self).__init__()
self.type = "compsys"
self.name = "guerrero_gap"
self.prefix = os.path.basename(deformation_file).split('.')[0]
self.deformation_file = os.path.abspath(deformation_file)
self.executable = 'xgeoclaw'
# Data objects
import setrun
self.rundata = setrun.setrun()
# Add deformation file
self.rundata.dtopo_data.dtopofiles = []
self.rundata.dtopo_data.dtopofiles.append([1,5,5,self.deformation_file])
def __str__(self):
output = super(FaultTest, self).__str__()
output += "\n Deformation File: %s" % self.deformation_file
return output
if __name__ == '__main__':
if len(sys.argv) > 1:
deformation_files = sys.argv[1:]
else:
deformation_files = glob.glob('./bathy/rot_gap*.xyzt')
tests = []
for deformation_file in deformation_files:
tests.append(FaultTest(deformation_file))
controller = clawtests.TestController(tests)
print controller
controller.tar = True
controller.run()
|
<commit_before><commit_msg>Add batch processing of runs<commit_after>#!/usr/bin/env python
import os
import sys
import glob
import clawpack.clawutil.tests as clawtests
class FaultTest(clawtests.Test):
def __init__(self, deformation_file):
super(FaultTest, self).__init__()
self.type = "compsys"
self.name = "guerrero_gap"
self.prefix = os.path.basename(deformation_file).split('.')[0]
self.deformation_file = os.path.abspath(deformation_file)
self.executable = 'xgeoclaw'
# Data objects
import setrun
self.rundata = setrun.setrun()
# Add deformation file
self.rundata.dtopo_data.dtopofiles = []
self.rundata.dtopo_data.dtopofiles.append([1,5,5,self.deformation_file])
def __str__(self):
output = super(FaultTest, self).__str__()
output += "\n Deformation File: %s" % self.deformation_file
return output
if __name__ == '__main__':
if len(sys.argv) > 1:
deformation_files = sys.argv[1:]
else:
deformation_files = glob.glob('./bathy/rot_gap*.xyzt')
tests = []
for deformation_file in deformation_files:
tests.append(FaultTest(deformation_file))
controller = clawtests.TestController(tests)
print controller
controller.tar = True
controller.run()
|
|
5074752a42348dfe7b8c28152571960efe57f241
|
corehq/ex-submodules/phonelog/migrations/0008_devicelog_varchar_index.py
|
corehq/ex-submodules/phonelog/migrations/0008_devicelog_varchar_index.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-01 18:37
from __future__ import unicode_literals
from django.db import migrations
from corehq.sql_db.operations import HqRunSQL
from corehq.util.django_migrations import add_if_not_exists
class Migration(migrations.Migration):
dependencies = [
('phonelog', '0007_devicelog_indexes'),
]
operations = [
HqRunSQL(
add_if_not_exists(
"""
CREATE INDEX phonelog_devicereportentry_domain_device_id_pattern_ops
ON phonelog_devicereportentry (domain varchar_pattern_ops, device_id varchar_pattern_ops)
"""
),
reverse_sql=
"""
DROP INDEX IF EXISTS phonelog_devicereportentry_domain_device_id_pattern_ops
""",
)
]
|
Revert "Revert "adding varchar_pattern_ops index to devicelog table""
|
Revert "Revert "adding varchar_pattern_ops index to devicelog table""
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Revert "Revert "adding varchar_pattern_ops index to devicelog table""
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-01 18:37
from __future__ import unicode_literals
from django.db import migrations
from corehq.sql_db.operations import HqRunSQL
from corehq.util.django_migrations import add_if_not_exists
class Migration(migrations.Migration):
dependencies = [
('phonelog', '0007_devicelog_indexes'),
]
operations = [
HqRunSQL(
add_if_not_exists(
"""
CREATE INDEX phonelog_devicereportentry_domain_device_id_pattern_ops
ON phonelog_devicereportentry (domain varchar_pattern_ops, device_id varchar_pattern_ops)
"""
),
reverse_sql=
"""
DROP INDEX IF EXISTS phonelog_devicereportentry_domain_device_id_pattern_ops
""",
)
]
|
<commit_before><commit_msg>Revert "Revert "adding varchar_pattern_ops index to devicelog table""<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-01 18:37
from __future__ import unicode_literals
from django.db import migrations
from corehq.sql_db.operations import HqRunSQL
from corehq.util.django_migrations import add_if_not_exists
class Migration(migrations.Migration):
dependencies = [
('phonelog', '0007_devicelog_indexes'),
]
operations = [
HqRunSQL(
add_if_not_exists(
"""
CREATE INDEX phonelog_devicereportentry_domain_device_id_pattern_ops
ON phonelog_devicereportentry (domain varchar_pattern_ops, device_id varchar_pattern_ops)
"""
),
reverse_sql=
"""
DROP INDEX IF EXISTS phonelog_devicereportentry_domain_device_id_pattern_ops
""",
)
]
|
Revert "Revert "adding varchar_pattern_ops index to devicelog table""# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-01 18:37
from __future__ import unicode_literals
from django.db import migrations
from corehq.sql_db.operations import HqRunSQL
from corehq.util.django_migrations import add_if_not_exists
class Migration(migrations.Migration):
dependencies = [
('phonelog', '0007_devicelog_indexes'),
]
operations = [
HqRunSQL(
add_if_not_exists(
"""
CREATE INDEX phonelog_devicereportentry_domain_device_id_pattern_ops
ON phonelog_devicereportentry (domain varchar_pattern_ops, device_id varchar_pattern_ops)
"""
),
reverse_sql=
"""
DROP INDEX IF EXISTS phonelog_devicereportentry_domain_device_id_pattern_ops
""",
)
]
|
<commit_before><commit_msg>Revert "Revert "adding varchar_pattern_ops index to devicelog table""<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-01 18:37
from __future__ import unicode_literals
from django.db import migrations
from corehq.sql_db.operations import HqRunSQL
from corehq.util.django_migrations import add_if_not_exists
class Migration(migrations.Migration):
dependencies = [
('phonelog', '0007_devicelog_indexes'),
]
operations = [
HqRunSQL(
add_if_not_exists(
"""
CREATE INDEX phonelog_devicereportentry_domain_device_id_pattern_ops
ON phonelog_devicereportentry (domain varchar_pattern_ops, device_id varchar_pattern_ops)
"""
),
reverse_sql=
"""
DROP INDEX IF EXISTS phonelog_devicereportentry_domain_device_id_pattern_ops
""",
)
]
|
|
f032f456a2d601b7db6461ea44916cf7588e0c81
|
src/python/SparkSQLTwitter.py
|
src/python/SparkSQLTwitter.py
|
# A simple demo for working with SparkSQL and Tweets
from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext
import json
import sys
if __name__ == "__main__":
conf = SparkConf()
sc = SparkContext()
sqlCtx = SQLContext(sc)
|
Add the start of Spark SQL twitter demo in python
|
Add the start of Spark SQL twitter demo in python
|
Python
|
mit
|
coursera4ashok/learning-spark,junwucs/learning-spark,kod3r/learning-spark,jaehyuk/learning-spark,diogoaurelio/learning-spark,bhagatsingh/learning-spark,gaoxuesong/learning-spark,NBSW/learning-spark,obinsanni/learning-spark,diogoaurelio/learning-spark,anjuncc/learning-spark-examples,concerned3rdparty/learning-spark,jaehyuk/learning-spark,negokaz/learning-spark,databricks/learning-spark,SunGuo/learning-spark,dsdinter/learning-spark-examples,bhagatsingh/learning-spark,DINESHKUMARMURUGAN/learning-spark,coursera4ashok/learning-spark,ramyasrigangula/learning-spark,huixiang/learning-spark,UsterNes/learning-spark,zaxliu/learning-spark,qingkaikong/learning-spark-examples,anjuncc/learning-spark-examples,huydx/learning-spark,negokaz/learning-spark,UsterNes/learning-spark,anjuncc/learning-spark-examples,baokunguo/learning-spark-examples,feynman0825/learning-spark,DINESHKUMARMURUGAN/learning-spark,gaoxuesong/learning-spark,negokaz/learning-spark,asarraf/learning-spark,asarraf/learning-spark,ellis429/learning-spark,zaxliu/learning-spark,rex1100/learning-spark,mmirolim/learning-spark,diogoaurelio/learning-spark,dsdinter/learning-spark-examples,concerned3rdparty/learning-spark,negokaz/learning-spark,anjuncc/learning-spark-examples,huydx/learning-spark,qingkaikong/learning-spark-examples,asarraf/learning-spark,NBSW/learning-spark,GatsbyNewton/learning-spark,GatsbyNewton/learning-spark,kod3r/learning-spark,ramyasrigangula/learning-spark,huixiang/learning-spark,obinsanni/learning-spark,rex1100/learning-spark,jindalcastle/learning-spark,feynman0825/learning-spark,concerned3rdparty/learning-spark,ellis429/learning-spark,kod3r/learning-spark,huixiang/learning-spark,SunGuo/learning-spark,noprom/learning-spark,junwucs/learning-spark,ellis429/learning-spark,JerryTseng/learning-spark,shimizust/learning-spark,bhagatsingh/learning-spark,ellis429/learning-spark-examples,databricks/learning-spark,mohitsh/learning-spark,huydx/learning-spark,jindalcastle/learning-spark,holdenk/learning-spark-examples,databricks/learning-spark,mmirolim/learning-spark,XiaoqingWang/learning-spark,mmirolim/learning-spark,noprom/learning-spark,kpraveen420/learning-spark,databricks/learning-spark,mohitsh/learning-spark,holdenk/learning-spark-examples,jaehyuk/learning-spark,DINESHKUMARMURUGAN/learning-spark,noprom/learning-spark,NBSW/learning-spark,kpraveen420/learning-spark,diogoaurelio/learning-spark,GatsbyNewton/learning-spark,feynman0825/learning-spark,ramyasrigangula/learning-spark,JerryTseng/learning-spark,feynman0825/learning-spark,SunGuo/learning-spark,UsterNes/learning-spark,ellis429/learning-spark-examples,junwucs/learning-spark,baokunguo/learning-spark-examples,bhagatsingh/learning-spark,holdenk/learning-spark-examples,SunGuo/learning-spark,qingkaikong/learning-spark-examples,shimizust/learning-spark,JerryTseng/learning-spark,dsdinter/learning-spark-examples,tengteng/learning-spark,asarraf/learning-spark,diogoaurelio/learning-spark,jindalcastle/learning-spark,huydx/learning-spark,UsterNes/learning-spark,obinsanni/learning-spark,DINESHKUMARMURUGAN/learning-spark,obinsanni/learning-spark,coursera4ashok/learning-spark,qingkaikong/learning-spark-examples,kpraveen420/learning-spark,coursera4ashok/learning-spark,mmirolim/learning-spark,huixiang/learning-spark,bhagatsingh/learning-spark,baokunguo/learning-spark-examples,shimizust/learning-spark,ellis429/learning-spark-examples,baokunguo/learning-spark-examples,GatsbyNewton/learning-spark,ellis429/learning-spark-examples,jindalcastle/learning-spark,anjuncc/learning-spark-examples,tengteng/lea
rning-spark,XiaoqingWang/learning-spark,junwucs/learning-spark,jaehyuk/learning-spark,mohitsh/learning-spark,DINESHKUMARMURUGAN/learning-spark,huydx/learning-spark,holdenk/learning-spark-examples,UsterNes/learning-spark,ramyasrigangula/learning-spark,qingkaikong/learning-spark-examples,ellis429/learning-spark,jindalcastle/learning-spark,mmirolim/learning-spark,tengteng/learning-spark,SunGuo/learning-spark,feynman0825/learning-spark,kpraveen420/learning-spark,gaoxuesong/learning-spark,obinsanni/learning-spark,NBSW/learning-spark,zaxliu/learning-spark,rex1100/learning-spark,concerned3rdparty/learning-spark,junwucs/learning-spark,holdenk/learning-spark-examples,XiaoqingWang/learning-spark,ramyasrigangula/learning-spark,kpraveen420/learning-spark,databricks/learning-spark,shimizust/learning-spark,shimizust/learning-spark,gaoxuesong/learning-spark,JerryTseng/learning-spark,XiaoqingWang/learning-spark,mohitsh/learning-spark,JerryTseng/learning-spark,baokunguo/learning-spark-examples,jaehyuk/learning-spark,ellis429/learning-spark,zaxliu/learning-spark,noprom/learning-spark,huixiang/learning-spark,noprom/learning-spark,negokaz/learning-spark,mohitsh/learning-spark,XiaoqingWang/learning-spark,NBSW/learning-spark,zaxliu/learning-spark,asarraf/learning-spark,kod3r/learning-spark,kod3r/learning-spark,tengteng/learning-spark,concerned3rdparty/learning-spark,dsdinter/learning-spark-examples,dsdinter/learning-spark-examples,GatsbyNewton/learning-spark,coursera4ashok/learning-spark,gaoxuesong/learning-spark,ellis429/learning-spark-examples,tengteng/learning-spark
|
Add the start of Spark SQL twitter demo in python
|
# A simple demo for working with SparkSQL and Tweets
from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext
import json
import sys
if __name__ == "__main__":
conf = SparkConf()
sc = SparkContext()
sqlCtx = SQLContext(sc)
|
<commit_before><commit_msg>Add the start of Spark SQL twitter demo in python<commit_after>
|
# A simple demo for working with SparkSQL and Tweets
from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext
import json
import sys
if __name__ == "__main__":
conf = SparkConf()
sc = SparkContext()
sqlCtx = SQLContext(sc)
|
Add the start of Spark SQL twitter demo in python# A simple demo for working with SparkSQL and Tweets
from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext
import json
import sys
if __name__ == "__main__":
conf = SparkConf()
sc = SparkContext()
sqlCtx = SQLContext(sc)
|
<commit_before><commit_msg>Add the start of Spark SQL twitter demo in python<commit_after># A simple demo for working with SparkSQL and Tweets
from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext
import json
import sys
if __name__ == "__main__":
conf = SparkConf()
sc = SparkContext()
sqlCtx = SQLContext(sc)
|
|
2eba1fc80263c11a1b2b5ee1707b19e98a7b2980
|
apps/submission/tests/test_models.py
|
apps/submission/tests/test_models.py
|
from django.test import TestCase
from django.urls import reverse
from apps.core.factories import PIXELER_PASSWORD, PixelerFactory
from .. import models
class SubmissionProcessTestCase(TestCase):
def test_can_create_submission_process(self):
label = 'Candida datasest 0001'
qs = models.SubmissionProcess.objects.all()
self.assertEqual(qs.count(), 0)
process = models.SubmissionProcess.objects.create(
label=label,
)
self.assertEqual(process.label, label)
self.assertEqual(qs.count(), 1)
def test_archive_upload_to(self):
# Create process and activate tasks
self.user = PixelerFactory(
is_active=True,
is_staff=True,
is_superuser=True,
)
self.client.login(
username=self.user.username,
password=PIXELER_PASSWORD,
)
self.client.post(
reverse('submission:start'),
data={
'label': 'Candida datasest 0001',
'_viewflow_activation-started': '2000-01-01',
},
follow=True,
)
process = models.SubmissionProcess.objects.get()
filename = 'archive.zip'
upload_path = models.SubmissionProcess.archive_upload_to(
process,
filename
)
expected = '{}/submissions/{}/{}'.format(
process.created_by.id,
process.id,
filename
)
self.assertEqual(upload_path, expected)
|
Add tests for the SubmissionProcess model
|
Add tests for the SubmissionProcess model
|
Python
|
bsd-3-clause
|
Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel
|
Add tests for the SubmissionProcess model
|
from django.test import TestCase
from django.urls import reverse
from apps.core.factories import PIXELER_PASSWORD, PixelerFactory
from .. import models
class SubmissionProcessTestCase(TestCase):
def test_can_create_submission_process(self):
label = 'Candida datasest 0001'
qs = models.SubmissionProcess.objects.all()
self.assertEqual(qs.count(), 0)
process = models.SubmissionProcess.objects.create(
label=label,
)
self.assertEqual(process.label, label)
self.assertEqual(qs.count(), 1)
def test_archive_upload_to(self):
# Create process and activate tasks
self.user = PixelerFactory(
is_active=True,
is_staff=True,
is_superuser=True,
)
self.client.login(
username=self.user.username,
password=PIXELER_PASSWORD,
)
self.client.post(
reverse('submission:start'),
data={
'label': 'Candida datasest 0001',
'_viewflow_activation-started': '2000-01-01',
},
follow=True,
)
process = models.SubmissionProcess.objects.get()
filename = 'archive.zip'
upload_path = models.SubmissionProcess.archive_upload_to(
process,
filename
)
expected = '{}/submissions/{}/{}'.format(
process.created_by.id,
process.id,
filename
)
self.assertEqual(upload_path, expected)
|
<commit_before><commit_msg>Add tests for the SubmissionProcess model<commit_after>
|
from django.test import TestCase
from django.urls import reverse
from apps.core.factories import PIXELER_PASSWORD, PixelerFactory
from .. import models
class SubmissionProcessTestCase(TestCase):
def test_can_create_submission_process(self):
label = 'Candida datasest 0001'
qs = models.SubmissionProcess.objects.all()
self.assertEqual(qs.count(), 0)
process = models.SubmissionProcess.objects.create(
label=label,
)
self.assertEqual(process.label, label)
self.assertEqual(qs.count(), 1)
def test_archive_upload_to(self):
# Create process and activate tasks
self.user = PixelerFactory(
is_active=True,
is_staff=True,
is_superuser=True,
)
self.client.login(
username=self.user.username,
password=PIXELER_PASSWORD,
)
self.client.post(
reverse('submission:start'),
data={
'label': 'Candida datasest 0001',
'_viewflow_activation-started': '2000-01-01',
},
follow=True,
)
process = models.SubmissionProcess.objects.get()
filename = 'archive.zip'
upload_path = models.SubmissionProcess.archive_upload_to(
process,
filename
)
expected = '{}/submissions/{}/{}'.format(
process.created_by.id,
process.id,
filename
)
self.assertEqual(upload_path, expected)
|
Add tests for the SubmissionProcess modelfrom django.test import TestCase
from django.urls import reverse
from apps.core.factories import PIXELER_PASSWORD, PixelerFactory
from .. import models
class SubmissionProcessTestCase(TestCase):
def test_can_create_submission_process(self):
label = 'Candida datasest 0001'
qs = models.SubmissionProcess.objects.all()
self.assertEqual(qs.count(), 0)
process = models.SubmissionProcess.objects.create(
label=label,
)
self.assertEqual(process.label, label)
self.assertEqual(qs.count(), 1)
def test_archive_upload_to(self):
# Create process and activate tasks
self.user = PixelerFactory(
is_active=True,
is_staff=True,
is_superuser=True,
)
self.client.login(
username=self.user.username,
password=PIXELER_PASSWORD,
)
self.client.post(
reverse('submission:start'),
data={
'label': 'Candida datasest 0001',
'_viewflow_activation-started': '2000-01-01',
},
follow=True,
)
process = models.SubmissionProcess.objects.get()
filename = 'archive.zip'
upload_path = models.SubmissionProcess.archive_upload_to(
process,
filename
)
expected = '{}/submissions/{}/{}'.format(
process.created_by.id,
process.id,
filename
)
self.assertEqual(upload_path, expected)
|
<commit_before><commit_msg>Add tests for the SubmissionProcess model<commit_after>from django.test import TestCase
from django.urls import reverse
from apps.core.factories import PIXELER_PASSWORD, PixelerFactory
from .. import models
class SubmissionProcessTestCase(TestCase):
def test_can_create_submission_process(self):
label = 'Candida datasest 0001'
qs = models.SubmissionProcess.objects.all()
self.assertEqual(qs.count(), 0)
process = models.SubmissionProcess.objects.create(
label=label,
)
self.assertEqual(process.label, label)
self.assertEqual(qs.count(), 1)
def test_archive_upload_to(self):
# Create process and activate tasks
self.user = PixelerFactory(
is_active=True,
is_staff=True,
is_superuser=True,
)
self.client.login(
username=self.user.username,
password=PIXELER_PASSWORD,
)
self.client.post(
reverse('submission:start'),
data={
'label': 'Candida datasest 0001',
'_viewflow_activation-started': '2000-01-01',
},
follow=True,
)
process = models.SubmissionProcess.objects.get()
filename = 'archive.zip'
upload_path = models.SubmissionProcess.archive_upload_to(
process,
filename
)
expected = '{}/submissions/{}/{}'.format(
process.created_by.id,
process.id,
filename
)
self.assertEqual(upload_path, expected)
|
|
ffe1e77a21c6b0a515d92f1d67406e01a0316341
|
examples/test_chromedriver.py
|
examples/test_chromedriver.py
|
"""
This test is only for Chrome!
(Verify that your chromedriver is compatible with your version of Chrome.)
"""
import colorama
from seleniumbase import BaseCase
class ChromeTestClass(BaseCase):
def test_chromedriver_matches_chrome(self):
if self.browser != "chrome":
print("\n This test is only for Chrome!")
print(" (Run with: '--browser=chrome')")
self.skip("This test is only for Chrome!")
driver_capabilities = self.driver.__dict__["capabilities"]
if "version" in driver_capabilities:
chrome_version = driver_capabilities["version"]
else:
chrome_version = driver_capabilities["browserVersion"]
major_chrome_version = chrome_version.split('.')[0]
chrome_dict = self.driver.__dict__["capabilities"]["chrome"]
chromedriver_version = chrome_dict["chromedriverVersion"]
chromedriver_version = chromedriver_version.split(' ')[0]
major_chromedriver_version = chromedriver_version.split('.')[0]
c1 = colorama.Fore.BLUE + colorama.Back.LIGHTCYAN_EX
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
c4 = colorama.Fore.RED + colorama.Back.LIGHTYELLOW_EX
c5 = colorama.Fore.RED + colorama.Back.LIGHTGREEN_EX
cr = colorama.Style.RESET_ALL
pr_chromedriver_version = c3 + chromedriver_version + cr
pr_chrome_version = c2 + chrome_version + cr
message = (
"\n"
"* Your version of chromedriver is: %s\n"
"*\n* And your version of Chrome is: %s"
"" % (pr_chromedriver_version, pr_chrome_version))
print(message)
if major_chromedriver_version < major_chrome_version:
install_sb = (
"seleniumbase install chromedriver %s" % major_chrome_version)
pr_install_sb = c1 + install_sb + cr
up_msg = "You may want to upgrade your version of chromedriver:"
up_msg = c4 + up_msg + cr
message = ("*\n* %s\n*\n* >>> %s" % (up_msg, pr_install_sb))
print(message)
elif major_chromedriver_version > major_chrome_version:
up_msg = "You may want to upgrade your version of Chrome:"
up_msg = c5 + up_msg + cr
up_url = c1 + "chrome://settings/help" + cr
message = ("*\n* %s\n*\n* See: %s" % (up_msg, up_url))
print(message)
else:
up_msg = (
"Success! Your chromedriver is compatible with your Chrome!")
up_msg = c1 + up_msg + cr
message = ("*\n* %s\n" % up_msg)
print(message)
|
Add a test to detect if using an out-of-date ChromeDriver
|
Add a test to detect if using an out-of-date ChromeDriver
|
Python
|
mit
|
mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase
|
Add a test to detect if using an out-of-date ChromeDriver
|
"""
This test is only for Chrome!
(Verify that your chromedriver is compatible with your version of Chrome.)
"""
import colorama
from seleniumbase import BaseCase
class ChromeTestClass(BaseCase):
def test_chromedriver_matches_chrome(self):
if self.browser != "chrome":
print("\n This test is only for Chrome!")
print(" (Run with: '--browser=chrome')")
self.skip("This test is only for Chrome!")
driver_capabilities = self.driver.__dict__["capabilities"]
if "version" in driver_capabilities:
chrome_version = driver_capabilities["version"]
else:
chrome_version = driver_capabilities["browserVersion"]
major_chrome_version = chrome_version.split('.')[0]
chrome_dict = self.driver.__dict__["capabilities"]["chrome"]
chromedriver_version = chrome_dict["chromedriverVersion"]
chromedriver_version = chromedriver_version.split(' ')[0]
major_chromedriver_version = chromedriver_version.split('.')[0]
c1 = colorama.Fore.BLUE + colorama.Back.LIGHTCYAN_EX
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
c4 = colorama.Fore.RED + colorama.Back.LIGHTYELLOW_EX
c5 = colorama.Fore.RED + colorama.Back.LIGHTGREEN_EX
cr = colorama.Style.RESET_ALL
pr_chromedriver_version = c3 + chromedriver_version + cr
pr_chrome_version = c2 + chrome_version + cr
message = (
"\n"
"* Your version of chromedriver is: %s\n"
"*\n* And your version of Chrome is: %s"
"" % (pr_chromedriver_version, pr_chrome_version))
print(message)
if major_chromedriver_version < major_chrome_version:
install_sb = (
"seleniumbase install chromedriver %s" % major_chrome_version)
pr_install_sb = c1 + install_sb + cr
up_msg = "You may want to upgrade your version of chromedriver:"
up_msg = c4 + up_msg + cr
message = ("*\n* %s\n*\n* >>> %s" % (up_msg, pr_install_sb))
print(message)
elif major_chromedriver_version > major_chrome_version:
up_msg = "You may want to upgrade your version of Chrome:"
up_msg = c5 + up_msg + cr
up_url = c1 + "chrome://settings/help" + cr
message = ("*\n* %s\n*\n* See: %s" % (up_msg, up_url))
print(message)
else:
up_msg = (
"Success! Your chromedriver is compatible with your Chrome!")
up_msg = c1 + up_msg + cr
message = ("*\n* %s\n" % up_msg)
print(message)
|
<commit_before><commit_msg>Add a test to detect if using an out-of-date ChromeDriver<commit_after>
|
"""
This test is only for Chrome!
(Verify that your chromedriver is compatible with your version of Chrome.)
"""
import colorama
from seleniumbase import BaseCase
class ChromeTestClass(BaseCase):
def test_chromedriver_matches_chrome(self):
if self.browser != "chrome":
print("\n This test is only for Chrome!")
print(" (Run with: '--browser=chrome')")
self.skip("This test is only for Chrome!")
driver_capabilities = self.driver.__dict__["capabilities"]
if "version" in driver_capabilities:
chrome_version = driver_capabilities["version"]
else:
chrome_version = driver_capabilities["browserVersion"]
major_chrome_version = chrome_version.split('.')[0]
chrome_dict = self.driver.__dict__["capabilities"]["chrome"]
chromedriver_version = chrome_dict["chromedriverVersion"]
chromedriver_version = chromedriver_version.split(' ')[0]
major_chromedriver_version = chromedriver_version.split('.')[0]
c1 = colorama.Fore.BLUE + colorama.Back.LIGHTCYAN_EX
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
c4 = colorama.Fore.RED + colorama.Back.LIGHTYELLOW_EX
c5 = colorama.Fore.RED + colorama.Back.LIGHTGREEN_EX
cr = colorama.Style.RESET_ALL
pr_chromedriver_version = c3 + chromedriver_version + cr
pr_chrome_version = c2 + chrome_version + cr
message = (
"\n"
"* Your version of chromedriver is: %s\n"
"*\n* And your version of Chrome is: %s"
"" % (pr_chromedriver_version, pr_chrome_version))
print(message)
if major_chromedriver_version < major_chrome_version:
install_sb = (
"seleniumbase install chromedriver %s" % major_chrome_version)
pr_install_sb = c1 + install_sb + cr
up_msg = "You may want to upgrade your version of chromedriver:"
up_msg = c4 + up_msg + cr
message = ("*\n* %s\n*\n* >>> %s" % (up_msg, pr_install_sb))
print(message)
elif major_chromedriver_version > major_chrome_version:
up_msg = "You may want to upgrade your version of Chrome:"
up_msg = c5 + up_msg + cr
up_url = c1 + "chrome://settings/help" + cr
message = ("*\n* %s\n*\n* See: %s" % (up_msg, up_url))
print(message)
else:
up_msg = (
"Success! Your chromedriver is compatible with your Chrome!")
up_msg = c1 + up_msg + cr
message = ("*\n* %s\n" % up_msg)
print(message)
|
Add a test to detect if using an out-of-date ChromeDriver"""
This test is only for Chrome!
(Verify that your chromedriver is compatible with your version of Chrome.)
"""
import colorama
from seleniumbase import BaseCase
class ChromeTestClass(BaseCase):
def test_chromedriver_matches_chrome(self):
if self.browser != "chrome":
print("\n This test is only for Chrome!")
print(" (Run with: '--browser=chrome')")
self.skip("This test is only for Chrome!")
driver_capabilities = self.driver.__dict__["capabilities"]
if "version" in driver_capabilities:
chrome_version = driver_capabilities["version"]
else:
chrome_version = driver_capabilities["browserVersion"]
major_chrome_version = chrome_version.split('.')[0]
chrome_dict = self.driver.__dict__["capabilities"]["chrome"]
chromedriver_version = chrome_dict["chromedriverVersion"]
chromedriver_version = chromedriver_version.split(' ')[0]
major_chromedriver_version = chromedriver_version.split('.')[0]
c1 = colorama.Fore.BLUE + colorama.Back.LIGHTCYAN_EX
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
c4 = colorama.Fore.RED + colorama.Back.LIGHTYELLOW_EX
c5 = colorama.Fore.RED + colorama.Back.LIGHTGREEN_EX
cr = colorama.Style.RESET_ALL
pr_chromedriver_version = c3 + chromedriver_version + cr
pr_chrome_version = c2 + chrome_version + cr
message = (
"\n"
"* Your version of chromedriver is: %s\n"
"*\n* And your version of Chrome is: %s"
"" % (pr_chromedriver_version, pr_chrome_version))
print(message)
if major_chromedriver_version < major_chrome_version:
install_sb = (
"seleniumbase install chromedriver %s" % major_chrome_version)
pr_install_sb = c1 + install_sb + cr
up_msg = "You may want to upgrade your version of chromedriver:"
up_msg = c4 + up_msg + cr
message = ("*\n* %s\n*\n* >>> %s" % (up_msg, pr_install_sb))
print(message)
elif major_chromedriver_version > major_chrome_version:
up_msg = "You may want to upgrade your version of Chrome:"
up_msg = c5 + up_msg + cr
up_url = c1 + "chrome://settings/help" + cr
message = ("*\n* %s\n*\n* See: %s" % (up_msg, up_url))
print(message)
else:
up_msg = (
"Success! Your chromedriver is compatible with your Chrome!")
up_msg = c1 + up_msg + cr
message = ("*\n* %s\n" % up_msg)
print(message)
|
<commit_before><commit_msg>Add a test to detect if using an out-of-date ChromeDriver<commit_after>"""
This test is only for Chrome!
(Verify that your chromedriver is compatible with your version of Chrome.)
"""
import colorama
from seleniumbase import BaseCase
class ChromeTestClass(BaseCase):
def test_chromedriver_matches_chrome(self):
if self.browser != "chrome":
print("\n This test is only for Chrome!")
print(" (Run with: '--browser=chrome')")
self.skip("This test is only for Chrome!")
driver_capabilities = self.driver.__dict__["capabilities"]
if "version" in driver_capabilities:
chrome_version = driver_capabilities["version"]
else:
chrome_version = driver_capabilities["browserVersion"]
major_chrome_version = chrome_version.split('.')[0]
chrome_dict = self.driver.__dict__["capabilities"]["chrome"]
chromedriver_version = chrome_dict["chromedriverVersion"]
chromedriver_version = chromedriver_version.split(' ')[0]
major_chromedriver_version = chromedriver_version.split('.')[0]
c1 = colorama.Fore.BLUE + colorama.Back.LIGHTCYAN_EX
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
c4 = colorama.Fore.RED + colorama.Back.LIGHTYELLOW_EX
c5 = colorama.Fore.RED + colorama.Back.LIGHTGREEN_EX
cr = colorama.Style.RESET_ALL
pr_chromedriver_version = c3 + chromedriver_version + cr
pr_chrome_version = c2 + chrome_version + cr
message = (
"\n"
"* Your version of chromedriver is: %s\n"
"*\n* And your version of Chrome is: %s"
"" % (pr_chromedriver_version, pr_chrome_version))
print(message)
if major_chromedriver_version < major_chrome_version:
install_sb = (
"seleniumbase install chromedriver %s" % major_chrome_version)
pr_install_sb = c1 + install_sb + cr
up_msg = "You may want to upgrade your version of chromedriver:"
up_msg = c4 + up_msg + cr
message = ("*\n* %s\n*\n* >>> %s" % (up_msg, pr_install_sb))
print(message)
elif major_chromedriver_version > major_chrome_version:
up_msg = "You may want to upgrade your version of Chrome:"
up_msg = c5 + up_msg + cr
up_url = c1 + "chrome://settings/help" + cr
message = ("*\n* %s\n*\n* See: %s" % (up_msg, up_url))
print(message)
else:
up_msg = (
"Success! Your chromedriver is compatible with your Chrome!")
up_msg = c1 + up_msg + cr
message = ("*\n* %s\n" % up_msg)
print(message)
|
|
268d97d4a655e7081a2c550df8b1e927e5c6c4ab
|
ibeis/tests/test_ibs_uuid.py
|
ibeis/tests/test_ibs_uuid.py
|
#!/usr/bin/env python2.7
# TODO: ADD COPYRIGHT TAG
from __future__ import absolute_import, division, print_function
from ibeis.dev import ibsfuncs
#from itertools import izip
# Python
import multiprocessing
#import numpy as np
from uuid import UUID
# Tools
import utool
from ibeis.control.IBEISControl import IBEISController
print, print_, printDBG, rrr, profile = utool.inject(__name__, '[TEST_UUID]')
from vtool import image
from ibeis.model.preproc import preproc_image
def TEST_ENCOUNTERS(ibs):
print('[TEST_UUID]')
img = image.imread('/home/hendrik/Pictures/ysbeer.jpg')
uuid = preproc_image.get_image_uuid(img)
uuid2 = UUID('cf55d6e3-908f-ac6c-9a5c-cd350f3e5682')
print('uuid = %r' % uuid)
print('uuid2 = %r' % uuid2)
assert uuid == uuid2, 'uuid and uuid2 do not match'
return locals()
if __name__ == '__main__':
multiprocessing.freeze_support() # For windows
import ibeis
main_locals = ibeis.main(defaultdb='testdb1', gui=False)
ibs = main_locals['ibs']
test_locals = utool.run_test(TEST_ENCOUNTERS, ibs)
execstr = utool.execstr_dict(test_locals, 'test_locals')
exec(execstr)
|
Test to check if uuids are consistent across machines
|
Test to check if uuids are consistent across machines
|
Python
|
apache-2.0
|
Erotemic/ibeis,Erotemic/ibeis,SU-ECE-17-7/ibeis,SU-ECE-17-7/ibeis,SU-ECE-17-7/ibeis,SU-ECE-17-7/ibeis
|
Test to check if uuids are consistent across machines
|
#!/usr/bin/env python2.7
# TODO: ADD COPYRIGHT TAG
from __future__ import absolute_import, division, print_function
from ibeis.dev import ibsfuncs
#from itertools import izip
# Python
import multiprocessing
#import numpy as np
from uuid import UUID
# Tools
import utool
from ibeis.control.IBEISControl import IBEISController
print, print_, printDBG, rrr, profile = utool.inject(__name__, '[TEST_UUID]')
from vtool import image
from ibeis.model.preproc import preproc_image
def TEST_ENCOUNTERS(ibs):
print('[TEST_UUID]')
img = image.imread('/home/hendrik/Pictures/ysbeer.jpg')
uuid = preproc_image.get_image_uuid(img)
uuid2 = UUID('cf55d6e3-908f-ac6c-9a5c-cd350f3e5682')
print('uuid = %r' % uuid)
print('uuid2 = %r' % uuid2)
assert uuid == uuid2, 'uuid and uuid2 do not match'
return locals()
if __name__ == '__main__':
multiprocessing.freeze_support() # For windows
import ibeis
main_locals = ibeis.main(defaultdb='testdb1', gui=False)
ibs = main_locals['ibs']
test_locals = utool.run_test(TEST_ENCOUNTERS, ibs)
execstr = utool.execstr_dict(test_locals, 'test_locals')
exec(execstr)
|
<commit_before><commit_msg>Test to check if uuids are consistent across machines<commit_after>
|
#!/usr/bin/env python2.7
# TODO: ADD COPYRIGHT TAG
from __future__ import absolute_import, division, print_function
from ibeis.dev import ibsfuncs
#from itertools import izip
# Python
import multiprocessing
#import numpy as np
from uuid import UUID
# Tools
import utool
from ibeis.control.IBEISControl import IBEISController
print, print_, printDBG, rrr, profile = utool.inject(__name__, '[TEST_UUID]')
from vtool import image
from ibeis.model.preproc import preproc_image
def TEST_ENCOUNTERS(ibs):
print('[TEST_UUID]')
img = image.imread('/home/hendrik/Pictures/ysbeer.jpg')
uuid = preproc_image.get_image_uuid(img)
uuid2 = UUID('cf55d6e3-908f-ac6c-9a5c-cd350f3e5682')
print('uuid = %r' % uuid)
print('uuid2 = %r' % uuid2)
assert uuid == uuid2, 'uuid and uuid2 do not match'
return locals()
if __name__ == '__main__':
multiprocessing.freeze_support() # For windows
import ibeis
main_locals = ibeis.main(defaultdb='testdb1', gui=False)
ibs = main_locals['ibs']
test_locals = utool.run_test(TEST_ENCOUNTERS, ibs)
execstr = utool.execstr_dict(test_locals, 'test_locals')
exec(execstr)
|
Test to check if uuids are consistent across machines#!/usr/bin/env python2.7
# TODO: ADD COPYRIGHT TAG
from __future__ import absolute_import, division, print_function
from ibeis.dev import ibsfuncs
#from itertools import izip
# Python
import multiprocessing
#import numpy as np
from uuid import UUID
# Tools
import utool
from ibeis.control.IBEISControl import IBEISController
print, print_, printDBG, rrr, profile = utool.inject(__name__, '[TEST_UUID]')
from vtool import image
from ibeis.model.preproc import preproc_image
def TEST_ENCOUNTERS(ibs):
print('[TEST_UUID]')
img = image.imread('/home/hendrik/Pictures/ysbeer.jpg')
uuid = preproc_image.get_image_uuid(img)
uuid2 = UUID('cf55d6e3-908f-ac6c-9a5c-cd350f3e5682')
print('uuid = %r' % uuid)
print('uuid2 = %r' % uuid2)
assert uuid == uuid2, 'uuid and uuid2 do not match'
return locals()
if __name__ == '__main__':
multiprocessing.freeze_support() # For windows
import ibeis
main_locals = ibeis.main(defaultdb='testdb1', gui=False)
ibs = main_locals['ibs']
test_locals = utool.run_test(TEST_ENCOUNTERS, ibs)
execstr = utool.execstr_dict(test_locals, 'test_locals')
exec(execstr)
|
<commit_before><commit_msg>Test to check if uuids are consistent across machines<commit_after>#!/usr/bin/env python2.7
# TODO: ADD COPYRIGHT TAG
from __future__ import absolute_import, division, print_function
from ibeis.dev import ibsfuncs
#from itertools import izip
# Python
import multiprocessing
#import numpy as np
from uuid import UUID
# Tools
import utool
from ibeis.control.IBEISControl import IBEISController
print, print_, printDBG, rrr, profile = utool.inject(__name__, '[TEST_UUID]')
from vtool import image
from ibeis.model.preproc import preproc_image
def TEST_ENCOUNTERS(ibs):
print('[TEST_UUID]')
img = image.imread('/home/hendrik/Pictures/ysbeer.jpg')
uuid = preproc_image.get_image_uuid(img)
uuid2 = UUID('cf55d6e3-908f-ac6c-9a5c-cd350f3e5682')
print('uuid = %r' % uuid)
print('uuid2 = %r' % uuid2)
assert uuid == uuid2, 'uuid and uuid2 do not match'
return locals()
if __name__ == '__main__':
multiprocessing.freeze_support() # For windows
import ibeis
main_locals = ibeis.main(defaultdb='testdb1', gui=False)
ibs = main_locals['ibs']
test_locals = utool.run_test(TEST_ENCOUNTERS, ibs)
execstr = utool.execstr_dict(test_locals, 'test_locals')
exec(execstr)
|
|
9f9dc6e4e737d0141774e889d587d1b89c65a9ca
|
tests/unit/fileclient_test.py
|
tests/unit/fileclient_test.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email: `Bo Maryniuk <bo@suse.de>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import patch
from mock import Mock
import errno
ensure_in_syspath('../')
from salt.fileclient import Client
class FileclientTestCase(TestCase):
'''
Fileclient test
'''
opts = {
'extension_modules': '',
'cachedir': '/__test__',
}
def _fake_makedir(self, num=errno.EEXIST):
def _side_effect(*args, **kwargs):
raise OSError(num, 'Errno {0}'.format(num))
return Mock(side_effect=_side_effect)
def test_cache_skips_makedirs_on_race_condition(self):
'''
If cache contains already a directory, do not raise an exception.
'''
with patch('os.path.isfile', lambda prm: False):
for exists in range(2):
with patch('os.makedirs', self._fake_makedir()):
with Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == '/__test__/files/base/testfile'
def test_cache_raises_exception_on_non_eexist_ioerror(self):
'''
If makedirs raises other than EEXIST errno, an exception should be raised.
'''
with patch('os.path.isfile', lambda prm: False):
with patch('os.makedirs', self._fake_makedir(num=errno.EREMOTEIO)):
with self.assertRaises(OSError):
with Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == '/__test__/files/base/testfile'
|
Add unit test case for fileclient
|
Add unit test case for fileclient
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add unit test case for fileclient
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email: `Bo Maryniuk <bo@suse.de>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import patch
from mock import Mock
import errno
ensure_in_syspath('../')
from salt.fileclient import Client
class FileclientTestCase(TestCase):
'''
Fileclient test
'''
opts = {
'extension_modules': '',
'cachedir': '/__test__',
}
def _fake_makedir(self, num=errno.EEXIST):
def _side_effect(*args, **kwargs):
raise OSError(num, 'Errno {0}'.format(num))
return Mock(side_effect=_side_effect)
def test_cache_skips_makedirs_on_race_condition(self):
'''
If cache contains already a directory, do not raise an exception.
'''
with patch('os.path.isfile', lambda prm: False):
for exists in range(2):
with patch('os.makedirs', self._fake_makedir()):
with Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == '/__test__/files/base/testfile'
def test_cache_raises_exception_on_non_eexist_ioerror(self):
'''
If makedirs raises other than EEXIST errno, an exception should be raised.
'''
with patch('os.path.isfile', lambda prm: False):
with patch('os.makedirs', self._fake_makedir(num=errno.EREMOTEIO)):
with self.assertRaises(OSError):
with Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == '/__test__/files/base/testfile'
|
<commit_before><commit_msg>Add unit test case for fileclient<commit_after>
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email: `Bo Maryniuk <bo@suse.de>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import patch
from mock import Mock
import errno
ensure_in_syspath('../')
from salt.fileclient import Client
class FileclientTestCase(TestCase):
'''
Fileclient test
'''
opts = {
'extension_modules': '',
'cachedir': '/__test__',
}
def _fake_makedir(self, num=errno.EEXIST):
def _side_effect(*args, **kwargs):
raise OSError(num, 'Errno {0}'.format(num))
return Mock(side_effect=_side_effect)
def test_cache_skips_makedirs_on_race_condition(self):
'''
If cache contains already a directory, do not raise an exception.
'''
with patch('os.path.isfile', lambda prm: False):
for exists in range(2):
with patch('os.makedirs', self._fake_makedir()):
with Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == '/__test__/files/base/testfile'
def test_cache_raises_exception_on_non_eexist_ioerror(self):
'''
If makedirs raises other than EEXIST errno, an exception should be raised.
'''
with patch('os.path.isfile', lambda prm: False):
with patch('os.makedirs', self._fake_makedir(num=errno.EREMOTEIO)):
with self.assertRaises(OSError):
with Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == '/__test__/files/base/testfile'
|
Add unit test case for fileclient# -*- coding: utf-8 -*-
'''
:codeauthor: :email: `Bo Maryniuk <bo@suse.de>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import patch
from mock import Mock
import errno
ensure_in_syspath('../')
from salt.fileclient import Client
class FileclientTestCase(TestCase):
'''
Fileclient test
'''
opts = {
'extension_modules': '',
'cachedir': '/__test__',
}
def _fake_makedir(self, num=errno.EEXIST):
def _side_effect(*args, **kwargs):
raise OSError(num, 'Errno {0}'.format(num))
return Mock(side_effect=_side_effect)
def test_cache_skips_makedirs_on_race_condition(self):
'''
If cache contains already a directory, do not raise an exception.
'''
with patch('os.path.isfile', lambda prm: False):
for exists in range(2):
with patch('os.makedirs', self._fake_makedir()):
with Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == '/__test__/files/base/testfile'
def test_cache_raises_exception_on_non_eexist_ioerror(self):
'''
If makedirs raises other than EEXIST errno, an exception should be raised.
'''
with patch('os.path.isfile', lambda prm: False):
with patch('os.makedirs', self._fake_makedir(num=errno.EREMOTEIO)):
with self.assertRaises(OSError):
with Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == '/__test__/files/base/testfile'
|
<commit_before><commit_msg>Add unit test case for fileclient<commit_after># -*- coding: utf-8 -*-
'''
:codeauthor: :email: `Bo Maryniuk <bo@suse.de>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import patch
from mock import Mock
import errno
ensure_in_syspath('../')
from salt.fileclient import Client
class FileclientTestCase(TestCase):
'''
Fileclient test
'''
opts = {
'extension_modules': '',
'cachedir': '/__test__',
}
def _fake_makedir(self, num=errno.EEXIST):
def _side_effect(*args, **kwargs):
raise OSError(num, 'Errno {0}'.format(num))
return Mock(side_effect=_side_effect)
def test_cache_skips_makedirs_on_race_condition(self):
'''
If cache contains already a directory, do not raise an exception.
'''
with patch('os.path.isfile', lambda prm: False):
for exists in range(2):
with patch('os.makedirs', self._fake_makedir()):
with Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == '/__test__/files/base/testfile'
def test_cache_raises_exception_on_non_eexist_ioerror(self):
'''
If makedirs raises other than EEXIST errno, an exception should be raised.
'''
with patch('os.path.isfile', lambda prm: False):
with patch('os.makedirs', self._fake_makedir(num=errno.EREMOTEIO)):
with self.assertRaises(OSError):
with Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == '/__test__/files/base/testfile'
|
|
b2737b0878aa588a99bae473a240e241463030d4
|
zerver/migrations/0421_migrate_pronouns_custom_profile_fields.py
|
zerver/migrations/0421_migrate_pronouns_custom_profile_fields.py
|
# Generated by Django 4.1.2 on 2022-10-21 06:31
from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
def migrate_pronouns_custom_profile_fields(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
CustomProfileField = apps.get_model("zerver", "CustomProfileField")
SHORT_TEXT = 1
PRONOUNS = 8
CustomProfileField.objects.filter(field_type=SHORT_TEXT, name__icontains="pronoun").update(
field_type=PRONOUNS
)
def reverse_migrate_pronouns_custom_profile_fields(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
CustomProfileField = apps.get_model("zerver", "CustomProfileField")
SHORT_TEXT = 1
PRONOUNS = 8
CustomProfileField.objects.filter(field_type=PRONOUNS).update(field_type=SHORT_TEXT)
class Migration(migrations.Migration):
dependencies = [
("zerver", "0420_alter_archivedmessage_realm_alter_message_realm"),
]
operations = [
migrations.RunPython(
migrate_pronouns_custom_profile_fields,
reverse_code=reverse_migrate_pronouns_custom_profile_fields,
elidable=True,
),
]
|
Add migration to migrate pronouns custom profile fields.
|
migrations: Add migration to migrate pronouns custom profile fields.
This commit adds a migration to migrate SHORT_TEXT type profile
fields for pronouns to recently added PRONOUNS type.
|
Python
|
apache-2.0
|
zulip/zulip,zulip/zulip,zulip/zulip,zulip/zulip,zulip/zulip,zulip/zulip,zulip/zulip
|
migrations: Add migration to migrate pronouns custom profile fields.
This commit adds a migration to migrate SHORT_TEXT type profile
fields for pronouns to recently added PRONOUNS type.
|
# Generated by Django 4.1.2 on 2022-10-21 06:31
from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
def migrate_pronouns_custom_profile_fields(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
CustomProfileField = apps.get_model("zerver", "CustomProfileField")
SHORT_TEXT = 1
PRONOUNS = 8
CustomProfileField.objects.filter(field_type=SHORT_TEXT, name__icontains="pronoun").update(
field_type=PRONOUNS
)
def reverse_migrate_pronouns_custom_profile_fields(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
CustomProfileField = apps.get_model("zerver", "CustomProfileField")
SHORT_TEXT = 1
PRONOUNS = 8
CustomProfileField.objects.filter(field_type=PRONOUNS).update(field_type=SHORT_TEXT)
class Migration(migrations.Migration):
dependencies = [
("zerver", "0420_alter_archivedmessage_realm_alter_message_realm"),
]
operations = [
migrations.RunPython(
migrate_pronouns_custom_profile_fields,
reverse_code=reverse_migrate_pronouns_custom_profile_fields,
elidable=True,
),
]
|
<commit_before><commit_msg>migrations: Add migration to migrate pronouns custom profile fields.
This commit adds a migration to migrate SHORT_TEXT type profile
fields for pronouns to recently added PRONOUNS type.<commit_after>
|
# Generated by Django 4.1.2 on 2022-10-21 06:31
from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
def migrate_pronouns_custom_profile_fields(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
CustomProfileField = apps.get_model("zerver", "CustomProfileField")
SHORT_TEXT = 1
PRONOUNS = 8
CustomProfileField.objects.filter(field_type=SHORT_TEXT, name__icontains="pronoun").update(
field_type=PRONOUNS
)
def reverse_migrate_pronouns_custom_profile_fields(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
CustomProfileField = apps.get_model("zerver", "CustomProfileField")
SHORT_TEXT = 1
PRONOUNS = 8
CustomProfileField.objects.filter(field_type=PRONOUNS).update(field_type=SHORT_TEXT)
class Migration(migrations.Migration):
dependencies = [
("zerver", "0420_alter_archivedmessage_realm_alter_message_realm"),
]
operations = [
migrations.RunPython(
migrate_pronouns_custom_profile_fields,
reverse_code=reverse_migrate_pronouns_custom_profile_fields,
elidable=True,
),
]
|
migrations: Add migration to migrate pronouns custom profile fields.
This commit adds a migration to migrate SHORT_TEXT type profile
fields for pronouns to recently added PRONOUNS type.# Generated by Django 4.1.2 on 2022-10-21 06:31
from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
def migrate_pronouns_custom_profile_fields(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
CustomProfileField = apps.get_model("zerver", "CustomProfileField")
SHORT_TEXT = 1
PRONOUNS = 8
CustomProfileField.objects.filter(field_type=SHORT_TEXT, name__icontains="pronoun").update(
field_type=PRONOUNS
)
def reverse_migrate_pronouns_custom_profile_fields(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
CustomProfileField = apps.get_model("zerver", "CustomProfileField")
SHORT_TEXT = 1
PRONOUNS = 8
CustomProfileField.objects.filter(field_type=PRONOUNS).update(field_type=SHORT_TEXT)
class Migration(migrations.Migration):
dependencies = [
("zerver", "0420_alter_archivedmessage_realm_alter_message_realm"),
]
operations = [
migrations.RunPython(
migrate_pronouns_custom_profile_fields,
reverse_code=reverse_migrate_pronouns_custom_profile_fields,
elidable=True,
),
]
|
<commit_before><commit_msg>migrations: Add migration to migrate pronouns custom profile fields.
This commit adds a migration to migrate SHORT_TEXT type profile
fields for pronouns to recently added PRONOUNS type.<commit_after># Generated by Django 4.1.2 on 2022-10-21 06:31
from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
def migrate_pronouns_custom_profile_fields(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
CustomProfileField = apps.get_model("zerver", "CustomProfileField")
SHORT_TEXT = 1
PRONOUNS = 8
CustomProfileField.objects.filter(field_type=SHORT_TEXT, name__icontains="pronoun").update(
field_type=PRONOUNS
)
def reverse_migrate_pronouns_custom_profile_fields(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
CustomProfileField = apps.get_model("zerver", "CustomProfileField")
SHORT_TEXT = 1
PRONOUNS = 8
CustomProfileField.objects.filter(field_type=PRONOUNS).update(field_type=SHORT_TEXT)
class Migration(migrations.Migration):
dependencies = [
("zerver", "0420_alter_archivedmessage_realm_alter_message_realm"),
]
operations = [
migrations.RunPython(
migrate_pronouns_custom_profile_fields,
reverse_code=reverse_migrate_pronouns_custom_profile_fields,
elidable=True,
),
]
|
|
6bb43bd4b1b2d5216fa2ec1c5692849c336353db
|
contrib/generate-cassandra-thrift.py
|
contrib/generate-cassandra-thrift.py
|
#!/usr/bin/env python
import sys
import os
import subprocess
if len(sys.argv) != 3:
print "Usage: generate-cassandra-thrift.py /path/to/bin/thrift cassandra-version"
sys.exit(-1)
thrift_bin = sys.argv[1]
cassandra_version = sys.argv[2]
# Sanity check our location
cur_dir_list = os.listdir( os.getcwd() )
if 'libcassandra' not in cur_dir_list or 'libgenthrift' not in cur_dir_list:
print "Run from the top-level libcassandra directory"
sys.exit(-1)
# Checkout / update cassandra checkout
if 'cassandra.git' not in cur_dir_list:
subprocess.call(['git', 'clone', 'git://git.apache.org/cassandra.git', 'cassandra.git'])
subprocess.call(['git', 'fetch', 'origin'], cwd='cassandra.git')
subprocess.call(['git', 'checkout', 'trunk'], cwd='cassandra.git')
subprocess.call(['git', 'branch', '-D', 'libcassandra'], cwd='cassandra.git')
subprocess.call(['git', 'checkout', '-b', 'libcassandra', 'cassandra-' + cassandra_version], cwd='cassandra.git')
# Run thrift
subprocess.call([thrift_bin, '--gen', 'cpp', 'cassandra.git/interface/cassandra.thrift'])
# This leaves the generated content in gen-cpp
subprocess.call(['/bin/bash', '-x', '-c', 'cp gen-cpp/* libgenthrift/'])
# There's an inconveniently named VERSION in cassandra_constants.h and
# cassandra_constants.cpp. These only affect the build of the library (they
# aren't included by any public headers), so we just need to undef VERSION for
# them.
subprocess.call(['cp', 'libgenthrift/cassandra_constants.h', 'libgenthrift/cassandra_constants.h.orig'])
subprocess.call(['sed', 's/std::string VERSION/#undef VERSION\\nstd::string VERSION/', 'libgenthrift/cassandra_constants.h.orig'], stdout=open('libgenthrift/cassandra_constants.h', 'w'))
|
Add script for generating libgenthrift Cassandra interface files using thrift.
|
Add script for generating libgenthrift Cassandra interface files using thrift.
|
Python
|
bsd-3-clause
|
xiaozhou/libcassandra,xiaozhou/libcassandra,xiaozhou/libcassandra,xiaozhou/libcassandra
|
Add script for generating libgenthrift Cassandra interface files using thrift.
|
#!/usr/bin/env python
import sys
import os
import subprocess
if len(sys.argv) != 3:
print "Usage: generate-cassandra-thrift.py /path/to/bin/thrift cassandra-version"
sys.exit(-1)
thrift_bin = sys.argv[1]
cassandra_version = sys.argv[2]
# Sanity check our location
cur_dir_list = os.listdir( os.getcwd() )
if 'libcassandra' not in cur_dir_list or 'libgenthrift' not in cur_dir_list:
print "Run from the top-level libcassandra directory"
sys.exit(-1)
# Checkout / update cassandra checkout
if 'cassandra.git' not in cur_dir_list:
subprocess.call(['git', 'clone', 'git://git.apache.org/cassandra.git', 'cassandra.git'])
subprocess.call(['git', 'fetch', 'origin'], cwd='cassandra.git')
subprocess.call(['git', 'checkout', 'trunk'], cwd='cassandra.git')
subprocess.call(['git', 'branch', '-D', 'libcassandra'], cwd='cassandra.git')
subprocess.call(['git', 'checkout', '-b', 'libcassandra', 'cassandra-' + cassandra_version], cwd='cassandra.git')
# Run thrift
subprocess.call([thrift_bin, '--gen', 'cpp', 'cassandra.git/interface/cassandra.thrift'])
# This leaves the generated content in gen-cpp
subprocess.call(['/bin/bash', '-x', '-c', 'cp gen-cpp/* libgenthrift/'])
# There's an inconveniently named VERSION in cassandra_constants.h and
# cassandra_constants.cpp. These only affect the build of the library (they
# aren't included by any public headers), so we just need to undef VERSION for
# them.
subprocess.call(['cp', 'libgenthrift/cassandra_constants.h', 'libgenthrift/cassandra_constants.h.orig'])
subprocess.call(['sed', 's/std::string VERSION/#undef VERSION\\nstd::string VERSION/', 'libgenthrift/cassandra_constants.h.orig'], stdout=open('libgenthrift/cassandra_constants.h', 'w'))
|
<commit_before><commit_msg>Add script for generating libgenthrift Cassandra interface files using thrift.<commit_after>
|
#!/usr/bin/env python
import sys
import os
import subprocess
if len(sys.argv) != 3:
print "Usage: generate-cassandra-thrift.py /path/to/bin/thrift cassandra-version"
sys.exit(-1)
thrift_bin = sys.argv[1]
cassandra_version = sys.argv[2]
# Sanity check our location
cur_dir_list = os.listdir( os.getcwd() )
if 'libcassandra' not in cur_dir_list or 'libgenthrift' not in cur_dir_list:
print "Run from the top-level libcassandra directory"
sys.exit(-1)
# Checkout / update cassandra checkout
if 'cassandra.git' not in cur_dir_list:
subprocess.call(['git', 'clone', 'git://git.apache.org/cassandra.git', 'cassandra.git'])
subprocess.call(['git', 'fetch', 'origin'], cwd='cassandra.git')
subprocess.call(['git', 'checkout', 'trunk'], cwd='cassandra.git')
subprocess.call(['git', 'branch', '-D', 'libcassandra'], cwd='cassandra.git')
subprocess.call(['git', 'checkout', '-b', 'libcassandra', 'cassandra-' + cassandra_version], cwd='cassandra.git')
# Run thrift
subprocess.call([thrift_bin, '--gen', 'cpp', 'cassandra.git/interface/cassandra.thrift'])
# This leaves the generated content in gen-cpp
subprocess.call(['/bin/bash', '-x', '-c', 'cp gen-cpp/* libgenthrift/'])
# There's an inconveniently named VERSION in cassandra_constants.h and
# cassandra_constants.cpp. These only affect the build of the library (they
# aren't included by any public headers), so we just need to undef VERSION for
# them.
subprocess.call(['cp', 'libgenthrift/cassandra_constants.h', 'libgenthrift/cassandra_constants.h.orig'])
subprocess.call(['sed', 's/std::string VERSION/#undef VERSION\\nstd::string VERSION/', 'libgenthrift/cassandra_constants.h.orig'], stdout=open('libgenthrift/cassandra_constants.h', 'w'))
|
Add script for generating libgenthrift Cassandra interface files using thrift.#!/usr/bin/env python
import sys
import os
import subprocess
if len(sys.argv) != 3:
print "Usage: generate-cassandra-thrift.py /path/to/bin/thrift cassandra-version"
sys.exit(-1)
thrift_bin = sys.argv[1]
cassandra_version = sys.argv[2]
# Sanity check our location
cur_dir_list = os.listdir( os.getcwd() )
if 'libcassandra' not in cur_dir_list or 'libgenthrift' not in cur_dir_list:
print "Run from the top-level libcassandra directory"
sys.exit(-1)
# Checkout / update cassandra checkout
if 'cassandra.git' not in cur_dir_list:
subprocess.call(['git', 'clone', 'git://git.apache.org/cassandra.git', 'cassandra.git'])
subprocess.call(['git', 'fetch', 'origin'], cwd='cassandra.git')
subprocess.call(['git', 'checkout', 'trunk'], cwd='cassandra.git')
subprocess.call(['git', 'branch', '-D', 'libcassandra'], cwd='cassandra.git')
subprocess.call(['git', 'checkout', '-b', 'libcassandra', 'cassandra-' + cassandra_version], cwd='cassandra.git')
# Run thrift
subprocess.call([thrift_bin, '--gen', 'cpp', 'cassandra.git/interface/cassandra.thrift'])
# This leaves the generated content in gen-cpp
subprocess.call(['/bin/bash', '-x', '-c', 'cp gen-cpp/* libgenthrift/'])
# There's an inconveniently named VERSION in cassandra_constants.h and
# cassandra_constants.cpp. These only affect the build of the library (they
# aren't included by any public headers), so we just need to undef VERSION for
# them.
subprocess.call(['cp', 'libgenthrift/cassandra_constants.h', 'libgenthrift/cassandra_constants.h.orig'])
subprocess.call(['sed', 's/std::string VERSION/#undef VERSION\\nstd::string VERSION/', 'libgenthrift/cassandra_constants.h.orig'], stdout=open('libgenthrift/cassandra_constants.h', 'w'))
|
<commit_before><commit_msg>Add script for generating libgenthrift Cassandra interface files using thrift.<commit_after>#!/usr/bin/env python
import sys
import os
import subprocess
if len(sys.argv) != 3:
print "Usage: generate-cassandra-thrift.py /path/to/bin/thrift cassandra-version"
sys.exit(-1)
thrift_bin = sys.argv[1]
cassandra_version = sys.argv[2]
# Sanity check our location
cur_dir_list = os.listdir( os.getcwd() )
if 'libcassandra' not in cur_dir_list or 'libgenthrift' not in cur_dir_list:
print "Run from the top-level libcassandra directory"
sys.exit(-1)
# Checkout / update cassandra checkout
if 'cassandra.git' not in cur_dir_list:
subprocess.call(['git', 'clone', 'git://git.apache.org/cassandra.git', 'cassandra.git'])
subprocess.call(['git', 'fetch', 'origin'], cwd='cassandra.git')
subprocess.call(['git', 'checkout', 'trunk'], cwd='cassandra.git')
subprocess.call(['git', 'branch', '-D', 'libcassandra'], cwd='cassandra.git')
subprocess.call(['git', 'checkout', '-b', 'libcassandra', 'cassandra-' + cassandra_version], cwd='cassandra.git')
# Run thrift
subprocess.call([thrift_bin, '--gen', 'cpp', 'cassandra.git/interface/cassandra.thrift'])
# This leaves the generated content in gen-cpp
subprocess.call(['/bin/bash', '-x', '-c', 'cp gen-cpp/* libgenthrift/'])
# There's an inconveniently named VERSION in cassandra_constants.h and
# cassandra_constants.cpp. These only affect the build of the library (they
# aren't included by any public headers), so we just need to undef VERSION for
# them.
subprocess.call(['cp', 'libgenthrift/cassandra_constants.h', 'libgenthrift/cassandra_constants.h.orig'])
subprocess.call(['sed', 's/std::string VERSION/#undef VERSION\\nstd::string VERSION/', 'libgenthrift/cassandra_constants.h.orig'], stdout=open('libgenthrift/cassandra_constants.h', 'w'))
|
|
4b38f9d3760c7e05665436e1e562909db8bfd5d0
|
xgb_cv3.py
|
xgb_cv3.py
|
# WITH CATEGORICAL VARIABLES CONVERTED TO CON_PROB VARIABLES
# import libraries
import pandas as pd
import xgboost as xgb
import re
# %matplotlib inline
# import logger.py
from logger import logger
logger.info('Start modelling.py')
# load xgb matrix from binary
train = xgb.DMatrix('train_proc_full3.buffer')
logger.info('xgb_matrix loaded')
# Run xgb
# initialize xgb params
param = {'eta': 0.0375,
'gamma': 0.75,
'max_depth': 14,
'min_child_weight': 15,
'sub_sample': 0.85,
'colsample_bytree': 0.75,
'alpha': 3,
'objective': 'binary:logistic',
'eval_metric': 'auc',
'seed': 0}
num_round = 2000
auc_hist = xgb.cv(params = param, dtrain = train, num_boost_round = num_round,
nfold = 5, seed = 668, show_stdv = False)
### get auc from train and test
# extract auc from string values
auc_test = {}
auc_train = {}
for row, auc in enumerate(auc_hist):
auc_test[row] = re.search(r'cv-test-auc:(.*)\s', auc).group(1)
auc_train[row] = re.search(r'cv-train-auc:(.*)', auc).group(1)
# create auc dataframe
auc_test_df = pd.DataFrame(auc_test.items(), columns = ['rounds', 'auc_test'])
auc_train_df = pd.DataFrame(auc_train.items(), columns = ['rounds', 'auc_train'])
auc_df = auc_train_df.merge(auc_test_df, on = 'rounds')
auc_df = auc_df.astype('float')
auc_df.to_csv('auc_cv3.csv', index = False)
|
Add cross validation script for model.
|
Add cross validation script for model.
|
Python
|
mit
|
eugeneyan/kaggle_springleaf
|
Add cross validation script for model.
|
# WITH CATEGORICAL VARIABLES CONVERTED TO CON_PROB VARIABLES
# import libraries
import pandas as pd
import xgboost as xgb
import re
# %matplotlib inline
# import logger.py
from logger import logger
logger.info('Start modelling.py')
# load xgb matrix from binary
train = xgb.DMatrix('train_proc_full3.buffer')
logger.info('xgb_matrix loaded')
# Run xgb
# initialize xgb params
param = {'eta': 0.0375,
'gamma': 0.75,
'max_depth': 14,
'min_child_weight': 15,
'sub_sample': 0.85,
'colsample_bytree': 0.75,
'alpha': 3,
'objective': 'binary:logistic',
'eval_metric': 'auc',
'seed': 0}
num_round = 2000
auc_hist = xgb.cv(params = param, dtrain = train, num_boost_round = num_round,
nfold = 5, seed = 668, show_stdv = False)
### get auc from train and test
# extract auc from string values
auc_test = {}
auc_train = {}
for row, auc in enumerate(auc_hist):
auc_test[row] = re.search(r'cv-test-auc:(.*)\s', auc).group(1)
auc_train[row] = re.search(r'cv-train-auc:(.*)', auc).group(1)
# create auc dataframe
auc_test_df = pd.DataFrame(auc_test.items(), columns = ['rounds', 'auc_test'])
auc_train_df = pd.DataFrame(auc_train.items(), columns = ['rounds', 'auc_train'])
auc_df = auc_train_df.merge(auc_test_df, on = 'rounds')
auc_df = auc_df.astype('float')
auc_df.to_csv('auc_cv3.csv', index = False)
|
<commit_before><commit_msg>Add cross validation script for model.<commit_after>
|
# WITH CATEGORICAL VARIABLES CONVERTED TO CON_PROB VARIABLES
# import libraries
import pandas as pd
import xgboost as xgb
import re
# %matplotlib inline
# import logger.py
from logger import logger
logger.info('Start modelling.py')
# load xgb matrix from binary
train = xgb.DMatrix('train_proc_full3.buffer')
logger.info('xgb_matrix loaded')
# Run xgb
# initialize xgb params
param = {'eta': 0.0375,
'gamma': 0.75,
'max_depth': 14,
'min_child_weight': 15,
'sub_sample': 0.85,
'colsample_bytree': 0.75,
'alpha': 3,
'objective': 'binary:logistic',
'eval_metric': 'auc',
'seed': 0}
num_round = 2000
auc_hist = xgb.cv(params = param, dtrain = train, num_boost_round = num_round,
nfold = 5, seed = 668, show_stdv = False)
### get auc from train and test
# extract auc from string values
auc_test = {}
auc_train = {}
for row, auc in enumerate(auc_hist):
auc_test[row] = re.search(r'cv-test-auc:(.*)\s', auc).group(1)
auc_train[row] = re.search(r'cv-train-auc:(.*)', auc).group(1)
# create auc dataframe
auc_test_df = pd.DataFrame(auc_test.items(), columns = ['rounds', 'auc_test'])
auc_train_df = pd.DataFrame(auc_train.items(), columns = ['rounds', 'auc_train'])
auc_df = auc_train_df.merge(auc_test_df, on = 'rounds')
auc_df = auc_df.astype('float')
auc_df.to_csv('auc_cv3.csv', index = False)
|
Add cross validation script for model.# WITH CATEGORICAL VARIABLES CONVERTED TO CON_PROB VARIABLES
# import libraries
import pandas as pd
import xgboost as xgb
import re
# %matplotlib inline
# import logger.py
from logger import logger
logger.info('Start modelling.py')
# load xgb matrix from binary
train = xgb.DMatrix('train_proc_full3.buffer')
logger.info('xgb_matrix loaded')
# Run xgb
# initialize xgb params
param = {'eta': 0.0375,
'gamma': 0.75,
'max_depth': 14,
'min_child_weight': 15,
'sub_sample': 0.85,
'colsample_bytree': 0.75,
'alpha': 3,
'objective': 'binary:logistic',
'eval_metric': 'auc',
'seed': 0}
num_round = 2000
auc_hist = xgb.cv(params = param, dtrain = train, num_boost_round = num_round,
nfold = 5, seed = 668, show_stdv = False)
### get auc from train and test
# extract auc from string values
auc_test = {}
auc_train = {}
for row, auc in enumerate(auc_hist):
auc_test[row] = re.search(r'cv-test-auc:(.*)\s', auc).group(1)
auc_train[row] = re.search(r'cv-train-auc:(.*)', auc).group(1)
# create auc dataframe
auc_test_df = pd.DataFrame(auc_test.items(), columns = ['rounds', 'auc_test'])
auc_train_df = pd.DataFrame(auc_train.items(), columns = ['rounds', 'auc_train'])
auc_df = auc_train_df.merge(auc_test_df, on = 'rounds')
auc_df = auc_df.astype('float')
auc_df.to_csv('auc_cv3.csv', index = False)
|
<commit_before><commit_msg>Add cross validation script for model.<commit_after># WITH CATEGORICAL VARIABLES CONVERTED TO CON_PROB VARIABLES
# import libraries
import pandas as pd
import xgboost as xgb
import re
# %matplotlib inline
# import logger.py
from logger import logger
logger.info('Start modelling.py')
# load xgb matrix from binary
train = xgb.DMatrix('train_proc_full3.buffer')
logger.info('xgb_matrix loaded')
# Run xgb
# initialize xgb params
param = {'eta': 0.0375,
'gamma': 0.75,
'max_depth': 14,
'min_child_weight': 15,
'sub_sample': 0.85,
'colsample_bytree': 0.75,
'alpha': 3,
'objective': 'binary:logistic',
'eval_metric': 'auc',
'seed': 0}
num_round = 2000
auc_hist = xgb.cv(params = param, dtrain = train, num_boost_round = num_round,
nfold = 5, seed = 668, show_stdv = False)
### get auc from train and test
# extract auc from string values
auc_test = {}
auc_train = {}
for row, auc in enumerate(auc_hist):
auc_test[row] = re.search(r'cv-test-auc:(.*)\s', auc).group(1)
auc_train[row] = re.search(r'cv-train-auc:(.*)', auc).group(1)
# create auc dataframe
auc_test_df = pd.DataFrame(auc_test.items(), columns = ['rounds', 'auc_test'])
auc_train_df = pd.DataFrame(auc_train.items(), columns = ['rounds', 'auc_train'])
auc_df = auc_train_df.merge(auc_test_df, on = 'rounds')
auc_df = auc_df.astype('float')
auc_df.to_csv('auc_cv3.csv', index = False)
|
|
cdb46409e1e5dc8e0766450f98f05278afeba1d4
|
molo/core/management/commands/remove_all_featured_articles.py
|
molo/core/management/commands/remove_all_featured_articles.py
|
from __future__ import absolute_import, unicode_literals
from django.core.management.base import BaseCommand
from molo.core.models import ArticlePage
class Command(BaseCommand):
def handle(self, **options):
article_pages = ArticlePage.objects.all()
for article in article_pages:
if article.featured_in_latest or \
article.featured_in_latest_start_date or \
article.featured_in_latest_end_date:
article.featured_in_latest = False
article.featured_in_latest_start_date = None
article.featured_in_latest_end_date = None
article.save_revision().publish()
print "Promoted Article in latest--->", article.title, "<---"
if article.featured_in_homepage or \
article.featured_in_homepage_start_date or \
article.featured_in_homepage_end_date:
article.featured_in_homepage = False
article.featured_in_homepage_start_date = None
article.featured_in_homepage_end_date = None
article.save_revision().publish()
print "Promoted Article in Home Page--->", article.title, "<---"
|
Add remove all featured_articles management command
|
Add remove all featured_articles management command
|
Python
|
bsd-2-clause
|
praekelt/molo,praekelt/molo,praekelt/molo,praekelt/molo
|
Add remove all featured_articles management command
|
from __future__ import absolute_import, unicode_literals
from django.core.management.base import BaseCommand
from molo.core.models import ArticlePage
class Command(BaseCommand):
def handle(self, **options):
article_pages = ArticlePage.objects.all()
for article in article_pages:
if article.featured_in_latest or \
article.featured_in_latest_start_date or \
article.featured_in_latest_end_date:
article.featured_in_latest = False
article.featured_in_latest_start_date = None
article.featured_in_latest_end_date = None
article.save_revision().publish()
print "Promoted Article in latest--->", article.title, "<---"
if article.featured_in_homepage or \
article.featured_in_homepage_start_date or \
article.featured_in_homepage_end_date:
article.featured_in_homepage = False
article.featured_in_homepage_start_date = None
article.featured_in_homepage_end_date = None
article.save_revision().publish()
print "Promoted Article in Home Page--->", article.title, "<---"
|
<commit_before><commit_msg>Add remove all featured_articles management command<commit_after>
|
from __future__ import absolute_import, unicode_literals
from django.core.management.base import BaseCommand
from molo.core.models import ArticlePage
class Command(BaseCommand):
def handle(self, **options):
article_pages = ArticlePage.objects.all()
for article in article_pages:
if article.featured_in_latest or \
article.featured_in_latest_start_date or \
article.featured_in_latest_end_date:
article.featured_in_latest = False
article.featured_in_latest_start_date = None
article.featured_in_latest_end_date = None
article.save_revision().publish()
print "Promoted Article in latest--->", article.title, "<---"
if article.featured_in_homepage or \
article.featured_in_homepage_start_date or \
article.featured_in_homepage_end_date:
article.featured_in_homepage = False
article.featured_in_homepage_start_date = None
article.featured_in_homepage_end_date = None
article.save_revision().publish()
print "Promoted Article in Home Page--->", article.title, "<---"
|
Add remove all featured_articles management commandfrom __future__ import absolute_import, unicode_literals
from django.core.management.base import BaseCommand
from molo.core.models import ArticlePage
class Command(BaseCommand):
def handle(self, **options):
article_pages = ArticlePage.objects.all()
for article in article_pages:
if article.featured_in_latest or \
article.featured_in_latest_start_date or \
article.featured_in_latest_end_date:
article.featured_in_latest = False
article.featured_in_latest_start_date = None
article.featured_in_latest_end_date = None
article.save_revision().publish()
print "Promoted Article in latest--->", article.title, "<---"
if article.featured_in_homepage or \
article.featured_in_homepage_start_date or \
article.featured_in_homepage_end_date:
article.featured_in_homepage = False
article.featured_in_homepage_start_date = None
article.featured_in_homepage_end_date = None
article.save_revision().publish()
print "Promoted Article in Home Page--->", article.title, "<---"
|
<commit_before><commit_msg>Add remove all featured_articles management command<commit_after>from __future__ import absolute_import, unicode_literals
from django.core.management.base import BaseCommand
from molo.core.models import ArticlePage
class Command(BaseCommand):
def handle(self, **options):
article_pages = ArticlePage.objects.all()
for article in article_pages:
if article.featured_in_latest or \
article.featured_in_latest_start_date or \
article.featured_in_latest_end_date:
article.featured_in_latest = False
article.featured_in_latest_start_date = None
article.featured_in_latest_end_date = None
article.save_revision().publish()
print "Promoted Article in latest--->", article.title, "<---"
if article.featured_in_homepage or \
article.featured_in_homepage_start_date or \
article.featured_in_homepage_end_date:
article.featured_in_homepage = False
article.featured_in_homepage_start_date = None
article.featured_in_homepage_end_date = None
article.save_revision().publish()
print "Promoted Article in Home Page--->", article.title, "<---"
|
|
70d822c9f0ab799ab44e3707ba345782b36fad3f
|
zvm/zstring.py
|
zvm/zstring.py
|
#
# A ZString-to-ASCII Universal Translator.
#
# For the license of this file, please consult the LICENSE file in the
# root directory of this distribution.
#
class ZStringEndOfString(Exception):
"""No more data left in string."""
class ZStringStream(object):
"""This class takes an address and a ZMemory, and treats that as
    the beginning of a ZString. Subsequent calls to get() will return
one ZChar code at a time, raising ZStringEndOfString when there is
no more data."""
def __init__(self, zmem, addr):
self._mem = zmem
self._addr = addr
self._has_ended = False
self._get_block()
def _get_block(self):
from bitfield import BitField
chunk = self._mem[self._addr:self._addr+2]
print chunk
self._data = BitField(''.join([chr(x) for x in chunk]))
self._addr += 2
self._char_in_block = 0
def get(self, num=1):
if self._has_ended:
raise ZStringEndOfString
offset = self._char_in_block * 5
print offset
zchar = self._data[offset:offset+5]
if self._char_in_block == 2:
# If end-of-string marker is set...
if self._data[15] == 1:
self._has_ended = True
else:
self._get_block()
else:
self._char_in_block += 1
return zchar
|
Add an unfinished version of the ZString stream translator.
|
Add an unfinished version of the ZString stream translator.
* zvm/zstring.py: New file.
|
Python
|
bsd-3-clause
|
BGCX262/zvm-hg-to-git,BGCX262/zvm-hg-to-git
|
Add an unfinished version of the ZString stream translator.
* zvm/zstring.py: New file.
|
#
# A ZString-to-ASCII Universal Translator.
#
# For the license of this file, please consult the LICENSE file in the
# root directory of this distribution.
#
class ZStringEndOfString(Exception):
"""No more data left in string."""
class ZStringStream(object):
"""This class takes an address and a ZMemory, and treats that as
    the beginning of a ZString. Subsequent calls to get() will return
one ZChar code at a time, raising ZStringEndOfString when there is
no more data."""
def __init__(self, zmem, addr):
self._mem = zmem
self._addr = addr
self._has_ended = False
self._get_block()
def _get_block(self):
from bitfield import BitField
chunk = self._mem[self._addr:self._addr+2]
print chunk
self._data = BitField(''.join([chr(x) for x in chunk]))
self._addr += 2
self._char_in_block = 0
def get(self, num=1):
if self._has_ended:
raise ZStringEndOfString
offset = self._char_in_block * 5
print offset
zchar = self._data[offset:offset+5]
if self._char_in_block == 2:
# If end-of-string marker is set...
if self._data[15] == 1:
self._has_ended = True
else:
self._get_block()
else:
self._char_in_block += 1
return zchar
|
<commit_before><commit_msg>Add an unfinished version of the ZString stream translator.
* zvm/zstring.py: New file.<commit_after>
|
#
# A ZString-to-ASCII Universal Translator.
#
# For the license of this file, please consult the LICENSE file in the
# root directory of this distribution.
#
class ZStringEndOfString(Exception):
"""No more data left in string."""
class ZStringStream(object):
"""This class takes an address and a ZMemory, and treats that as
    the beginning of a ZString. Subsequent calls to get() will return
one ZChar code at a time, raising ZStringEndOfString when there is
no more data."""
def __init__(self, zmem, addr):
self._mem = zmem
self._addr = addr
self._has_ended = False
self._get_block()
def _get_block(self):
from bitfield import BitField
chunk = self._mem[self._addr:self._addr+2]
print chunk
self._data = BitField(''.join([chr(x) for x in chunk]))
self._addr += 2
self._char_in_block = 0
def get(self, num=1):
if self._has_ended:
raise ZStringEndOfString
offset = self._char_in_block * 5
print offset
zchar = self._data[offset:offset+5]
if self._char_in_block == 2:
# If end-of-string marker is set...
if self._data[15] == 1:
self._has_ended = True
else:
self._get_block()
else:
self._char_in_block += 1
return zchar
|
Add an unfinished version of the ZString stream translator.
* zvm/zstring.py: New file.#
# A ZString-to-ASCII Universal Translator.
#
# For the license of this file, please consult the LICENSE file in the
# root directory of this distribution.
#
class ZStringEndOfString(Exception):
"""No more data left in string."""
class ZStringStream(object):
"""This class takes an address and a ZMemory, and treats that as
    the beginning of a ZString. Subsequent calls to get() will return
one ZChar code at a time, raising ZStringEndOfString when there is
no more data."""
def __init__(self, zmem, addr):
self._mem = zmem
self._addr = addr
self._has_ended = False
self._get_block()
def _get_block(self):
from bitfield import BitField
chunk = self._mem[self._addr:self._addr+2]
print chunk
self._data = BitField(''.join([chr(x) for x in chunk]))
self._addr += 2
self._char_in_block = 0
def get(self, num=1):
if self._has_ended:
raise ZStringEndOfString
offset = self._char_in_block * 5
print offset
zchar = self._data[offset:offset+5]
if self._char_in_block == 2:
# If end-of-string marker is set...
if self._data[15] == 1:
self._has_ended = True
else:
self._get_block()
else:
self._char_in_block += 1
return zchar
|
<commit_before><commit_msg>Add an unfinished version of the ZString stream translator.
* zvm/zstring.py: New file.<commit_after>#
# A ZString-to-ASCII Universal Translator.
#
# For the license of this file, please consult the LICENSE file in the
# root directory of this distribution.
#
class ZStringEndOfString(Exception):
"""No more data left in string."""
class ZStringStream(object):
"""This class takes an address and a ZMemory, and treats that as
    the beginning of a ZString. Subsequent calls to get() will return
one ZChar code at a time, raising ZStringEndOfString when there is
no more data."""
def __init__(self, zmem, addr):
self._mem = zmem
self._addr = addr
self._has_ended = False
self._get_block()
def _get_block(self):
from bitfield import BitField
chunk = self._mem[self._addr:self._addr+2]
print chunk
self._data = BitField(''.join([chr(x) for x in chunk]))
self._addr += 2
self._char_in_block = 0
def get(self, num=1):
if self._has_ended:
raise ZStringEndOfString
offset = self._char_in_block * 5
print offset
zchar = self._data[offset:offset+5]
if self._char_in_block == 2:
# If end-of-string marker is set...
if self._data[15] == 1:
self._has_ended = True
else:
self._get_block()
else:
self._char_in_block += 1
return zchar
|
|
901954e944fad1ea1b9aa3510646814397712757
|
d1_libclient_python/src/examples/compare_object_lists.py
|
d1_libclient_python/src/examples/compare_object_lists.py
|
#!/usr/bin/env python
import logging
import d1_client.objectlistiterator
import d1_client.mnclient_2_0
import d1_client.cnclient_2_0
import d1_common.const
# Check for discrepancies between MN and CN by comparing object lists
#CN_BASE_URL = d1_common.const.URL_CN_BASE_URL
CN_BASE_URL = 'https://cn-stage.test.dataone.org/cn'
#CN_BASE_URL = 'https://cn-sandbox.test.dataone.org/cn'
#CN_BASE_URL = 'https://cn-dev.test.dataone.org/cn'
NODE_ID = 'urn:node:mnTestR2R'
def main():
logging.basicConfig(level=logging.INFO)
node_pyxb = find_node(NODE_ID)
if node_pyxb is None:
print 'Node not found: {}'.format(NODE_ID)
return
if node_pyxb.type != 'mn':
    print 'Expected NodeID to be for an MN. Found a {}'.format(node_pyxb.type.upper())
return
print 'BaseURL: {}'.format(node_pyxb.baseURL)
mn_base_url = node_pyxb.baseURL
mn_client = d1_client.mnclient_2_0.MemberNodeClient_2_0(mn_base_url)
pid_a_dict = get_object_dict(mn_client)
cn_client = d1_client.cnclient_2_0.CoordinatingNodeClient_2_0(CN_BASE_URL)
pid_b_dict = get_object_dict(cn_client, node_id=NODE_ID)
  dump_unique(pid_a_dict, pid_b_dict, mn_base_url)
  dump_unique(pid_b_dict, pid_a_dict, CN_BASE_URL)
def dump_unique(from_dict, not_in_dict, base_url):
only_pid_set = set(from_dict.keys()).difference(set(not_in_dict.keys()))
print '{} only in {}:'.format(len(only_pid_set), base_url)
for pid_str in sorted(only_pid_set, key=lambda x: from_dict[x].dateSysMetadataModified):
print ' {} {}'.format(pid_str, from_dict[pid_str].dateSysMetadataModified)
def get_object_dict(client, node_id=None):
pid_dict = {}
for object_info in d1_client.objectlistiterator.ObjectListIterator(client, nodeId=node_id):
pid_dict[object_info.identifier.value()] = object_info
return pid_dict
def find_node(node_id):
cn_client = d1_client.cnclient_2_0.CoordinatingNodeClient_2_0(CN_BASE_URL)
for node_pyxb in cn_client.listNodes().node:
if node_pyxb.identifier.value() == node_id:
return node_pyxb
if __name__ == '__main__':
main()
|
Add example on how to read and compare ObjectLists
|
Add example on how to read and compare ObjectLists
|
Python
|
apache-2.0
|
DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python
|
Add example on how to read and compare ObjectLists
|
#!/usr/bin/env python
import logging
import d1_client.objectlistiterator
import d1_client.mnclient_2_0
import d1_client.cnclient_2_0
import d1_common.const
# Check for discrepancies between MN and CN by comparing object lists
#CN_BASE_URL = d1_common.const.URL_CN_BASE_URL
CN_BASE_URL = 'https://cn-stage.test.dataone.org/cn'
#CN_BASE_URL = 'https://cn-sandbox.test.dataone.org/cn'
#CN_BASE_URL = 'https://cn-dev.test.dataone.org/cn'
NODE_ID = 'urn:node:mnTestR2R'
def main():
logging.basicConfig(level=logging.INFO)
node_pyxb = find_node(NODE_ID)
if node_pyxb is None:
print 'Node not found: {}'.format(NODE_ID)
return
if node_pyxb.type != 'mn':
    print 'Expected NodeID to be for an MN. Found a {}'.format(node_pyxb.type.upper())
return
print 'BaseURL: {}'.format(node_pyxb.baseURL)
mn_base_url = node_pyxb.baseURL
mn_client = d1_client.mnclient_2_0.MemberNodeClient_2_0(mn_base_url)
pid_a_dict = get_object_dict(mn_client)
cn_client = d1_client.cnclient_2_0.CoordinatingNodeClient_2_0(CN_BASE_URL)
pid_b_dict = get_object_dict(cn_client, node_id=NODE_ID)
  dump_unique(pid_a_dict, pid_b_dict, mn_base_url)
  dump_unique(pid_b_dict, pid_a_dict, CN_BASE_URL)
def dump_unique(from_dict, not_in_dict, base_url):
only_pid_set = set(from_dict.keys()).difference(set(not_in_dict.keys()))
print '{} only in {}:'.format(len(only_pid_set), base_url)
for pid_str in sorted(only_pid_set, key=lambda x: from_dict[x].dateSysMetadataModified):
print ' {} {}'.format(pid_str, from_dict[pid_str].dateSysMetadataModified)
def get_object_dict(client, node_id=None):
pid_dict = {}
for object_info in d1_client.objectlistiterator.ObjectListIterator(client, nodeId=node_id):
pid_dict[object_info.identifier.value()] = object_info
return pid_dict
def find_node(node_id):
cn_client = d1_client.cnclient_2_0.CoordinatingNodeClient_2_0(CN_BASE_URL)
for node_pyxb in cn_client.listNodes().node:
if node_pyxb.identifier.value() == node_id:
return node_pyxb
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add example on how to read and compare ObjectLists<commit_after>
|
#!/usr/bin/env python
import logging
import d1_client.objectlistiterator
import d1_client.mnclient_2_0
import d1_client.cnclient_2_0
import d1_common.const
# Check for discrepancies between MN and CN by comparing object lists
#CN_BASE_URL = d1_common.const.URL_CN_BASE_URL
CN_BASE_URL = 'https://cn-stage.test.dataone.org/cn'
#CN_BASE_URL = 'https://cn-sandbox.test.dataone.org/cn'
#CN_BASE_URL = 'https://cn-dev.test.dataone.org/cn'
NODE_ID = 'urn:node:mnTestR2R'
def main():
logging.basicConfig(level=logging.INFO)
node_pyxb = find_node(NODE_ID)
if node_pyxb is None:
print 'Node not found: {}'.format(NODE_ID)
return
if node_pyxb.type != 'mn':
    print 'Expected NodeID to be for an MN. Found a {}'.format(node_pyxb.type.upper())
return
print 'BaseURL: {}'.format(node_pyxb.baseURL)
mn_base_url = node_pyxb.baseURL
mn_client = d1_client.mnclient_2_0.MemberNodeClient_2_0(mn_base_url)
pid_a_dict = get_object_dict(mn_client)
cn_client = d1_client.cnclient_2_0.CoordinatingNodeClient_2_0(CN_BASE_URL)
pid_b_dict = get_object_dict(cn_client, node_id=NODE_ID)
  dump_unique(pid_a_dict, pid_b_dict, mn_base_url)
  dump_unique(pid_b_dict, pid_a_dict, CN_BASE_URL)
def dump_unique(from_dict, not_in_dict, base_url):
only_pid_set = set(from_dict.keys()).difference(set(not_in_dict.keys()))
print '{} only in {}:'.format(len(only_pid_set), base_url)
for pid_str in sorted(only_pid_set, key=lambda x: from_dict[x].dateSysMetadataModified):
print ' {} {}'.format(pid_str, from_dict[pid_str].dateSysMetadataModified)
def get_object_dict(client, node_id=None):
pid_dict = {}
for object_info in d1_client.objectlistiterator.ObjectListIterator(client, nodeId=node_id):
pid_dict[object_info.identifier.value()] = object_info
return pid_dict
def find_node(node_id):
cn_client = d1_client.cnclient_2_0.CoordinatingNodeClient_2_0(CN_BASE_URL)
for node_pyxb in cn_client.listNodes().node:
if node_pyxb.identifier.value() == node_id:
return node_pyxb
if __name__ == '__main__':
main()
|
Add example on how to read and compare ObjectLists#!/usr/bin/env python
import logging
import d1_client.objectlistiterator
import d1_client.mnclient_2_0
import d1_client.cnclient_2_0
import d1_common.const
# Check for discrepancies between MN and CN by comparing object lists
#CN_BASE_URL = d1_common.const.URL_CN_BASE_URL
CN_BASE_URL = 'https://cn-stage.test.dataone.org/cn'
#CN_BASE_URL = 'https://cn-sandbox.test.dataone.org/cn'
#CN_BASE_URL = 'https://cn-dev.test.dataone.org/cn'
NODE_ID = 'urn:node:mnTestR2R'
def main():
logging.basicConfig(level=logging.INFO)
node_pyxb = find_node(NODE_ID)
if node_pyxb is None:
print 'Node not found: {}'.format(NODE_ID)
return
if node_pyxb.type != 'mn':
    print 'Expected NodeID to be for an MN. Found a {}'.format(node_pyxb.type.upper())
return
print 'BaseURL: {}'.format(node_pyxb.baseURL)
mn_base_url = node_pyxb.baseURL
mn_client = d1_client.mnclient_2_0.MemberNodeClient_2_0(mn_base_url)
pid_a_dict = get_object_dict(mn_client)
cn_client = d1_client.cnclient_2_0.CoordinatingNodeClient_2_0(CN_BASE_URL)
pid_b_dict = get_object_dict(cn_client, node_id=NODE_ID)
  dump_unique(pid_a_dict, pid_b_dict, mn_base_url)
  dump_unique(pid_b_dict, pid_a_dict, CN_BASE_URL)
def dump_unique(from_dict, not_in_dict, base_url):
only_pid_set = set(from_dict.keys()).difference(set(not_in_dict.keys()))
print '{} only in {}:'.format(len(only_pid_set), base_url)
for pid_str in sorted(only_pid_set, key=lambda x: from_dict[x].dateSysMetadataModified):
print ' {} {}'.format(pid_str, from_dict[pid_str].dateSysMetadataModified)
def get_object_dict(client, node_id=None):
pid_dict = {}
for object_info in d1_client.objectlistiterator.ObjectListIterator(client, nodeId=node_id):
pid_dict[object_info.identifier.value()] = object_info
return pid_dict
def find_node(node_id):
cn_client = d1_client.cnclient_2_0.CoordinatingNodeClient_2_0(CN_BASE_URL)
for node_pyxb in cn_client.listNodes().node:
if node_pyxb.identifier.value() == node_id:
return node_pyxb
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add example on how to read and compare ObjectLists<commit_after>#!/usr/bin/env python
import logging
import d1_client.objectlistiterator
import d1_client.mnclient_2_0
import d1_client.cnclient_2_0
import d1_common.const
# Check for discrepancies between MN and CN by comparing object lists
#CN_BASE_URL = d1_common.const.URL_CN_BASE_URL
CN_BASE_URL = 'https://cn-stage.test.dataone.org/cn'
#CN_BASE_URL = 'https://cn-sandbox.test.dataone.org/cn'
#CN_BASE_URL = 'https://cn-dev.test.dataone.org/cn'
NODE_ID = 'urn:node:mnTestR2R'
def main():
logging.basicConfig(level=logging.INFO)
node_pyxb = find_node(NODE_ID)
if node_pyxb is None:
print 'Node not found: {}'.format(NODE_ID)
return
if node_pyxb.type != 'mn':
    print 'Expected NodeID to be for an MN. Found a {}'.format(node_pyxb.type.upper())
return
print 'BaseURL: {}'.format(node_pyxb.baseURL)
mn_base_url = node_pyxb.baseURL
mn_client = d1_client.mnclient_2_0.MemberNodeClient_2_0(mn_base_url)
pid_a_dict = get_object_dict(mn_client)
cn_client = d1_client.cnclient_2_0.CoordinatingNodeClient_2_0(CN_BASE_URL)
pid_b_dict = get_object_dict(cn_client, node_id=NODE_ID)
  dump_unique(pid_a_dict, pid_b_dict, mn_base_url)
  dump_unique(pid_b_dict, pid_a_dict, CN_BASE_URL)
def dump_unique(from_dict, not_in_dict, base_url):
only_pid_set = set(from_dict.keys()).difference(set(not_in_dict.keys()))
print '{} only in {}:'.format(len(only_pid_set), base_url)
for pid_str in sorted(only_pid_set, key=lambda x: from_dict[x].dateSysMetadataModified):
print ' {} {}'.format(pid_str, from_dict[pid_str].dateSysMetadataModified)
def get_object_dict(client, node_id=None):
pid_dict = {}
for object_info in d1_client.objectlistiterator.ObjectListIterator(client, nodeId=node_id):
pid_dict[object_info.identifier.value()] = object_info
return pid_dict
def find_node(node_id):
cn_client = d1_client.cnclient_2_0.CoordinatingNodeClient_2_0(CN_BASE_URL)
for node_pyxb in cn_client.listNodes().node:
if node_pyxb.identifier.value() == node_id:
return node_pyxb
if __name__ == '__main__':
main()
|
|
1b7fc6c8037b63f7fca8c99b97ccbb1871ab33c1
|
test/targetmachines.py
|
test/targetmachines.py
|
from llvm.core import *
from llvm.ee import TargetMachine, EngineBuilder, print_registered_targets
import unittest
class TestTargetMachines(unittest.TestCase):
'''Exercise target machines
Require PTX backend
'''
def test_native(self):
m, _ = self._build_module()
tm = EngineBuilder.new(m).select_target()
self.assertTrue(tm.target_name)
self.assertTrue(tm.target_data)
self.assertTrue(tm.target_short_description)
self.assertTrue(tm.triple)
self.assertIn('foo', tm.emit_assembly(m).decode('utf-8'))
def test_ptx(self):
m, func = self._build_module()
func.calling_convention = CC_PTX_KERNEL # set calling conv
ptxtm = TargetMachine.lookup(arch='ptx64', cpu='compute_20',
features='-double')
self.assertTrue(ptxtm.triple)
self.assertTrue(ptxtm.cpu)
self.assertTrue(ptxtm.feature_string)
ptxasm = ptxtm.emit_assembly(m).decode('utf-8')
self.assertIn('foo', ptxasm)
self.assertIn('map_f64_to_f32', ptxasm)
self.assertIn('compute_10', ptxasm)
def _build_module(self):
m = Module.new('TestTargetMachines')
fnty = Type.function(Type.void(), [])
func = m.add_function(fnty, name='foo')
bldr = Builder.new(func.append_basic_block('entry'))
bldr.ret_void()
m.verify()
return m, func
if __name__ == '__main__':
unittest.main()
|
Add test for target machines (native + PTX)
|
Add test for target machines (native + PTX)
|
Python
|
bsd-3-clause
|
llvmpy/llvmpy,llvmpy/llvmpy,llvmpy/llvmpy,llvmpy/llvmpy,llvmpy/llvmpy,llvmpy/llvmpy
|
Add test for target machines (native + PTX)
|
from llvm.core import *
from llvm.ee import TargetMachine, EngineBuilder, print_registered_targets
import unittest
class TestTargetMachines(unittest.TestCase):
'''Exercise target machines
Require PTX backend
'''
def test_native(self):
m, _ = self._build_module()
tm = EngineBuilder.new(m).select_target()
self.assertTrue(tm.target_name)
self.assertTrue(tm.target_data)
self.assertTrue(tm.target_short_description)
self.assertTrue(tm.triple)
self.assertIn('foo', tm.emit_assembly(m).decode('utf-8'))
def test_ptx(self):
m, func = self._build_module()
func.calling_convention = CC_PTX_KERNEL # set calling conv
ptxtm = TargetMachine.lookup(arch='ptx64', cpu='compute_20',
features='-double')
self.assertTrue(ptxtm.triple)
self.assertTrue(ptxtm.cpu)
self.assertTrue(ptxtm.feature_string)
ptxasm = ptxtm.emit_assembly(m).decode('utf-8')
self.assertIn('foo', ptxasm)
self.assertIn('map_f64_to_f32', ptxasm)
self.assertIn('compute_10', ptxasm)
def _build_module(self):
m = Module.new('TestTargetMachines')
fnty = Type.function(Type.void(), [])
func = m.add_function(fnty, name='foo')
bldr = Builder.new(func.append_basic_block('entry'))
bldr.ret_void()
m.verify()
return m, func
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for target machines (native + PTX)<commit_after>
|
from llvm.core import *
from llvm.ee import TargetMachine, EngineBuilder, print_registered_targets
import unittest
class TestTargetMachines(unittest.TestCase):
'''Exercise target machines
Require PTX backend
'''
def test_native(self):
m, _ = self._build_module()
tm = EngineBuilder.new(m).select_target()
self.assertTrue(tm.target_name)
self.assertTrue(tm.target_data)
self.assertTrue(tm.target_short_description)
self.assertTrue(tm.triple)
self.assertIn('foo', tm.emit_assembly(m).decode('utf-8'))
def test_ptx(self):
m, func = self._build_module()
func.calling_convention = CC_PTX_KERNEL # set calling conv
ptxtm = TargetMachine.lookup(arch='ptx64', cpu='compute_20',
features='-double')
self.assertTrue(ptxtm.triple)
self.assertTrue(ptxtm.cpu)
self.assertTrue(ptxtm.feature_string)
ptxasm = ptxtm.emit_assembly(m).decode('utf-8')
self.assertIn('foo', ptxasm)
self.assertIn('map_f64_to_f32', ptxasm)
self.assertIn('compute_10', ptxasm)
def _build_module(self):
m = Module.new('TestTargetMachines')
fnty = Type.function(Type.void(), [])
func = m.add_function(fnty, name='foo')
bldr = Builder.new(func.append_basic_block('entry'))
bldr.ret_void()
m.verify()
return m, func
if __name__ == '__main__':
unittest.main()
|
Add test for target machines (native + PTX)from llvm.core import *
from llvm.ee import TargetMachine, EngineBuilder, print_registered_targets
import unittest
class TestTargetMachines(unittest.TestCase):
'''Exercise target machines
Require PTX backend
'''
def test_native(self):
m, _ = self._build_module()
tm = EngineBuilder.new(m).select_target()
self.assertTrue(tm.target_name)
self.assertTrue(tm.target_data)
self.assertTrue(tm.target_short_description)
self.assertTrue(tm.triple)
self.assertIn('foo', tm.emit_assembly(m).decode('utf-8'))
def test_ptx(self):
m, func = self._build_module()
func.calling_convention = CC_PTX_KERNEL # set calling conv
ptxtm = TargetMachine.lookup(arch='ptx64', cpu='compute_20',
features='-double')
self.assertTrue(ptxtm.triple)
self.assertTrue(ptxtm.cpu)
self.assertTrue(ptxtm.feature_string)
ptxasm = ptxtm.emit_assembly(m).decode('utf-8')
self.assertIn('foo', ptxasm)
self.assertIn('map_f64_to_f32', ptxasm)
self.assertIn('compute_10', ptxasm)
def _build_module(self):
m = Module.new('TestTargetMachines')
fnty = Type.function(Type.void(), [])
func = m.add_function(fnty, name='foo')
bldr = Builder.new(func.append_basic_block('entry'))
bldr.ret_void()
m.verify()
return m, func
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for target machines (native + PTX)<commit_after>from llvm.core import *
from llvm.ee import TargetMachine, EngineBuilder, print_registered_targets
import unittest
class TestTargetMachines(unittest.TestCase):
'''Exercise target machines
Require PTX backend
'''
def test_native(self):
m, _ = self._build_module()
tm = EngineBuilder.new(m).select_target()
self.assertTrue(tm.target_name)
self.assertTrue(tm.target_data)
self.assertTrue(tm.target_short_description)
self.assertTrue(tm.triple)
self.assertIn('foo', tm.emit_assembly(m).decode('utf-8'))
def test_ptx(self):
m, func = self._build_module()
func.calling_convention = CC_PTX_KERNEL # set calling conv
ptxtm = TargetMachine.lookup(arch='ptx64', cpu='compute_20',
features='-double')
self.assertTrue(ptxtm.triple)
self.assertTrue(ptxtm.cpu)
self.assertTrue(ptxtm.feature_string)
ptxasm = ptxtm.emit_assembly(m).decode('utf-8')
self.assertIn('foo', ptxasm)
self.assertIn('map_f64_to_f32', ptxasm)
self.assertIn('compute_10', ptxasm)
def _build_module(self):
m = Module.new('TestTargetMachines')
fnty = Type.function(Type.void(), [])
func = m.add_function(fnty, name='foo')
bldr = Builder.new(func.append_basic_block('entry'))
bldr.ret_void()
m.verify()
return m, func
if __name__ == '__main__':
unittest.main()
|
|
ad1522445128ed70c4cb7991731938ab5c1df75b
|
tests/vmtests/vmbackedtestcase.py
|
tests/vmtests/vmbackedtestcase.py
|
import os
import unittest
from blivet import Blivet
@unittest.skipUnless(os.environ.get("VM_ENVIRONMENT"), "vm only test")
@unittest.skipUnless(os.geteuid() == 0, "requires root access")
class VMBackedTestCase(unittest.TestCase):
""" A class to encapsulate testing of blivet using block devices.
The basic idea is you create some scratch block devices and then run
some test code on them.
:attr:`~.ImageBackedTestCase.disks` defines the set of disk images.
:meth:`~.ImageBackedTestCase._set_up_storage` is where you specify the
initial layout of the disks. It will be written to the disk images in
:meth:`~.ImageBackedTestCase.set_up_storage`.
You then write test methods as usual that use the disk images, which
will be cleaned up and removed when each test method finishes.
"""
initialize_disks = True # Whether or not to create a disklabel on the disks.
def set_up_disks(self):
""" Create disk image files to build the test's storage on.
If you are actually creating the disk image files here don't forget
to set the initialize_disks flag so they get a fresh disklabel when
clear_partitions gets called from create_storage later.
"""
pass
def _set_up_storage(self):
""" Schedule creation of storage devices on the disk images.
.. note::
The disk images should already be in a populated devicetree.
"""
pass
def set_up_storage(self):
""" Create a device stack on top of disk images for this test to run on.
This will write the configuration to whatever disk images are
defined in set_up_disks.
"""
#
# create disk images
#
self.set_up_disks()
#
# populate the devicetree
#
self.blivet.reset()
if self.initialize_disks:
for disk in self.blivet.disks:
self.blivet.initialize_disk(disk)
#
# create the rest of the stack
#
self._set_up_storage()
#
# write configuration to disk images
#
self.blivet.do_it()
def setUp(self):
""" Do any setup required prior to running a test. """
self.blivet = Blivet()
self.addCleanup(self._clean_up)
self.set_up_storage()
def _clean_up(self):
""" Clean up any resources that may have been set up for a test. """
self.blivet.reset()
|
Add a test case for tests running in a virtual machine
|
Add a test case for tests running in a virtual machine
|
Python
|
lgpl-2.1
|
AdamWill/blivet,rvykydal/blivet,rvykydal/blivet,AdamWill/blivet,vojtechtrefny/blivet,jkonecny12/blivet,jkonecny12/blivet,vojtechtrefny/blivet
|
Add a test case for tests running in a virtual machine
|
import os
import unittest
from blivet import Blivet
@unittest.skipUnless(os.environ.get("VM_ENVIRONMENT"), "vm only test")
@unittest.skipUnless(os.geteuid() == 0, "requires root access")
class VMBackedTestCase(unittest.TestCase):
""" A class to encapsulate testing of blivet using block devices.
The basic idea is you create some scratch block devices and then run
some test code on them.
:attr:`~.ImageBackedTestCase.disks` defines the set of disk images.
:meth:`~.ImageBackedTestCase._set_up_storage` is where you specify the
initial layout of the disks. It will be written to the disk images in
:meth:`~.ImageBackedTestCase.set_up_storage`.
You then write test methods as usual that use the disk images, which
will be cleaned up and removed when each test method finishes.
"""
initialize_disks = True # Whether or not to create a disklabel on the disks.
def set_up_disks(self):
""" Create disk image files to build the test's storage on.
If you are actually creating the disk image files here don't forget
to set the initialize_disks flag so they get a fresh disklabel when
clear_partitions gets called from create_storage later.
"""
pass
def _set_up_storage(self):
""" Schedule creation of storage devices on the disk images.
.. note::
The disk images should already be in a populated devicetree.
"""
pass
def set_up_storage(self):
""" Create a device stack on top of disk images for this test to run on.
This will write the configuration to whatever disk images are
defined in set_up_disks.
"""
#
# create disk images
#
self.set_up_disks()
#
# populate the devicetree
#
self.blivet.reset()
if self.initialize_disks:
for disk in self.blivet.disks:
self.blivet.initialize_disk(disk)
#
# create the rest of the stack
#
self._set_up_storage()
#
# write configuration to disk images
#
self.blivet.do_it()
def setUp(self):
""" Do any setup required prior to running a test. """
self.blivet = Blivet()
self.addCleanup(self._clean_up)
self.set_up_storage()
def _clean_up(self):
""" Clean up any resources that may have been set up for a test. """
self.blivet.reset()
|
<commit_before><commit_msg>Add a test case for tests running in a virtual machine<commit_after>
|
import os
import unittest
from blivet import Blivet
@unittest.skipUnless(os.environ.get("VM_ENVIRONMENT"), "vm only test")
@unittest.skipUnless(os.geteuid() == 0, "requires root access")
class VMBackedTestCase(unittest.TestCase):
""" A class to encapsulate testing of blivet using block devices.
The basic idea is you create some scratch block devices and then run
some test code on them.
:attr:`~.ImageBackedTestCase.disks` defines the set of disk images.
:meth:`~.ImageBackedTestCase._set_up_storage` is where you specify the
initial layout of the disks. It will be written to the disk images in
:meth:`~.ImageBackedTestCase.set_up_storage`.
You then write test methods as usual that use the disk images, which
will be cleaned up and removed when each test method finishes.
"""
initialize_disks = True # Whether or not to create a disklabel on the disks.
def set_up_disks(self):
""" Create disk image files to build the test's storage on.
If you are actually creating the disk image files here don't forget
to set the initialize_disks flag so they get a fresh disklabel when
clear_partitions gets called from create_storage later.
"""
pass
def _set_up_storage(self):
""" Schedule creation of storage devices on the disk images.
.. note::
The disk images should already be in a populated devicetree.
"""
pass
def set_up_storage(self):
""" Create a device stack on top of disk images for this test to run on.
This will write the configuration to whatever disk images are
defined in set_up_disks.
"""
#
# create disk images
#
self.set_up_disks()
#
# populate the devicetree
#
self.blivet.reset()
if self.initialize_disks:
for disk in self.blivet.disks:
self.blivet.initialize_disk(disk)
#
# create the rest of the stack
#
self._set_up_storage()
#
# write configuration to disk images
#
self.blivet.do_it()
def setUp(self):
""" Do any setup required prior to running a test. """
self.blivet = Blivet()
self.addCleanup(self._clean_up)
self.set_up_storage()
def _clean_up(self):
""" Clean up any resources that may have been set up for a test. """
self.blivet.reset()
|
Add a test case for tests running in a virtual machine
import os
import unittest
from blivet import Blivet
@unittest.skipUnless(os.environ.get("VM_ENVIRONMENT"), "vm only test")
@unittest.skipUnless(os.geteuid() == 0, "requires root access")
class VMBackedTestCase(unittest.TestCase):
""" A class to encapsulate testing of blivet using block devices.
The basic idea is you create some scratch block devices and then run
some test code on them.
:attr:`~.ImageBackedTestCase.disks` defines the set of disk images.
:meth:`~.ImageBackedTestCase._set_up_storage` is where you specify the
initial layout of the disks. It will be written to the disk images in
:meth:`~.ImageBackedTestCase.set_up_storage`.
You then write test methods as usual that use the disk images, which
will be cleaned up and removed when each test method finishes.
"""
initialize_disks = True # Whether or not to create a disklabel on the disks.
def set_up_disks(self):
""" Create disk image files to build the test's storage on.
If you are actually creating the disk image files here don't forget
to set the initialize_disks flag so they get a fresh disklabel when
clear_partitions gets called from create_storage later.
"""
pass
def _set_up_storage(self):
""" Schedule creation of storage devices on the disk images.
.. note::
The disk images should already be in a populated devicetree.
"""
pass
def set_up_storage(self):
""" Create a device stack on top of disk images for this test to run on.
This will write the configuration to whatever disk images are
defined in set_up_disks.
"""
#
# create disk images
#
self.set_up_disks()
#
# populate the devicetree
#
self.blivet.reset()
if self.initialize_disks:
for disk in self.blivet.disks:
self.blivet.initialize_disk(disk)
#
# create the rest of the stack
#
self._set_up_storage()
#
# write configuration to disk images
#
self.blivet.do_it()
def setUp(self):
""" Do any setup required prior to running a test. """
self.blivet = Blivet()
self.addCleanup(self._clean_up)
self.set_up_storage()
def _clean_up(self):
""" Clean up any resources that may have been set up for a test. """
self.blivet.reset()
|
<commit_before><commit_msg>Add a test case for tests running in a virtual machine<commit_after>
import os
import unittest
from blivet import Blivet
@unittest.skipUnless(os.environ.get("VM_ENVIRONMENT"), "vm only test")
@unittest.skipUnless(os.geteuid() == 0, "requires root access")
class VMBackedTestCase(unittest.TestCase):
""" A class to encapsulate testing of blivet using block devices.
The basic idea is you create some scratch block devices and then run
some test code on them.
:attr:`~.ImageBackedTestCase.disks` defines the set of disk images.
:meth:`~.ImageBackedTestCase._set_up_storage` is where you specify the
initial layout of the disks. It will be written to the disk images in
:meth:`~.ImageBackedTestCase.set_up_storage`.
You then write test methods as usual that use the disk images, which
will be cleaned up and removed when each test method finishes.
"""
initialize_disks = True # Whether or not to create a disklabel on the disks.
def set_up_disks(self):
""" Create disk image files to build the test's storage on.
If you are actually creating the disk image files here don't forget
to set the initialize_disks flag so they get a fresh disklabel when
clear_partitions gets called from create_storage later.
"""
pass
def _set_up_storage(self):
""" Schedule creation of storage devices on the disk images.
.. note::
The disk images should already be in a populated devicetree.
"""
pass
def set_up_storage(self):
""" Create a device stack on top of disk images for this test to run on.
This will write the configuration to whatever disk images are
defined in set_up_disks.
"""
#
# create disk images
#
self.set_up_disks()
#
# populate the devicetree
#
self.blivet.reset()
if self.initialize_disks:
for disk in self.blivet.disks:
self.blivet.initialize_disk(disk)
#
# create the rest of the stack
#
self._set_up_storage()
#
# write configuration to disk images
#
self.blivet.do_it()
def setUp(self):
""" Do any setup required prior to running a test. """
self.blivet = Blivet()
self.addCleanup(self._clean_up)
self.set_up_storage()
def _clean_up(self):
""" Clean up any resources that may have been set up for a test. """
self.blivet.reset()
|
|
31e105ce31dc3a2243c5bfddab3609ef71c1b026
|
csunplugged/resources/utils/resource_parameters.py
|
csunplugged/resources/utils/resource_parameters.py
|
from lxml import etree
from django.utils.translation import ugettext as _
class ResourceParameter(object):
def __init__(self, name="", description=""):
self.name = name
self.description = description
def html_element(self):
legend = etree.Element('legend')
legend.text = self.description
fieldset = etree.Element('fieldset')
fieldset.append(legend)
return fieldset
class EnumResourceParameter(ResourceParameter):
def __init__(self, values=[], default=None, **kwargs):
super().__init__(**kwargs)
self.values = values
self.default = default
if self.default not in self.values:
raise Exception(self.values)
def html_element(self):
base_elem = super().html_element()
for value, value_desc in self.values.items():
input_elem = etree.Element(
'input',
type="radio",
name=self.name,
id='{}_{}'.format(self.name, value),
value=str(value)
)
if value == self.default:
input_elem.set("checked", "checked")
base_elem.append(input_elem)
label_elem = etree.Element(
"label",
)
label_elem.set("for", "{}_{}".format(self.name, value))
label_elem.text = value_desc
base_elem.append(label_elem)
base_elem.append(etree.Element('br'))
return base_elem
class BoolResourceParameter(EnumResourceParameter):
def __init__(self, default=True, true_text=_("Yes"), false_text=_("No"), **kwargs):
values = {
True: true_text,
False: false_text
}
super().__init__(values=values, default=default, **kwargs)
|
Add ResourceParameter class and subclasses to store parameter info
|
Add ResourceParameter class and subclasses to store parameter info
|
Python
|
mit
|
uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged
|
Add ResourceParameter class and subclasses to store parameter info
|
from lxml import etree
from django.utils.translation import ugettext as _
class ResourceParameter(object):
def __init__(self, name="", description=""):
self.name = name
self.description = description
def html_element(self):
legend = etree.Element('legend')
legend.text = self.description
fieldset = etree.Element('fieldset')
fieldset.append(legend)
return fieldset
class EnumResourceParameter(ResourceParameter):
def __init__(self, values=[], default=None, **kwargs):
super().__init__(**kwargs)
self.values = values
self.default = default
if self.default not in self.values:
raise Exception(self.values)
def html_element(self):
base_elem = super().html_element()
for value, value_desc in self.values.items():
input_elem = etree.Element(
'input',
type="radio",
name=self.name,
id='{}_{}'.format(self.name, value),
value=str(value)
)
if value == self.default:
input_elem.set("checked", "checked")
base_elem.append(input_elem)
label_elem = etree.Element(
"label",
)
label_elem.set("for", "{}_{}".format(self.name, value))
label_elem.text = value_desc
base_elem.append(label_elem)
base_elem.append(etree.Element('br'))
return base_elem
class BoolResourceParameter(EnumResourceParameter):
def __init__(self, default=True, true_text=_("Yes"), false_text=_("No"), **kwargs):
values = {
True: true_text,
False: false_text
}
super().__init__(values=values, default=default, **kwargs)
|
<commit_before><commit_msg>Add ResourceParameter class and subclasses to store parameter info<commit_after>
|
from lxml import etree
from django.utils.translation import ugettext as _
class ResourceParameter(object):
def __init__(self, name="", description=""):
self.name = name
self.description = description
def html_element(self):
legend = etree.Element('legend')
legend.text = self.description
fieldset = etree.Element('fieldset')
fieldset.append(legend)
return fieldset
class EnumResourceParameter(ResourceParameter):
def __init__(self, values=[], default=None, **kwargs):
super().__init__(**kwargs)
self.values = values
self.default = default
if self.default not in self.values:
raise Exception(self.values)
def html_element(self):
base_elem = super().html_element()
for value, value_desc in self.values.items():
input_elem = etree.Element(
'input',
type="radio",
name=self.name,
id='{}_{}'.format(self.name, value),
value=str(value)
)
if value == self.default:
input_elem.set("checked", "checked")
base_elem.append(input_elem)
label_elem = etree.Element(
"label",
)
label_elem.set("for", "{}_{}".format(self.name, value))
label_elem.text = value_desc
base_elem.append(label_elem)
base_elem.append(etree.Element('br'))
return base_elem
class BoolResourceParameter(EnumResourceParameter):
def __init__(self, default=True, true_text=_("Yes"), false_text=_("No"), **kwargs):
values = {
True: true_text,
False: false_text
}
super().__init__(values=values, default=default, **kwargs)
|
Add ResourceParameter class and subclasses to store parameter infofrom lxml import etree
from django.utils.translation import ugettext as _
class ResourceParameter(object):
def __init__(self, name="", description=""):
self.name = name
self.description = description
def html_element(self):
legend = etree.Element('legend')
legend.text = self.description
fieldset = etree.Element('fieldset')
fieldset.append(legend)
return fieldset
class EnumResourceParameter(ResourceParameter):
def __init__(self, values=[], default=None, **kwargs):
super().__init__(**kwargs)
self.values = values
self.default = default
if self.default not in self.values:
raise Exception(self.values)
def html_element(self):
base_elem = super().html_element()
for value, value_desc in self.values.items():
input_elem = etree.Element(
'input',
type="radio",
name=self.name,
id='{}_{}'.format(self.name, value),
value=str(value)
)
if value == self.default:
input_elem.set("checked", "checked")
base_elem.append(input_elem)
label_elem = etree.Element(
"label",
)
label_elem.set("for", "{}_{}".format(self.name, value))
label_elem.text = value_desc
base_elem.append(label_elem)
base_elem.append(etree.Element('br'))
return base_elem
class BoolResourceParameter(EnumResourceParameter):
def __init__(self, default=True, true_text=_("Yes"), false_text=_("No"), **kwargs):
values = {
True: true_text,
False: false_text
}
super().__init__(values=values, default=default, **kwargs)
|
<commit_before><commit_msg>Add ResourceParameter class and subclasses to store parameter info<commit_after>from lxml import etree
from django.utils.translation import ugettext as _
class ResourceParameter(object):
def __init__(self, name="", description=""):
self.name = name
self.description = description
def html_element(self):
legend = etree.Element('legend')
legend.text = self.description
fieldset = etree.Element('fieldset')
fieldset.append(legend)
return fieldset
class EnumResourceParameter(ResourceParameter):
def __init__(self, values=[], default=None, **kwargs):
super().__init__(**kwargs)
self.values = values
self.default = default
if self.default not in self.values:
raise Exception(self.values)
def html_element(self):
base_elem = super().html_element()
for value, value_desc in self.values.items():
input_elem = etree.Element(
'input',
type="radio",
name=self.name,
id='{}_{}'.format(self.name, value),
value=str(value)
)
if value == self.default:
input_elem.set("checked", "checked")
base_elem.append(input_elem)
label_elem = etree.Element(
"label",
)
label_elem.set("for", "{}_{}".format(self.name, value))
label_elem.text = value_desc
base_elem.append(label_elem)
base_elem.append(etree.Element('br'))
return base_elem
class BoolResourceParameter(EnumResourceParameter):
def __init__(self, default=True, true_text=_("Yes"), false_text=_("No"), **kwargs):
values = {
True: true_text,
False: false_text
}
super().__init__(values=values, default=default, **kwargs)
|
|
cdcb20d48a06e688e332b8f3964fa4430972eaab
|
django/website/logframe/tests/test_period_utils.py
|
django/website/logframe/tests/test_period_utils.py
|
from ..period_utils import get_month_shift
def test_get_month_shift_handles_december():
new_month, _ = get_month_shift(12, 1)
assert 12 == new_month
|
Add test for get_month_shift handling December
|
Add test for get_month_shift handling December
|
Python
|
agpl-3.0
|
aptivate/kashana,aptivate/kashana,aptivate/alfie,daniell/kashana,aptivate/alfie,daniell/kashana,aptivate/alfie,aptivate/kashana,daniell/kashana,daniell/kashana,aptivate/kashana,aptivate/alfie
|
Add test for get_month_shift handling December
|
from ..period_utils import get_month_shift
def test_get_month_shift_handles_december():
new_month, _ = get_month_shift(12, 1)
assert 12 == new_month
|
<commit_before><commit_msg>Add test for get_month_shift handling December<commit_after>
|
from ..period_utils import get_month_shift
def test_get_month_shift_handles_december():
new_month, _ = get_month_shift(12, 1)
assert 12 == new_month
|
Add test for get_month_shift handling Decemberfrom ..period_utils import get_month_shift
def test_get_month_shift_handles_december():
new_month, _ = get_month_shift(12, 1)
assert 12 == new_month
|
<commit_before><commit_msg>Add test for get_month_shift handling December<commit_after>from ..period_utils import get_month_shift
def test_get_month_shift_handles_december():
new_month, _ = get_month_shift(12, 1)
assert 12 == new_month
|
|
b23ab20a082e35e6e4c8bf9a535b1bbccd71be26
|
docs/examples/tests/pylab_figshow.py
|
docs/examples/tests/pylab_figshow.py
|
"""Manual test for figure.show() in the inline matplotlib backend.
This script should be loaded for interactive use (via %load) into a qtconsole
or notebook initialized with the pylab inline backend.
Expected behavior: only *one* copy of the figure is shown.
For further details:
https://github.com/ipython/ipython/issues/1612
https://github.com/matplotlib/matplotlib/issues/835
"""
import numpy as np
import matplotlib.pyplot as plt
plt.ioff()
x = np.random.uniform(-5, 5, size=(100))
y = np.random.uniform(-5, 5, size=(100))
f = plt.figure()
plt.scatter(x, y)
plt.plot(y)
f.show()
|
Add manual test file as per review by @minrk.
|
Add manual test file as per review by @minrk.
|
Python
|
bsd-3-clause
|
ipython/ipython,ipython/ipython
|
Add manual test file as per review by @minrk.
|
"""Manual test for figure.show() in the inline matplotlib backend.
This script should be loaded for interactive use (via %load) into a qtconsole
or notebook initialized with the pylab inline backend.
Expected behavior: only *one* copy of the figure is shown.
For further details:
https://github.com/ipython/ipython/issues/1612
https://github.com/matplotlib/matplotlib/issues/835
"""
import numpy as np
import matplotlib.pyplot as plt
plt.ioff()
x = np.random.uniform(-5, 5, size=(100))
y = np.random.uniform(-5, 5, size=(100))
f = plt.figure()
plt.scatter(x, y)
plt.plot(y)
f.show()
|
<commit_before><commit_msg>Add manual test file as per review by @minrk.<commit_after>
|
"""Manual test for figure.show() in the inline matplotlib backend.
This script should be loaded for interactive use (via %load) into a qtconsole
or notebook initialized with the pylab inline backend.
Expected behavior: only *one* copy of the figure is shown.
For further details:
https://github.com/ipython/ipython/issues/1612
https://github.com/matplotlib/matplotlib/issues/835
"""
import numpy as np
import matplotlib.pyplot as plt
plt.ioff()
x = np.random.uniform(-5, 5, size=(100))
y = np.random.uniform(-5, 5, size=(100))
f = plt.figure()
plt.scatter(x, y)
plt.plot(y)
f.show()
|
Add manual test file as per review by @minrk."""Manual test for figure.show() in the inline matplotlib backend.
This script should be loaded for interactive use (via %load) into a qtconsole
or notebook initialized with the pylab inline backend.
Expected behavior: only *one* copy of the figure is shown.
For further details:
https://github.com/ipython/ipython/issues/1612
https://github.com/matplotlib/matplotlib/issues/835
"""
import numpy as np
import matplotlib.pyplot as plt
plt.ioff()
x = np.random.uniform(-5, 5, size=(100))
y = np.random.uniform(-5, 5, size=(100))
f = plt.figure()
plt.scatter(x, y)
plt.plot(y)
f.show()
|
<commit_before><commit_msg>Add manual test file as per review by @minrk.<commit_after>"""Manual test for figure.show() in the inline matplotlib backend.
This script should be loaded for interactive use (via %load) into a qtconsole
or notebook initialized with the pylab inline backend.
Expected behavior: only *one* copy of the figure is shown.
For further details:
https://github.com/ipython/ipython/issues/1612
https://github.com/matplotlib/matplotlib/issues/835
"""
import numpy as np
import matplotlib.pyplot as plt
plt.ioff()
x = np.random.uniform(-5, 5, size=(100))
y = np.random.uniform(-5, 5, size=(100))
f = plt.figure()
plt.scatter(x, y)
plt.plot(y)
f.show()
|
|
99469256b4585b5c0056d69e153e7c628f4430c1
|
leak_matcher.py
|
leak_matcher.py
|
#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import re
# If you add a line like this to the ctor for a class:
# printf_stderr("ZZZ CREATE %p\n", this);
# and a line like this to the dtor for a class:
# printf_stderr("ZZZ DESTROY %p\n", this);
# then this script will process the resulting mochitest log
# and give you the mochitest that was running when any such
# objects were allocated that had no matching dtor.
cdMatch = re.compile('^.* ZZZ (CREATE|DESTROY) ([0-9A-F]+)\r?$')
anyUnknown = False
live = {}
currTest = None
for l in sys.stdin:
if not 'ZZ' in l:
if l.find("TEST-START") > -1:
currTest = l.split('|')[1].strip()
continue
cdm = cdMatch.match(l)
if not cdm:
print 'Unknown line: ', l,
anyUnknown = True
continue
isCreate = cdm.group(1) == 'CREATE'
assert isCreate or cdm.group(1) == 'DESTROY'
addr = cdm.group(2)
if len(addr) != 8:
print 'Not enough characters in address:', addr, l,
if isCreate:
assert not addr in live
assert currTest
live[addr] = currTest
else:
assert addr in live
del live[addr]
if anyUnknown:
exit(-1)
testCounts = {}
for liveAddr, inTest in live.iteritems():
testCounts[inTest] = testCounts.setdefault(inTest, 0) + 1
for t, n in testCounts.iteritems():
print n, t
|
Add leak matcher for figuring out which test is leaking objects of a type
|
Add leak matcher for figuring out which test is leaking objects of a type
|
Python
|
mpl-2.0
|
amccreight/mochitest-logs
|
Add leak matcher for figuring out which test is leaking objects of a type
|
#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import re
# If you add a line like this to the ctor for a class:
# printf_stderr("ZZZ CREATE %p\n", this);
# and a line like this to the dtor for a class:
# printf_stderr("ZZZ DESTROY %p\n", this);
# then this script will process the resulting mochitest log
# and give you the mochitest that was running when any such
# objects were allocated that had no matching dtor.
cdMatch = re.compile('^.* ZZZ (CREATE|DESTROY) ([0-9A-F]+)\r?$')
anyUnknown = False
live = {}
currTest = None
for l in sys.stdin:
if not 'ZZ' in l:
if l.find("TEST-START") > -1:
currTest = l.split('|')[1].strip()
continue
cdm = cdMatch.match(l)
if not cdm:
print 'Unknown line: ', l,
anyUnknown = True
continue
isCreate = cdm.group(1) == 'CREATE'
assert isCreate or cdm.group(1) == 'DESTROY'
addr = cdm.group(2)
if len(addr) != 8:
print 'Not enough characters in address:', addr, l,
if isCreate:
assert not addr in live
assert currTest
live[addr] = currTest
else:
assert addr in live
del live[addr]
if anyUnknown:
exit(-1)
testCounts = {}
for liveAddr, inTest in live.iteritems():
testCounts[inTest] = testCounts.setdefault(inTest, 0) + 1
for t, n in testCounts.iteritems():
print n, t
|
<commit_before><commit_msg>Add leak matcher for figuring out which test is leaking objects of a type<commit_after>
|
#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import re
# If you add a line like this to the ctor for a class:
# printf_stderr("ZZZ CREATE %p\n", this);
# and a line like this to the dtor for a class:
# printf_stderr("ZZZ DESTROY %p\n", this);
# then this script will process the resulting mochitest log
# and give you the mochitest that was running when any such
# objects were allocated that had no matching dtor.
cdMatch = re.compile('^.* ZZZ (CREATE|DESTROY) ([0-9A-F]+)\r?$')
anyUnknown = False
live = {}
currTest = None
for l in sys.stdin:
if not 'ZZ' in l:
if l.find("TEST-START") > -1:
currTest = l.split('|')[1].strip()
continue
cdm = cdMatch.match(l)
if not cdm:
print 'Unknown line: ', l,
anyUnknown = True
continue
isCreate = cdm.group(1) == 'CREATE'
assert isCreate or cdm.group(1) == 'DESTROY'
addr = cdm.group(2)
if len(addr) != 8:
print 'Not enough characters in address:', addr, l,
if isCreate:
assert not addr in live
assert currTest
live[addr] = currTest
else:
assert addr in live
del live[addr]
if anyUnknown:
exit(-1)
testCounts = {}
for liveAddr, inTest in live.iteritems():
testCounts[inTest] = testCounts.setdefault(inTest, 0) + 1
for t, n in testCounts.iteritems():
print n, t
|
Add leak matcher for figuring out which test is leaking objects of a type#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import re
# If you add a line like this to the ctor for a class:
# printf_stderr("ZZZ CREATE %p\n", this);
# and a line like this to the dtor for a class:
# printf_stderr("ZZZ DESTROY %p\n", this);
# then this script will process the resulting mochitest log
# and give you the mochitest that was running when any such
# objects were allocated that had no matching dtor.
cdMatch = re.compile('^.* ZZZ (CREATE|DESTROY) ([0-9A-F]+)\r?$')
anyUnknown = False
live = {}
currTest = None
for l in sys.stdin:
if not 'ZZ' in l:
if l.find("TEST-START") > -1:
currTest = l.split('|')[1].strip()
continue
cdm = cdMatch.match(l)
if not cdm:
print 'Unknown line: ', l,
anyUnknown = True
continue
isCreate = cdm.group(1) == 'CREATE'
assert isCreate or cdm.group(1) == 'DESTROY'
addr = cdm.group(2)
if len(addr) != 8:
print 'Not enough characters in address:', addr, l,
if isCreate:
assert not addr in live
assert currTest
live[addr] = currTest
else:
assert addr in live
del live[addr]
if anyUnknown:
exit(-1)
testCounts = {}
for liveAddr, inTest in live.iteritems():
testCounts[inTest] = testCounts.setdefault(inTest, 0) + 1
for t, n in testCounts.iteritems():
print n, t
|
<commit_before><commit_msg>Add leak matcher for figuring out which test is leaking objects of a type<commit_after>#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import re
# If you add a line like this to the ctor for a class:
# printf_stderr("ZZZ CREATE %p\n", this);
# and a line like this to the dtor for a class:
# printf_stderr("ZZZ DESTROY %p\n", this);
# then this script will process the resulting mochitest log
# and give you the mochitest that was running when any such
# objects were allocated that had no matching dtor.
cdMatch = re.compile('^.* ZZZ (CREATE|DESTROY) ([0-9A-F]+)\r?$')
anyUnknown = False
live = {}
currTest = None
for l in sys.stdin:
if not 'ZZ' in l:
if l.find("TEST-START") > -1:
currTest = l.split('|')[1].strip()
continue
cdm = cdMatch.match(l)
if not cdm:
print 'Unknown line: ', l,
anyUnknown = True
continue
isCreate = cdm.group(1) == 'CREATE'
assert isCreate or cdm.group(1) == 'DESTROY'
addr = cdm.group(2)
if len(addr) != 8:
print 'Not enough characters in address:', addr, l,
if isCreate:
assert not addr in live
assert currTest
live[addr] = currTest
else:
assert addr in live
del live[addr]
if anyUnknown:
exit(-1)
testCounts = {}
for liveAddr, inTest in live.iteritems():
testCounts[inTest] = testCounts.setdefault(inTest, 0) + 1
for t, n in testCounts.iteritems():
print n, t
|
|
781af46113a8b7c02e0271efccfebb2cc5d3494c
|
mclearn/aggregators.py
|
mclearn/aggregators.py
|
""" Rank aggregators. """
import itertools
import functools
import numpy as np
def borda_count(voters, n_candidates):
""" Borda count rank aggregator.
Parameters
----------
voters : list-like
A list of arrays where each array corresponds to a voter's preference.
n_candidates : int
The number of best candidates to be selected at each iteration.
Returns
-------
best_candidates : array
The list of indices of the best candidates.
"""
max_score = len(voters[0])
points = {}
# accumulate the points for each candidate
for voter in voters:
for idx, candidate in enumerate(voter):
points[candidate] = points.get(candidate, 0) + max_score - idx
# sort the candidates and return the most popular one(s)
rank = sorted(points, key=points.__getitem__, reverse=True)
return rank[:n_candidates]
def schulze_method(voters, n_candidates):
""" Schulze method of ordering candidates.
Parameters
----------
voters : list-like
A list of arrays where each array corresponds to a voter's preference.
n_candidates : int
The number of best candidates to be selected at each iteration.
Returns
-------
best_candidates : array
The list of indices of the best candidates.
"""
points = {}
candidates = set()
for voter in voters:
for idx_i in range(len(voter)):
for idx_j in range(idx_i + 1, len(voter)):
i = voter[idx_i]
j = voter[idx_j]
points[(i, j)] = points.get((i, j), 0) + 1
# get the id of all candidates
candidates = set(itertools.chain(*points.keys()))
# compute the strongest path using a variant of Floyd–Warshall algorithm
strength = {}
for i, j in itertools.product(candidates, repeat=2):
if i != j:
points.setdefault((i, j), 0)
points.setdefault((j, i), 0)
if points[(i, j)] > points[(j, i)]:
strength[(i, j)] = points[(i, j)]
else:
strength[(i, j)] = 0
# k is the expanding set of intermediate nodes in Floyd-Warshall
for k, i, j in itertools.product(candidates, repeat=3):
if (i != j) and (k != i) and (k != j):
strength[(i, j)] = max(strength[(i, j)], min(strength[(i, k)], strength[(k, j)]))
# Schulze method guarantees that there is no cycle, so sorting is well-defined
compare_strength = lambda x, y: strength[(x, y)] - strength[(y, x)]
rank = sorted(candidates, key=functools.cmp_to_key(compare_strength))
return rank[:n_candidates]
|
Implement Borda count and Schulze method for ranking
|
Implement Borda count and Schulze method for ranking
|
Python
|
bsd-3-clause
|
chengsoonong/mclass-sky,chengsoonong/mclass-sky,chengsoonong/mclass-sky,chengsoonong/mclass-sky,alasdairtran/mclearn,alasdairtran/mclearn,alasdairtran/mclearn,alasdairtran/mclearn
|
Implement Borda count and Schulze method for ranking
|
""" Rank aggregators. """
import itertools
import functools
import numpy as np
def borda_count(voters, n_candidates):
""" Borda count rank aggregator.
Parameters
----------
voters : list-like
A list of arrays where each array corresponds to a voter's preference.
n_candidates : int
The number of best candidates to be selected at each iteration.
Returns
-------
best_candidates : array
The list of indices of the best candidates.
"""
max_score = len(voters[0])
points = {}
# accumulate the points for each candidate
for voter in voters:
for idx, candidate in enumerate(voter):
points[candidate] = points.get(candidate, 0) + max_score - idx
# sort the candidates and return the most popular one(s)
rank = sorted(points, key=points.__getitem__, reverse=True)
return rank[:n_candidates]
def schulze_method(voters, n_candidates):
""" Schulze method of ordering candidates.
Parameters
----------
voters : list-like
A list of arrays where each array corresponds to a voter's preference.
n_candidates : int
The number of best candidates to be selected at each iteration.
Returns
-------
best_candidates : array
The list of indices of the best candidates.
"""
points = {}
candidates = set()
for voter in voters:
for idx_i in range(len(voter)):
for idx_j in range(idx_i + 1, len(voter)):
i = voter[idx_i]
j = voter[idx_j]
points[(i, j)] = points.get((i, j), 0) + 1
# get the id of all candidates
candidates = set(itertools.chain(*points.keys()))
# compute the strongest path using a variant of Floyd–Warshall algorithm
strength = {}
for i, j in itertools.product(candidates, repeat=2):
if i != j:
points.setdefault((i, j), 0)
points.setdefault((j, i), 0)
if points[(i, j)] > points[(j, i)]:
strength[(i, j)] = points[(i, j)]
else:
strength[(i, j)] = 0
# k is the expanding set of intermediate nodes in Floyd-Warshall
for k, i, j in itertools.product(candidates, repeat=3):
if (i != j) and (k != i) and (k != j):
strength[(i, j)] = max(strength[(i, j)], min(strength[(i, k)], strength[(k, j)]))
# Schulze method guarantees that there is no cycle, so sorting is well-defined
compare_strength = lambda x, y: strength[(x, y)] - strength[(y, x)]
rank = sorted(candidates, key=functools.cmp_to_key(compare_strength))
return rank[:n_candidates]
|
<commit_before><commit_msg>Implement Borda count and Schulze method for ranking<commit_after>
|
""" Rank aggregators. """
import itertools
import functools
import numpy as np
def borda_count(voters, n_candidates):
""" Borda count rank aggregator.
Parameters
----------
voters : list-like
A list of arrays where each array corresponds to a voter's preference.
n_candidates : int
The number of best candidates to be selected at each iteration.
Returns
-------
best_candidates : array
The list of indices of the best candidates.
"""
max_score = len(voters[0])
points = {}
# accumulate the points for each candidate
for voter in voters:
for idx, candidate in enumerate(voter):
points[candidate] = points.get(candidate, 0) + max_score - idx
# sort the candidates and return the most popular one(s)
rank = sorted(points, key=points.__getitem__, reverse=True)
return rank[:n_candidates]
def schulze_method(voters, n_candidates):
""" Schulze method of ordering candidates.
Parameters
----------
voters : list-like
A list of arrays where each array corresponds to a voter's preference.
n_candidates : int
The number of best candidates to be selected at each iteration.
Returns
-------
best_candidates : array
The list of indices of the best candidates.
"""
points = {}
candidates = set()
for voter in voters:
for idx_i in range(len(voter)):
for idx_j in range(idx_i + 1, len(voter)):
i = voter[idx_i]
j = voter[idx_j]
points[(i, j)] = points.get((i, j), 0) + 1
# get the id of all candidates
candidates = set(itertools.chain(*points.keys()))
# compute the strongest path using a variant of Floyd–Warshall algorithm
strength = {}
for i, j in itertools.product(candidates, repeat=2):
if i != j:
points.setdefault((i, j), 0)
points.setdefault((j, i), 0)
if points[(i, j)] > points[(j, i)]:
strength[(i, j)] = points[(i, j)]
else:
strength[(i, j)] = 0
# k is the expanding set of intermediate nodes in Floyd-Warshall
for k, i, j in itertools.product(candidates, repeat=3):
if (i != j) and (k != i) and (k != j):
strength[(i, j)] = max(strength[(i, j)], min(strength[(i, k)], strength[(k, j)]))
# Schulze method guarantees that there is no cycle, so sorting is well-defined
compare_strength = lambda x, y: strength[(x, y)] - strength[(y, x)]
rank = sorted(candidates, key=functools.cmp_to_key(compare_strength))
return rank[:n_candidates]
|
Implement Borda count and Schulze method for ranking""" Rank aggregators. """
import itertools
import functools
import numpy as np
def borda_count(voters, n_candidates):
""" Borda count rank aggregator.
Parameters
----------
voters : list-like
A list of arrays where each array corresponds to a voter's preference.
n_candidates : int
The number of best candidates to be selected at each iteration.
Returns
-------
best_candidates : array
The list of indices of the best candidates.
"""
max_score = len(voters[0])
points = {}
# accumulate the points for each candidate
for voter in voters:
for idx, candidate in enumerate(voter):
points[candidate] = points.get(candidate, 0) + max_score - idx
# sort the candidates and return the most popular one(s)
rank = sorted(points, key=points.__getitem__, reverse=True)
return rank[:n_candidates]
def schulze_method(voters, n_candidates):
""" Schulze method of ordering candidates.
Parameters
----------
voters : list-like
A list of arrays where each array corresponds to a voter's preference.
n_candidates : int
The number of best candidates to be selected at each iteration.
Returns
-------
best_candidates : array
The list of indices of the best candidates.
"""
points = {}
candidates = set()
for voter in voters:
for idx_i in range(len(voter)):
for idx_j in range(idx_i + 1, len(voter)):
i = voter[idx_i]
j = voter[idx_j]
points[(i, j)] = points.get((i, j), 0) + 1
# get the id of all candidates
candidates = set(itertools.chain(*points.keys()))
# compute the strongest path using a variant of Floyd–Warshall algorithm
strength = {}
for i, j in itertools.product(candidates, repeat=2):
if i != j:
points.setdefault((i, j), 0)
points.setdefault((j, i), 0)
if points[(i, j)] > points[(j, i)]:
strength[(i, j)] = points[(i, j)]
else:
strength[(i, j)] = 0
# k is the expanding set of intermediate nodes in Floyd-Warshall
for k, i, j in itertools.product(candidates, repeat=3):
if (i != j) and (k != i) and (k != j):
strength[(i, j)] = max(strength[(i, j)], min(strength[(i, k)], strength[(k, j)]))
# Schulze method guarantees that there is no cycle, so sorting is well-defined
compare_strength = lambda x, y: strength[(x, y)] - strength[(y, x)]
rank = sorted(candidates, key=functools.cmp_to_key(compare_strength))
return rank[:n_candidates]
|
<commit_before><commit_msg>Implement Borda count and Schulze method for ranking<commit_after>""" Rank aggregators. """
import itertools
import functools
import numpy as np
def borda_count(voters, n_candidates):
""" Borda count rank aggregator.
Parameters
----------
voters : list-like
A list of arrays where each array corresponds to a voter's preference.
n_candidates : int
The number of best candidates to be selected at each iteration.
Returns
-------
best_candidates : array
The list of indices of the best candidates.
"""
max_score = len(voters[0])
points = {}
# accumulate the points for each candidate
for voter in voters:
for idx, candidate in enumerate(voter):
points[candidate] = points.get(candidate, 0) + max_score - idx
# sort the candidates and return the most popular one(s)
rank = sorted(points, key=points.__getitem__, reverse=True)
return rank[:n_candidates]
def schulze_method(voters, n_candidates):
""" Schulze method of ordering candidates.
Parameters
----------
voters : list-like
A list of arrays where each array corresponds to a voter's preference.
n_candidates : int
The number of best candidates to be selected at each iteration.
Returns
-------
best_candidates : array
The list of indices of the best candidates.
"""
points = {}
candidates = set()
for voter in voters:
for idx_i in range(len(voter)):
for idx_j in range(idx_i + 1, len(voter)):
i = voter[idx_i]
j = voter[idx_j]
points[(i, j)] = points.get((i, j), 0) + 1
# get the id of all candidates
candidates = set(itertools.chain(*points.keys()))
# compute the strongest path using a variant of Floyd–Warshall algorithm
strength = {}
for i, j in itertools.product(candidates, repeat=2):
if i != j:
points.setdefault((i, j), 0)
points.setdefault((j, i), 0)
if points[(i, j)] > points[(j, i)]:
strength[(i, j)] = points[(i, j)]
else:
strength[(i, j)] = 0
# k is the expanding set of intermediate nodes in Floyd-Warshall
for k, i, j in itertools.product(candidates, repeat=3):
if (i != j) and (k != i) and (k != j):
strength[(i, j)] = max(strength[(i, j)], min(strength[(i, k)], strength[(k, j)]))
# Schulze method guarantees that there is no cycle, so sorting is well-defined
compare_strength = lambda x, y: strength[(x, y)] - strength[(y, x)]
rank = sorted(candidates, key=functools.cmp_to_key(compare_strength))
return rank[:n_candidates]
|
|
4df3b93a138cb1de9104f32bcd88a3b359e718be
|
lighty/utils.py
|
lighty/utils.py
|
'''Some utility classes usually used to make working with different python
versions and different environments easier:
string_types - basestring for python 2 and str for python 3
dict_keys - convert dict keys to list
div_operators - operators for division
with_metaclass - metaclasses
'''
import sys
import operator
PY3 = sys.version_info[0] == 3
div_operators = (operator.__truediv__, operator.__floordiv__)
if PY3:
string_types = str
dict_keys = lambda keys: [i for i in keys.__iter__()]
else:
string_types = basestring
dict_keys = lambda keys: keys
div_operators += (operator.__div__, )
def with_metaclass(meta, base=object):
'''Create a new class with base class base and metaclass metaclass. This is
designed to be used in class declarations like this::
from lighty.utils import with_metaclass
class Meta(type):
pass
class Base(object):
pass
class MyClass(with_metaclass(Meta, Base)):
pass
'''
return meta("NewBase", (base, ), {})
|
Add missed file from previous commit
|
Add missed file from previous commit
|
Python
|
bsd-3-clause
|
GrAndSE/lighty
|
Add missed file from previous commit
|
'''Some utility classes usually used to make working with different python
versions and different environments easier:
string_types - basestring for python 2 and str for python 3
dict_keys - convert dict keys to list
div_operators - operators for division
with_metaclass - metaclasses
'''
import sys
import operator
PY3 = sys.version_info[0] == 3
div_operators = (operator.__truediv__, operator.__floordiv__)
if PY3:
string_types = str
dict_keys = lambda keys: [i for i in keys.__iter__()]
else:
string_types = basestring
dict_keys = lambda keys: keys
div_operators += (operator.__div__, )
def with_metaclass(meta, base=object):
'''Create a new class with base class base and metaclass metaclass. This is
designed to be used in class declarations like this::
from lighty.utils import with_metaclass
class Meta(type):
pass
class Base(object):
pass
class MyClass(with_metaclass(Meta, Base)):
pass
'''
return meta("NewBase", (base, ), {})
|
<commit_before><commit_msg>Add missed file from previous commit<commit_after>
|
'''Some utility classes usually used to make working with different python
versions and different environments easier:
string_types - basestring for python 2 and str for python 3
dict_keys - convert dict keys to list
div_operators - operators for division
with_metaclass - metaclasses
'''
import sys
import operator
PY3 = sys.version_info[0] == 3
div_operators = (operator.__truediv__, operator.__floordiv__)
if PY3:
string_types = str
dict_keys = lambda keys: [i for i in keys.__iter__()]
else:
string_types = basestring
dict_keys = lambda keys: keys
div_operators += (operator.__div__, )
def with_metaclass(meta, base=object):
'''Create a new class with base class base and metaclass metaclass. This is
designed to be used in class declarations like this::
from lighty.utils import with_metaclass
class Meta(type):
pass
class Base(object):
pass
class MyClass(with_metaclass(Meta, Base)):
pass
'''
return meta("NewBase", (base, ), {})
|
Add missed file from previous commit'''Some utility classes usually used to make working with different python
versions and different environments easier:
string_types - basestring for python 2 and str for python 3
dict_keys - convert dict keys to list
div_operators - operators for division
with_metaclass - metaclasses
'''
import sys
import operator
PY3 = sys.version_info[0] == 3
div_operators = (operator.__truediv__, operator.__floordiv__)
if PY3:
string_types = str
dict_keys = lambda keys: [i for i in keys.__iter__()]
else:
string_types = basestring
dict_keys = lambda keys: keys
div_operators += (operator.__div__, )
def with_metaclass(meta, base=object):
'''Create a new class with base class base and metaclass metaclass. This is
designed to be used in class declarations like this::
from lighty.utils import with_metaclass
class Meta(type):
pass
class Base(object):
pass
class MyClass(with_metaclass(Meta, Base)):
pass
'''
return meta("NewBase", (base, ), {})
|
<commit_before><commit_msg>Add missed file from previous commit<commit_after>'''Some utility classes usually used to make working with different python
versions and different environments easier:
string_types - basestring for python 2 and str for python 3
dict_keys - convert dict keys to list
div_operators - operators for division
with_metaclass - metaclasses
'''
import sys
import operator
PY3 = sys.version_info[0] == 3
div_operators = (operator.__truediv__, operator.__floordiv__)
if PY3:
string_types = str
dict_keys = lambda keys: [i for i in keys.__iter__()]
else:
string_types = basestring
dict_keys = lambda keys: keys
div_operators += (operator.__div__, )
def with_metaclass(meta, base=object):
'''Create a new class with base class base and metaclass metaclass. This is
designed to be used in class declarations like this::
from lighty.utils import with_metaclass
class Meta(type):
pass
class Base(object):
pass
class MyClass(with_metaclass(Meta, Base)):
pass
'''
return meta("NewBase", (base, ), {})
|
|
a7e366122096cfe3e31dbd30a6bf1322befe8890
|
tests/quotes_in_attributes.py
|
tests/quotes_in_attributes.py
|
# coding:utf-8
from base import TestBase
class TestQuotesInAttributes(TestBase):
"""Test for quotes in attributes"""
# for debug
# def tearDown(self):
# pass
text = \
"""
<!-- MarkdownTOC {0} -->
<!-- /MarkdownTOC -->
# foo
## bar
"""
def test_no_quote(self):
"""Allow no quotes in attribute"""
toc = self.init_update(self.text.format('levels=1'))['toc']
self.assert_In('- foo', toc)
self.assert_NotIn('- bar', toc)
def test_single(self):
"""Allow single quotes in attribute"""
toc = self.init_update(self.text.format("levels='1'"))['toc']
self.assert_In('- foo', toc)
self.assert_NotIn('- bar', toc)
def test_double(self):
"""Allow single quotes in attribute"""
toc = self.init_update(self.text.format('levels="1"'))['toc']
self.assert_In('- foo', toc)
self.assert_NotIn('- bar', toc)
|
Add test for quotes check in attributes
|
Add test for quotes check in attributes
|
Python
|
mit
|
naokazuterada/MarkdownTOC
|
Add test for quotes check in attributes
|
# coding:utf-8
from base import TestBase
class TestQuotesInAttributes(TestBase):
"""Test for quotes in attributes"""
# for debug
# def tearDown(self):
# pass
text = \
"""
<!-- MarkdownTOC {0} -->
<!-- /MarkdownTOC -->
# foo
## bar
"""
def test_no_quote(self):
"""Allow no quotes in attribute"""
toc = self.init_update(self.text.format('levels=1'))['toc']
self.assert_In('- foo', toc)
self.assert_NotIn('- bar', toc)
def test_single(self):
"""Allow single quotes in attribute"""
toc = self.init_update(self.text.format("levels='1'"))['toc']
self.assert_In('- foo', toc)
self.assert_NotIn('- bar', toc)
def test_double(self):
"""Allow single quotes in attribute"""
toc = self.init_update(self.text.format('levels="1"'))['toc']
self.assert_In('- foo', toc)
self.assert_NotIn('- bar', toc)
|
<commit_before><commit_msg>Add test for quotes check in attributes<commit_after>
|
# coding:utf-8
from base import TestBase
class TestQuotesInAttributes(TestBase):
"""Test for quotes in attributes"""
# for debug
# def tearDown(self):
# pass
text = \
"""
<!-- MarkdownTOC {0} -->
<!-- /MarkdownTOC -->
# foo
## bar
"""
def test_no_quote(self):
"""Allow no quotes in attribute"""
toc = self.init_update(self.text.format('levels=1'))['toc']
self.assert_In('- foo', toc)
self.assert_NotIn('- bar', toc)
def test_single(self):
"""Allow single quotes in attribute"""
toc = self.init_update(self.text.format("levels='1'"))['toc']
self.assert_In('- foo', toc)
self.assert_NotIn('- bar', toc)
def test_double(self):
"""Allow single quotes in attribute"""
toc = self.init_update(self.text.format('levels="1"'))['toc']
self.assert_In('- foo', toc)
self.assert_NotIn('- bar', toc)
|
Add test for quotes check in attributes# coding:utf-8
from base import TestBase
class TestQuotesInAttributes(TestBase):
"""Test for quotes in attributes"""
# for debug
# def tearDown(self):
# pass
text = \
"""
<!-- MarkdownTOC {0} -->
<!-- /MarkdownTOC -->
# foo
## bar
"""
def test_no_quote(self):
"""Allow no quotes in attribute"""
toc = self.init_update(self.text.format('levels=1'))['toc']
self.assert_In('- foo', toc)
self.assert_NotIn('- bar', toc)
def test_single(self):
"""Allow single quotes in attribute"""
toc = self.init_update(self.text.format("levels='1'"))['toc']
self.assert_In('- foo', toc)
self.assert_NotIn('- bar', toc)
def test_double(self):
"""Allow single quotes in attribute"""
toc = self.init_update(self.text.format('levels="1"'))['toc']
self.assert_In('- foo', toc)
self.assert_NotIn('- bar', toc)
|
<commit_before><commit_msg>Add test for quotes check in attributes<commit_after># coding:utf-8
from base import TestBase
class TestQuotesInAttributes(TestBase):
"""Test for quotes in attributes"""
# for debug
# def tearDown(self):
# pass
text = \
"""
<!-- MarkdownTOC {0} -->
<!-- /MarkdownTOC -->
# foo
## bar
"""
def test_no_quote(self):
"""Allow no quotes in attribute"""
toc = self.init_update(self.text.format('levels=1'))['toc']
self.assert_In('- foo', toc)
self.assert_NotIn('- bar', toc)
def test_single(self):
"""Allow single quotes in attribute"""
toc = self.init_update(self.text.format("levels='1'"))['toc']
self.assert_In('- foo', toc)
self.assert_NotIn('- bar', toc)
def test_double(self):
"""Allow single quotes in attribute"""
toc = self.init_update(self.text.format('levels="1"'))['toc']
self.assert_In('- foo', toc)
self.assert_NotIn('- bar', toc)
|
|
42f559db432da8fdbd95b20bd5b21c8f6e5721b8
|
nlpppln/frog_to_saf.py
|
nlpppln/frog_to_saf.py
|
#!/usr/bin/env python
import click
import os
import codecs
import json
from xtas.tasks._frog import parse_frog, frog_to_saf
@click.command()
@click.argument('input_dir', type=click.Path(exists=True))
@click.argument('output_dir', type=click.Path())
def frog2saf(input_dir, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
frog_files = os.listdir(input_dir)
for fi in frog_files:
with codecs.open(os.path.join(input_dir, fi)) as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
saf_data = frog_to_saf(parse_frog(lines))
out_file = os.path.join(output_dir, '{}.json'.format(fi))
with codecs.open(out_file, 'wb', encoding='utf-8') as f:
json.dump(saf_data, f, indent=4)
if __name__ == '__main__':
frog2saf()
|
Add script to convert frog output to saf
|
Add script to convert frog output to saf
|
Python
|
apache-2.0
|
WhatWorksWhenForWhom/nlppln,WhatWorksWhenForWhom/nlppln,WhatWorksWhenForWhom/nlppln
|
Add script to convert frog output to saf
|
#!/usr/bin/env python
import click
import os
import codecs
import json
from xtas.tasks._frog import parse_frog, frog_to_saf
@click.command()
@click.argument('input_dir', type=click.Path(exists=True))
@click.argument('output_dir', type=click.Path())
def frog2saf(input_dir, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
frog_files = os.listdir(input_dir)
for fi in frog_files:
with codecs.open(os.path.join(input_dir, fi)) as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
saf_data = frog_to_saf(parse_frog(lines))
out_file = os.path.join(output_dir, '{}.json'.format(fi))
with codecs.open(out_file, 'wb', encoding='utf-8') as f:
json.dump(saf_data, f, indent=4)
if __name__ == '__main__':
frog2saf()
|
<commit_before><commit_msg>Add script to convert frog output to saf<commit_after>
|
#!/usr/bin/env python
import click
import os
import codecs
import json
from xtas.tasks._frog import parse_frog, frog_to_saf
@click.command()
@click.argument('input_dir', type=click.Path(exists=True))
@click.argument('output_dir', type=click.Path())
def frog2saf(input_dir, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
frog_files = os.listdir(input_dir)
for fi in frog_files:
with codecs.open(os.path.join(input_dir, fi)) as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
saf_data = frog_to_saf(parse_frog(lines))
out_file = os.path.join(output_dir, '{}.json'.format(fi))
with codecs.open(out_file, 'wb', encoding='utf-8') as f:
json.dump(saf_data, f, indent=4)
if __name__ == '__main__':
frog2saf()
|
Add script to convert frog output to saf#!/usr/bin/env python
import click
import os
import codecs
import json
from xtas.tasks._frog import parse_frog, frog_to_saf
@click.command()
@click.argument('input_dir', type=click.Path(exists=True))
@click.argument('output_dir', type=click.Path())
def frog2saf(input_dir, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
frog_files = os.listdir(input_dir)
for fi in frog_files:
with codecs.open(os.path.join(input_dir, fi)) as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
saf_data = frog_to_saf(parse_frog(lines))
out_file = os.path.join(output_dir, '{}.json'.format(fi))
with codecs.open(out_file, 'wb', encoding='utf-8') as f:
json.dump(saf_data, f, indent=4)
if __name__ == '__main__':
frog2saf()
|
<commit_before><commit_msg>Add script to convert frog output to saf<commit_after>#!/usr/bin/env python
import click
import os
import codecs
import json
from xtas.tasks._frog import parse_frog, frog_to_saf
@click.command()
@click.argument('input_dir', type=click.Path(exists=True))
@click.argument('output_dir', type=click.Path())
def frog2saf(input_dir, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
frog_files = os.listdir(input_dir)
for fi in frog_files:
with codecs.open(os.path.join(input_dir, fi)) as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
saf_data = frog_to_saf(parse_frog(lines))
out_file = os.path.join(output_dir, '{}.json'.format(fi))
with codecs.open(out_file, 'wb', encoding='utf-8') as f:
json.dump(saf_data, f, indent=4)
if __name__ == '__main__':
frog2saf()
|
|
b6de36cec53d26716ff340c0f42ecd36e3488e12
|
tools/data/track_proto_to_zip.py
|
tools/data/track_proto_to_zip.py
|
#!/usr/bin/env python
import argparse
import sys
import os.path as osp
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../src/'))
sys.path.insert(0, osp.join(this_dir, '../../external/'))
from vdetlib.utils.protocol import proto_load
from tpn.data_io import save_track_proto_to_zip
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('proto_file')
parser.add_argument('save_zip')
args = parser.parse_args()
proto = proto_load(args.proto_file)
|
Add a script to convert track_proto to zip file for RNN training.
|
Add a script to convert track_proto to zip file for RNN training.
|
Python
|
mit
|
myfavouritekk/TPN
|
Add a script to convert track_proto to zip file for RNN training.
|
#!/usr/bin/env python
import argparse
import sys
import os.path as osp
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../src/'))
sys.path.insert(0, osp.join(this_dir, '../../external/'))
from vdetlib.utils.protocol import proto_load
from tpn.data_io import save_track_proto_to_zip
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('proto_file')
parser.add_argument('save_zip')
args = parser.parse_args()
proto = proto_load(args.proto_file)
|
<commit_before><commit_msg>Add a script to convert track_proto to zip file for RNN training.<commit_after>
|
#!/usr/bin/env python
import argparse
import sys
import os.path as osp
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../src/'))
sys.path.insert(0, osp.join(this_dir, '../../external/'))
from vdetlib.utils.protocol import proto_load
from tpn.data_io import save_track_proto_to_zip
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('proto_file')
parser.add_argument('save_zip')
args = parser.parse_args()
proto = proto_load(args.proto_file)
|
Add a script to convert track_proto to zip file for RNN training.#!/usr/bin/env python
import argparse
import sys
import os.path as osp
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../src/'))
sys.path.insert(0, osp.join(this_dir, '../../external/'))
from vdetlib.utils.protocol import proto_load
from tpn.data_io import save_track_proto_to_zip
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('proto_file')
parser.add_argument('save_zip')
args = parser.parse_args()
proto = proto_load(args.proto_file)
|
<commit_before><commit_msg>Add a script to convert track_proto to zip file for RNN training.<commit_after>#!/usr/bin/env python
import argparse
import sys
import os.path as osp
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../src/'))
sys.path.insert(0, osp.join(this_dir, '../../external/'))
from vdetlib.utils.protocol import proto_load
from tpn.data_io import save_track_proto_to_zip
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('proto_file')
parser.add_argument('save_zip')
args = parser.parse_args()
proto = proto_load(args.proto_file)
|
|
daca419af5f75443a09c6897e968c4af158412c1
|
tempest/tests/services/compute/test_migrations_client.py
|
tempest/tests/services/compute/test_migrations_client.py
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.services.compute.json import migrations_client
from tempest.tests import fake_auth_provider
from tempest.tests.services.compute import base
class TestMigrationsClient(base.BaseComputeServiceTest):
FAKE_MIGRATION_INFO = {"migrations": [{
"created_at": "2012-10-29T13:42:02",
"dest_compute": "compute2",
"dest_host": "1.2.3.4",
"dest_node": "node2",
"id": 1234,
"instance_uuid": "e9e4fdd7-f956-44ff-bfeb-d654a96ab3a2",
"new_instance_type_id": 2,
"old_instance_type_id": 1,
"source_compute": "compute1",
"source_node": "node1",
"status": "finished",
"updated_at": "2012-10-29T13:42:02"}]}
def setUp(self):
super(TestMigrationsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.mg_client_obj = migrations_client.MigrationsClient(
fake_auth, 'compute', 'regionOne')
def _test_list_migrations(self, bytes_body=False):
self.check_service_client_function(
self.mg_client_obj.list_migrations,
'tempest.common.service_client.ServiceClient.get',
self.FAKE_MIGRATION_INFO,
bytes_body)
def test_list_migration_with_str_body(self):
self._test_list_migrations()
def test_list_migration_with_bytes_body(self):
self._test_list_migrations(True)
|
Add unit tests for migrations_client
|
Add unit tests for migrations_client
This patch adds unit tests for migrations_client.
Change-Id: I9bb9556d9c9b821c55c97f57dd783c00e5a05e04
|
Python
|
apache-2.0
|
vedujoshi/tempest,flyingfish007/tempest,LIS/lis-tempest,cisco-openstack/tempest,openstack/tempest,masayukig/tempest,openstack/tempest,tonyli71/tempest,bigswitch/tempest,sebrandon1/tempest,izadorozhna/tempest,sebrandon1/tempest,bigswitch/tempest,tonyli71/tempest,flyingfish007/tempest,zsoltdudas/lis-tempest,Juniper/tempest,vedujoshi/tempest,Tesora/tesora-tempest,dkalashnik/tempest,rakeshmi/tempest,Tesora/tesora-tempest,xbezdick/tempest,Juniper/tempest,cisco-openstack/tempest,LIS/lis-tempest,dkalashnik/tempest,xbezdick/tempest,izadorozhna/tempest,masayukig/tempest,rakeshmi/tempest,zsoltdudas/lis-tempest
|
Add unit tests for migrations_client
This patch adds unit tests for migrations_client.
Change-Id: I9bb9556d9c9b821c55c97f57dd783c00e5a05e04
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.services.compute.json import migrations_client
from tempest.tests import fake_auth_provider
from tempest.tests.services.compute import base
class TestMigrationsClient(base.BaseComputeServiceTest):
FAKE_MIGRATION_INFO = {"migrations": [{
"created_at": "2012-10-29T13:42:02",
"dest_compute": "compute2",
"dest_host": "1.2.3.4",
"dest_node": "node2",
"id": 1234,
"instance_uuid": "e9e4fdd7-f956-44ff-bfeb-d654a96ab3a2",
"new_instance_type_id": 2,
"old_instance_type_id": 1,
"source_compute": "compute1",
"source_node": "node1",
"status": "finished",
"updated_at": "2012-10-29T13:42:02"}]}
def setUp(self):
super(TestMigrationsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.mg_client_obj = migrations_client.MigrationsClient(
fake_auth, 'compute', 'regionOne')
def _test_list_migrations(self, bytes_body=False):
self.check_service_client_function(
self.mg_client_obj.list_migrations,
'tempest.common.service_client.ServiceClient.get',
self.FAKE_MIGRATION_INFO,
bytes_body)
def test_list_migration_with_str_body(self):
self._test_list_migrations()
def test_list_migration_with_bytes_body(self):
self._test_list_migrations(True)
|
<commit_before><commit_msg>Add unit tests for migrations_client
This patch adds unit tests for migrations_client.
Change-Id: I9bb9556d9c9b821c55c97f57dd783c00e5a05e04<commit_after>
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.services.compute.json import migrations_client
from tempest.tests import fake_auth_provider
from tempest.tests.services.compute import base
class TestMigrationsClient(base.BaseComputeServiceTest):
FAKE_MIGRATION_INFO = {"migrations": [{
"created_at": "2012-10-29T13:42:02",
"dest_compute": "compute2",
"dest_host": "1.2.3.4",
"dest_node": "node2",
"id": 1234,
"instance_uuid": "e9e4fdd7-f956-44ff-bfeb-d654a96ab3a2",
"new_instance_type_id": 2,
"old_instance_type_id": 1,
"source_compute": "compute1",
"source_node": "node1",
"status": "finished",
"updated_at": "2012-10-29T13:42:02"}]}
def setUp(self):
super(TestMigrationsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.mg_client_obj = migrations_client.MigrationsClient(
fake_auth, 'compute', 'regionOne')
def _test_list_migrations(self, bytes_body=False):
self.check_service_client_function(
self.mg_client_obj.list_migrations,
'tempest.common.service_client.ServiceClient.get',
self.FAKE_MIGRATION_INFO,
bytes_body)
def test_list_migration_with_str_body(self):
self._test_list_migrations()
def test_list_migration_with_bytes_body(self):
self._test_list_migrations(True)
|
Add unit tests for migrations_client
This patch adds unit tests for migrations_client.
Change-Id: I9bb9556d9c9b821c55c97f57dd783c00e5a05e04# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.services.compute.json import migrations_client
from tempest.tests import fake_auth_provider
from tempest.tests.services.compute import base
class TestMigrationsClient(base.BaseComputeServiceTest):
FAKE_MIGRATION_INFO = {"migrations": [{
"created_at": "2012-10-29T13:42:02",
"dest_compute": "compute2",
"dest_host": "1.2.3.4",
"dest_node": "node2",
"id": 1234,
"instance_uuid": "e9e4fdd7-f956-44ff-bfeb-d654a96ab3a2",
"new_instance_type_id": 2,
"old_instance_type_id": 1,
"source_compute": "compute1",
"source_node": "node1",
"status": "finished",
"updated_at": "2012-10-29T13:42:02"}]}
def setUp(self):
super(TestMigrationsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.mg_client_obj = migrations_client.MigrationsClient(
fake_auth, 'compute', 'regionOne')
def _test_list_migrations(self, bytes_body=False):
self.check_service_client_function(
self.mg_client_obj.list_migrations,
'tempest.common.service_client.ServiceClient.get',
self.FAKE_MIGRATION_INFO,
bytes_body)
def test_list_migration_with_str_body(self):
self._test_list_migrations()
def test_list_migration_with_bytes_body(self):
self._test_list_migrations(True)
|
<commit_before><commit_msg>Add unit tests for migrations_client
This patch adds unit tests for migrations_client.
Change-Id: I9bb9556d9c9b821c55c97f57dd783c00e5a05e04<commit_after># Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.services.compute.json import migrations_client
from tempest.tests import fake_auth_provider
from tempest.tests.services.compute import base
class TestMigrationsClient(base.BaseComputeServiceTest):
FAKE_MIGRATION_INFO = {"migrations": [{
"created_at": "2012-10-29T13:42:02",
"dest_compute": "compute2",
"dest_host": "1.2.3.4",
"dest_node": "node2",
"id": 1234,
"instance_uuid": "e9e4fdd7-f956-44ff-bfeb-d654a96ab3a2",
"new_instance_type_id": 2,
"old_instance_type_id": 1,
"source_compute": "compute1",
"source_node": "node1",
"status": "finished",
"updated_at": "2012-10-29T13:42:02"}]}
def setUp(self):
super(TestMigrationsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.mg_client_obj = migrations_client.MigrationsClient(
fake_auth, 'compute', 'regionOne')
def _test_list_migrations(self, bytes_body=False):
self.check_service_client_function(
self.mg_client_obj.list_migrations,
'tempest.common.service_client.ServiceClient.get',
self.FAKE_MIGRATION_INFO,
bytes_body)
def test_list_migration_with_str_body(self):
self._test_list_migrations()
def test_list_migration_with_bytes_body(self):
self._test_list_migrations(True)
|
|
ac1acad4ba9f31fc306e95668d489424611a681e
|
lsv_compassion/model/invoice_line.py
|
lsv_compassion/model/invoice_line.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp.osv import orm, fields
class invoice_line(orm.Model):
_inherit = 'account.invoice.line'
def _get_child_name(self, cr, uid, ids, name, dict, context=None):
res = {}
for line in self.browse(cr, uid, ids, context):
child_name = ''
if line.contract_id and line.contract_id.child_id:
child_name = line.contract_id.child_id.name
res[line.id] = child_name
return res
_columns = {
'child_name': fields.function(
_get_child_name, string='Child name', type='char')
}
|
Add missing file in previous commit.
|
Add missing file in previous commit.
|
Python
|
agpl-3.0
|
eicher31/compassion-switzerland,eicher31/compassion-switzerland,Secheron/compassion-switzerland,CompassionCH/compassion-switzerland,Secheron/compassion-switzerland,CompassionCH/compassion-switzerland,eicher31/compassion-switzerland,ecino/compassion-switzerland,ecino/compassion-switzerland,CompassionCH/compassion-switzerland,ecino/compassion-switzerland,MickSandoz/compassion-switzerland,MickSandoz/compassion-switzerland,ndtran/compassion-switzerland,ndtran/compassion-switzerland
|
Add missing file in previous commit.
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp.osv import orm, fields
class invoice_line(orm.Model):
_inherit = 'account.invoice.line'
def _get_child_name(self, cr, uid, ids, name, dict, context=None):
res = {}
for line in self.browse(cr, uid, ids, context):
child_name = ''
if line.contract_id and line.contract_id.child_id:
child_name = line.contract_id.child_id.name
res[line.id] = child_name
return res
_columns = {
'child_name': fields.function(
_get_child_name, string='Child name', type='char')
}
|
<commit_before><commit_msg>Add missing file in previous commit.<commit_after>
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp.osv import orm, fields
class invoice_line(orm.Model):
_inherit = 'account.invoice.line'
def _get_child_name(self, cr, uid, ids, name, dict, context=None):
res = {}
for line in self.browse(cr, uid, ids, context):
child_name = ''
if line.contract_id and line.contract_id.child_id:
child_name = line.contract_id.child_id.name
res[line.id] = child_name
return res
_columns = {
'child_name': fields.function(
_get_child_name, string='Child name', type='char')
}
|
Add missing file in previous commit.# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp.osv import orm, fields
class invoice_line(orm.Model):
_inherit = 'account.invoice.line'
def _get_child_name(self, cr, uid, ids, name, dict, context=None):
res = {}
for line in self.browse(cr, uid, ids, context):
child_name = ''
if line.contract_id and line.contract_id.child_id:
child_name = line.contract_id.child_id.name
res[line.id] = child_name
return res
_columns = {
'child_name': fields.function(
_get_child_name, string='Child name', type='char')
}
|
<commit_before><commit_msg>Add missing file in previous commit.<commit_after># -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp.osv import orm, fields
class invoice_line(orm.Model):
_inherit = 'account.invoice.line'
def _get_child_name(self, cr, uid, ids, name, dict, context=None):
res = {}
for line in self.browse(cr, uid, ids, context):
child_name = ''
if line.contract_id and line.contract_id.child_id:
child_name = line.contract_id.child_id.name
res[line.id] = child_name
return res
_columns = {
'child_name': fields.function(
_get_child_name, string='Child name', type='char')
}
|
|
380429426835f30f6322f1154b9deb96629807cd
|
tests/data_checks/test_trans_associations.py
|
tests/data_checks/test_trans_associations.py
|
# ------------------------------------------------
# built-ins
import unittest
# local
from utils.base import TestPostgapBase
# ------------------------------------------------
class TestTransAssociations(TestPostgapBase):
'''
Consider first association per group only, as chromosomes
being consistent within groups is tested elsewhere.
'''
def setUp(self):
self.per_gene_and_ld_snp = self.pg.groupby(['gene_id', 'ld_snp_rsID'])
def assert_no_trans_associations(self, chrom_field, gene_chrom_field):
firsts = self.per_gene_and_ld_snp.head(1)
chroms_match = (firsts[gene_chrom_field] == firsts[chrom_field])
all_chroms_match = chroms_match.all()
first_exception = None
if (not all_chroms_match):
fields = ['ld_snp_rsID', 'gene_id', 'gene_symbol', chrom_field, gene_chrom_field]
first_exception = firsts[~chroms_match][fields].head(1).to_string(index=False)
self.assertTrue(all_chroms_match, first_exception)
def test_trans_associations_filtered(self):
self.assert_no_trans_associations('chrom', 'gene_chrom')
def test_trans_associations_filtered_GRCh38(self):
self.assert_no_trans_associations('GRCh38_chrom', 'GRCh38_gene_chrom')
if __name__ == '__main__':
unittest.main()
|
Check for chrom gene_chrom mismatches
|
Check for chrom gene_chrom mismatches
|
Python
|
apache-2.0
|
Ensembl/cttv024,Ensembl/cttv024
|
Check for chrom gene_chrom mismatches
|
# ------------------------------------------------
# built-ins
import unittest
# local
from utils.base import TestPostgapBase
# ------------------------------------------------
class TestTransAssociations(TestPostgapBase):
'''
Consider first association per group only, as chromosomes
being consistent within groups is tested elsewhere.
'''
def setUp(self):
self.per_gene_and_ld_snp = self.pg.groupby(['gene_id', 'ld_snp_rsID'])
def assert_no_trans_associations(self, chrom_field, gene_chrom_field):
firsts = self.per_gene_and_ld_snp.head(1)
chroms_match = (firsts[gene_chrom_field] == firsts[chrom_field])
all_chroms_match = chroms_match.all()
first_exception = None
if (not all_chroms_match):
fields = ['ld_snp_rsID', 'gene_id', 'gene_symbol', chrom_field, gene_chrom_field]
first_exception = firsts[~chroms_match][fields].head(1).to_string(index=False)
self.assertTrue(all_chroms_match, first_exception)
def test_trans_associations_filtered(self):
self.assert_no_trans_associations('chrom', 'gene_chrom')
def test_trans_associations_filtered_GRCh38(self):
self.assert_no_trans_associations('GRCh38_chrom', 'GRCh38_gene_chrom')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Check for chrom gene_chrom mismatches<commit_after>
|
# ------------------------------------------------
# built-ins
import unittest
# local
from utils.base import TestPostgapBase
# ------------------------------------------------
class TestTransAssociations(TestPostgapBase):
'''
Consider first association per group only, as chromosomes
being consistent within groups is tested elsewhere.
'''
def setUp(self):
self.per_gene_and_ld_snp = self.pg.groupby(['gene_id', 'ld_snp_rsID'])
def assert_no_trans_associations(self, chrom_field, gene_chrom_field):
firsts = self.per_gene_and_ld_snp.head(1)
chroms_match = (firsts[gene_chrom_field] == firsts[chrom_field])
all_chroms_match = chroms_match.all()
first_exception = None
if (not all_chroms_match):
fields = ['ld_snp_rsID', 'gene_id', 'gene_symbol', chrom_field, gene_chrom_field]
first_exception = firsts[~chroms_match][fields].head(1).to_string(index=False)
self.assertTrue(all_chroms_match, first_exception)
def test_trans_associations_filtered(self):
self.assert_no_trans_associations('chrom', 'gene_chrom')
def test_trans_associations_filtered_GRCh38(self):
self.assert_no_trans_associations('GRCh38_chrom', 'GRCh38_gene_chrom')
if __name__ == '__main__':
unittest.main()
|
Check for chrom gene_chrom mismatches# ------------------------------------------------
# built-ins
import unittest
# local
from utils.base import TestPostgapBase
# ------------------------------------------------
class TestTransAssociations(TestPostgapBase):
'''
Consider first association per group only, as chromosomes
being consistent within groups is tested elsewhere.
'''
def setUp(self):
self.per_gene_and_ld_snp = self.pg.groupby(['gene_id', 'ld_snp_rsID'])
def assert_no_trans_associations(self, chrom_field, gene_chrom_field):
firsts = self.per_gene_and_ld_snp.head(1)
chroms_match = (firsts[gene_chrom_field] == firsts[chrom_field])
all_chroms_match = chroms_match.all()
first_exception = None
if (not all_chroms_match):
fields = ['ld_snp_rsID', 'gene_id', 'gene_symbol', chrom_field, gene_chrom_field]
first_exception = firsts[~chroms_match][fields].head(1).to_string(index=False)
self.assertTrue(all_chroms_match, first_exception)
def test_trans_associations_filtered(self):
self.assert_no_trans_associations('chrom', 'gene_chrom')
def test_trans_associations_filtered_GRCh38(self):
self.assert_no_trans_associations('GRCh38_chrom', 'GRCh38_gene_chrom')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Check for chrom gene_chrom mismatches<commit_after># ------------------------------------------------
# built-ins
import unittest
# local
from utils.base import TestPostgapBase
# ------------------------------------------------
class TestTransAssociations(TestPostgapBase):
'''
Consider first association per group only, as chromosomes
being consistent within groups is tested elsewhere.
'''
def setUp(self):
self.per_gene_and_ld_snp = self.pg.groupby(['gene_id', 'ld_snp_rsID'])
def assert_no_trans_associations(self, chrom_field, gene_chrom_field):
firsts = self.per_gene_and_ld_snp.head(1)
chroms_match = (firsts[gene_chrom_field] == firsts[chrom_field])
all_chroms_match = chroms_match.all()
first_exception = None
if (not all_chroms_match):
fields = ['ld_snp_rsID', 'gene_id', 'gene_symbol', chrom_field, gene_chrom_field]
first_exception = firsts[~chroms_match][fields].head(1).to_string(index=False)
self.assertTrue(all_chroms_match, first_exception)
def test_trans_associations_filtered(self):
self.assert_no_trans_associations('chrom', 'gene_chrom')
def test_trans_associations_filtered_GRCh38(self):
self.assert_no_trans_associations('GRCh38_chrom', 'GRCh38_gene_chrom')
if __name__ == '__main__':
unittest.main()
|
|
a5314901f123dac6633bd7e7ece6a4d68805070a
|
ice_setup/tests/test_paths.py
|
ice_setup/tests/test_paths.py
|
import tempfile
import os
import shutil
import pytest
from ice_setup.ice import get_package_source, DirNotFound, VersionNotFound
@pytest.fixture
def pkg_path():
pkg_path = tempfile.mkdtemp()
def fin():
shutil.rmtree(pkg_path)
return pkg_path
class TestGetPackageSource(object):
def test_unversioned_missing(self, pkg_path):
with pytest.raises(DirNotFound):
get_package_source(pkg_path, 'ceph')
def test_unversioned_ok(self, pkg_path):
os.mkdir(os.path.join(pkg_path, 'ceph'))
path = get_package_source(pkg_path, 'ceph')
assert path == os.path.join(pkg_path, 'ceph')
def test_versioned_missing(self, pkg_path):
os.mkdir(os.path.join(pkg_path, 'ceph'))
with pytest.raises(VersionNotFound):
get_package_source(pkg_path, 'ceph', traverse=True)
def test_versioned_ok(self, pkg_path):
os.makedirs(os.path.join(pkg_path, 'ceph/0.80.0'))
path = get_package_source(pkg_path, 'ceph', traverse=True)
assert path == os.path.join(pkg_path, 'ceph/0.80.0')
|
Add tests for package path building
|
Add tests for package path building
Unit tests for get_package_source()
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>
|
Python
|
mit
|
trhoden/ice-setup,ceph/ice-setup,ktdreyer/ice-setup
|
Add tests for package path building
Unit tests for get_package_source()
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>
|
import tempfile
import os
import shutil
import pytest
from ice_setup.ice import get_package_source, DirNotFound, VersionNotFound
@pytest.fixture
def pkg_path():
pkg_path = tempfile.mkdtemp()
def fin():
shutil.rmtree(pkg_path)
return pkg_path
class TestGetPackageSource(object):
def test_unversioned_missing(self, pkg_path):
with pytest.raises(DirNotFound):
get_package_source(pkg_path, 'ceph')
def test_unversioned_ok(self, pkg_path):
os.mkdir(os.path.join(pkg_path, 'ceph'))
path = get_package_source(pkg_path, 'ceph')
assert path == os.path.join(pkg_path, 'ceph')
def test_versioned_missing(self, pkg_path):
os.mkdir(os.path.join(pkg_path, 'ceph'))
with pytest.raises(VersionNotFound):
get_package_source(pkg_path, 'ceph', traverse=True)
def test_versioned_ok(self, pkg_path):
os.makedirs(os.path.join(pkg_path, 'ceph/0.80.0'))
path = get_package_source(pkg_path, 'ceph', traverse=True)
assert path == os.path.join(pkg_path, 'ceph/0.80.0')
|
<commit_before><commit_msg>Add tests for package path building
Unit tests for get_package_source()
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com><commit_after>
|
import tempfile
import os
import shutil
import pytest
from ice_setup.ice import get_package_source, DirNotFound, VersionNotFound
@pytest.fixture
def pkg_path():
pkg_path = tempfile.mkdtemp()
def fin():
shutil.rmtree(pkg_path)
return pkg_path
class TestGetPackageSource(object):
def test_unversioned_missing(self, pkg_path):
with pytest.raises(DirNotFound):
get_package_source(pkg_path, 'ceph')
def test_unversioned_ok(self, pkg_path):
os.mkdir(os.path.join(pkg_path, 'ceph'))
path = get_package_source(pkg_path, 'ceph')
assert path == os.path.join(pkg_path, 'ceph')
def test_versioned_missing(self, pkg_path):
os.mkdir(os.path.join(pkg_path, 'ceph'))
with pytest.raises(VersionNotFound):
get_package_source(pkg_path, 'ceph', traverse=True)
def test_versioned_ok(self, pkg_path):
os.makedirs(os.path.join(pkg_path, 'ceph/0.80.0'))
path = get_package_source(pkg_path, 'ceph', traverse=True)
assert path == os.path.join(pkg_path, 'ceph/0.80.0')
|
Add tests for package path building
Unit tests for get_package_source()
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>import tempfile
import os
import shutil
import pytest
from ice_setup.ice import get_package_source, DirNotFound, VersionNotFound
@pytest.fixture
def pkg_path():
pkg_path = tempfile.mkdtemp()
def fin():
shutil.rmtree(pkg_path)
return pkg_path
class TestGetPackageSource(object):
def test_unversioned_missing(self, pkg_path):
with pytest.raises(DirNotFound):
get_package_source(pkg_path, 'ceph')
def test_unversioned_ok(self, pkg_path):
os.mkdir(os.path.join(pkg_path, 'ceph'))
path = get_package_source(pkg_path, 'ceph')
assert path == os.path.join(pkg_path, 'ceph')
def test_versioned_missing(self, pkg_path):
os.mkdir(os.path.join(pkg_path, 'ceph'))
with pytest.raises(VersionNotFound):
get_package_source(pkg_path, 'ceph', traverse=True)
def test_versioned_ok(self, pkg_path):
os.makedirs(os.path.join(pkg_path, 'ceph/0.80.0'))
path = get_package_source(pkg_path, 'ceph', traverse=True)
assert path == os.path.join(pkg_path, 'ceph/0.80.0')
|
<commit_before><commit_msg>Add tests for package path building
Unit tests for get_package_source()
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com><commit_after>import tempfile
import os
import shutil
import pytest
from ice_setup.ice import get_package_source, DirNotFound, VersionNotFound
@pytest.fixture
def pkg_path():
pkg_path = tempfile.mkdtemp()
def fin():
shutil.rmtree(pkg_path)
return pkg_path
class TestGetPackageSource(object):
def test_unversioned_missing(self, pkg_path):
with pytest.raises(DirNotFound):
get_package_source(pkg_path, 'ceph')
def test_unversioned_ok(self, pkg_path):
os.mkdir(os.path.join(pkg_path, 'ceph'))
path = get_package_source(pkg_path, 'ceph')
assert path == os.path.join(pkg_path, 'ceph')
def test_versioned_missing(self, pkg_path):
os.mkdir(os.path.join(pkg_path, 'ceph'))
with pytest.raises(VersionNotFound):
get_package_source(pkg_path, 'ceph', traverse=True)
def test_versioned_ok(self, pkg_path):
os.makedirs(os.path.join(pkg_path, 'ceph/0.80.0'))
path = get_package_source(pkg_path, 'ceph', traverse=True)
assert path == os.path.join(pkg_path, 'ceph/0.80.0')
|
|
0007289a32262df6ce167f33dd8d30219cce2385
|
run.py
|
run.py
|
from flask import Flask, request, redirect
import twilio.twiml
from bixiapi import scraper, conf
app = Flask(__name__)
def get_location_info(stations, location, looking_for):
location = str(location).lower()
if location in conf.locations:
for station_id in conf.locations[location]:
try:
station = stations[station_id]
except KeyError:
# Can't find it in the API, weird
continue
num_things = station[looking_for]
station_name = conf.stations.get(station_id, station['name'])
if num_things > 0:
return "%s: %d %s" % (station_name, num_things, looking_for)
# Nothing has been found
return "No stations with %s near %s" % (looking_for, location)
else:
return "Invalid location: %s" % location
@app.route("/", methods=['GET', 'POST'])
def process_request():
stations = scraper.get_stations(conf.city)
body = request.values.get('Body')
station_info = []
locations = body.strip().split(' ')
# If there are two, first is the start, last is the end
if len(locations) == 2:
start_location = locations[0]
end_location = locations[1]
station_info.append(get_location_info(stations, start_location,
'bikes'))
station_info.append(get_location_info(stations, end_location, 'docks'))
else:
# Show bike and dock info for every station
for location in locations:
station_info.append(get_location_info(stations, location, 'bikes'))
station_info.append(get_location_info(stations, location, 'docks'))
resp = twilio.twiml.Response()
resp.sms("\n".join(station_info))
return str(resp)
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0')
|
Add Flask server file (Bixi statuses as a service)
|
Add Flask server file (Bixi statuses as a service)
|
Python
|
mit
|
dellsystem/bixi-checker,tuxication/bixi-checker
|
Add Flask server file (Bixi statuses as a service)
|
from flask import Flask, request, redirect
import twilio.twiml
from bixiapi import scraper, conf
app = Flask(__name__)
def get_location_info(stations, location, looking_for):
location = str(location).lower()
if location in conf.locations:
for station_id in conf.locations[location]:
try:
station = stations[station_id]
except KeyError:
# Can't find it in the API, weird
continue
num_things = station[looking_for]
station_name = conf.stations.get(station_id, station['name'])
if num_things > 0:
return "%s: %d %s" % (station_name, num_things, looking_for)
# Nothing has been found
return "No stations with %s near %s" % (looking_for, location)
else:
return "Invalid location: %s" % location
@app.route("/", methods=['GET', 'POST'])
def process_request():
stations = scraper.get_stations(conf.city)
body = request.values.get('Body')
station_info = []
locations = body.strip().split(' ')
# If there are two, first is the start, last is the end
if len(locations) == 2:
start_location = locations[0]
end_location = locations[1]
station_info.append(get_location_info(stations, start_location,
'bikes'))
station_info.append(get_location_info(stations, end_location, 'docks'))
else:
# Show bike and dock info for every station
for location in locations:
station_info.append(get_location_info(stations, location, 'bikes'))
station_info.append(get_location_info(stations, location, 'docks'))
resp = twilio.twiml.Response()
resp.sms("\n".join(station_info))
return str(resp)
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0')
|
<commit_before><commit_msg>Add Flask server file (Bixi statuses as a service)<commit_after>
|
from flask import Flask, request, redirect
import twilio.twiml
from bixiapi import scraper, conf
app = Flask(__name__)
def get_location_info(stations, location, looking_for):
location = str(location).lower()
if location in conf.locations:
for station_id in conf.locations[location]:
try:
station = stations[station_id]
except KeyError:
# Can't find it in the API, weird
continue
num_things = station[looking_for]
station_name = conf.stations.get(station_id, station['name'])
if num_things > 0:
return "%s: %d %s" % (station_name, num_things, looking_for)
# Nothing has been found
return "No stations with %s near %s" % (looking_for, location)
else:
return "Invalid location: %s" % location
@app.route("/", methods=['GET', 'POST'])
def process_request():
stations = scraper.get_stations(conf.city)
body = request.values.get('Body')
station_info = []
locations = body.strip().split(' ')
# If there are two, first is the start, last is the end
if len(locations) == 2:
start_location = locations[0]
end_location = locations[1]
station_info.append(get_location_info(stations, start_location,
'bikes'))
station_info.append(get_location_info(stations, end_location, 'docks'))
else:
# Show bike and dock info for every station
for location in locations:
station_info.append(get_location_info(stations, location, 'bikes'))
station_info.append(get_location_info(stations, location, 'docks'))
resp = twilio.twiml.Response()
resp.sms("\n".join(station_info))
return str(resp)
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0')
|
Add Flask server file (Bixi statuses as a service)from flask import Flask, request, redirect
import twilio.twiml
from bixiapi import scraper, conf
app = Flask(__name__)
def get_location_info(stations, location, looking_for):
location = str(location).lower()
if location in conf.locations:
for station_id in conf.locations[location]:
try:
station = stations[station_id]
except KeyError:
# Can't find it in the API, weird
continue
num_things = station[looking_for]
station_name = conf.stations.get(station_id, station['name'])
if num_things > 0:
return "%s: %d %s" % (station_name, num_things, looking_for)
# Nothing has been found
return "No stations with %s near %s" % (looking_for, location)
else:
return "Invalid location: %s" % location
@app.route("/", methods=['GET', 'POST'])
def process_request():
stations = scraper.get_stations(conf.city)
body = request.values.get('Body')
station_info = []
locations = body.strip().split(' ')
# If there are two, first is the start, last is the end
if len(locations) == 2:
start_location = locations[0]
end_location = locations[1]
station_info.append(get_location_info(stations, start_location,
'bikes'))
station_info.append(get_location_info(stations, end_location, 'docks'))
else:
# Show bike and dock info for every station
for location in locations:
station_info.append(get_location_info(stations, location, 'bikes'))
station_info.append(get_location_info(stations, location, 'docks'))
resp = twilio.twiml.Response()
resp.sms("\n".join(station_info))
return str(resp)
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0')
|
<commit_before><commit_msg>Add Flask server file (Bixi statuses as a service)<commit_after>from flask import Flask, request, redirect
import twilio.twiml
from bixiapi import scraper, conf
app = Flask(__name__)
def get_location_info(stations, location, looking_for):
location = str(location).lower()
if location in conf.locations:
for station_id in conf.locations[location]:
try:
station = stations[station_id]
except KeyError:
# Can't find it in the API, weird
continue
num_things = station[looking_for]
station_name = conf.stations.get(station_id, station['name'])
if num_things > 0:
return "%s: %d %s" % (station_name, num_things, looking_for)
# Nothing has been found
return "No stations with %s near %s" % (looking_for, location)
else:
return "Invalid location: %s" % location
@app.route("/", methods=['GET', 'POST'])
def process_request():
stations = scraper.get_stations(conf.city)
body = request.values.get('Body')
station_info = []
locations = body.strip().split(' ')
# If there are two, first is the start, last is the end
if len(locations) == 2:
start_location = locations[0]
end_location = locations[1]
station_info.append(get_location_info(stations, start_location,
'bikes'))
station_info.append(get_location_info(stations, end_location, 'docks'))
else:
# Show bike and dock info for every station
for location in locations:
station_info.append(get_location_info(stations, location, 'bikes'))
station_info.append(get_location_info(stations, location, 'docks'))
resp = twilio.twiml.Response()
resp.sms("\n".join(station_info))
return str(resp)
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0')
|
|
0719c0e51631074dc6ec41c64186c8f23f60dd75
|
non_semantic_speech_benchmark/cap12/cka.py
|
non_semantic_speech_benchmark/cap12/cka.py
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to compute CKA."""
# pylint:disable=invalid-name
import numpy as np
def linear_gram(X):
return np.matmul(X, X.T)
def hsic(K, L):
# X and Y are Nxd
N = K.shape[0]
ones_np = np.ones((N, 1))
H = np.identity(N) - np.matmul(ones_np, ones_np.T) / N
KH = np.matmul(K, H)
LH = np.matmul(L, H)
hsic_np = np.trace(np.matmul(KH, LH)) / (N-1)**2
return hsic_np
def compute_cka(X, Y):
K = linear_gram(X)
L = linear_gram(Y)
hsic_kl = hsic(K, L)
hsic_kk = hsic(K, K)
hsic_ll = hsic(L, L)
cka = hsic_kl / (np.sqrt(hsic_kk) * np.sqrt(hsic_ll))
return cka
def model_pair_cka(model1, model2, common_data_dict):
"""Compute CKA between models."""
X = np.stack(common_data_dict[model1], axis=1)
Y = np.stack(common_data_dict[model2], axis=1)
n_layer_X = X.shape[0]
n_layer_Y = Y.shape[0]
cka = np.zeros((n_layer_X, n_layer_Y))
for x in range(n_layer_X):
print('X:', x, 'of', n_layer_X)
for y in range(n_layer_Y):
cka[x, y] = compute_cka(X[x], Y[y])
return cka
|
Add CKA utils for use in other projects.
|
Add CKA utils for use in other projects.
PiperOrigin-RevId: 461504827
|
Python
|
apache-2.0
|
google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research
|
Add CKA utils for use in other projects.
PiperOrigin-RevId: 461504827
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to compute CKA."""
# pylint:disable=invalid-name
import numpy as np
def linear_gram(X):
return np.matmul(X, X.T)
def hsic(K, L):
# X and Y are Nxd
N = K.shape[0]
ones_np = np.ones((N, 1))
H = np.identity(N) - np.matmul(ones_np, ones_np.T) / N
KH = np.matmul(K, H)
LH = np.matmul(L, H)
hsic_np = np.trace(np.matmul(KH, LH)) / (N-1)**2
return hsic_np
def compute_cka(X, Y):
K = linear_gram(X)
L = linear_gram(Y)
hsic_kl = hsic(K, L)
hsic_kk = hsic(K, K)
hsic_ll = hsic(L, L)
cka = hsic_kl / (np.sqrt(hsic_kk) * np.sqrt(hsic_ll))
return cka
def model_pair_cka(model1, model2, common_data_dict):
"""Compute CKA between models."""
X = np.stack(common_data_dict[model1], axis=1)
Y = np.stack(common_data_dict[model2], axis=1)
n_layer_X = X.shape[0]
n_layer_Y = Y.shape[0]
cka = np.zeros((n_layer_X, n_layer_Y))
for x in range(n_layer_X):
print('X:', x, 'of', n_layer_X)
for y in range(n_layer_Y):
cka[x, y] = compute_cka(X[x], Y[y])
return cka
|
<commit_before><commit_msg>Add CKA utils for use in other projects.
PiperOrigin-RevId: 461504827<commit_after>
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to compute CKA."""
# pylint:disable=invalid-name
import numpy as np
def linear_gram(X):
return np.matmul(X, X.T)
def hsic(K, L):
# X and Y are Nxd
N = K.shape[0]
ones_np = np.ones((N, 1))
H = np.identity(N) - np.matmul(ones_np, ones_np.T) / N
KH = np.matmul(K, H)
LH = np.matmul(L, H)
hsic_np = np.trace(np.matmul(KH, LH)) / (N-1)**2
return hsic_np
def compute_cka(X, Y):
K = linear_gram(X)
L = linear_gram(Y)
hsic_kl = hsic(K, L)
hsic_kk = hsic(K, K)
hsic_ll = hsic(L, L)
cka = hsic_kl / (np.sqrt(hsic_kk) * np.sqrt(hsic_ll))
return cka
def model_pair_cka(model1, model2, common_data_dict):
"""Compute CKA between models."""
X = np.stack(common_data_dict[model1], axis=1)
Y = np.stack(common_data_dict[model2], axis=1)
n_layer_X = X.shape[0]
n_layer_Y = Y.shape[0]
cka = np.zeros((n_layer_X, n_layer_Y))
for x in range(n_layer_X):
print('X:', x, 'of', n_layer_X)
for y in range(n_layer_Y):
cka[x, y] = compute_cka(X[x], Y[y])
return cka
|
Add CKA utils for use in other projects.
PiperOrigin-RevId: 461504827# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to compute CKA."""
# pylint:disable=invalid-name
import numpy as np
def linear_gram(X):
return np.matmul(X, X.T)
def hsic(K, L):
# X and Y are Nxd
N = K.shape[0]
ones_np = np.ones((N, 1))
H = np.identity(N) - np.matmul(ones_np, ones_np.T) / N
KH = np.matmul(K, H)
LH = np.matmul(L, H)
hsic_np = np.trace(np.matmul(KH, LH)) / (N-1)**2
return hsic_np
def compute_cka(X, Y):
K = linear_gram(X)
L = linear_gram(Y)
hsic_kl = hsic(K, L)
hsic_kk = hsic(K, K)
hsic_ll = hsic(L, L)
cka = hsic_kl / (np.sqrt(hsic_kk) * np.sqrt(hsic_ll))
return cka
def model_pair_cka(model1, model2, common_data_dict):
"""Compute CKA between models."""
X = np.stack(common_data_dict[model1], axis=1)
Y = np.stack(common_data_dict[model2], axis=1)
n_layer_X = X.shape[0]
n_layer_Y = Y.shape[0]
cka = np.zeros((n_layer_X, n_layer_Y))
for x in range(n_layer_X):
print('X:', x, 'of', n_layer_X)
for y in range(n_layer_Y):
cka[x, y] = compute_cka(X[x], Y[y])
return cka
|
<commit_before><commit_msg>Add CKA utils for use in other projects.
PiperOrigin-RevId: 461504827<commit_after># coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to compute CKA."""
# pylint:disable=invalid-name
import numpy as np
def linear_gram(X):
return np.matmul(X, X.T)
def hsic(K, L):
# X and Y are Nxd
N = K.shape[0]
ones_np = np.ones((N, 1))
H = np.identity(N) - np.matmul(ones_np, ones_np.T) / N
KH = np.matmul(K, H)
LH = np.matmul(L, H)
hsic_np = np.trace(np.matmul(KH, LH)) / (N-1)**2
return hsic_np
def compute_cka(X, Y):
K = linear_gram(X)
L = linear_gram(Y)
hsic_kl = hsic(K, L)
hsic_kk = hsic(K, K)
hsic_ll = hsic(L, L)
cka = hsic_kl / (np.sqrt(hsic_kk) * np.sqrt(hsic_ll))
return cka
def model_pair_cka(model1, model2, common_data_dict):
"""Compute CKA between models."""
X = np.stack(common_data_dict[model1], axis=1)
Y = np.stack(common_data_dict[model2], axis=1)
n_layer_X = X.shape[0]
n_layer_Y = Y.shape[0]
cka = np.zeros((n_layer_X, n_layer_Y))
for x in range(n_layer_X):
print('X:', x, 'of', n_layer_X)
for y in range(n_layer_Y):
cka[x, y] = compute_cka(X[x], Y[y])
return cka
|
|
d56629235381aacfcb0c810fd57bc6eb4bb06d24
|
ReligiousPhraseMC/holy_twitter.py
|
ReligiousPhraseMC/holy_twitter.py
|
"""Coordinates the twitter api with the markov chain models"""
def main():
"""The main event loop for the holy twitter bot
It watches for twitter events, and posts randomly generated holy text to twitter.
"""
pass
if __name__ == '__main__':
main()
|
Create the main event loop
|
Create the main event loop
This is the event loop which will monitor twitter, and respond to new
followers, post messages every so often, and hopefully be able to
respond to new direct messages.
|
Python
|
mit
|
salvor7/MarkovChainBibleBot
|
Create the main event loop
This is the event loop which will monitor twitter, and respond to new
followers, post messages every so often, and hopefully be able to
respond to new direct messages.
|
"""Coordinates the twitter api with the markov chain models"""
def main():
"""The main event loop for the holy twitter bot
It watches for twitter events, and posts randomly generated holy text to twitter.
"""
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Create the main event loop
This is the event loop which will monitor twitter, and respond to new
followers, post messages every so often, and hopefully be able to
respond to new direct messages.<commit_after>
|
"""Coordinates the twitter api with the markov chain models"""
def main():
"""The main event loop for the holy twitter bot
It watches for twitter events, and posts randomly generated holy text to twitter.
"""
pass
if __name__ == '__main__':
main()
|
Create the main event loop
This is the event loop which will monitor twitter, and respond to new
followers, post messages every so often, and hopefully be able to
respond to new direct messages."""Coordinates the twitter api with the markov chain models"""
def main():
"""The main event loop for the holy twitter bot
It watches for twitter events, and posts randomly generated holy text to twitter.
"""
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Create the main event loop
This is the event loop which will monitor twitter, and respond to new
followers, post messages every so often, and hopefully be able to
respond to new direct messages.<commit_after>"""Coordinates the twitter api with the markov chain models"""
def main():
"""The main event loop for the holy twitter bot
It watches for twitter events, and posts randomly generated holy text to twitter.
"""
pass
if __name__ == '__main__':
main()
|
|
cb54dc8d1acf5007d15c4a4b0801bedbfbc406e4
|
checkapt.py
|
checkapt.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import apt
class CheckApt(object):
def __init__(self):
self.cache = apt.Cache()
def list_installed(self):
pkgs = [i for i in self.cache if i.is_installed]
return pkgs
def main():
check = CheckApt()
p = check.list_installed()
for i in p:
print i.name
if __name__ == '__main__':
main()
|
Add check package installed via python apt.
|
Add check package installed via python apt.
|
Python
|
mit
|
somat/samber
|
Add check package installed via python apt.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import apt
class CheckApt(object):
def __init__(self):
self.cache = apt.Cache()
def list_installed(self):
pkgs = [i for i in self.cache if i.is_installed]
return pkgs
def main():
check = CheckApt()
p = check.list_installed()
for i in p:
print i.name
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add check package installed via python apt.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import apt
class CheckApt(object):
def __init__(self):
self.cache = apt.Cache()
def list_installed(self):
pkgs = [i for i in self.cache if i.is_installed]
return pkgs
def main():
check = CheckApt()
p = check.list_installed()
for i in p:
print i.name
if __name__ == '__main__':
main()
|
Add check package installed via python apt.#!/usr/bin/env python
# -*- coding: utf-8 -*-
import apt
class CheckApt(object):
def __init__(self):
self.cache = apt.Cache()
def list_installed(self):
pkgs = [i for i in self.cache if i.is_installed]
return pkgs
def main():
check = CheckApt()
p = check.list_installed()
for i in p:
print i.name
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add check package installed via python apt.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import apt
class CheckApt(object):
def __init__(self):
self.cache = apt.Cache()
def list_installed(self):
pkgs = [i for i in self.cache if i.is_installed]
return pkgs
def main():
check = CheckApt()
p = check.list_installed()
for i in p:
print i.name
if __name__ == '__main__':
main()
|
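The CheckApt class above only lists every installed package. Assuming python-apt is available (as on Debian/Ubuntu), the same cache also supports a direct membership check for a single package; this helper is an illustrative sketch, not part of the original commit:

import apt

def is_package_installed(name):
    # apt.Cache supports "name in cache" and cache[name].is_installed.
    cache = apt.Cache()
    return name in cache and cache[name].is_installed

# Example usage:
# print(is_package_installed('bash'))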
|
f00ae3046436f09e62460a8468e031a0c2027e7f
|
scikits/learn/machine/em/__init__.py
|
scikits/learn/machine/em/__init__.py
|
#! /usr/bin/env python
# Last Change: Sun Jul 22 01:00 PM 2007 J
from info import __doc__
from gauss_mix import GmParamError, GM
from gmm_em import GmmParamError, GMM, EM
from online_em import OnGMM as _OnGMM
__all__ = filter(lambda s:not s.startswith('_'), dir())
from numpy.testing import NumpyTest
test = NumpyTest().test
def test_suite(*args):
# XXX: this is to avoid recursive call to itself. This is an horrible hack,
# I have no idea why infinite recursion happens otherwise.
if len(args) > 0:
import unittest
return unittest.TestSuite()
np = NumpyTest()
np.testfile_patterns.append(r'test_examples.py')
return np.test(level = -10, verbosity = 5)
|
#! /usr/bin/env python
# Last Change: Sun Sep 07 04:00 PM 2008 J
from info import __doc__
from gauss_mix import GmParamError, GM
from gmm_em import GmmParamError, GMM, EM
from online_em import OnGMM as _OnGMM
__all__ = filter(lambda s:not s.startswith('_'), dir())
|
Remove deprecated test runner for em machine.
|
Remove deprecated test runner for em machine.
From: cdavid <cdavid@cb17146a-f446-4be1-a4f7-bd7c5bb65646>
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@271 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8
|
Python
|
bsd-3-clause
|
xzh86/scikit-learn,Akshay0724/scikit-learn,fyffyt/scikit-learn,jlegendary/scikit-learn,henridwyer/scikit-learn,Djabbz/scikit-learn,shenzebang/scikit-learn,mrshu/scikit-learn,costypetrisor/scikit-learn,jlegendary/scikit-learn,manhhomienbienthuy/scikit-learn,thientu/scikit-learn,marcocaccin/scikit-learn,xavierwu/scikit-learn,krez13/scikit-learn,fabianp/scikit-learn,kmike/scikit-learn,arabenjamin/scikit-learn,NunoEdgarGub1/scikit-learn,frank-tancf/scikit-learn,carrillo/scikit-learn,quheng/scikit-learn,wlamond/scikit-learn,wlamond/scikit-learn,hrjn/scikit-learn,Akshay0724/scikit-learn,phdowling/scikit-learn,466152112/scikit-learn,jakobworldpeace/scikit-learn,billy-inn/scikit-learn,devanshdalal/scikit-learn,vybstat/scikit-learn,terkkila/scikit-learn,hsuantien/scikit-learn,equialgo/scikit-learn,etkirsch/scikit-learn,pnedunuri/scikit-learn,hainm/scikit-learn,qifeigit/scikit-learn,mwv/scikit-learn,rexshihaoren/scikit-learn,zorroblue/scikit-learn,manhhomienbienthuy/scikit-learn,ky822/scikit-learn,deepesch/scikit-learn,bnaul/scikit-learn,ashhher3/scikit-learn,hitszxp/scikit-learn,thilbern/scikit-learn,xubenben/scikit-learn,terkkila/scikit-learn,ivannz/scikit-learn,justincassidy/scikit-learn,meduz/scikit-learn,Aasmi/scikit-learn,robbymeals/scikit-learn,IndraVikas/scikit-learn,simon-pepin/scikit-learn,aabadie/scikit-learn,liyu1990/sklearn,arjoly/scikit-learn,beepee14/scikit-learn,bikong2/scikit-learn,sarahgrogan/scikit-learn,dhruv13J/scikit-learn,ankurankan/scikit-learn,xwolf12/scikit-learn,beepee14/scikit-learn,shangwuhencc/scikit-learn,lin-credible/scikit-learn,jorik041/scikit-learn,fabioticconi/scikit-learn,r-mart/scikit-learn,nmayorov/scikit-learn,hitszxp/scikit-learn,ChanderG/scikit-learn,mattilyra/scikit-learn,mjudsp/Tsallis,potash/scikit-learn,jmschrei/scikit-learn,samzhang111/scikit-learn,appapantula/scikit-learn,cainiaocome/scikit-learn,mxjl620/scikit-learn,herilalaina/scikit-learn,vivekmishra1991/scikit-learn,sumspr/scikit-learn,jm-begon/scikit-learn,ogrisel/scikit-learn,YinongLong/scikit-learn,icdishb/scikit-learn,kjung/scikit-learn,ankurankan/scikit-learn,Vimos/scikit-learn,ningchi/scikit-learn,henrykironde/scikit-learn,0asa/scikit-learn,cauchycui/scikit-learn,NelisVerhoef/scikit-learn,aetilley/scikit-learn,MohammedWasim/scikit-learn,giorgiop/scikit-learn,samuel1208/scikit-learn,voxlol/scikit-learn,Achuth17/scikit-learn,Myasuka/scikit-learn,ndingwall/scikit-learn,qifeigit/scikit-learn,anurag313/scikit-learn,henridwyer/scikit-learn,PrashntS/scikit-learn,evgchz/scikit-learn,bikong2/scikit-learn,Barmaley-exe/scikit-learn,florian-f/sklearn,glemaitre/scikit-learn,roxyboy/scikit-learn,jm-begon/scikit-learn,shikhardb/scikit-learn,kashif/scikit-learn,thientu/scikit-learn,arjoly/scikit-learn,fzalkow/scikit-learn,mehdidc/scikit-learn,imaculate/scikit-learn,mhue/scikit-learn,MohammedWasim/scikit-learn,huobaowangxi/scikit-learn,wlamond/scikit-learn,mwv/scikit-learn,jjx02230808/project0223,shyamalschandra/scikit-learn,equialgo/scikit-learn,mikebenfield/scikit-learn,jereze/scikit-learn,PrashntS/scikit-learn,Garrett-R/scikit-learn,IshankGulati/scikit-learn,AlexandreAbraham/scikit-learn,beepee14/scikit-learn,mojoboss/scikit-learn,sumspr/scikit-learn,akionakamura/scikit-learn,sergeyf/scikit-learn,3manuek/scikit-learn,mjudsp/Tsallis,dsullivan7/scikit-learn,LohithBlaze/scikit-learn,thilbern/scikit-learn,Garrett-R/scikit-learn,JosmanPS/scikit-learn,bhargav/scikit-learn,ngoix/OCRF,hitszxp/scikit-learn,chrsrds/scikit-learn,phdowling/scikit-learn,bthirion/scikit-learn,olologin/scikit-learn,procoder317/scikit-lear
n,Garrett-R/scikit-learn,RPGOne/scikit-learn,evgchz/scikit-learn,shangwuhencc/scikit-learn,moutai/scikit-learn,hainm/scikit-learn,MatthieuBizien/scikit-learn,cainiaocome/scikit-learn,zorroblue/scikit-learn,jakobworldpeace/scikit-learn,MatthieuBizien/scikit-learn,anurag313/scikit-learn,marcocaccin/scikit-learn,DonBeo/scikit-learn,petosegan/scikit-learn,florian-f/sklearn,pythonvietnam/scikit-learn,ominux/scikit-learn,YinongLong/scikit-learn,ycaihua/scikit-learn,ilyes14/scikit-learn,rsivapr/scikit-learn,lucidfrontier45/scikit-learn,JeanKossaifi/scikit-learn,nesterione/scikit-learn,lin-credible/scikit-learn,yonglehou/scikit-learn,appapantula/scikit-learn,hdmetor/scikit-learn,jorik041/scikit-learn,NunoEdgarGub1/scikit-learn,vibhorag/scikit-learn,espg/scikit-learn,belltailjp/scikit-learn,liyu1990/sklearn,jpautom/scikit-learn,etkirsch/scikit-learn,ltiao/scikit-learn,belltailjp/scikit-learn,BiaDarkia/scikit-learn,AnasGhrab/scikit-learn,zihua/scikit-learn,ssaeger/scikit-learn,Obus/scikit-learn,ltiao/scikit-learn,aminert/scikit-learn,sonnyhu/scikit-learn,yonglehou/scikit-learn,jseabold/scikit-learn,mayblue9/scikit-learn,cybernet14/scikit-learn,BiaDarkia/scikit-learn,hsiaoyi0504/scikit-learn,amueller/scikit-learn,h2educ/scikit-learn,chrisburr/scikit-learn,ky822/scikit-learn,florian-f/sklearn,cybernet14/scikit-learn,vibhorag/scikit-learn,victorbergelin/scikit-learn,mxjl620/scikit-learn,rahul-c1/scikit-learn,simon-pepin/scikit-learn,xyguo/scikit-learn,ephes/scikit-learn,shusenl/scikit-learn,spallavolu/scikit-learn,MartinSavc/scikit-learn,rrohan/scikit-learn,AlexandreAbraham/scikit-learn,B3AU/waveTree,ChanderG/scikit-learn,samuel1208/scikit-learn,cwu2011/scikit-learn,ChanderG/scikit-learn,JPFrancoia/scikit-learn,samzhang111/scikit-learn,heli522/scikit-learn,moutai/scikit-learn,ZENGXH/scikit-learn,ilo10/scikit-learn,NelisVerhoef/scikit-learn,jseabold/scikit-learn,pompiduskus/scikit-learn,ivannz/scikit-learn,shangwuhencc/scikit-learn,jblackburne/scikit-learn,tomlof/scikit-learn,roxyboy/scikit-learn,fzalkow/scikit-learn,q1ang/scikit-learn,madjelan/scikit-learn,macks22/scikit-learn,abimannans/scikit-learn,massmutual/scikit-learn,mattilyra/scikit-learn,AIML/scikit-learn,pnedunuri/scikit-learn,ominux/scikit-learn,nrhine1/scikit-learn,Srisai85/scikit-learn,xavierwu/scikit-learn,massmutual/scikit-learn,LohithBlaze/scikit-learn,ndingwall/scikit-learn,liangz0707/scikit-learn,clemkoa/scikit-learn,jaidevd/scikit-learn,nrhine1/scikit-learn,eg-zhang/scikit-learn,DSLituiev/scikit-learn,scikit-learn/scikit-learn,nikitasingh981/scikit-learn,mjudsp/Tsallis,nesterione/scikit-learn,zorojean/scikit-learn,huobaowangxi/scikit-learn,huobaowangxi/scikit-learn,ningchi/scikit-learn,murali-munna/scikit-learn,rsivapr/scikit-learn,aetilley/scikit-learn,vinayak-mehta/scikit-learn,plissonf/scikit-learn,jm-begon/scikit-learn,stylianos-kampakis/scikit-learn,tdhopper/scikit-learn,xuewei4d/scikit-learn,hainm/scikit-learn,liangz0707/scikit-learn,0x0all/scikit-learn,robbymeals/scikit-learn,0x0all/scikit-learn,dhruv13J/scikit-learn,zorojean/scikit-learn,rajat1994/scikit-learn,ahoyosid/scikit-learn,jlegendary/scikit-learn,meduz/scikit-learn,joshloyal/scikit-learn,bigdataelephants/scikit-learn,dingocuster/scikit-learn,yonglehou/scikit-learn,jkarnows/scikit-learn,amueller/scikit-learn,rrohan/scikit-learn,jaidevd/scikit-learn,zhenv5/scikit-learn,liyu1990/sklearn,elkingtonmcb/scikit-learn,lenovor/scikit-learn,IshankGulati/scikit-learn,kaichogami/scikit-learn,cdegroc/scikit-learn,zaxtax/scikit-learn,3manuek/scikit-learn,CVML/scikit-learn,abhishekga
hlot/scikit-learn,mugizico/scikit-learn,yanlend/scikit-learn,mwv/scikit-learn,ngoix/OCRF,nvoron23/scikit-learn,glennq/scikit-learn,h2educ/scikit-learn,Vimos/scikit-learn,zhenv5/scikit-learn,krez13/scikit-learn,mjgrav2001/scikit-learn,nvoron23/scikit-learn,wazeerzulfikar/scikit-learn,Clyde-fare/scikit-learn,khkaminska/scikit-learn,ningchi/scikit-learn,larsmans/scikit-learn,hdmetor/scikit-learn,voxlol/scikit-learn,mayblue9/scikit-learn,kjung/scikit-learn,mojoboss/scikit-learn,hsuantien/scikit-learn,tomlof/scikit-learn,kagayakidan/scikit-learn,mojoboss/scikit-learn,fabioticconi/scikit-learn,ssaeger/scikit-learn,shikhardb/scikit-learn,massmutual/scikit-learn,Djabbz/scikit-learn,AlexRobson/scikit-learn,huzq/scikit-learn,trankmichael/scikit-learn,JsNoNo/scikit-learn,ky822/scikit-learn,gclenaghan/scikit-learn,Aasmi/scikit-learn,RomainBrault/scikit-learn,aabadie/scikit-learn,wzbozon/scikit-learn,lenovor/scikit-learn,jereze/scikit-learn,Garrett-R/scikit-learn,treycausey/scikit-learn,qifeigit/scikit-learn,466152112/scikit-learn,belltailjp/scikit-learn,LiaoPan/scikit-learn,sanketloke/scikit-learn,yanlend/scikit-learn,manashmndl/scikit-learn,Sentient07/scikit-learn,andrewnc/scikit-learn,jakobworldpeace/scikit-learn,nvoron23/scikit-learn,alexeyum/scikit-learn,tawsifkhan/scikit-learn,betatim/scikit-learn,pv/scikit-learn,mattgiguere/scikit-learn,0asa/scikit-learn,cl4rke/scikit-learn,luo66/scikit-learn,Nyker510/scikit-learn,michigraber/scikit-learn,depet/scikit-learn,phdowling/scikit-learn,AlexanderFabisch/scikit-learn,victorbergelin/scikit-learn,yunfeilu/scikit-learn,Nyker510/scikit-learn,ngoix/OCRF,espg/scikit-learn,appapantula/scikit-learn,chrsrds/scikit-learn,vigilv/scikit-learn,RachitKansal/scikit-learn,Adai0808/scikit-learn,siutanwong/scikit-learn,idlead/scikit-learn,ldirer/scikit-learn,bnaul/scikit-learn,pompiduskus/scikit-learn,tdhopper/scikit-learn,wzbozon/scikit-learn,cwu2011/scikit-learn,r-mart/scikit-learn,yyjiang/scikit-learn,wazeerzulfikar/scikit-learn,sgenoud/scikit-learn,etkirsch/scikit-learn,vivekmishra1991/scikit-learn,schets/scikit-learn,devanshdalal/scikit-learn,JPFrancoia/scikit-learn,loli/sklearn-ensembletrees,Adai0808/scikit-learn,davidgbe/scikit-learn,larsmans/scikit-learn,f3r/scikit-learn,shusenl/scikit-learn,jmetzen/scikit-learn,nrhine1/scikit-learn,petosegan/scikit-learn,pnedunuri/scikit-learn,Jimmy-Morzaria/scikit-learn,voxlol/scikit-learn,untom/scikit-learn,joshloyal/scikit-learn,pv/scikit-learn,ngoix/OCRF,shahankhatch/scikit-learn,phdowling/scikit-learn,hdmetor/scikit-learn,rishikksh20/scikit-learn,glemaitre/scikit-learn,IshankGulati/scikit-learn,Windy-Ground/scikit-learn,rahul-c1/scikit-learn,jorge2703/scikit-learn,siutanwong/scikit-learn,hitszxp/scikit-learn,DonBeo/scikit-learn,florian-f/sklearn,jayflo/scikit-learn,MohammedWasim/scikit-learn,sumspr/scikit-learn,devanshdalal/scikit-learn,ClimbsRocks/scikit-learn,alexsavio/scikit-learn,aflaxman/scikit-learn,cdegroc/scikit-learn,q1ang/scikit-learn,xubenben/scikit-learn,PatrickOReilly/scikit-learn,hugobowne/scikit-learn,jjx02230808/project0223,qifeigit/scikit-learn,q1ang/scikit-learn,dsquareindia/scikit-learn,henrykironde/scikit-learn,lbishal/scikit-learn,nikitasingh981/scikit-learn,kevin-intel/scikit-learn,fyffyt/scikit-learn,thilbern/scikit-learn,h2educ/scikit-learn,kagayakidan/scikit-learn,krez13/scikit-learn,cwu2011/scikit-learn,iismd17/scikit-learn,vinayak-mehta/scikit-learn,sonnyhu/scikit-learn,samzhang111/scikit-learn,PatrickOReilly/scikit-learn,ankurankan/scikit-learn,themrmax/scikit-learn,tosolveit/scikit-learn,RomainBr
ault/scikit-learn,vigilv/scikit-learn,gclenaghan/scikit-learn,CforED/Machine-Learning,mlyundin/scikit-learn,xyguo/scikit-learn,Clyde-fare/scikit-learn,equialgo/scikit-learn,andrewnc/scikit-learn,HolgerPeters/scikit-learn,luo66/scikit-learn,raghavrv/scikit-learn,aflaxman/scikit-learn,glemaitre/scikit-learn,zorroblue/scikit-learn,lucidfrontier45/scikit-learn,Srisai85/scikit-learn,vortex-ape/scikit-learn,djgagne/scikit-learn,kevin-intel/scikit-learn,Nyker510/scikit-learn,PatrickChrist/scikit-learn,dhruv13J/scikit-learn,MatthieuBizien/scikit-learn,waterponey/scikit-learn,ahoyosid/scikit-learn,IndraVikas/scikit-learn,spallavolu/scikit-learn,kylerbrown/scikit-learn,voxlol/scikit-learn,zuku1985/scikit-learn,mhdella/scikit-learn,tomlof/scikit-learn,mattgiguere/scikit-learn,bnaul/scikit-learn,fredhusser/scikit-learn,HolgerPeters/scikit-learn,wanggang3333/scikit-learn,alexeyum/scikit-learn,wlamond/scikit-learn,toastedcornflakes/scikit-learn,pkruskal/scikit-learn,maheshakya/scikit-learn,shangwuhencc/scikit-learn,mlyundin/scikit-learn,Lawrence-Liu/scikit-learn,rvraghav93/scikit-learn,Barmaley-exe/scikit-learn,vybstat/scikit-learn,Fireblend/scikit-learn,shahankhatch/scikit-learn,justincassidy/scikit-learn,pythonvietnam/scikit-learn,fredhusser/scikit-learn,bthirion/scikit-learn,depet/scikit-learn,AlexanderFabisch/scikit-learn,ndingwall/scikit-learn,vigilv/scikit-learn,B3AU/waveTree,xuewei4d/scikit-learn,pypot/scikit-learn,lbishal/scikit-learn,jakirkham/scikit-learn,Achuth17/scikit-learn,nmayorov/scikit-learn,vortex-ape/scikit-learn,vinayak-mehta/scikit-learn,dingocuster/scikit-learn,tawsifkhan/scikit-learn,mehdidc/scikit-learn,bikong2/scikit-learn,mojoboss/scikit-learn,PatrickOReilly/scikit-learn,Aasmi/scikit-learn,vermouthmjl/scikit-learn,robin-lai/scikit-learn,shenzebang/scikit-learn,Titan-C/scikit-learn,rishikksh20/scikit-learn,raghavrv/scikit-learn,robin-lai/scikit-learn,meduz/scikit-learn,akionakamura/scikit-learn,btabibian/scikit-learn,ZENGXH/scikit-learn,heli522/scikit-learn,hsiaoyi0504/scikit-learn,alvarofierroclavero/scikit-learn,mlyundin/scikit-learn,hugobowne/scikit-learn,loli/semisupervisedforests,Windy-Ground/scikit-learn,shahankhatch/scikit-learn,lucidfrontier45/scikit-learn,wanggang3333/scikit-learn,ahoyosid/scikit-learn,madjelan/scikit-learn,quheng/scikit-learn,altairpearl/scikit-learn,hsuantien/scikit-learn,gotomypc/scikit-learn,MartinDelzant/scikit-learn,fzalkow/scikit-learn,poryfly/scikit-learn,nrhine1/scikit-learn,RachitKansal/scikit-learn,davidgbe/scikit-learn,xwolf12/scikit-learn,MartinSavc/scikit-learn,evgchz/scikit-learn,anntzer/scikit-learn,aetilley/scikit-learn,tosolveit/scikit-learn,AlexandreAbraham/scikit-learn,rishikksh20/scikit-learn,trungnt13/scikit-learn,LiaoPan/scikit-learn,Achuth17/scikit-learn,f3r/scikit-learn,justincassidy/scikit-learn,manashmndl/scikit-learn,zaxtax/scikit-learn,saiwing-yeung/scikit-learn,wzbozon/scikit-learn,giorgiop/scikit-learn,sgenoud/scikit-learn,ilo10/scikit-learn,stylianos-kampakis/scikit-learn,OshynSong/scikit-learn,LohithBlaze/scikit-learn,siutanwong/scikit-learn,kagayakidan/scikit-learn,jm-begon/scikit-learn,pompiduskus/scikit-learn,Srisai85/scikit-learn,JPFrancoia/scikit-learn,Sentient07/scikit-learn,IssamLaradji/scikit-learn,abimannans/scikit-learn,MechCoder/scikit-learn,smartscheduling/scikit-learn-categorical-tree,rvraghav93/scikit-learn,dsullivan7/scikit-learn,nelson-liu/scikit-learn,TomDLT/scikit-learn,IssamLaradji/scikit-learn,mblondel/scikit-learn,ashhher3/scikit-learn,andaag/scikit-learn,petosegan/scikit-learn,ZenDevelopmentSystems/s
cikit-learn,nikitasingh981/scikit-learn,samuel1208/scikit-learn,massmutual/scikit-learn,hdmetor/scikit-learn,theoryno3/scikit-learn,JPFrancoia/scikit-learn,glennq/scikit-learn,frank-tancf/scikit-learn,anirudhjayaraman/scikit-learn,anirudhjayaraman/scikit-learn,ssaeger/scikit-learn,beepee14/scikit-learn,cl4rke/scikit-learn,xavierwu/scikit-learn,theoryno3/scikit-learn,glouppe/scikit-learn,mattilyra/scikit-learn,imaculate/scikit-learn,luo66/scikit-learn,Akshay0724/scikit-learn,ephes/scikit-learn,harshaneelhg/scikit-learn,mayblue9/scikit-learn,Srisai85/scikit-learn,jakirkham/scikit-learn,0x0all/scikit-learn,ZenDevelopmentSystems/scikit-learn,yanlend/scikit-learn,cdegroc/scikit-learn,amueller/scikit-learn,pythonvietnam/scikit-learn,jayflo/scikit-learn,untom/scikit-learn,plissonf/scikit-learn,trankmichael/scikit-learn,elkingtonmcb/scikit-learn,B3AU/waveTree,amueller/scikit-learn,alvarofierroclavero/scikit-learn,nomadcube/scikit-learn,vibhorag/scikit-learn,mfjb/scikit-learn,aetilley/scikit-learn,icdishb/scikit-learn,ankurankan/scikit-learn,jakobworldpeace/scikit-learn,billy-inn/scikit-learn,hlin117/scikit-learn,terkkila/scikit-learn,clemkoa/scikit-learn,walterreade/scikit-learn,arahuja/scikit-learn,kevin-intel/scikit-learn,maheshakya/scikit-learn,spallavolu/scikit-learn,heli522/scikit-learn,Akshay0724/scikit-learn,xzh86/scikit-learn,anirudhjayaraman/scikit-learn,wazeerzulfikar/scikit-learn,fbagirov/scikit-learn,cl4rke/scikit-learn,mblondel/scikit-learn,roxyboy/scikit-learn,eickenberg/scikit-learn,altairpearl/scikit-learn,pratapvardhan/scikit-learn,jayflo/scikit-learn,fbagirov/scikit-learn,ilyes14/scikit-learn,idlead/scikit-learn,devanshdalal/scikit-learn,zuku1985/scikit-learn,MechCoder/scikit-learn,theoryno3/scikit-learn,schets/scikit-learn,3manuek/scikit-learn,wzbozon/scikit-learn,adamgreenhall/scikit-learn,petosegan/scikit-learn,rohanp/scikit-learn,jmetzen/scikit-learn,manhhomienbienthuy/scikit-learn,ogrisel/scikit-learn,pkruskal/scikit-learn,RachitKansal/scikit-learn,idlead/scikit-learn,billy-inn/scikit-learn,herilalaina/scikit-learn,vshtanko/scikit-learn,ngoix/OCRF,bthirion/scikit-learn,herilalaina/scikit-learn,ominux/scikit-learn,fbagirov/scikit-learn,YinongLong/scikit-learn,trankmichael/scikit-learn,glouppe/scikit-learn,rajat1994/scikit-learn,thientu/scikit-learn,maheshakya/scikit-learn,shenzebang/scikit-learn,pianomania/scikit-learn,Obus/scikit-learn,vibhorag/scikit-learn,pratapvardhan/scikit-learn,btabibian/scikit-learn,arjoly/scikit-learn,Barmaley-exe/scikit-learn,costypetrisor/scikit-learn,elkingtonmcb/scikit-learn,aminert/scikit-learn,andrewnc/scikit-learn,walterreade/scikit-learn,akionakamura/scikit-learn,f3r/scikit-learn,smartscheduling/scikit-learn-categorical-tree,heli522/scikit-learn,ElDeveloper/scikit-learn,mikebenfield/scikit-learn,nomadcube/scikit-learn,depet/scikit-learn,shusenl/scikit-learn,iismd17/scikit-learn,michigraber/scikit-learn,tmhm/scikit-learn,JosmanPS/scikit-learn,macks22/scikit-learn,Titan-C/scikit-learn,ycaihua/scikit-learn,bikong2/scikit-learn,pv/scikit-learn,Fireblend/scikit-learn,mehdidc/scikit-learn,mhdella/scikit-learn,plissonf/scikit-learn,clemkoa/scikit-learn,jlegendary/scikit-learn,AlexRobson/scikit-learn,ycaihua/scikit-learn,dsquareindia/scikit-learn,manashmndl/scikit-learn,stylianos-kampakis/scikit-learn,aewhatley/scikit-learn,larsmans/scikit-learn,tmhm/scikit-learn,rajat1994/scikit-learn,aabadie/scikit-learn,nomadcube/scikit-learn,vortex-ape/scikit-learn,liyu1990/sklearn,smartscheduling/scikit-learn-categorical-tree,smartscheduling/scikit-learn-categori
cal-tree,henridwyer/scikit-learn,IssamLaradji/scikit-learn,cybernet14/scikit-learn,mrshu/scikit-learn,JosmanPS/scikit-learn,treycausey/scikit-learn,simon-pepin/scikit-learn,poryfly/scikit-learn,hitszxp/scikit-learn,jaidevd/scikit-learn,djgagne/scikit-learn,yask123/scikit-learn,sinhrks/scikit-learn,MatthieuBizien/scikit-learn,pnedunuri/scikit-learn,ZENGXH/scikit-learn,procoder317/scikit-learn,costypetrisor/scikit-learn,vshtanko/scikit-learn,jmetzen/scikit-learn,evgchz/scikit-learn,jkarnows/scikit-learn,ogrisel/scikit-learn,equialgo/scikit-learn,jorge2703/scikit-learn,glouppe/scikit-learn,trungnt13/scikit-learn,PatrickChrist/scikit-learn,ishanic/scikit-learn,glouppe/scikit-learn,abhishekkrthakur/scikit-learn,davidgbe/scikit-learn,jpautom/scikit-learn,liberatorqjw/scikit-learn,fengzhyuan/scikit-learn,huobaowangxi/scikit-learn,B3AU/waveTree,Jimmy-Morzaria/scikit-learn,vshtanko/scikit-learn,mfjb/scikit-learn,NunoEdgarGub1/scikit-learn,untom/scikit-learn,fabioticconi/scikit-learn,walterreade/scikit-learn,ldirer/scikit-learn,themrmax/scikit-learn,arabenjamin/scikit-learn,mhdella/scikit-learn,aabadie/scikit-learn,rsivapr/scikit-learn,zuku1985/scikit-learn,yunfeilu/scikit-learn,etkirsch/scikit-learn,btabibian/scikit-learn,chrisburr/scikit-learn,michigraber/scikit-learn,fredhusser/scikit-learn,waterponey/scikit-learn,NunoEdgarGub1/scikit-learn,mrshu/scikit-learn,AIML/scikit-learn,madjelan/scikit-learn,robbymeals/scikit-learn,sinhrks/scikit-learn,Sentient07/scikit-learn,ephes/scikit-learn,alexeyum/scikit-learn,fyffyt/scikit-learn,AnasGhrab/scikit-learn,murali-munna/scikit-learn,sarahgrogan/scikit-learn,rsivapr/scikit-learn,andaag/scikit-learn,ishanic/scikit-learn,lenovor/scikit-learn,raghavrv/scikit-learn,jseabold/scikit-learn,scikit-learn/scikit-learn,anurag313/scikit-learn,shusenl/scikit-learn,chrisburr/scikit-learn,RPGOne/scikit-learn,procoder317/scikit-learn,arjoly/scikit-learn,lin-credible/scikit-learn,AIML/scikit-learn,alvarofierroclavero/scikit-learn,yyjiang/scikit-learn,tawsifkhan/scikit-learn,russel1237/scikit-learn,jjx02230808/project0223,Barmaley-exe/scikit-learn,mrshu/scikit-learn,mfjb/scikit-learn,icdishb/scikit-learn,waterponey/scikit-learn,saiwing-yeung/scikit-learn,jzt5132/scikit-learn,joshloyal/scikit-learn,depet/scikit-learn,rexshihaoren/scikit-learn,xiaoxiamii/scikit-learn,RayMick/scikit-learn,sarahgrogan/scikit-learn,anntzer/scikit-learn,ky822/scikit-learn,russel1237/scikit-learn,pypot/scikit-learn,RPGOne/scikit-learn,vivekmishra1991/scikit-learn,rahuldhote/scikit-learn,loli/sklearn-ensembletrees,yonglehou/scikit-learn,ZENGXH/scikit-learn,AlexanderFabisch/scikit-learn,mjudsp/Tsallis,mattilyra/scikit-learn,rexshihaoren/scikit-learn,rahuldhote/scikit-learn,zuku1985/scikit-learn,CforED/Machine-Learning,RayMick/scikit-learn,wanggang3333/scikit-learn,eg-zhang/scikit-learn,lesteve/scikit-learn,kaichogami/scikit-learn,ClimbsRocks/scikit-learn,AlexanderFabisch/scikit-learn,AnasGhrab/scikit-learn,quheng/scikit-learn,joernhees/scikit-learn,mhue/scikit-learn,zihua/scikit-learn,dingocuster/scikit-learn,dsullivan7/scikit-learn,pypot/scikit-learn,yask123/scikit-learn,mattilyra/scikit-learn,Windy-Ground/scikit-learn,bhargav/scikit-learn,nmayorov/scikit-learn,jblackburne/scikit-learn,sgenoud/scikit-learn,mblondel/scikit-learn,themrmax/scikit-learn,vermouthmjl/scikit-learn,Myasuka/scikit-learn,treycausey/scikit-learn,schets/scikit-learn,cauchycui/scikit-learn,lazywei/scikit-learn,hlin117/scikit-learn,RomainBrault/scikit-learn,huzq/scikit-learn,cainiaocome/scikit-learn,eg-zhang/scikit-learn,olologin
/scikit-learn,sarahgrogan/scikit-learn,evgchz/scikit-learn,alexsavio/scikit-learn,IndraVikas/scikit-learn,jseabold/scikit-learn,sinhrks/scikit-learn,RPGOne/scikit-learn,chrisburr/scikit-learn,mjgrav2001/scikit-learn,CforED/Machine-Learning,rohanp/scikit-learn,carrillo/scikit-learn,Garrett-R/scikit-learn,henrykironde/scikit-learn,betatim/scikit-learn,loli/sklearn-ensembletrees,tdhopper/scikit-learn,IndraVikas/scikit-learn,belltailjp/scikit-learn,jereze/scikit-learn,rohanp/scikit-learn,nmayorov/scikit-learn,rahuldhote/scikit-learn,Obus/scikit-learn,liberatorqjw/scikit-learn,sergeyf/scikit-learn,Fireblend/scikit-learn,xubenben/scikit-learn,UNR-AERIAL/scikit-learn,jjx02230808/project0223,untom/scikit-learn,nhejazi/scikit-learn,davidgbe/scikit-learn,Djabbz/scikit-learn,CVML/scikit-learn,fabianp/scikit-learn,mwv/scikit-learn,glemaitre/scikit-learn,jorik041/scikit-learn,xiaoxiamii/scikit-learn,loli/semisupervisedforests,Myasuka/scikit-learn,hainm/scikit-learn,ominux/scikit-learn,ssaeger/scikit-learn,AlexandreAbraham/scikit-learn,adamgreenhall/scikit-learn,ElDeveloper/scikit-learn,potash/scikit-learn,macks22/scikit-learn,TomDLT/scikit-learn,xavierwu/scikit-learn,dsquareindia/scikit-learn,466152112/scikit-learn,khkaminska/scikit-learn,carrillo/scikit-learn,LiaoPan/scikit-learn,bnaul/scikit-learn,russel1237/scikit-learn,imaculate/scikit-learn,xubenben/scikit-learn,jorge2703/scikit-learn,alexsavio/scikit-learn,manhhomienbienthuy/scikit-learn,giorgiop/scikit-learn,idlead/scikit-learn,lenovor/scikit-learn,altairpearl/scikit-learn,zorojean/scikit-learn,JsNoNo/scikit-learn,UNR-AERIAL/scikit-learn,HolgerPeters/scikit-learn,arahuja/scikit-learn,zhenv5/scikit-learn,cl4rke/scikit-learn,nvoron23/scikit-learn,iismd17/scikit-learn,rvraghav93/scikit-learn,mjgrav2001/scikit-learn,loli/semisupervisedforests,tosolveit/scikit-learn,djgagne/scikit-learn,ashhher3/scikit-learn,lesteve/scikit-learn,466152112/scikit-learn,mxjl620/scikit-learn,procoder317/scikit-learn,luo66/scikit-learn,hrjn/scikit-learn,NelisVerhoef/scikit-learn,saiwing-yeung/scikit-learn,pv/scikit-learn,DSLituiev/scikit-learn,robin-lai/scikit-learn,ClimbsRocks/scikit-learn,wanggang3333/scikit-learn,kashif/scikit-learn,deepesch/scikit-learn,olologin/scikit-learn,trankmichael/scikit-learn,russel1237/scikit-learn,trungnt13/scikit-learn,kagayakidan/scikit-learn,TomDLT/scikit-learn,olologin/scikit-learn,huzq/scikit-learn,shyamalschandra/scikit-learn,ChanChiChoi/scikit-learn,CforED/Machine-Learning,ycaihua/scikit-learn,HolgerPeters/scikit-learn,fyffyt/scikit-learn,mhdella/scikit-learn,imaculate/scikit-learn,bhargav/scikit-learn,sinhrks/scikit-learn,nelson-liu/scikit-learn,MartinSavc/scikit-learn,rahul-c1/scikit-learn,r-mart/scikit-learn,frank-tancf/scikit-learn,Nyker510/scikit-learn,ChanderG/scikit-learn,hlin117/scikit-learn,jblackburne/scikit-learn,zorojean/scikit-learn,iismd17/scikit-learn,0asa/scikit-learn,mjgrav2001/scikit-learn,pratapvardhan/scikit-learn,schets/scikit-learn,MechCoder/scikit-learn,RomainBrault/scikit-learn,florian-f/sklearn,pianomania/scikit-learn,abhishekgahlot/scikit-learn,zaxtax/scikit-learn,trungnt13/scikit-learn,altairpearl/scikit-learn,TomDLT/scikit-learn,ilyes14/scikit-learn,shyamalschandra/scikit-learn,Lawrence-Liu/scikit-learn,elkingtonmcb/scikit-learn,hrjn/scikit-learn,abimannans/scikit-learn,chrsrds/scikit-learn,mhue/scikit-learn,vermouthmjl/scikit-learn,mugizico/scikit-learn,rahuldhote/scikit-learn,ElDeveloper/scikit-learn,jorik041/scikit-learn,vinayak-mehta/scikit-learn,jkarnows/scikit-learn,LohithBlaze/scikit-learn,shyamalsch
andra/scikit-learn,joshloyal/scikit-learn,kmike/scikit-learn,rrohan/scikit-learn,ltiao/scikit-learn,f3r/scikit-learn,joernhees/scikit-learn,ankurankan/scikit-learn,Titan-C/scikit-learn,andaag/scikit-learn,yyjiang/scikit-learn,poryfly/scikit-learn,jayflo/scikit-learn,fzalkow/scikit-learn,lazywei/scikit-learn,spallavolu/scikit-learn,IshankGulati/scikit-learn,ogrisel/scikit-learn,saiwing-yeung/scikit-learn,ZenDevelopmentSystems/scikit-learn,jaidevd/scikit-learn,joernhees/scikit-learn,vshtanko/scikit-learn,Obus/scikit-learn,anurag313/scikit-learn,Djabbz/scikit-learn,kashif/scikit-learn,aewhatley/scikit-learn,terkkila/scikit-learn,Clyde-fare/scikit-learn,gotomypc/scikit-learn,bhargav/scikit-learn,ChanChiChoi/scikit-learn,eickenberg/scikit-learn,maheshakya/scikit-learn,glennq/scikit-learn,liangz0707/scikit-learn,walterreade/scikit-learn,marcocaccin/scikit-learn,liberatorqjw/scikit-learn,marcocaccin/scikit-learn,rrohan/scikit-learn,mugizico/scikit-learn,tomlof/scikit-learn,jmetzen/scikit-learn,gotomypc/scikit-learn,waterponey/scikit-learn,vybstat/scikit-learn,0x0all/scikit-learn,AIML/scikit-learn,stylianos-kampakis/scikit-learn,frank-tancf/scikit-learn,robbymeals/scikit-learn,harshaneelhg/scikit-learn,RayMick/scikit-learn,aflaxman/scikit-learn,xuewei4d/scikit-learn,hugobowne/scikit-learn,murali-munna/scikit-learn,Lawrence-Liu/scikit-learn,Sentient07/scikit-learn,kevin-intel/scikit-learn,YinongLong/scikit-learn,espg/scikit-learn,0asa/scikit-learn,mrshu/scikit-learn,IssamLaradji/scikit-learn,JsNoNo/scikit-learn,aewhatley/scikit-learn,rahul-c1/scikit-learn,arahuja/scikit-learn,alvarofierroclavero/scikit-learn,aflaxman/scikit-learn,lucidfrontier45/scikit-learn,yyjiang/scikit-learn,clemkoa/scikit-learn,plissonf/scikit-learn,zaxtax/scikit-learn,yask123/scikit-learn,bigdataelephants/scikit-learn,kjung/scikit-learn,gotomypc/scikit-learn,ChanChiChoi/scikit-learn,h2educ/scikit-learn,ningchi/scikit-learn,Aasmi/scikit-learn,fengzhyuan/scikit-learn,larsmans/scikit-learn,pkruskal/scikit-learn,BiaDarkia/scikit-learn,vigilv/scikit-learn,harshaneelhg/scikit-learn,ilo10/scikit-learn,OshynSong/scikit-learn,RayMick/scikit-learn,quheng/scikit-learn,anntzer/scikit-learn,liangz0707/scikit-learn,UNR-AERIAL/scikit-learn,liberatorqjw/scikit-learn,lbishal/scikit-learn,kylerbrown/scikit-learn,justincassidy/scikit-learn,macks22/scikit-learn,djgagne/scikit-learn,khkaminska/scikit-learn,hugobowne/scikit-learn,ishanic/scikit-learn,JeanKossaifi/scikit-learn,Lawrence-Liu/scikit-learn,eickenberg/scikit-learn,ltiao/scikit-learn,kjung/scikit-learn,mattgiguere/scikit-learn,tosolveit/scikit-learn,hlin117/scikit-learn,victorbergelin/scikit-learn,eickenberg/scikit-learn,OshynSong/scikit-learn,carrillo/scikit-learn,toastedcornflakes/scikit-learn,tmhm/scikit-learn,arabenjamin/scikit-learn,jpautom/scikit-learn,aminert/scikit-learn,nhejazi/scikit-learn,nesterione/scikit-learn,adamgreenhall/scikit-learn,sonnyhu/scikit-learn,deepesch/scikit-learn,abhishekkrthakur/scikit-learn,bthirion/scikit-learn,Jimmy-Morzaria/scikit-learn,mblondel/scikit-learn,sergeyf/scikit-learn,MartinSavc/scikit-learn,mlyundin/scikit-learn,rishikksh20/scikit-learn,Jimmy-Morzaria/scikit-learn,Titan-C/scikit-learn,kashif/scikit-learn,kaichogami/scikit-learn,ishanic/scikit-learn,JosmanPS/scikit-learn,JeanKossaifi/scikit-learn,anirudhjayaraman/scikit-learn,vortex-ape/scikit-learn,herilalaina/scikit-learn,sonnyhu/scikit-learn,kmike/scikit-learn,icdishb/scikit-learn,AlexRobson/scikit-learn,raghavrv/scikit-learn,jmschrei/scikit-learn,cainiaocome/scikit-learn,thilbern/scikit-lea
rn,betatim/scikit-learn,AlexRobson/scikit-learn,zihua/scikit-learn,nomadcube/scikit-learn,q1ang/scikit-learn,fbagirov/scikit-learn,DonBeo/scikit-learn,Vimos/scikit-learn,chrsrds/scikit-learn,DonBeo/scikit-learn,alexeyum/scikit-learn,cybernet14/scikit-learn,fabioticconi/scikit-learn,rsivapr/scikit-learn,anntzer/scikit-learn,abhishekkrthakur/scikit-learn,LiaoPan/scikit-learn,mikebenfield/scikit-learn,jmschrei/scikit-learn,CVML/scikit-learn,rajat1994/scikit-learn,fabianp/scikit-learn,zihua/scikit-learn,xyguo/scikit-learn,sgenoud/scikit-learn,pypot/scikit-learn,btabibian/scikit-learn,xiaoxiamii/scikit-learn,sanketloke/scikit-learn,hsiaoyi0504/scikit-learn,abhishekgahlot/scikit-learn,madjelan/scikit-learn,arahuja/scikit-learn,ashhher3/scikit-learn,yunfeilu/scikit-learn,bigdataelephants/scikit-learn,hrjn/scikit-learn,krez13/scikit-learn,jblackburne/scikit-learn,pkruskal/scikit-learn,sumspr/scikit-learn,mxjl620/scikit-learn,depet/scikit-learn,lbishal/scikit-learn,pythonvietnam/scikit-learn,Adai0808/scikit-learn,yunfeilu/scikit-learn,roxyboy/scikit-learn,MechCoder/scikit-learn,jmschrei/scikit-learn,samuel1208/scikit-learn,sgenoud/scikit-learn,nikitasingh981/scikit-learn,glennq/scikit-learn,abimannans/scikit-learn,shahankhatch/scikit-learn,kylerbrown/scikit-learn,cauchycui/scikit-learn,JeanKossaifi/scikit-learn,loli/sklearn-ensembletrees,ElDeveloper/scikit-learn,loli/semisupervisedforests,shikhardb/scikit-learn,mhue/scikit-learn,xyguo/scikit-learn,xwolf12/scikit-learn,abhishekgahlot/scikit-learn,lesteve/scikit-learn,fabianp/scikit-learn,costypetrisor/scikit-learn,lazywei/scikit-learn,jakirkham/scikit-learn,samzhang111/scikit-learn,DSLituiev/scikit-learn,vermouthmjl/scikit-learn,lazywei/scikit-learn,MohammedWasim/scikit-learn,wazeerzulfikar/scikit-learn,hsiaoyi0504/scikit-learn,kylerbrown/scikit-learn,nhejazi/scikit-learn,simon-pepin/scikit-learn,mikebenfield/scikit-learn,scikit-learn/scikit-learn,loli/sklearn-ensembletrees,Vimos/scikit-learn,abhishekgahlot/scikit-learn,zhenv5/scikit-learn,pianomania/scikit-learn,shenzebang/scikit-learn,jzt5132/scikit-learn,andrewnc/scikit-learn,OshynSong/scikit-learn,xiaoxiamii/scikit-learn,treycausey/scikit-learn,nelson-liu/scikit-learn,DSLituiev/scikit-learn,manashmndl/scikit-learn,kmike/scikit-learn,tdhopper/scikit-learn,Clyde-fare/scikit-learn,sanketloke/scikit-learn,murali-munna/scikit-learn,mjudsp/Tsallis,jakirkham/scikit-learn,dingocuster/scikit-learn,ivannz/scikit-learn,fredhusser/scikit-learn,ycaihua/scikit-learn,siutanwong/scikit-learn,pratapvardhan/scikit-learn,vivekmishra1991/scikit-learn,kmike/scikit-learn,dsquareindia/scikit-learn,adamgreenhall/scikit-learn,larsmans/scikit-learn,xzh86/scikit-learn,akionakamura/scikit-learn,aminert/scikit-learn,3manuek/scikit-learn,MartinDelzant/scikit-learn,nhejazi/scikit-learn,xzh86/scikit-learn,henridwyer/scikit-learn,RachitKansal/scikit-learn,mattgiguere/scikit-learn,potash/scikit-learn,AnasGhrab/scikit-learn,cwu2011/scikit-learn,treycausey/scikit-learn,aewhatley/scikit-learn,cauchycui/scikit-learn,betatim/scikit-learn,PatrickChrist/scikit-learn,andaag/scikit-learn,BiaDarkia/scikit-learn,eickenberg/scikit-learn,sergeyf/scikit-learn,bigdataelephants/scikit-learn,theoryno3/scikit-learn,nesterione/scikit-learn,appapantula/scikit-learn,mehdidc/scikit-learn,Achuth17/scikit-learn,ZenDevelopmentSystems/scikit-learn,r-mart/scikit-learn,giorgiop/scikit-learn,victorbergelin/scikit-learn,lucidfrontier45/scikit-learn,thientu/scikit-learn,yanlend/scikit-learn,michigraber/scikit-learn,jorge2703/scikit-learn,scikit-learn/scikit-le
arn,maheshakya/scikit-learn,dhruv13J/scikit-learn,moutai/scikit-learn,meduz/scikit-learn,lesteve/scikit-learn,espg/scikit-learn,eg-zhang/scikit-learn,mugizico/scikit-learn,jereze/scikit-learn,PatrickChrist/scikit-learn,mfjb/scikit-learn,nelson-liu/scikit-learn,CVML/scikit-learn,JsNoNo/scikit-learn,arabenjamin/scikit-learn,abhishekkrthakur/scikit-learn,MartinDelzant/scikit-learn,shikhardb/scikit-learn,harshaneelhg/scikit-learn,hsuantien/scikit-learn,ldirer/scikit-learn,ndingwall/scikit-learn,Myasuka/scikit-learn,Adai0808/scikit-learn,themrmax/scikit-learn,jkarnows/scikit-learn,ivannz/scikit-learn,ilyes14/scikit-learn,fengzhyuan/scikit-learn,huzq/scikit-learn,rohanp/scikit-learn,pianomania/scikit-learn,toastedcornflakes/scikit-learn,xwolf12/scikit-learn,moutai/scikit-learn,billy-inn/scikit-learn,potash/scikit-learn,ldirer/scikit-learn,ahoyosid/scikit-learn,PatrickOReilly/scikit-learn,xuewei4d/scikit-learn,robin-lai/scikit-learn,alexsavio/scikit-learn,fengzhyuan/scikit-learn,PrashntS/scikit-learn,ephes/scikit-learn,henrykironde/scikit-learn,zorroblue/scikit-learn,dsullivan7/scikit-learn,gclenaghan/scikit-learn,toastedcornflakes/scikit-learn,rexshihaoren/scikit-learn,gclenaghan/scikit-learn,NelisVerhoef/scikit-learn,kaichogami/scikit-learn,sanketloke/scikit-learn,khkaminska/scikit-learn,ChanChiChoi/scikit-learn,jpautom/scikit-learn,pompiduskus/scikit-learn,jzt5132/scikit-learn,mayblue9/scikit-learn,deepesch/scikit-learn,0x0all/scikit-learn,tmhm/scikit-learn,tawsifkhan/scikit-learn,Fireblend/scikit-learn,0asa/scikit-learn,PrashntS/scikit-learn,poryfly/scikit-learn,Windy-Ground/scikit-learn,MartinDelzant/scikit-learn,ngoix/OCRF,jzt5132/scikit-learn,vybstat/scikit-learn,rvraghav93/scikit-learn,yask123/scikit-learn,cdegroc/scikit-learn,UNR-AERIAL/scikit-learn,ClimbsRocks/scikit-learn,joernhees/scikit-learn,lin-credible/scikit-learn,ilo10/scikit-learn,B3AU/waveTree
|
#! /usr/bin/env python
# Last Change: Sun Jul 22 01:00 PM 2007 J
from info import __doc__
from gauss_mix import GmParamError, GM
from gmm_em import GmmParamError, GMM, EM
from online_em import OnGMM as _OnGMM
__all__ = filter(lambda s:not s.startswith('_'), dir())
from numpy.testing import NumpyTest
test = NumpyTest().test
def test_suite(*args):
# XXX: this is to avoid recursive call to itself. This is an horrible hack,
# I have no idea why infinite recursion happens otherwise.
if len(args) > 0:
import unittest
return unittest.TestSuite()
np = NumpyTest()
np.testfile_patterns.append(r'test_examples.py')
return np.test(level = -10, verbosity = 5)
Remove deprecated test runner for em machine.
From: cdavid <cdavid@cb17146a-f446-4be1-a4f7-bd7c5bb65646>
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@271 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8
|
#! /usr/bin/env python
# Last Change: Sun Sep 07 04:00 PM 2008 J
from info import __doc__
from gauss_mix import GmParamError, GM
from gmm_em import GmmParamError, GMM, EM
from online_em import OnGMM as _OnGMM
__all__ = filter(lambda s:not s.startswith('_'), dir())
|
<commit_before>#! /usr/bin/env python
# Last Change: Sun Jul 22 01:00 PM 2007 J
from info import __doc__
from gauss_mix import GmParamError, GM
from gmm_em import GmmParamError, GMM, EM
from online_em import OnGMM as _OnGMM
__all__ = filter(lambda s:not s.startswith('_'), dir())
from numpy.testing import NumpyTest
test = NumpyTest().test
def test_suite(*args):
# XXX: this is to avoid recursive call to itself. This is an horrible hack,
# I have no idea why infinite recursion happens otherwise.
if len(args) > 0:
import unittest
return unittest.TestSuite()
np = NumpyTest()
np.testfile_patterns.append(r'test_examples.py')
return np.test(level = -10, verbosity = 5)
<commit_msg>Remove deprecated test runner for em machine.
From: cdavid <cdavid@cb17146a-f446-4be1-a4f7-bd7c5bb65646>
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@271 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8<commit_after>
|
#! /usr/bin/env python
# Last Change: Sun Sep 07 04:00 PM 2008 J
from info import __doc__
from gauss_mix import GmParamError, GM
from gmm_em import GmmParamError, GMM, EM
from online_em import OnGMM as _OnGMM
__all__ = filter(lambda s:not s.startswith('_'), dir())
|
#! /usr/bin/env python
# Last Change: Sun Jul 22 01:00 PM 2007 J
from info import __doc__
from gauss_mix import GmParamError, GM
from gmm_em import GmmParamError, GMM, EM
from online_em import OnGMM as _OnGMM
__all__ = filter(lambda s:not s.startswith('_'), dir())
from numpy.testing import NumpyTest
test = NumpyTest().test
def test_suite(*args):
# XXX: this is to avoid recursive call to itself. This is an horrible hack,
# I have no idea why infinite recursion happens otherwise.
if len(args) > 0:
import unittest
return unittest.TestSuite()
np = NumpyTest()
np.testfile_patterns.append(r'test_examples.py')
return np.test(level = -10, verbosity = 5)
Remove deprecated test runner for em machine.
From: cdavid <cdavid@cb17146a-f446-4be1-a4f7-bd7c5bb65646>
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@271 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8#! /usr/bin/env python
# Last Change: Sun Sep 07 04:00 PM 2008 J
from info import __doc__
from gauss_mix import GmParamError, GM
from gmm_em import GmmParamError, GMM, EM
from online_em import OnGMM as _OnGMM
__all__ = filter(lambda s:not s.startswith('_'), dir())
|
<commit_before>#! /usr/bin/env python
# Last Change: Sun Jul 22 01:00 PM 2007 J
from info import __doc__
from gauss_mix import GmParamError, GM
from gmm_em import GmmParamError, GMM, EM
from online_em import OnGMM as _OnGMM
__all__ = filter(lambda s:not s.startswith('_'), dir())
from numpy.testing import NumpyTest
test = NumpyTest().test
def test_suite(*args):
# XXX: this is to avoid recursive call to itself. This is an horrible hack,
# I have no idea why infinite recursion happens otherwise.
if len(args) > 0:
import unittest
return unittest.TestSuite()
np = NumpyTest()
np.testfile_patterns.append(r'test_examples.py')
return np.test(level = -10, verbosity = 5)
<commit_msg>Remove deprecated test runner for em machine.
From: cdavid <cdavid@cb17146a-f446-4be1-a4f7-bd7c5bb65646>
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@271 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8<commit_after>#! /usr/bin/env python
# Last Change: Sun Sep 07 04:00 PM 2008 J
from info import __doc__
from gauss_mix import GmParamError, GM
from gmm_em import GmmParamError, GMM, EM
from online_em import OnGMM as _OnGMM
__all__ = filter(lambda s:not s.startswith('_'), dir())
|
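A side note on the surviving line __all__ = filter(lambda s: not s.startswith('_'), dir()): on Python 2 this produces a list, but on Python 3 it produces a lazy filter object, which is usually not what __all__ should be. A minimal, version-agnostic sketch of the same idea (not part of the original commit):

# Collect the module's public names as a concrete list; dir() is
# evaluated once, at import time, after the public imports above.
__all__ = [name for name in dir() if not name.startswith('_')]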
73666cdc83e55aefcf4796c286d5d11fe2cac956
|
py/next-greater-element-ii.py
|
py/next-greater-element-ii.py
|
class Solution(object):
def nextGreaterElements(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
stack = []
lnum = len(nums)
for i in xrange(lnum - 1, -1, -1):
n = nums[i]
while stack and stack[-1] <= n:
stack.pop()
stack.append(n)
ans = [None] * lnum
for i in xrange(lnum - 1, -1, -1):
n = nums[i]
while stack and stack[-1] <= n:
stack.pop()
if not stack:
ans[i] = -1
else:
ans[i] = stack[-1]
stack.append(n)
return ans
|
Add py solution for 503. Next Greater Element II
|
Add py solution for 503. Next Greater Element II
503. Next Greater Element II: https://leetcode.com/problems/next-greater-element-ii/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 503. Next Greater Element II
503. Next Greater Element II: https://leetcode.com/problems/next-greater-element-ii/
|
class Solution(object):
def nextGreaterElements(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
stack = []
lnum = len(nums)
for i in xrange(lnum - 1, -1, -1):
n = nums[i]
while stack and stack[-1] <= n:
stack.pop()
stack.append(n)
ans = [None] * lnum
for i in xrange(lnum - 1, -1, -1):
n = nums[i]
while stack and stack[-1] <= n:
stack.pop()
if not stack:
ans[i] = -1
else:
ans[i] = stack[-1]
stack.append(n)
return ans
|
<commit_before><commit_msg>Add py solution for 503. Next Greater Element II
503. Next Greater Element II: https://leetcode.com/problems/next-greater-element-ii/<commit_after>
|
class Solution(object):
def nextGreaterElements(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
stack = []
lnum = len(nums)
for i in xrange(lnum - 1, -1, -1):
n = nums[i]
while stack and stack[-1] <= n:
stack.pop()
stack.append(n)
ans = [None] * lnum
for i in xrange(lnum - 1, -1, -1):
n = nums[i]
while stack and stack[-1] <= n:
stack.pop()
if not stack:
ans[i] = -1
else:
ans[i] = stack[-1]
stack.append(n)
return ans
|
Add py solution for 503. Next Greater Element II
503. Next Greater Element II: https://leetcode.com/problems/next-greater-element-ii/class Solution(object):
def nextGreaterElements(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
stack = []
lnum = len(nums)
for i in xrange(lnum - 1, -1, -1):
n = nums[i]
while stack and stack[-1] <= n:
stack.pop()
stack.append(n)
ans = [None] * lnum
for i in xrange(lnum - 1, -1, -1):
n = nums[i]
while stack and stack[-1] <= n:
stack.pop()
if not stack:
ans[i] = -1
else:
ans[i] = stack[-1]
stack.append(n)
return ans
|
<commit_before><commit_msg>Add py solution for 503. Next Greater Element II
503. Next Greater Element II: https://leetcode.com/problems/next-greater-element-ii/<commit_after>class Solution(object):
def nextGreaterElements(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
stack = []
lnum = len(nums)
for i in xrange(lnum - 1, -1, -1):
n = nums[i]
while stack and stack[-1] <= n:
stack.pop()
stack.append(n)
ans = [None] * lnum
for i in xrange(lnum - 1, -1, -1):
n = nums[i]
while stack and stack[-1] <= n:
stack.pop()
if not stack:
ans[i] = -1
else:
ans[i] = stack[-1]
stack.append(n)
return ans
|
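The solution above handles the circular array with two right-to-left passes over a monotonic stack: the first pass pre-seeds the stack with the decreasing candidates an element could "wrap around" to, and the second pass pops smaller-or-equal values and records each index's next greater element. A small illustrative check (the input values are only an example):

if __name__ == '__main__':
    s = Solution()
    # Expected [2, -1, 2]: the trailing 1 wraps around to find the 2.
    print(s.nextGreaterElements([1, 2, 1]))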