Dataset columns and value statistics:

| column | dtype | length range / distinct values |
|---|---|---|
| commit | string | 40–40 |
| old_file | string | 4–118 |
| new_file | string | 4–118 |
| old_contents | string | 0–2.94k |
| new_contents | string | 1–4.43k |
| subject | string | 15–444 |
| message | string | 16–3.45k |
| lang | string | 1 distinct value |
| license | string | 13 distinct values |
| repos | string | 5–43.2k |
| prompt | string | 17–4.58k |
| response | string | 1–4.43k |
| prompt_tagged | string | 58–4.62k |
| response_tagged | string | 1–4.43k |
| text | string | 132–7.29k |
| text_tagged | string | 173–7.33k |
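The records below are easier to work with programmatically than visually. As a minimal sketch (not taken from this page), the columns summarized above can be loaded and inspected with the `datasets` library; the dataset path used here is a placeholder, since the dump does not name the dataset:

```python
# Minimal inspection sketch; "user/commit-dataset" is a placeholder, not the real dataset ID.
from datasets import load_dataset

ds = load_dataset("user/commit-dataset", split="train")  # or a local path to the data files
print(ds.column_names)            # commit, old_file, new_file, old_contents, new_contents, ...
row = ds[0]
print(row["subject"])             # short one-line commit subject
print(row["message"])             # full commit message
print(row["new_contents"][:200])  # start of the post-commit file contents
```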
commit: 56c933ad84837f60f5dbb36c2a8c0fc26b1d5ce9
old_file: project/scripts/main.py
new_file: project/scripts/main.py
new_contents:
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
from fetch_trends import get_updated_daily_data
def get_queries():
"""
Fetch investment data from database. Hard code for now.
returns a list of ("query", "investment_date") tuples
"""
search_queries = ["impeachment", "donald trump", "weather forecast", "giraffe", "chicken nuggets"]
dates = ["2020-12-15", "2021-01-01", "2021-01-07", "2020-11-30", "2020-12-25"]
return zip(search_queries, dates)
def update_database(data):
"""
Add daily search data for term to Cloud Firestore db, overwriting old data if present
"""
return
if __name__ == "__main__":
queries = get_queries()
for query in queries:
daily_data = get_updated_daily_data(*query)
update_database(daily_data)
subject: Add skeleton code for fetching and updating database data. Hard coded for now
message: Add skeleton code for fetching and updating database data. Hard coded for now
lang: Python
license: apache-2.0
repos: googleinterns/sgonks,googleinterns/sgonks,googleinterns/sgonks,googleinterns/sgonks
prompt: Add skeleton code for fetching and updating database data. Hard coded for now
response:
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
from fetch_trends import get_updated_daily_data
def get_queries():
"""
Fetch investment data from database. Hard code for now.
returns a list of ("query", "investment_date") tuples
"""
search_queries = ["impeachment", "donald trump", "weather forecast", "giraffe", "chicken nuggets"]
dates = ["2020-12-15", "2021-01-01", "2021-01-07", "2020-11-30", "2020-12-25"]
return zip(search_queries, dates)
def update_database(data):
"""
Add daily search data for term to Cloud Firestore db, overwriting old data if present
"""
return
if __name__ == "__main__":
queries = get_queries()
for query in queries:
daily_data = get_updated_daily_data(*query)
update_database(daily_data)
prompt_tagged:
<commit_before><commit_msg>Add skeleton code for fetching and updating database data. Hard coded for now<commit_after>
response_tagged:
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
from fetch_trends import get_updated_daily_data
def get_queries():
"""
Fetch investment data from database. Hard code for now.
returns a list of ("query", "investment_date") tuples
"""
search_queries = ["impeachment", "donald trump", "weather forecast", "giraffe", "chicken nuggets"]
dates = ["2020-12-15", "2021-01-01", "2021-01-07", "2020-11-30", "2020-12-25"]
return zip(search_queries, dates)
def update_database(data):
"""
Add daily search data for term to Cloud Firestore db, overwriting old data if present
"""
return
if __name__ == "__main__":
queries = get_queries()
for query in queries:
daily_data = get_updated_daily_data(*query)
update_database(daily_data)
text:
Add skeleton code for fetching and updating database data. Hard coded for now# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
from fetch_trends import get_updated_daily_data
def get_queries():
"""
Fetch investment data from database. Hard code for now.
returns a list of ("query", "investment_date") tuples
"""
search_queries = ["impeachment", "donald trump", "weather forecast", "giraffe", "chicken nuggets"]
dates = ["2020-12-15", "2021-01-01", "2021-01-07", "2020-11-30", "2020-12-25"]
return zip(search_queries, dates)
def update_database(data):
"""
Add daily search data for term to Cloud Firestore db, overwriting old data if present
"""
return
if __name__ == "__main__":
queries = get_queries()
for query in queries:
daily_data = get_updated_daily_data(*query)
update_database(daily_data)
text_tagged:
<commit_before><commit_msg>Add skeleton code for fetching and updating database data. Hard coded for now<commit_after># Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
from fetch_trends import get_updated_daily_data
def get_queries():
"""
Fetch investment data from database. Hard code for now.
returns a list of ("query", "investment_date") tuples
"""
search_queries = ["impeachment", "donald trump", "weather forecast", "giraffe", "chicken nuggets"]
dates = ["2020-12-15", "2021-01-01", "2021-01-07", "2020-11-30", "2020-12-25"]
return zip(search_queries, dates)
def update_database(data):
"""
Add daily search data for term to Cloud Firestore db, overwriting old data if present
"""
return
if __name__ == "__main__":
queries = get_queries()
for query in queries:
daily_data = get_updated_daily_data(*query)
update_database(daily_data)
---
commit: 00c61d58532a6ace185e8740bf4d7cbbfa2c960a
old_file: ogusa/tests/test_data.py
new_file: ogusa/tests/test_data.py
new_contents:
import pytest
# from ogusa.execute.scipts import runner
def test_set_data(monkeypatch):
"""
Check that setting `data` to 'cps' uses cps data
"""
from ogusa.txfunc import get_tax_func_estimate, tax_func_estimate
from ogusa import get_micro_data
mocked_fn = get_micro_data
baseline=False
start_year=2016
reform = {2017: {"_II_em": [10000]}}
calc = get_micro_data.get_calculator(baseline, start_year, reform=reform,
data="cps")
# blind_head is only in the CPS file. See taxcalc/records_variables.json
assert calc.records.blind_head.sum() > 0
subject: Add test for cps file
message: Add test for cps file
lang: Python
license: mit
repos: OpenSourcePolicyCenter/dynamic,OpenSourcePolicyCenter/dynamic,OpenSourcePolicyCenter/dynamic,OpenSourcePolicyCenter/dynamic,OpenSourcePolicyCenter/dynamic
prompt: Add test for cps file
response:
import pytest
# from ogusa.execute.scipts import runner
def test_set_data(monkeypatch):
"""
Check that setting `data` to 'cps' uses cps data
"""
from ogusa.txfunc import get_tax_func_estimate, tax_func_estimate
from ogusa import get_micro_data
mocked_fn = get_micro_data
baseline=False
start_year=2016
reform = {2017: {"_II_em": [10000]}}
calc = get_micro_data.get_calculator(baseline, start_year, reform=reform,
data="cps")
# blind_head is only in the CPS file. See taxcalc/records_variables.json
assert calc.records.blind_head.sum() > 0
prompt_tagged:
<commit_before><commit_msg>Add test for cps file<commit_after>
response_tagged:
import pytest
# from ogusa.execute.scipts import runner
def test_set_data(monkeypatch):
"""
Check that setting `data` to 'cps' uses cps data
"""
from ogusa.txfunc import get_tax_func_estimate, tax_func_estimate
from ogusa import get_micro_data
mocked_fn = get_micro_data
baseline=False
start_year=2016
reform = {2017: {"_II_em": [10000]}}
calc = get_micro_data.get_calculator(baseline, start_year, reform=reform,
data="cps")
# blind_head is only in the CPS file. See taxcalc/records_variables.json
assert calc.records.blind_head.sum() > 0
text:
Add test for cps fileimport pytest
# from ogusa.execute.scipts import runner
def test_set_data(monkeypatch):
"""
Check that setting `data` to 'cps' uses cps data
"""
from ogusa.txfunc import get_tax_func_estimate, tax_func_estimate
from ogusa import get_micro_data
mocked_fn = get_micro_data
baseline=False
start_year=2016
reform = {2017: {"_II_em": [10000]}}
calc = get_micro_data.get_calculator(baseline, start_year, reform=reform,
data="cps")
# blind_head is only in the CPS file. See taxcalc/records_variables.json
assert calc.records.blind_head.sum() > 0
text_tagged:
<commit_before><commit_msg>Add test for cps file<commit_after>import pytest
# from ogusa.execute.scipts import runner
def test_set_data(monkeypatch):
"""
Check that setting `data` to 'cps' uses cps data
"""
from ogusa.txfunc import get_tax_func_estimate, tax_func_estimate
from ogusa import get_micro_data
mocked_fn = get_micro_data
baseline=False
start_year=2016
reform = {2017: {"_II_em": [10000]}}
calc = get_micro_data.get_calculator(baseline, start_year, reform=reform,
data="cps")
# blind_head is only in the CPS file. See taxcalc/records_variables.json
assert calc.records.blind_head.sum() > 0
---
commit: 9b7d8998066c7963de11e32bd5755f9e1fff0baf
old_file: migrations/versions/6a8c19565060_move_to_flask.py
new_file: migrations/versions/6a8c19565060_move_to_flask.py
new_contents:
"""Rename tables to account for Flask-SQLAlchemy's auto-naming.
Unlike our own (old) table name generator, Flask-SQLAlchemy inserts
underscores in names that are CamelCase (i.e. table names are snake_case).
There's no reason to keep the old behavior, but we need this migration script
otherwise.
Revision ID: 6a8c19565060
Revises: None
Create Date: 2016-03-15 23:40:11.411599
"""
# revision identifiers, used by Alembic.
revision = '6a8c19565060'
down_revision = None
from alembic import op
import sys
def upgrade():
if 'haas.ext.switches.dell' in sys.modules:
op.rename_table('powerconnect55xx', 'power_connect55xx')
if 'haas.ext.switches.mock' in sys.modules:
op.rename_table('mockswitch', 'mock_switch')
op.rename_table('networkattachment', 'network_attachment')
op.rename_table('networkingaction', 'networking_action')
def downgrade():
if 'haas.ext.switches.dell' in sys.modules:
op.rename_table('power_connect55xx', 'powerconnect55xx')
if 'haas.ext.switches.mock' in sys.modules:
op.rename_table('mock_switch', 'mockswitch')
op.rename_table('network_attachment', 'networkattachment')
op.rename_table('networking_action', 'networkingaction')
subject: Add a migration script accounting for flask changes
message:
Add a migration script accounting for flask changes
Per the script's docstring, the only thing about the schema that's
changed is that the table names Flask-SQLAlchemy generates are slightly
different. The schema is otherwise the same, with one caveat:
With Postgres, for every table that has an AUTO INCREMENT id, another
relation will appear as ${tablename}_id_seq. The migration script
doesn't change these names, so if you're upgrading from the old db you
get the tables `network_attachment` and `networkattachment_id_seq`,
whereas if you're initializing a fresh database, you get
`network_attachment_id_seq`.
This shouldn't actually cause any problems, but it's a bit awkward.
lang: Python
license: apache-2.0
repos: SahilTikale/haas,henn/hil,henn/haas,henn/hil_sahil,kylehogan/haas,meng-sun/hil,meng-sun/hil,kylehogan/hil,CCI-MOC/haas,henn/hil,kylehogan/hil,henn/hil_sahil
prompt:
Add a migration script accounting for flask changes
Per the script's docstring, the only thing about the schema that's
changed is that the table names Flask-SQLAlchemy generates are slightly
different. The schema is otherwise the same, with one caveat:
With Postgres, for every table that has an AUTO INCREMENT id, another
relation will appear as ${tablename}_id_seq. The migration script
doesn't change these names, so if you're upgrading from the old db you
get the tables `network_attachment` and `networkattachment_id_seq`,
whereas if you're initializing a fresh database, you get
`network_attachment_id_seq`.
This shouldn't actually cause any problems, but it's a bit awkward.
response:
"""Rename tables to account for Flask-SQLAlchemy's auto-naming.
Unlike our own (old) table name generator, Flask-SQLAlchemy inserts
underscores in names that are CamelCase (i.e. table names are snake_case).
There's no reason to keep the old behavior, but we need this migration script
otherwise.
Revision ID: 6a8c19565060
Revises: None
Create Date: 2016-03-15 23:40:11.411599
"""
# revision identifiers, used by Alembic.
revision = '6a8c19565060'
down_revision = None
from alembic import op
import sys
def upgrade():
if 'haas.ext.switches.dell' in sys.modules:
op.rename_table('powerconnect55xx', 'power_connect55xx')
if 'haas.ext.switches.mock' in sys.modules:
op.rename_table('mockswitch', 'mock_switch')
op.rename_table('networkattachment', 'network_attachment')
op.rename_table('networkingaction', 'networking_action')
def downgrade():
if 'haas.ext.switches.dell' in sys.modules:
op.rename_table('power_connect55xx', 'powerconnect55xx')
if 'haas.ext.switches.mock' in sys.modules:
op.rename_table('mock_switch', 'mockswitch')
op.rename_table('network_attachment', 'networkattachment')
op.rename_table('networking_action', 'networkingaction')
prompt_tagged:
<commit_before><commit_msg>Add a migration script accounting for flask changes
Per the script's docstring, the only thing about the schema that's
changed is that the table names Flask-SQLAlchemy generates are slightly
different. The schema is otherwise the same, with one caveat:
With Postgres, for every table that has an AUTO INCREMENT id, another
relation will appear as ${tablename}_id_seq. The migration script
doesn't change these names, so if you're upgrading from the old db you
get the tables `network_attachment` and `networkattachment_id_seq`,
whereas if you're initializing a fresh database, you get
`network_attachment_id_seq`.
This shouldn't actually cause any problems, but it's a bit awkward.<commit_after>
response_tagged:
"""Rename tables to account for Flask-SQLAlchemy's auto-naming.
Unlike our own (old) table name generator, Flask-SQLAlchemy inserts
underscores in names that are CamelCase (i.e. table names are snake_case).
There's no reason to keep the old behavior, but we need this migration script
otherwise.
Revision ID: 6a8c19565060
Revises: None
Create Date: 2016-03-15 23:40:11.411599
"""
# revision identifiers, used by Alembic.
revision = '6a8c19565060'
down_revision = None
from alembic import op
import sys
def upgrade():
if 'haas.ext.switches.dell' in sys.modules:
op.rename_table('powerconnect55xx', 'power_connect55xx')
if 'haas.ext.switches.mock' in sys.modules:
op.rename_table('mockswitch', 'mock_switch')
op.rename_table('networkattachment', 'network_attachment')
op.rename_table('networkingaction', 'networking_action')
def downgrade():
if 'haas.ext.switches.dell' in sys.modules:
op.rename_table('power_connect55xx', 'powerconnect55xx')
if 'haas.ext.switches.mock' in sys.modules:
op.rename_table('mock_switch', 'mockswitch')
op.rename_table('network_attachment', 'networkattachment')
op.rename_table('networking_action', 'networkingaction')
text:
Add a migration script accounting for flask changes
Per the script's docstring, the only thing about the schema that's
changed is that the table names Flask-SQLAlchemy generates are slightly
different. The schema is otherwise the same, with one caveat:
With Postgres, for every table that has an AUTO INCREMENT id, another
relation will appear as ${tablename}_id_seq. The migration script
doesn't change these names, so if you're upgrading from the old db you
get the tables `network_attachment` and `networkattachment_id_seq`,
whereas if you're initializing a fresh database, you get
`network_attachment_id_seq`.
This shouldn't actually cause any problems, but it's a bit awkward."""Rename tables to account for Flask-SQLAlchemy's auto-naming.
Unlike our own (old) table name generator, Flask-SQLAlchemy inserts
underscores in names that are CamelCase (i.e. table names are snake_case).
There's no reason to keep the old behavior, but we need this migration script
otherwise.
Revision ID: 6a8c19565060
Revises: None
Create Date: 2016-03-15 23:40:11.411599
"""
# revision identifiers, used by Alembic.
revision = '6a8c19565060'
down_revision = None
from alembic import op
import sys
def upgrade():
if 'haas.ext.switches.dell' in sys.modules:
op.rename_table('powerconnect55xx', 'power_connect55xx')
if 'haas.ext.switches.mock' in sys.modules:
op.rename_table('mockswitch', 'mock_switch')
op.rename_table('networkattachment', 'network_attachment')
op.rename_table('networkingaction', 'networking_action')
def downgrade():
if 'haas.ext.switches.dell' in sys.modules:
op.rename_table('power_connect55xx', 'powerconnect55xx')
if 'haas.ext.switches.mock' in sys.modules:
op.rename_table('mock_switch', 'mockswitch')
op.rename_table('network_attachment', 'networkattachment')
op.rename_table('networking_action', 'networkingaction')
text_tagged:
<commit_before><commit_msg>Add a migration script accounting for flask changes
Per the script's docstring, the only thing about the schema that's
changed is that the table names Flask-SQLAlchemy generates are slightly
different. The schema is otherwise the same, with one caveat:
With Postgres, for every table that has an AUTO INCREMENT id, another
relation will appear as ${tablename}_id_seq. The migration script
doesn't change these names, so if you're upgrading from the old db you
get the tables `network_attachment` and `networkattachment_id_seq`,
whereas if you're initializing a fresh database, you get
`network_attachment_id_seq`.
This shouldn't actually cause any problems, but it's a bit awkward.<commit_after>"""Rename tables to account for Flask-SQLAlchemy's auto-naming.
Unlike our own (old) table name generator, Flask-SQLAlchemy inserts
underscores in names that are CamelCase (i.e. table names are snake_case).
There's no reason to keep the old behavior, but we need this migration script
otherwise.
Revision ID: 6a8c19565060
Revises: None
Create Date: 2016-03-15 23:40:11.411599
"""
# revision identifiers, used by Alembic.
revision = '6a8c19565060'
down_revision = None
from alembic import op
import sys
def upgrade():
if 'haas.ext.switches.dell' in sys.modules:
op.rename_table('powerconnect55xx', 'power_connect55xx')
if 'haas.ext.switches.mock' in sys.modules:
op.rename_table('mockswitch', 'mock_switch')
op.rename_table('networkattachment', 'network_attachment')
op.rename_table('networkingaction', 'networking_action')
def downgrade():
if 'haas.ext.switches.dell' in sys.modules:
op.rename_table('power_connect55xx', 'powerconnect55xx')
if 'haas.ext.switches.mock' in sys.modules:
op.rename_table('mock_switch', 'mockswitch')
op.rename_table('network_attachment', 'networkattachment')
op.rename_table('networking_action', 'networkingaction')
---
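The commit message in the record above points out that an upgraded Postgres database keeps sequence names such as `networkattachment_id_seq` alongside the renamed `network_attachment` table. Purely as an illustration of that caveat, and not part of the original migration, a follow-up step could rename the leftover sequence with plain SQL via Alembic (the same pattern would apply to the other renamed tables):

```python
# Hypothetical follow-up migration step (not in the commit above): rename the leftover
# auto-increment sequence so it matches the new snake_case table name on upgraded databases.
from alembic import op


def upgrade():
    op.execute(
        "ALTER SEQUENCE networkattachment_id_seq RENAME TO network_attachment_id_seq"
    )
```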
commit: 00462a7270f48846a5287520f5764a190b11928c
old_file: migrations/versions/530a92c42c5_.py
new_file: migrations/versions/530a92c42c5_.py
new_contents:
"""empty message
Revision ID: 530a92c42c5
Revises: 3255e6bed08
Create Date: 2016-08-09 19:46:36.752946
"""
# revision identifiers, used by Alembic.
revision = '530a92c42c5'
down_revision = '3255e6bed08'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('photo', 'datetime')
op.drop_column('photo', 'title')
op.drop_column('photo', 'description')
op.add_column('post', sa.Column('private', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('post', 'private')
op.add_column('photo', sa.Column('description', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('photo', sa.Column('title', sa.VARCHAR(length=30), autoincrement=False, nullable=True))
op.add_column('photo', sa.Column('datetime', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
### end Alembic commands ###
subject: Add migration: add 'private' field to Post table, drop Photo table
message: Add migration: add 'private' field to Post table, drop Photo table
lang: Python
license: mit
repos: heejongahn/hjlog,heejongahn/hjlog,heejongahn/hjlog,heejongahn/hjlog
prompt: Add migration: add 'private' field to Post table, drop Photo table
response:
"""empty message
Revision ID: 530a92c42c5
Revises: 3255e6bed08
Create Date: 2016-08-09 19:46:36.752946
"""
# revision identifiers, used by Alembic.
revision = '530a92c42c5'
down_revision = '3255e6bed08'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('photo', 'datetime')
op.drop_column('photo', 'title')
op.drop_column('photo', 'description')
op.add_column('post', sa.Column('private', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('post', 'private')
op.add_column('photo', sa.Column('description', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('photo', sa.Column('title', sa.VARCHAR(length=30), autoincrement=False, nullable=True))
op.add_column('photo', sa.Column('datetime', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
### end Alembic commands ###
prompt_tagged:
<commit_before><commit_msg>Add migration: add 'private' field to Post table, drop Photo table<commit_after>
response_tagged:
"""empty message
Revision ID: 530a92c42c5
Revises: 3255e6bed08
Create Date: 2016-08-09 19:46:36.752946
"""
# revision identifiers, used by Alembic.
revision = '530a92c42c5'
down_revision = '3255e6bed08'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('photo', 'datetime')
op.drop_column('photo', 'title')
op.drop_column('photo', 'description')
op.add_column('post', sa.Column('private', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('post', 'private')
op.add_column('photo', sa.Column('description', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('photo', sa.Column('title', sa.VARCHAR(length=30), autoincrement=False, nullable=True))
op.add_column('photo', sa.Column('datetime', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
### end Alembic commands ###
text:
Add migration: add 'private' field to Post table, drop Photo table"""empty message
Revision ID: 530a92c42c5
Revises: 3255e6bed08
Create Date: 2016-08-09 19:46:36.752946
"""
# revision identifiers, used by Alembic.
revision = '530a92c42c5'
down_revision = '3255e6bed08'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('photo', 'datetime')
op.drop_column('photo', 'title')
op.drop_column('photo', 'description')
op.add_column('post', sa.Column('private', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('post', 'private')
op.add_column('photo', sa.Column('description', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('photo', sa.Column('title', sa.VARCHAR(length=30), autoincrement=False, nullable=True))
op.add_column('photo', sa.Column('datetime', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
### end Alembic commands ###
text_tagged:
<commit_before><commit_msg>Add migration: add 'private' field to Post table, drop Photo table<commit_after>"""empty message
Revision ID: 530a92c42c5
Revises: 3255e6bed08
Create Date: 2016-08-09 19:46:36.752946
"""
# revision identifiers, used by Alembic.
revision = '530a92c42c5'
down_revision = '3255e6bed08'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('photo', 'datetime')
op.drop_column('photo', 'title')
op.drop_column('photo', 'description')
op.add_column('post', sa.Column('private', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('post', 'private')
op.add_column('photo', sa.Column('description', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('photo', sa.Column('title', sa.VARCHAR(length=30), autoincrement=False, nullable=True))
op.add_column('photo', sa.Column('datetime', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
### end Alembic commands ###
---
commit: fc2ab4cadd4c4bac34c35aeaeb3047da30524063
old_file: driver27/management/commands/testing_multirecord.py
new_file: driver27/management/commands/testing_multirecord.py
new_contents:
from django.core.management.base import BaseCommand, CommandError
from driver27.models import Driver, Competition, Season
from driver27.records import get_record_config
class Command(BaseCommand):
help = 'Test multirecord'
def get_multirecord(self, driver, multirecords, **kwargs):
multiple_records = {}
for multirecord in multirecords:
record_config = get_record_config(multirecord).get('filter')
record_stat = driver.get_stats(**dict(kwargs, **record_config))
multiple_records[multirecord] = record_stat
return multiple_records
def handle(self, *args, **options):
driver = Driver.objects.get(pk=1)
multirecords = ['PODIUM', 'POLE', 'WIN', 'FASTEST']
competition = Competition.objects.get(pk=1)
season_1 = Season.objects.get(pk=1)
season_3 = Season.objects.get(pk=3)
print(self.get_multirecord(driver, multirecords=multirecords, competition=competition))
print(self.get_multirecord(driver, multirecords=multirecords, season=season_1))
print(self.get_multirecord(driver, multirecords=multirecords, season=season_3))
subject: Add command to testing future implementation of get multiple records for driver profile.
message: Add command to testing future implementation of get multiple records for driver profile.
lang: Python
license: mit
repos: SRJ9/django-driver27,SRJ9/django-driver27,SRJ9/django-driver27
prompt: Add command to testing future implementation of get multiple records for driver profile.
response:
from django.core.management.base import BaseCommand, CommandError
from driver27.models import Driver, Competition, Season
from driver27.records import get_record_config
class Command(BaseCommand):
help = 'Test multirecord'
def get_multirecord(self, driver, multirecords, **kwargs):
multiple_records = {}
for multirecord in multirecords:
record_config = get_record_config(multirecord).get('filter')
record_stat = driver.get_stats(**dict(kwargs, **record_config))
multiple_records[multirecord] = record_stat
return multiple_records
def handle(self, *args, **options):
driver = Driver.objects.get(pk=1)
multirecords = ['PODIUM', 'POLE', 'WIN', 'FASTEST']
competition = Competition.objects.get(pk=1)
season_1 = Season.objects.get(pk=1)
season_3 = Season.objects.get(pk=3)
print(self.get_multirecord(driver, multirecords=multirecords, competition=competition))
print(self.get_multirecord(driver, multirecords=multirecords, season=season_1))
print(self.get_multirecord(driver, multirecords=multirecords, season=season_3))
prompt_tagged:
<commit_before><commit_msg>Add command to testing future implementation of get multiple records for driver profile.<commit_after>
response_tagged:
from django.core.management.base import BaseCommand, CommandError
from driver27.models import Driver, Competition, Season
from driver27.records import get_record_config
class Command(BaseCommand):
help = 'Test multirecord'
def get_multirecord(self, driver, multirecords, **kwargs):
multiple_records = {}
for multirecord in multirecords:
record_config = get_record_config(multirecord).get('filter')
record_stat = driver.get_stats(**dict(kwargs, **record_config))
multiple_records[multirecord] = record_stat
return multiple_records
def handle(self, *args, **options):
driver = Driver.objects.get(pk=1)
multirecords = ['PODIUM', 'POLE', 'WIN', 'FASTEST']
competition = Competition.objects.get(pk=1)
season_1 = Season.objects.get(pk=1)
season_3 = Season.objects.get(pk=3)
print(self.get_multirecord(driver, multirecords=multirecords, competition=competition))
print(self.get_multirecord(driver, multirecords=multirecords, season=season_1))
print(self.get_multirecord(driver, multirecords=multirecords, season=season_3))
text:
Add command to testing future implementation of get multiple records for driver profile.from django.core.management.base import BaseCommand, CommandError
from driver27.models import Driver, Competition, Season
from driver27.records import get_record_config
class Command(BaseCommand):
help = 'Test multirecord'
def get_multirecord(self, driver, multirecords, **kwargs):
multiple_records = {}
for multirecord in multirecords:
record_config = get_record_config(multirecord).get('filter')
record_stat = driver.get_stats(**dict(kwargs, **record_config))
multiple_records[multirecord] = record_stat
return multiple_records
def handle(self, *args, **options):
driver = Driver.objects.get(pk=1)
multirecords = ['PODIUM', 'POLE', 'WIN', 'FASTEST']
competition = Competition.objects.get(pk=1)
season_1 = Season.objects.get(pk=1)
season_3 = Season.objects.get(pk=3)
print(self.get_multirecord(driver, multirecords=multirecords, competition=competition))
print(self.get_multirecord(driver, multirecords=multirecords, season=season_1))
print(self.get_multirecord(driver, multirecords=multirecords, season=season_3))
text_tagged:
<commit_before><commit_msg>Add command to testing future implementation of get multiple records for driver profile.<commit_after>from django.core.management.base import BaseCommand, CommandError
from driver27.models import Driver, Competition, Season
from driver27.records import get_record_config
class Command(BaseCommand):
help = 'Test multirecord'
def get_multirecord(self, driver, multirecords, **kwargs):
multiple_records = {}
for multirecord in multirecords:
record_config = get_record_config(multirecord).get('filter')
record_stat = driver.get_stats(**dict(kwargs, **record_config))
multiple_records[multirecord] = record_stat
return multiple_records
def handle(self, *args, **options):
driver = Driver.objects.get(pk=1)
multirecords = ['PODIUM', 'POLE', 'WIN', 'FASTEST']
competition = Competition.objects.get(pk=1)
season_1 = Season.objects.get(pk=1)
season_3 = Season.objects.get(pk=3)
print(self.get_multirecord(driver, multirecords=multirecords, competition=competition))
print(self.get_multirecord(driver, multirecords=multirecords, season=season_1))
print(self.get_multirecord(driver, multirecords=multirecords, season=season_3))
---
commit: e9917a9f4855ebb3b464977a26c7549baeb6a610
old_file: contrib/satgpio.py
new_file: contrib/satgpio.py
new_contents:
"""
Author: Juan Luis Cano Rodríguez
Code to read GP data from Celestrak using the HTTP API and python-sgp4.
Requires some extra dependencies:
$ pip install httpx sgp4
"""
import io
import json
import xml.etree.ElementTree as ET
import httpx
from sgp4 import exporter, omm
from sgp4.api import Satrec
def _generate_url(catalog_number, international_designator, name):
params = {"CATNR": catalog_number, "INTDES": international_designator, "NAME": name}
param_names = [
param_name
for param_name, param_value in params.items()
if param_value is not None
]
if len(param_names) != 1:
raise ValueError(
"Specify exactly one of catalog_number, international_designator, or name"
)
param_name = param_names[0]
param_value = params[param_name]
url = (
"https://celestrak.com/NORAD/elements/gp.php?"
f"{param_name}={param_value}"
"&FORMAT=XML"
)
return url
def _make_query(url):
response = httpx.get(url)
response.raise_for_status()
if response.text == "No GP data found":
raise ValueError(
f"Query '{url}' did not return any results, try a different one"
)
tree = ET.parse(io.StringIO(response.text))
root = tree.getroot()
if len(root) != 1:
raise ValueError(
f"Query '{url}' returned {len(root)} results, try a different one"
)
fields = next(omm.parse_xml(io.StringIO(response.text)))
return fields
def load_gp_from_celestrak(
*, catalog_number=None, international_designator=None, name=None
):
"""Load general perturbations orbital data from Celestrak.
Returns
-------
Satrec
Orbital data from specified object.
Notes
-----
This uses the OMM XML format from Celestrak as described in [1]_.
References
----------
.. [1] Kelso, T.S. "A New Way to Obtain GP Data (aka TLEs)"
https://celestrak.com/NORAD/documentation/gp-data-formats.php
"""
# Assemble query, raise an error if malformed
url = _generate_url(catalog_number, international_designator, name)
# Make API call, raise an error if data is malformed
fields = _make_query(url)
# Initialize and return Satrec object
sat = Satrec()
omm.initialize(sat, fields)
return sat
def print_sat(sat, name):
"""Prints Satrec object in convenient form."""
print(json.dumps(exporter.export_omm(sat, name), indent=2))
subject: Add contrib code to load GP data from Celestrak
message: Add contrib code to load GP data from Celestrak
lang: Python
license: mit
repos: poliastro/poliastro
prompt: Add contrib code to load GP data from Celestrak
response:
"""
Author: Juan Luis Cano Rodríguez
Code to read GP data from Celestrak using the HTTP API and python-sgp4.
Requires some extra dependencies:
$ pip install httpx sgp4
"""
import io
import json
import xml.etree.ElementTree as ET
import httpx
from sgp4 import exporter, omm
from sgp4.api import Satrec
def _generate_url(catalog_number, international_designator, name):
params = {"CATNR": catalog_number, "INTDES": international_designator, "NAME": name}
param_names = [
param_name
for param_name, param_value in params.items()
if param_value is not None
]
if len(param_names) != 1:
raise ValueError(
"Specify exactly one of catalog_number, international_designator, or name"
)
param_name = param_names[0]
param_value = params[param_name]
url = (
"https://celestrak.com/NORAD/elements/gp.php?"
f"{param_name}={param_value}"
"&FORMAT=XML"
)
return url
def _make_query(url):
response = httpx.get(url)
response.raise_for_status()
if response.text == "No GP data found":
raise ValueError(
f"Query '{url}' did not return any results, try a different one"
)
tree = ET.parse(io.StringIO(response.text))
root = tree.getroot()
if len(root) != 1:
raise ValueError(
f"Query '{url}' returned {len(root)} results, try a different one"
)
fields = next(omm.parse_xml(io.StringIO(response.text)))
return fields
def load_gp_from_celestrak(
*, catalog_number=None, international_designator=None, name=None
):
"""Load general perturbations orbital data from Celestrak.
Returns
-------
Satrec
Orbital data from specified object.
Notes
-----
This uses the OMM XML format from Celestrak as described in [1]_.
References
----------
.. [1] Kelso, T.S. "A New Way to Obtain GP Data (aka TLEs)"
https://celestrak.com/NORAD/documentation/gp-data-formats.php
"""
# Assemble query, raise an error if malformed
url = _generate_url(catalog_number, international_designator, name)
# Make API call, raise an error if data is malformed
fields = _make_query(url)
# Initialize and return Satrec object
sat = Satrec()
omm.initialize(sat, fields)
return sat
def print_sat(sat, name):
"""Prints Satrec object in convenient form."""
print(json.dumps(exporter.export_omm(sat, name), indent=2))
prompt_tagged:
<commit_before><commit_msg>Add contrib code to load GP data from Celestrak<commit_after>
response_tagged:
"""
Author: Juan Luis Cano Rodríguez
Code to read GP data from Celestrak using the HTTP API and python-sgp4.
Requires some extra dependencies:
$ pip install httpx sgp4
"""
import io
import json
import xml.etree.ElementTree as ET
import httpx
from sgp4 import exporter, omm
from sgp4.api import Satrec
def _generate_url(catalog_number, international_designator, name):
params = {"CATNR": catalog_number, "INTDES": international_designator, "NAME": name}
param_names = [
param_name
for param_name, param_value in params.items()
if param_value is not None
]
if len(param_names) != 1:
raise ValueError(
"Specify exactly one of catalog_number, international_designator, or name"
)
param_name = param_names[0]
param_value = params[param_name]
url = (
"https://celestrak.com/NORAD/elements/gp.php?"
f"{param_name}={param_value}"
"&FORMAT=XML"
)
return url
def _make_query(url):
response = httpx.get(url)
response.raise_for_status()
if response.text == "No GP data found":
raise ValueError(
f"Query '{url}' did not return any results, try a different one"
)
tree = ET.parse(io.StringIO(response.text))
root = tree.getroot()
if len(root) != 1:
raise ValueError(
f"Query '{url}' returned {len(root)} results, try a different one"
)
fields = next(omm.parse_xml(io.StringIO(response.text)))
return fields
def load_gp_from_celestrak(
*, catalog_number=None, international_designator=None, name=None
):
"""Load general perturbations orbital data from Celestrak.
Returns
-------
Satrec
Orbital data from specified object.
Notes
-----
This uses the OMM XML format from Celestrak as described in [1]_.
References
----------
.. [1] Kelso, T.S. "A New Way to Obtain GP Data (aka TLEs)"
https://celestrak.com/NORAD/documentation/gp-data-formats.php
"""
# Assemble query, raise an error if malformed
url = _generate_url(catalog_number, international_designator, name)
# Make API call, raise an error if data is malformed
fields = _make_query(url)
# Initialize and return Satrec object
sat = Satrec()
omm.initialize(sat, fields)
return sat
def print_sat(sat, name):
"""Prints Satrec object in convenient form."""
print(json.dumps(exporter.export_omm(sat, name), indent=2))
text:
Add contrib code to load GP data from Celestrak"""
Author: Juan Luis Cano Rodríguez
Code to read GP data from Celestrak using the HTTP API and python-sgp4.
Requires some extra dependencies:
$ pip install httpx sgp4
"""
import io
import json
import xml.etree.ElementTree as ET
import httpx
from sgp4 import exporter, omm
from sgp4.api import Satrec
def _generate_url(catalog_number, international_designator, name):
params = {"CATNR": catalog_number, "INTDES": international_designator, "NAME": name}
param_names = [
param_name
for param_name, param_value in params.items()
if param_value is not None
]
if len(param_names) != 1:
raise ValueError(
"Specify exactly one of catalog_number, international_designator, or name"
)
param_name = param_names[0]
param_value = params[param_name]
url = (
"https://celestrak.com/NORAD/elements/gp.php?"
f"{param_name}={param_value}"
"&FORMAT=XML"
)
return url
def _make_query(url):
response = httpx.get(url)
response.raise_for_status()
if response.text == "No GP data found":
raise ValueError(
f"Query '{url}' did not return any results, try a different one"
)
tree = ET.parse(io.StringIO(response.text))
root = tree.getroot()
if len(root) != 1:
raise ValueError(
f"Query '{url}' returned {len(root)} results, try a different one"
)
fields = next(omm.parse_xml(io.StringIO(response.text)))
return fields
def load_gp_from_celestrak(
*, catalog_number=None, international_designator=None, name=None
):
"""Load general perturbations orbital data from Celestrak.
Returns
-------
Satrec
Orbital data from specified object.
Notes
-----
This uses the OMM XML format from Celestrak as described in [1]_.
References
----------
.. [1] Kelso, T.S. "A New Way to Obtain GP Data (aka TLEs)"
https://celestrak.com/NORAD/documentation/gp-data-formats.php
"""
# Assemble query, raise an error if malformed
url = _generate_url(catalog_number, international_designator, name)
# Make API call, raise an error if data is malformed
fields = _make_query(url)
# Initialize and return Satrec object
sat = Satrec()
omm.initialize(sat, fields)
return sat
def print_sat(sat, name):
"""Prints Satrec object in convenient form."""
print(json.dumps(exporter.export_omm(sat, name), indent=2))
text_tagged:
<commit_before><commit_msg>Add contrib code to load GP data from Celestrak<commit_after>"""
Author: Juan Luis Cano Rodríguez
Code to read GP data from Celestrak using the HTTP API and python-sgp4.
Requires some extra dependencies:
$ pip install httpx sgp4
"""
import io
import json
import xml.etree.ElementTree as ET
import httpx
from sgp4 import exporter, omm
from sgp4.api import Satrec
def _generate_url(catalog_number, international_designator, name):
params = {"CATNR": catalog_number, "INTDES": international_designator, "NAME": name}
param_names = [
param_name
for param_name, param_value in params.items()
if param_value is not None
]
if len(param_names) != 1:
raise ValueError(
"Specify exactly one of catalog_number, international_designator, or name"
)
param_name = param_names[0]
param_value = params[param_name]
url = (
"https://celestrak.com/NORAD/elements/gp.php?"
f"{param_name}={param_value}"
"&FORMAT=XML"
)
return url
def _make_query(url):
response = httpx.get(url)
response.raise_for_status()
if response.text == "No GP data found":
raise ValueError(
f"Query '{url}' did not return any results, try a different one"
)
tree = ET.parse(io.StringIO(response.text))
root = tree.getroot()
if len(root) != 1:
raise ValueError(
f"Query '{url}' returned {len(root)} results, try a different one"
)
fields = next(omm.parse_xml(io.StringIO(response.text)))
return fields
def load_gp_from_celestrak(
*, catalog_number=None, international_designator=None, name=None
):
"""Load general perturbations orbital data from Celestrak.
Returns
-------
Satrec
Orbital data from specified object.
Notes
-----
This uses the OMM XML format from Celestrak as described in [1]_.
References
----------
.. [1] Kelso, T.S. "A New Way to Obtain GP Data (aka TLEs)"
https://celestrak.com/NORAD/documentation/gp-data-formats.php
"""
# Assemble query, raise an error if malformed
url = _generate_url(catalog_number, international_designator, name)
# Make API call, raise an error if data is malformed
fields = _make_query(url)
# Initialize and return Satrec object
sat = Satrec()
omm.initialize(sat, fields)
return sat
def print_sat(sat, name):
"""Prints Satrec object in convenient form."""
print(json.dumps(exporter.export_omm(sat, name), indent=2))
---
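A brief usage sketch for the `load_gp_from_celestrak` helper shown in the record above; it assumes the file is importable as `satgpio`, that `httpx` and `sgp4` are installed, and that Celestrak is reachable. The catalog number 25544 (the ISS) is only an example query:

```python
# Usage sketch for the contrib helper above; requires network access to Celestrak.
from satgpio import load_gp_from_celestrak, print_sat  # assumes satgpio.py is on the path

sat = load_gp_from_celestrak(catalog_number=25544)  # 25544 is the ISS; any NORAD ID works
print_sat(sat, "ISS (ZARYA)")                       # dumps the OMM fields as JSON
```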
commit: 0008b7f22ee3d5f16c1d6bf4e62211633818a30c
old_file: project/tools/mktopic.py
new_file: project/tools/mktopic.py
new_contents:
#!/usr/bin/env python
#
# mk_topic.py
#
# Make a notebook for a new topic
#
import datetime
import os
import sys
import lib
from jinja2 import Environment, FileSystemLoader
from lib import nullstrip, slugify
template_env = Environment(loader=FileSystemLoader("data/templates"))
# -t template would be nice, but this will do for now
src_template = template_env.get_template("source_base.ipynb")
def mktopic(title):
"Takes a list of the words of a topic title and builds the appropriate notebook file."
now = datetime.datetime.today()
title = " ".join(title)
slug = lib.slugify(title)
nb_name = slug+".ipynb"
dst_file = os.path.join("nbsource", nb_name)
render_context = {"slug": slug,
"title": title,
"date": now.date(),
"time": now.time(),
"src_file": dst_file} # we are writing a source ...
if os.path.isfile(dst_file):
# If the topic exists do not overwrite it XXX [unless option set].
sys.exit("file {} already exists".format(dst_file))
dst_nb_file = open(dst_file, "w")
nb_content = src_template.render(render_context)
dst_nb_file.write(nb_content)
dst_nb_file.close()
if __name__ == "__main__":
# possible -d option for directory?
if len(sys.argv) < 2:
sys.exit("Sorry, I need a topic name - just one!")
mktopic(sys.argv[1:])
subject: Add new utility to make source documents
message: Add new utility to make source documents
lang: Python
license: mit
repos: holdenweb/nbtools,holdenweb/nbtools
prompt: Add new utility to make source documents
response:
#!/usr/bin/env python
#
# mk_topic.py
#
# Make a notebook for a new topic
#
import datetime
import os
import sys
import lib
from jinja2 import Environment, FileSystemLoader
from lib import nullstrip, slugify
template_env = Environment(loader=FileSystemLoader("data/templates"))
# -t template would be nice, but this will do for now
src_template = template_env.get_template("source_base.ipynb")
def mktopic(title):
"Takes a list of the words of a topic title and builds the appropriate notebook file."
now = datetime.datetime.today()
title = " ".join(title)
slug = lib.slugify(title)
nb_name = slug+".ipynb"
dst_file = os.path.join("nbsource", nb_name)
render_context = {"slug": slug,
"title": title,
"date": now.date(),
"time": now.time(),
"src_file": dst_file} # we are writing a source ...
if os.path.isfile(dst_file):
# If the topic exists do not overwrite it XXX [unless option set].
sys.exit("file {} already exists".format(dst_file))
dst_nb_file = open(dst_file, "w")
nb_content = src_template.render(render_context)
dst_nb_file.write(nb_content)
dst_nb_file.close()
if __name__ == "__main__":
# possible -d option for directory?
if len(sys.argv) < 2:
sys.exit("Sorry, I need a topic name - just one!")
mktopic(sys.argv[1:])
prompt_tagged:
<commit_before><commit_msg>Add new utility to make source documents<commit_after>
response_tagged:
#!/usr/bin/env python
#
# mk_topic.py
#
# Make a notebook for a new topic
#
import datetime
import os
import sys
import lib
from jinja2 import Environment, FileSystemLoader
from lib import nullstrip, slugify
template_env = Environment(loader=FileSystemLoader("data/templates"))
# -t template would be nice, but this will do for now
src_template = template_env.get_template("source_base.ipynb")
def mktopic(title):
"Takes a list of the words of a topic title and builds the appropriate notebook file."
now = datetime.datetime.today()
title = " ".join(title)
slug = lib.slugify(title)
nb_name = slug+".ipynb"
dst_file = os.path.join("nbsource", nb_name)
render_context = {"slug": slug,
"title": title,
"date": now.date(),
"time": now.time(),
"src_file": dst_file} # we are writing a source ...
if os.path.isfile(dst_file):
# If the topic exists do not overwrite it XXX [unless option set].
sys.exit("file {} already exists".format(dst_file))
dst_nb_file = open(dst_file, "w")
nb_content = src_template.render(render_context)
dst_nb_file.write(nb_content)
dst_nb_file.close()
if __name__ == "__main__":
# possible -d option for directory?
if len(sys.argv) < 2:
sys.exit("Sorry, I need a topic name - just one!")
mktopic(sys.argv[1:])
text:
Add new utility to make source documents#!/usr/bin/env python
#
# mk_topic.py
#
# Make a notebook for a new topic
#
import datetime
import os
import sys
import lib
from jinja2 import Environment, FileSystemLoader
from lib import nullstrip, slugify
template_env = Environment(loader=FileSystemLoader("data/templates"))
# -t template would be nice, but this will do for now
src_template = template_env.get_template("source_base.ipynb")
def mktopic(title):
"Takes a list of the words of a topic title and builds the appropriate notebook file."
now = datetime.datetime.today()
title = " ".join(title)
slug = lib.slugify(title)
nb_name = slug+".ipynb"
dst_file = os.path.join("nbsource", nb_name)
render_context = {"slug": slug,
"title": title,
"date": now.date(),
"time": now.time(),
"src_file": dst_file} # we are writing a source ...
if os.path.isfile(dst_file):
# If the topic exists do not overwrite it XXX [unless option set].
sys.exit("file {} already exists".format(dst_file))
dst_nb_file = open(dst_file, "w")
nb_content = src_template.render(render_context)
dst_nb_file.write(nb_content)
dst_nb_file.close()
if __name__ == "__main__":
# possible -d option for directory?
if len(sys.argv) < 2:
sys.exit("Sorry, I need a topic name - just one!")
mktopic(sys.argv[1:])
text_tagged:
<commit_before><commit_msg>Add new utility to make source documents<commit_after>#!/usr/bin/env python
#
# mk_topic.py
#
# Make a notebook for a new topic
#
import datetime
import os
import sys
import lib
from jinja2 import Environment, FileSystemLoader
from lib import nullstrip, slugify
template_env = Environment(loader=FileSystemLoader("data/templates"))
# -t template would be nice, but this will do for now
src_template = template_env.get_template("source_base.ipynb")
def mktopic(title):
"Takes a list of the words of a topic title and builds the appropriate notebook file."
now = datetime.datetime.today()
title = " ".join(title)
slug = lib.slugify(title)
nb_name = slug+".ipynb"
dst_file = os.path.join("nbsource", nb_name)
render_context = {"slug": slug,
"title": title,
"date": now.date(),
"time": now.time(),
"src_file": dst_file} # we are writing a source ...
if os.path.isfile(dst_file):
# If the topic exists do not overwrite it XXX [unless option set].
sys.exit("file {} already exists".format(dst_file))
dst_nb_file = open(dst_file, "w")
nb_content = src_template.render(render_context)
dst_nb_file.write(nb_content)
dst_nb_file.close()
if __name__ == "__main__":
# possible -d option for directory?
if len(sys.argv) < 2:
sys.exit("Sorry, I need a topic name - just one!")
mktopic(sys.argv[1:])
---
commit: 1fc00c46538052c008dd9f7da9693987fc8a1da6
old_file: fm/matrix_factorization.py
new_file: fm/matrix_factorization.py
new_contents:
#!/usr/bin/python
import numpy
def matrix_factorization(R,
P,
Q,
K,
train_step=1000,
learning_rate=0.01,
regular_beta=0.01):
"""
Args:
R: the objetive [N, M] matrix, P * Q => R
P: the learned [N, K] matrix
Q: the learned [K, M] matrix
K: the internal feature number
train_step: the step number to train
learning_rate: the learning rate
regular_beta: the regularization beta
Return:
P: the trained P
Q: the trained Q
"""
N = len(R)
M = len(R[0])
# Train with specified steps
for step in xrange(train_step):
# Loop the 2-dim matrix
for i in xrange(N):
for j in xrange(M):
# Ignore the missing data
if R[i][j] > 0:
# Compute the (r - r'), r' = Pi * Qj
eij = R[i][j] - numpy.dot(P[i, :], Q[:, j])
# Update P and Q with gradient
for k in xrange(K):
# Refer to https://mp.weixin.qq.com/s/CD6TrQeKOkGZkbd7Zklaqg , grad = -2 * (r - r') * Q
P[i][k] = P[i][k] + learning_rate * (
2 * eij * Q[k][j] - regular_beta * P[i][k])
Q[k][j] = Q[k][j] + learning_rate * (
2 * eij * P[i][k] - regular_beta * Q[k][j])
return P, Q
if __name__ == "__main__":
R = [
[5, 3, 0, 1],
[4, 0, 0, 1],
[1, 1, 0, 5],
[1, 0, 0, 4],
[0, 1, 5, 4],
]
R = numpy.array(R)
print("Orgin R: {}".format(R))
N = len(R)
M = len(R[0])
K = 2
P = numpy.random.rand(N, K)
Q = numpy.random.rand(K, M)
new_P, new_Q = matrix_factorization(R, P, Q, K)
new_R = numpy.dot(new_P, new_Q)
print("P: {}\nQ: {}\nR: {}".format(new_P, new_Q, new_R))
subject: Add implementation of matrix factorization
message: Add implementation of matrix factorization
lang: Python
license: mit
repos: tobegit3hub/ml_implementation
prompt: Add implementation of matrix factorization
response:
#!/usr/bin/python
import numpy
def matrix_factorization(R,
P,
Q,
K,
train_step=1000,
learning_rate=0.01,
regular_beta=0.01):
"""
Args:
R: the objetive [N, M] matrix, P * Q => R
P: the learned [N, K] matrix
Q: the learned [K, M] matrix
K: the internal feature number
train_step: the step number to train
learning_rate: the learning rate
regular_beta: the regularization beta
Return:
P: the trained P
Q: the trained Q
"""
N = len(R)
M = len(R[0])
# Train with specified steps
for step in xrange(train_step):
# Loop the 2-dim matrix
for i in xrange(N):
for j in xrange(M):
# Ignore the missing data
if R[i][j] > 0:
# Compute the (r - r'), r' = Pi * Qj
eij = R[i][j] - numpy.dot(P[i, :], Q[:, j])
# Update P and Q with gradient
for k in xrange(K):
# Refer to https://mp.weixin.qq.com/s/CD6TrQeKOkGZkbd7Zklaqg , grad = -2 * (r - r') * Q
P[i][k] = P[i][k] + learning_rate * (
2 * eij * Q[k][j] - regular_beta * P[i][k])
Q[k][j] = Q[k][j] + learning_rate * (
2 * eij * P[i][k] - regular_beta * Q[k][j])
return P, Q
if __name__ == "__main__":
R = [
[5, 3, 0, 1],
[4, 0, 0, 1],
[1, 1, 0, 5],
[1, 0, 0, 4],
[0, 1, 5, 4],
]
R = numpy.array(R)
print("Orgin R: {}".format(R))
N = len(R)
M = len(R[0])
K = 2
P = numpy.random.rand(N, K)
Q = numpy.random.rand(K, M)
new_P, new_Q = matrix_factorization(R, P, Q, K)
new_R = numpy.dot(new_P, new_Q)
print("P: {}\nQ: {}\nR: {}".format(new_P, new_Q, new_R))
prompt_tagged:
<commit_before><commit_msg>Add implementation of matrix factorization<commit_after>
response_tagged:
#!/usr/bin/python
import numpy
def matrix_factorization(R,
P,
Q,
K,
train_step=1000,
learning_rate=0.01,
regular_beta=0.01):
"""
Args:
R: the objetive [N, M] matrix, P * Q => R
P: the learned [N, K] matrix
Q: the learned [K, M] matrix
K: the internal feature number
train_step: the step number to train
learning_rate: the learning rate
regular_beta: the regularization beta
Return:
P: the trained P
Q: the trained Q
"""
N = len(R)
M = len(R[0])
# Train with specified steps
for step in xrange(train_step):
# Loop the 2-dim matrix
for i in xrange(N):
for j in xrange(M):
# Ignore the missing data
if R[i][j] > 0:
# Compute the (r - r'), r' = Pi * Qj
eij = R[i][j] - numpy.dot(P[i, :], Q[:, j])
# Update P and Q with gradient
for k in xrange(K):
# Refer to https://mp.weixin.qq.com/s/CD6TrQeKOkGZkbd7Zklaqg , grad = -2 * (r - r') * Q
P[i][k] = P[i][k] + learning_rate * (
2 * eij * Q[k][j] - regular_beta * P[i][k])
Q[k][j] = Q[k][j] + learning_rate * (
2 * eij * P[i][k] - regular_beta * Q[k][j])
return P, Q
if __name__ == "__main__":
R = [
[5, 3, 0, 1],
[4, 0, 0, 1],
[1, 1, 0, 5],
[1, 0, 0, 4],
[0, 1, 5, 4],
]
R = numpy.array(R)
print("Orgin R: {}".format(R))
N = len(R)
M = len(R[0])
K = 2
P = numpy.random.rand(N, K)
Q = numpy.random.rand(K, M)
new_P, new_Q = matrix_factorization(R, P, Q, K)
new_R = numpy.dot(new_P, new_Q)
print("P: {}\nQ: {}\nR: {}".format(new_P, new_Q, new_R))
text:
Add implementation of matrix factorization#!/usr/bin/python
import numpy
def matrix_factorization(R,
P,
Q,
K,
train_step=1000,
learning_rate=0.01,
regular_beta=0.01):
"""
Args:
R: the objetive [N, M] matrix, P * Q => R
P: the learned [N, K] matrix
Q: the learned [K, M] matrix
K: the internal feature number
train_step: the step number to train
learning_rate: the learning rate
regular_beta: the regularization beta
Return:
P: the trained P
Q: the trained Q
"""
N = len(R)
M = len(R[0])
# Train with specified steps
for step in xrange(train_step):
# Loop the 2-dim matrix
for i in xrange(N):
for j in xrange(M):
# Ignore the missing data
if R[i][j] > 0:
# Compute the (r - r'), r' = Pi * Qj
eij = R[i][j] - numpy.dot(P[i, :], Q[:, j])
# Update P and Q with gradient
for k in xrange(K):
# Refer to https://mp.weixin.qq.com/s/CD6TrQeKOkGZkbd7Zklaqg , grad = -2 * (r - r') * Q
P[i][k] = P[i][k] + learning_rate * (
2 * eij * Q[k][j] - regular_beta * P[i][k])
Q[k][j] = Q[k][j] + learning_rate * (
2 * eij * P[i][k] - regular_beta * Q[k][j])
return P, Q
if __name__ == "__main__":
R = [
[5, 3, 0, 1],
[4, 0, 0, 1],
[1, 1, 0, 5],
[1, 0, 0, 4],
[0, 1, 5, 4],
]
R = numpy.array(R)
print("Orgin R: {}".format(R))
N = len(R)
M = len(R[0])
K = 2
P = numpy.random.rand(N, K)
Q = numpy.random.rand(K, M)
new_P, new_Q = matrix_factorization(R, P, Q, K)
new_R = numpy.dot(new_P, new_Q)
print("P: {}\nQ: {}\nR: {}".format(new_P, new_Q, new_R))
text_tagged:
<commit_before><commit_msg>Add implementation of matrix factorization<commit_after>#!/usr/bin/python
import numpy
def matrix_factorization(R,
P,
Q,
K,
train_step=1000,
learning_rate=0.01,
regular_beta=0.01):
"""
Args:
R: the objetive [N, M] matrix, P * Q => R
P: the learned [N, K] matrix
Q: the learned [K, M] matrix
K: the internal feature number
train_step: the step number to train
learning_rate: the learning rate
regular_beta: the regularization beta
Return:
P: the trained P
Q: the trained Q
"""
N = len(R)
M = len(R[0])
# Train with specified steps
for step in xrange(train_step):
# Loop the 2-dim matrix
for i in xrange(N):
for j in xrange(M):
# Ignore the missing data
if R[i][j] > 0:
# Compute the (r - r'), r' = Pi * Qj
eij = R[i][j] - numpy.dot(P[i, :], Q[:, j])
# Update P and Q with gradient
for k in xrange(K):
# Refer to https://mp.weixin.qq.com/s/CD6TrQeKOkGZkbd7Zklaqg , grad = -2 * (r - r') * Q
P[i][k] = P[i][k] + learning_rate * (
2 * eij * Q[k][j] - regular_beta * P[i][k])
Q[k][j] = Q[k][j] + learning_rate * (
2 * eij * P[i][k] - regular_beta * Q[k][j])
return P, Q
if __name__ == "__main__":
R = [
[5, 3, 0, 1],
[4, 0, 0, 1],
[1, 1, 0, 5],
[1, 0, 0, 4],
[0, 1, 5, 4],
]
R = numpy.array(R)
print("Orgin R: {}".format(R))
N = len(R)
M = len(R[0])
K = 2
P = numpy.random.rand(N, K)
Q = numpy.random.rand(K, M)
new_P, new_Q = matrix_factorization(R, P, Q, K)
new_R = numpy.dot(new_P, new_Q)
print("P: {}\nQ: {}\nR: {}".format(new_P, new_Q, new_R))
|
|
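The record above trains the factorization with per-entry SGD updates. A quick way to sanity-check a trained factorization is to measure reconstruction error on the observed (non-zero) entries only, mirroring the masking in the training loop. A minimal sketch, assuming NumPy and Python 3; the helper name masked_rmse is illustrative and not part of the commit:
import numpy as np
def masked_rmse(R, P, Q):
    """RMSE of P @ Q against R, counting only the observed (non-zero) entries."""
    R = np.asarray(R, dtype=float)
    approx = P @ Q              # full reconstruction
    mask = R > 0                # the training loop above also skips zero entries
    err = (R - approx)[mask]
    return float(np.sqrt(np.mean(err ** 2)))
# Usage with random factors of matching shapes:
R = np.array([[5, 3, 0, 1], [4, 0, 0, 1], [1, 1, 0, 5]])
P = np.random.rand(3, 2)
Q = np.random.rand(2, 4)
print(masked_rmse(R, P, Q))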
1d086e026e5f480c8205951dfe714cceaad929c7
|
config/trace_pox_l2_consistency.py
|
config/trace_pox_l2_consistency.py
|
from config.experiment_config_lib import ControllerConfig
from sts.topology import BufferedPatchPanel
from sts.topology import ConsistencyTopology
from sts.controller_manager import UserSpaceControllerPatchPanel
from sts.control_flow.fuzzer import Fuzzer
from sts.input_traces.input_logger import InputLogger
from sts.simulation_state import SimulationConfig
from sts.happensbefore.hb_logger import HappensBeforeLogger
consistent = False
# Use POX as our controller
start_cmd = ('''./pox.py --verbose '''
''' forwarding.consistency --consistent=%s --deny=False '''
''' --update_wait=10 --update_once=True --consistent_sleep=5 '''
''' openflow.of_01 --address=__address__ --port=__port__ ''' % consistent)
controllers = [ControllerConfig(start_cmd, cwd="pox/")]
steps = 200
topology_class = ConsistencyTopology
topology_params = ""
# Where should the output files be written to
results_dir = "traces/trace_pox_hb_%s-%s-steps%d" % (topology_class.__name__, consistent, steps)
apps = None
# include all defaults
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params,
patch_panel_class=BufferedPatchPanel,
controller_patch_panel_class=UserSpaceControllerPatchPanel,
dataplane_trace=None,
snapshot_service=None,
multiplex_sockets=False,
violation_persistence_threshold=None,
kill_controllers_on_exit=True,
interpose_on_controllers=False,
ignore_interposition=False,
hb_logger_class=HappensBeforeLogger,
hb_logger_params=results_dir,
apps=apps)
# Manual, interactive mode
# control_flow = Interactive(simulation_config, input_logger=InputLogger())
control_flow = Fuzzer(simulation_config,
input_logger=InputLogger(),
initialization_rounds=20,
send_all_to_all=False,
check_interval=10,
delay=0.1,
halt_on_violation=True,
steps=steps,
send_init_packets=False,
# invariant_check_name="check_everything",
invariant_check_name="InvariantChecker.check_liveness",
apps=apps)
|
Add the consistency config file
|
Add the consistency config file
|
Python
|
apache-2.0
|
jmiserez/sts,jmiserez/sts
|
Add the consistency config file
|
from config.experiment_config_lib import ControllerConfig
from sts.topology import BufferedPatchPanel
from sts.topology import ConsistencyTopology
from sts.controller_manager import UserSpaceControllerPatchPanel
from sts.control_flow.fuzzer import Fuzzer
from sts.input_traces.input_logger import InputLogger
from sts.simulation_state import SimulationConfig
from sts.happensbefore.hb_logger import HappensBeforeLogger
consistent = False
# Use POX as our controller
start_cmd = ('''./pox.py --verbose '''
''' forwarding.consistency --consistent=%s --deny=False '''
''' --update_wait=10 --update_once=True --consistent_sleep=5 '''
''' openflow.of_01 --address=__address__ --port=__port__ ''' % consistent)
controllers = [ControllerConfig(start_cmd, cwd="pox/")]
steps = 200
topology_class = ConsistencyTopology
topology_params = ""
# Where should the output files be written to
results_dir = "traces/trace_pox_hb_%s-%s-steps%d" % (topology_class.__name__, consistent, steps)
apps = None
# include all defaults
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params,
patch_panel_class=BufferedPatchPanel,
controller_patch_panel_class=UserSpaceControllerPatchPanel,
dataplane_trace=None,
snapshot_service=None,
multiplex_sockets=False,
violation_persistence_threshold=None,
kill_controllers_on_exit=True,
interpose_on_controllers=False,
ignore_interposition=False,
hb_logger_class=HappensBeforeLogger,
hb_logger_params=results_dir,
apps=apps)
# Manual, interactive mode
# control_flow = Interactive(simulation_config, input_logger=InputLogger())
control_flow = Fuzzer(simulation_config,
input_logger=InputLogger(),
initialization_rounds=20,
send_all_to_all=False,
check_interval=10,
delay=0.1,
halt_on_violation=True,
steps=steps,
send_init_packets=False,
# invariant_check_name="check_everything",
invariant_check_name="InvariantChecker.check_liveness",
apps=apps)
|
<commit_before><commit_msg>Add the consistency config file<commit_after>
|
from config.experiment_config_lib import ControllerConfig
from sts.topology import BufferedPatchPanel
from sts.topology import ConsistencyTopology
from sts.controller_manager import UserSpaceControllerPatchPanel
from sts.control_flow.fuzzer import Fuzzer
from sts.input_traces.input_logger import InputLogger
from sts.simulation_state import SimulationConfig
from sts.happensbefore.hb_logger import HappensBeforeLogger
consistent = False
# Use POX as our controller
start_cmd = ('''./pox.py --verbose '''
''' forwarding.consistency --consistent=%s --deny=False '''
''' --update_wait=10 --update_once=True --consistent_sleep=5 '''
''' openflow.of_01 --address=__address__ --port=__port__ ''' % consistent)
controllers = [ControllerConfig(start_cmd, cwd="pox/")]
steps = 200
topology_class = ConsistencyTopology
topology_params = ""
# Where should the output files be written to
results_dir = "traces/trace_pox_hb_%s-%s-steps%d" % (topology_class.__name__, consistent, steps)
apps = None
# include all defaults
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params,
patch_panel_class=BufferedPatchPanel,
controller_patch_panel_class=UserSpaceControllerPatchPanel,
dataplane_trace=None,
snapshot_service=None,
multiplex_sockets=False,
violation_persistence_threshold=None,
kill_controllers_on_exit=True,
interpose_on_controllers=False,
ignore_interposition=False,
hb_logger_class=HappensBeforeLogger,
hb_logger_params=results_dir,
apps=apps)
# Manual, interactive mode
# control_flow = Interactive(simulation_config, input_logger=InputLogger())
control_flow = Fuzzer(simulation_config,
input_logger=InputLogger(),
initialization_rounds=20,
send_all_to_all=False,
check_interval=10,
delay=0.1,
halt_on_violation=True,
steps=steps,
send_init_packets=False,
# invariant_check_name="check_everything",
invariant_check_name="InvariantChecker.check_liveness",
apps=apps)
|
Add the consistency config filefrom config.experiment_config_lib import ControllerConfig
from sts.topology import BufferedPatchPanel
from sts.topology import ConsistencyTopology
from sts.controller_manager import UserSpaceControllerPatchPanel
from sts.control_flow.fuzzer import Fuzzer
from sts.input_traces.input_logger import InputLogger
from sts.simulation_state import SimulationConfig
from sts.happensbefore.hb_logger import HappensBeforeLogger
consistent = False
# Use POX as our controller
start_cmd = ('''./pox.py --verbose '''
''' forwarding.consistency --consistent=%s --deny=False '''
''' --update_wait=10 --update_once=True --consistent_sleep=5 '''
''' openflow.of_01 --address=__address__ --port=__port__ ''' % consistent)
controllers = [ControllerConfig(start_cmd, cwd="pox/")]
steps = 200
topology_class = ConsistencyTopology
topology_params = ""
# Where should the output files be written to
results_dir = "traces/trace_pox_hb_%s-%s-steps%d" % (topology_class.__name__, consistent, steps)
apps = None
# include all defaults
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params,
patch_panel_class=BufferedPatchPanel,
controller_patch_panel_class=UserSpaceControllerPatchPanel,
dataplane_trace=None,
snapshot_service=None,
multiplex_sockets=False,
violation_persistence_threshold=None,
kill_controllers_on_exit=True,
interpose_on_controllers=False,
ignore_interposition=False,
hb_logger_class=HappensBeforeLogger,
hb_logger_params=results_dir,
apps=apps)
# Manual, interactive mode
# control_flow = Interactive(simulation_config, input_logger=InputLogger())
control_flow = Fuzzer(simulation_config,
input_logger=InputLogger(),
initialization_rounds=20,
send_all_to_all=False,
check_interval=10,
delay=0.1,
halt_on_violation=True,
steps=steps,
send_init_packets=False,
# invariant_check_name="check_everything",
invariant_check_name="InvariantChecker.check_liveness",
apps=apps)
|
<commit_before><commit_msg>Add the consistency config file<commit_after>from config.experiment_config_lib import ControllerConfig
from sts.topology import BufferedPatchPanel
from sts.topology import ConsistencyTopology
from sts.controller_manager import UserSpaceControllerPatchPanel
from sts.control_flow.fuzzer import Fuzzer
from sts.input_traces.input_logger import InputLogger
from sts.simulation_state import SimulationConfig
from sts.happensbefore.hb_logger import HappensBeforeLogger
consistent = False
# Use POX as our controller
start_cmd = ('''./pox.py --verbose '''
''' forwarding.consistency --consistent=%s --deny=False '''
''' --update_wait=10 --update_once=True --consistent_sleep=5 '''
''' openflow.of_01 --address=__address__ --port=__port__ ''' % consistent)
controllers = [ControllerConfig(start_cmd, cwd="pox/")]
steps = 200
topology_class = ConsistencyTopology
topology_params = ""
# Where should the output files be written to
results_dir = "traces/trace_pox_hb_%s-%s-steps%d" % (topology_class.__name__, consistent, steps)
apps = None
# include all defaults
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params,
patch_panel_class=BufferedPatchPanel,
controller_patch_panel_class=UserSpaceControllerPatchPanel,
dataplane_trace=None,
snapshot_service=None,
multiplex_sockets=False,
violation_persistence_threshold=None,
kill_controllers_on_exit=True,
interpose_on_controllers=False,
ignore_interposition=False,
hb_logger_class=HappensBeforeLogger,
hb_logger_params=results_dir,
apps=apps)
# Manual, interactive mode
# control_flow = Interactive(simulation_config, input_logger=InputLogger())
control_flow = Fuzzer(simulation_config,
input_logger=InputLogger(),
initialization_rounds=20,
send_all_to_all=False,
check_interval=10,
delay=0.1,
halt_on_violation=True,
steps=steps,
send_init_packets=False,
# invariant_check_name="check_everything",
invariant_check_name="InvariantChecker.check_liveness",
apps=apps)
|
|
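The config above hard-codes consistent = False, so producing the consistent-update trace means editing the file. One low-risk variation is to read the flag from the environment so both traces can be generated from the same config; a sketch using only the standard library (the CONSISTENT variable name is an assumption, not part of the commit):
import os
# Hypothetical toggle: CONSISTENT=1 generates the consistent-update trace.
consistent = os.environ.get('CONSISTENT', '0') == '1'
start_cmd = ('''./pox.py --verbose '''
             ''' forwarding.consistency --consistent=%s --deny=False '''
             ''' --update_wait=10 --update_once=True --consistent_sleep=5 '''
             ''' openflow.of_01 --address=__address__ --port=__port__ ''' % consistent)
print(start_cmd)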
6f6bfbd7d627b518be830567a36b89a0859b4974
|
astropy_helpers/test_helpers.py
|
astropy_helpers/test_helpers.py
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import warnings
from .commands.test import AstropyTest
# Leaving this module here for now, but really it needn't exist
# (and it's doubtful that any code depends on it anymore)
warnings.warn('The astropy_helpers.test_helpers module is deprecated as '
'of version 1.0.4; the AstropyTest command can be found in '
'astropy_helpers.commands.test.', DeprecationWarning)
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import warnings
from .commands.test import AstropyTest
# Leaving this module here for now, but really it needn't exist
# (and it's doubtful that any code depends on it anymore)
warnings.warn('The astropy_helpers.test_helpers module is deprecated as '
'of version 1.1.0; the AstropyTest command can be found in '
'astropy_helpers.commands.test.', DeprecationWarning)
|
Change the warning message, since this will only affect v1.1 and up
|
Change the warning message, since this will only affect v1.1 and up [skip ci]
|
Python
|
bsd-3-clause
|
astropy/astropy-helpers,bsipocz/astropy-helpers,dpshelio/astropy-helpers,Cadair/astropy-helpers,embray/astropy_helpers,dpshelio/astropy-helpers,bsipocz/astropy-helpers,larrybradley/astropy-helpers,astropy/astropy-helpers,bsipocz/astropy-helpers,larrybradley/astropy-helpers,embray/astropy_helpers,embray/astropy_helpers,larrybradley/astropy-helpers,embray/astropy_helpers,Cadair/astropy-helpers
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import warnings
from .commands.test import AstropyTest
# Leaving this module here for now, but really it needn't exist
# (and it's doubtful that any code depends on it anymore)
warnings.warn('The astropy_helpers.test_helpers module is deprecated as '
'of version 1.0.4; the AstropyTest command can be found in '
'astropy_helpers.commands.test.', DeprecationWarning)
Change the warning message, since this will only affect v1.1 and up [skip ci]
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import warnings
from .commands.test import AstropyTest
# Leaving this module here for now, but really it needn't exist
# (and it's doubtful that any code depends on it anymore)
warnings.warn('The astropy_helpers.test_helpers module is deprecated as '
'of version 1.1.0; the AstropyTest command can be found in '
'astropy_helpers.commands.test.', DeprecationWarning)
|
<commit_before>from __future__ import (absolute_import, division, print_function,
unicode_literals)
import warnings
from .commands.test import AstropyTest
# Leaving this module here for now, but really it needn't exist
# (and it's doubtful that any code depends on it anymore)
warnings.warn('The astropy_helpers.test_helpers module is deprecated as '
'of version 1.0.4; the AstropyTest command can be found in '
'astropy_helpers.commands.test.', DeprecationWarning)
<commit_msg>Change the warning message, since this will only affect v1.1 and up [skip ci]<commit_after>
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import warnings
from .commands.test import AstropyTest
# Leaving this module here for now, but really it needn't exist
# (and it's doubtful that any code depends on it anymore)
warnings.warn('The astropy_helpers.test_helpers module is deprecated as '
'of version 1.1.0; the AstropyTest command can be found in '
'astropy_helpers.commands.test.', DeprecationWarning)
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import warnings
from .commands.test import AstropyTest
# Leaving this module here for now, but really it needn't exist
# (and it's doubtful that any code depends on it anymore)
warnings.warn('The astropy_helpers.test_helpers module is deprecated as '
'of version 1.0.4; the AstropyTest command can be found in '
'astropy_helpers.commands.test.', DeprecationWarning)
Change the warning message, since this will only affect v1.1 and up [skip ci]from __future__ import (absolute_import, division, print_function,
unicode_literals)
import warnings
from .commands.test import AstropyTest
# Leaving this module here for now, but really it needn't exist
# (and it's doubtful that any code depends on it anymore)
warnings.warn('The astropy_helpers.test_helpers module is deprecated as '
'of version 1.1.0; the AstropyTest command can be found in '
'astropy_helpers.commands.test.', DeprecationWarning)
|
<commit_before>from __future__ import (absolute_import, division, print_function,
unicode_literals)
import warnings
from .commands.test import AstropyTest
# Leaving this module here for now, but really it needn't exist
# (and it's doubtful that any code depends on it anymore)
warnings.warn('The astropy_helpers.test_helpers module is deprecated as '
'of version 1.0.4; the AstropyTest command can be found in '
'astropy_helpers.commands.test.', DeprecationWarning)
<commit_msg>Change the warning message, since this will only affect v1.1 and up [skip ci]<commit_after>from __future__ import (absolute_import, division, print_function,
unicode_literals)
import warnings
from .commands.test import AstropyTest
# Leaving this module here for now, but really it needn't exist
# (and it's doubtful that any code depends on it anymore)
warnings.warn('The astropy_helpers.test_helpers module is deprecated as '
'of version 1.1.0; the AstropyTest command can be found in '
'astropy_helpers.commands.test.', DeprecationWarning)
|
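The commit above illustrates a common way to retire a module: keep re-exporting the real implementation and emit a DeprecationWarning when the shim is imported. A stripped-down sketch of the pattern (legacy_helpers and new_helpers are placeholder names, not astropy_helpers API):
import warnings
# Emit the warning at import time of the legacy module.  DeprecationWarning is
# hidden by default outside __main__, so run with `python -W default` to see it.
warnings.warn(
    "the legacy_helpers module is deprecated; import new_helpers instead.",
    DeprecationWarning,
)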
f8342d9bb3cfecacf0d1d0ab24fdba45459afae8
|
towers_of_hanoi/python/towers_of_hanoi.py
|
towers_of_hanoi/python/towers_of_hanoi.py
|
def move( n, src, dest, temp ):
if n >= 1 :
move( n - 1, src, temp, dest )
print( "Moving %d -> %d" % (src, dest))
move( n - 1, temp, dest, src )
def main():
move(3,1,3,2)
if __name__ == "__main__":
main()
|
Add python implementation of toh
|
Add python implementation of toh
|
Python
|
cc0-1.0
|
EUNIX-TRIX/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,arijitkar98/al-go-rithms,manikTharaka/al-go-rithms,EUNIX-TRIX/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,EUNIX-TRIX/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,EUNIX-TRIX/al-go-rithms,EUNIX-TRIX/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,Cnidarias/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,EUNIX-TRIX/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,arijitkar98/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,EUNIX-TRIX/al-go-rithms,EUNIX-TRIX/al-go-rithms,EUNIX-TRIX/al-go-rithms,Cnidarias/al-go-rithms,arijitkar98/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,manikTharaka/al-go-rithms,arijitkar98/al-go-rithms,Cnidarias/al-go-rithms,EUNIX-TRIX/al-go-rithms,ZoranPandovski/al-go-rithms,arijitkar98/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,Cnidarias/al-go-rithms,Deepak345/al-go-rithms,arijitkar98/al-go-rithms,Deepak345/al-go-rithms,arijitkar98/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,arijitkar98/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,arijitkar98/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,EUNIX-TRIX/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,EUNIX-TRIX/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,EUNIX-TRIX/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,arijitkar98/al-go-rithms
|
Add python implementation of toh
|
def move( n, src, dest, temp ):
if n >= 1 :
move( n - 1, src, temp, dest )
print( "Moving %d -> %d" % (src, dest))
move( n - 1, temp, dest, src )
def main():
move(3,1,3,2)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add python implementation of toh<commit_after>
|
def move( n, src, dest, temp ):
if n >= 1 :
move( n - 1, src, temp, dest )
print( "Moving %d -> %d" % (src, dest))
move( n - 1, temp, dest, src )
def main():
move(3,1,3,2)
if __name__ == "__main__":
main()
|
Add python implementation of tohdef move( n, src, dest, temp ):
if n >= 1 :
move( n - 1, src, temp, dest )
print( "Moving %d -> %d" % (src, dest))
move( n - 1, temp, dest, src )
def main():
move(3,1,3,2)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add python implementation of toh<commit_after>def move( n, src, dest, temp ):
if n >= 1 :
move( n - 1, src, temp, dest )
print( "Moving %d -> %d" % (src, dest))
move( n - 1, temp, dest, src )
def main():
move(3,1,3,2)
if __name__ == "__main__":
main()
|
|
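The recursion in the commit above prints moves as a side effect. Returning the moves instead makes the solution easy to test, for example by checking the minimal move count of 2**n - 1. A small sketch in the same spirit (hanoi_moves is an illustrative name, not part of the commit):
def hanoi_moves(n, src=1, dest=3, temp=2):
    """Return the list of (src, dest) moves that solves n disks."""
    if n == 0:
        return []
    return (hanoi_moves(n - 1, src, temp, dest)
            + [(src, dest)]
            + hanoi_moves(n - 1, temp, dest, src))
moves = hanoi_moves(3)
print(moves)
assert len(moves) == 2 ** 3 - 1  # the minimal solution uses 2**n - 1 moves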
53e9bf58bebf8b440f76558575d656e41cbaf4f9
|
python/ecep/portal/management/commands/load_neighborhoods.py
|
python/ecep/portal/management/commands/load_neighborhoods.py
|
# Copyright (c) 2013 Azavea, Inc.
# See LICENSE in the project root for copying permission
from django.core.management.base import BaseCommand
from django.contrib.gis.utils import LayerMapping
from django.db import IntegrityError
from portal.models import Neighborhood
class Command(BaseCommand):
"""
Import shapefile of neighborhoods into database
"""
args = '<none>'
help = """This management command will load the neighborhood shapefile in the ecep/data/ directory
of this project using the portal.Neighborhood models of this django application"""
def handle(self, *args, **options):
"""
Load neighborhood shapefile using LayerMapping; automatically checks projection,
        if necessary transforms to WGS 1984
"""
neighborhood_mapping = {
'boundary': 'MULTIPOLYGON',
'primary_name': 'PRI_NEIGH',
'secondary_name': 'SEC_NEIGH',
}
path_to_shp = 'data/chicago_neighborhoods/Neighborhoods_2012b.shp'
lm = LayerMapping(Neighborhood, path_to_shp, neighborhood_mapping)
self.check_neighborhood_table()
lm.save(strict=True)
self.stdout.write('Successfully loaded %s neighborhoods from %s layer(s)\n'
% (len(lm.ds[0]), lm.ds.layer_count))
def check_neighborhood_table(self):
"""Checks whether or not neighborhoods are already loaded, raises an error if
        the neighborhood table already has data. This prevents the user from loading
        duplicate copies of the neighborhood data.
"""
n_count = Neighborhood.objects.filter().count()
if n_count > 0:
raise IntegrityError('Neighborhood table already has data in it; please remove this data to proceed')
|
Add function to load neighborhood data
|
Add function to load neighborhood data
|
Python
|
mit
|
smartchicago/chicago-early-learning,smartchicago/chicago-early-learning,smartchicago/chicago-early-learning,smartchicago/chicago-early-learning
|
Add function to load neighborhood data
|
# Copyright (c) 2013 Azavea, Inc.
# See LICENSE in the project root for copying permission
from django.core.management.base import BaseCommand
from django.contrib.gis.utils import LayerMapping
from django.db import IntegrityError
from portal.models import Neighborhood
class Command(BaseCommand):
"""
Import shapefile of neighborhoods into database
"""
args = '<none>'
help = """This management command will load the neighborhood shapefile in the ecep/data/ directory
of this project using the portal.Neighborhood models of this django application"""
def handle(self, *args, **options):
"""
Load neighborhood shapefile using LayerMapping; automatically checks projection,
        if necessary transforms to WGS 1984
"""
neighborhood_mapping = {
'boundary': 'MULTIPOLYGON',
'primary_name': 'PRI_NEIGH',
'secondary_name': 'SEC_NEIGH',
}
path_to_shp = 'data/chicago_neighborhoods/Neighborhoods_2012b.shp'
lm = LayerMapping(Neighborhood, path_to_shp, neighborhood_mapping)
self.check_neighborhood_table()
lm.save(strict=True)
self.stdout.write('Successfully loaded %s neighborhoods from %s layer(s)\n'
% (len(lm.ds[0]), lm.ds.layer_count))
def check_neighborhood_table(self):
"""Checks whether or not neighborhoods are already loaded, raises an error if
        the neighborhood table already has data. This prevents the user from loading
        duplicate copies of the neighborhood data.
"""
n_count = Neighborhood.objects.filter().count()
if n_count > 0:
raise IntegrityError('Neighborhood table already has data in it; please remove this data to proceed')
|
<commit_before><commit_msg>Add function to load neighborhood data<commit_after>
|
# Copyright (c) 2013 Azavea, Inc.
# See LICENSE in the project root for copying permission
from django.core.management.base import BaseCommand
from django.contrib.gis.utils import LayerMapping
from django.db import IntegrityError
from portal.models import Neighborhood
class Command(BaseCommand):
"""
Import shapefile of neighborhoods into database
"""
args = '<none>'
help = """This management command will load the neighborhood shapefile in the ecep/data/ directory
of this project using the portal.Neighborhood models of this django application"""
def handle(self, *args, **options):
"""
Load neighborhood shapefile using LayerMapping; automatically checks projection,
        if necessary transforms to WGS 1984
"""
neighborhood_mapping = {
'boundary': 'MULTIPOLYGON',
'primary_name': 'PRI_NEIGH',
'secondary_name': 'SEC_NEIGH',
}
path_to_shp = 'data/chicago_neighborhoods/Neighborhoods_2012b.shp'
lm = LayerMapping(Neighborhood, path_to_shp, neighborhood_mapping)
self.check_neighborhood_table()
lm.save(strict=True)
self.stdout.write('Successfully loaded %s neighborhoods from %s layer(s)\n'
% (len(lm.ds[0]), lm.ds.layer_count))
def check_neighborhood_table(self):
"""Checks whether or not neighborhoods are already loaded, raises an error if
        the neighborhood table already has data. This prevents the user from loading
        duplicate copies of the neighborhood data.
"""
n_count = Neighborhood.objects.filter().count()
if n_count > 0:
raise IntegrityError('Neighborhood table already has data in it; please remove this data to proceed')
|
Add function to load neighborhood data# Copyright (c) 2013 Azavea, Inc.
# See LICENSE in the project root for copying permission
from django.core.management.base import BaseCommand
from django.contrib.gis.utils import LayerMapping
from django.db import IntegrityError
from portal.models import Neighborhood
class Command(BaseCommand):
"""
Import shapefile of neighborhoods into database
"""
args = '<none>'
help = """This management command will load the neighborhood shapefile in the ecep/data/ directory
of this project using the portal.Neighborhood models of this django application"""
def handle(self, *args, **options):
"""
Load neighborhood shapefile using LayerMapping; automatically checks projection,
        if necessary transforms to WGS 1984
"""
neighborhood_mapping = {
'boundary': 'MULTIPOLYGON',
'primary_name': 'PRI_NEIGH',
'secondary_name': 'SEC_NEIGH',
}
path_to_shp = 'data/chicago_neighborhoods/Neighborhoods_2012b.shp'
lm = LayerMapping(Neighborhood, path_to_shp, neighborhood_mapping)
self.check_neighborhood_table()
lm.save(strict=True)
self.stdout.write('Successfully loaded %s neighborhoods from %s layer(s)\n'
% (len(lm.ds[0]), lm.ds.layer_count))
def check_neighborhood_table(self):
"""Checks whether or not neighborhoods are already loaded, raises an error if
        the neighborhood table already has data. This prevents the user from loading
        duplicate copies of the neighborhood data.
"""
n_count = Neighborhood.objects.filter().count()
if n_count > 0:
raise IntegrityError('Neighborhood table already has data in it; please remove this data to proceed')
|
<commit_before><commit_msg>Add function to load neighborhood data<commit_after># Copyright (c) 2013 Azavea, Inc.
# See LICENSE in the project root for copying permission
from django.core.management.base import BaseCommand
from django.contrib.gis.utils import LayerMapping
from django.db import IntegrityError
from portal.models import Neighborhood
class Command(BaseCommand):
"""
Import shapefile of neighborhoods into database
"""
args = '<none>'
help = """This management command will load the neighborhood shapefile in the ecep/data/ directory
of this project using the portal.Neighborhood models of this django application"""
def handle(self, *args, **options):
"""
Load neighborhood shapefile using LayerMapping; automatically checks projection,
        if necessary transforms to WGS 1984
"""
neighborhood_mapping = {
'boundary': 'MULTIPOLYGON',
'primary_name': 'PRI_NEIGH',
'secondary_name': 'SEC_NEIGH',
}
path_to_shp = 'data/chicago_neighborhoods/Neighborhoods_2012b.shp'
lm = LayerMapping(Neighborhood, path_to_shp, neighborhood_mapping)
self.check_neighborhood_table()
lm.save(strict=True)
self.stdout.write('Successfully loaded %s neighborhoods from %s layer(s)\n'
% (len(lm.ds[0]), lm.ds.layer_count))
def check_neighborhood_table(self):
"""Checks whether or not neighborhoods are already loaded, raises an error if
        the neighborhood table already has data. This prevents the user from loading
        duplicate copies of the neighborhood data.
"""
n_count = Neighborhood.objects.filter().count()
if n_count > 0:
raise IntegrityError('Neighborhood table already has data in it; please remove this data to proceed')
|
|
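The management command above guards against double-loading by counting rows; Model.objects.exists() expresses the same guard without a full COUNT and generalises to any model. A small sketch of that variant (check_table_empty is an illustrative helper, not part of the commit):
from django.db import IntegrityError
def check_table_empty(model):
    """Raise if the target table already has rows (exists() stops at the first match)."""
    if model.objects.exists():
        raise IntegrityError('%s already has data; remove it before loading.' % model.__name__)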
d3188c550c486c4219cf0cc0e38f6696f8b340af
|
pratica-01/randlist.py
|
pratica-01/randlist.py
|
# encoding:utf-8
import random
def main():
rl = RandList(500, 1000)
rl.gen_random_list()
rl.print_block_list()
print "\n\nPronto!"
class RandList(object):
def __init__(self, first, second):
self.first = first
self.second = second
self.populacao = []
def gen_random_list(self):
self.populacao = range(self.first, self.second)
del self.populacao[28]
random.shuffle(self.populacao)
def print_block_list(self):
print "[",
for i in range(15):
for j in range(15):
print "%3d," % self.populacao[i*7+j],
print "]"
if __name__ == "__main__":
main()
|
Add RandList.py class to generate a shuffled deck
|
Add RandList.py class to generate a shuffled deck
|
Python
|
mit
|
tonussi/freezing-dubstep,tonussi/freezing-dubstep,tonussi/freezing-dubstep
|
Add RandList.py class to generate a shuffled deck
|
# encoding:utf-8
import random
def main():
rl = RandList(500, 1000)
rl.gen_random_list()
rl.print_block_list()
print "\n\nPronto!"
class RandList(object):
def __init__(self, first, second):
self.first = first
self.second = second
self.populacao = []
def gen_random_list(self):
self.populacao = range(self.first, self.second)
del self.populacao[28]
random.shuffle(self.populacao)
def print_block_list(self):
print "[",
for i in range(15):
for j in range(15):
print "%3d," % self.populacao[i*7+j],
print "]"
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add RandList.py class to generate a shuffled deck<commit_after>
|
# encoding:utf-8
import random
def main():
rl = RandList(500, 1000)
rl.gen_random_list()
rl.print_block_list()
print "\n\nPronto!"
class RandList(object):
def __init__(self, first, second):
self.first = first
self.second = second
self.populacao = []
def gen_random_list(self):
self.populacao = range(self.first, self.second)
del self.populacao[28]
random.shuffle(self.populacao)
def print_block_list(self):
print "[",
for i in range(15):
for j in range(15):
print "%3d," % self.populacao[i*7+j],
print "]"
if __name__ == "__main__":
main()
|
Add RandList.py class to generate a shuffled deck# encoding:utf-8
import random
def main():
rl = RandList(500, 1000)
rl.gen_random_list()
rl.print_block_list()
print "\n\nPronto!"
class RandList(object):
def __init__(self, first, second):
self.first = first
self.second = second
self.populacao = []
def gen_random_list(self):
self.populacao = range(self.first, self.second)
del self.populacao[28]
random.shuffle(self.populacao)
def print_block_list(self):
print "[",
for i in range(15):
for j in range(15):
print "%3d," % self.populacao[i*7+j],
print "]"
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add RandList.py class to generate a shuffled deck<commit_after># encoding:utf-8
import random
def main():
rl = RandList(500, 1000)
rl.gen_random_list()
rl.print_block_list()
print "\n\nPronto!"
class RandList(object):
def __init__(self, first, second):
self.first = first
self.second = second
self.populacao = []
def gen_random_list(self):
self.populacao = range(self.first, self.second)
del self.populacao[28]
random.shuffle(self.populacao)
def print_block_list(self):
print "[",
for i in range(15):
for j in range(15):
print "%3d," % self.populacao[i*7+j],
print "]"
if __name__ == "__main__":
main()
|
|
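The commit above builds a population with range() and shuffles it in place; note the code is Python 2 (print statements). In Python 3 an equivalent shuffled list can be produced in one step with random.sample, which does not mutate its input. A minimal sketch under that assumption:
import random
# Sampling the whole population is equivalent to a shuffle of range(500, 1000).
populacao = random.sample(range(500, 1000), k=500)
print(populacao[:10])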
36f2372c6b5a25d96ecc38297a05a139e6e7f6b5
|
results/migrations/0012_migrate_resultevent_data.py
|
results/migrations/0012_migrate_resultevent_data.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def migrate_remaining_fields_to_popolo_models(apps, schema_editor):
ResultEvent = apps.get_model('results', 'ResultEvent')
Organization = apps.get_model('popolo', 'Organization')
Post = apps.get_model('popolo', 'Post')
Election = apps.get_model('elections', 'Election')
for re in ResultEvent.objects.all():
# Get the post, if possible (some have been deleted), and the
# election based on the existing text-based fields:
if re.election == '2015':
election_name = '2015 General Election'
else:
election_name = re.election
post = Post.objects.filter(extra__slug=re.post_id).first()
if post:
election = post.extra.elections.filter(name=election_name).first()
if not election:
election = post.extra.elections.get(
name=election_name,
election_date__year=re.created.year,
)
else:
election = Election.objects.get(name=re.election)
re.election_new = election
re.post_new = post
# Now get the party of the winner:
re.winner_party_new = Organization.objects.get(
extra__slug=re.winner_party_id)
re.save()
class Migration(migrations.Migration):
dependencies = [
('results', '0011_resultevent_post_new'),
]
operations = [
migrations.RunPython(migrate_remaining_fields_to_popolo_models),
]
|
Add a data migration to move more ResultEvent fields to Popolo models
|
Add a data migration to move more ResultEvent fields to Popolo models
|
Python
|
agpl-3.0
|
DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative
|
Add a data migration to move more ResultEvent fields to Popolo models
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def migrate_remaining_fields_to_popolo_models(apps, schema_editor):
ResultEvent = apps.get_model('results', 'ResultEvent')
Organization = apps.get_model('popolo', 'Organization')
Post = apps.get_model('popolo', 'Post')
Election = apps.get_model('elections', 'Election')
for re in ResultEvent.objects.all():
# Get the post, if possible (some have been deleted), and the
# election based on the existing text-based fields:
if re.election == '2015':
election_name = '2015 General Election'
else:
election_name = re.election
post = Post.objects.filter(extra__slug=re.post_id).first()
if post:
election = post.extra.elections.filter(name=election_name).first()
if not election:
election = post.extra.elections.get(
name=election_name,
election_date__year=re.created.year,
)
else:
election = Election.objects.get(name=re.election)
re.election_new = election
re.post_new = post
# Now get the party of the winner:
re.winner_party_new = Organization.objects.get(
extra__slug=re.winner_party_id)
re.save()
class Migration(migrations.Migration):
dependencies = [
('results', '0011_resultevent_post_new'),
]
operations = [
migrations.RunPython(migrate_remaining_fields_to_popolo_models),
]
|
<commit_before><commit_msg>Add a data migration to move more ResultEvent fields to Popolo models<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def migrate_remaining_fields_to_popolo_models(apps, schema_editor):
ResultEvent = apps.get_model('results', 'ResultEvent')
Organization = apps.get_model('popolo', 'Organization')
Post = apps.get_model('popolo', 'Post')
Election = apps.get_model('elections', 'Election')
for re in ResultEvent.objects.all():
# Get the post, if possible (some have been deleted), and the
# election based on the existing text-based fields:
if re.election == '2015':
election_name = '2015 General Election'
else:
election_name = re.election
post = Post.objects.filter(extra__slug=re.post_id).first()
if post:
election = post.extra.elections.filter(name=election_name).first()
if not election:
election = post.extra.elections.get(
name=election_name,
election_date__year=re.created.year,
)
else:
election = Election.objects.get(name=re.election)
re.election_new = election
re.post_new = post
# Now get the party of the winner:
re.winner_party_new = Organization.objects.get(
extra__slug=re.winner_party_id)
re.save()
class Migration(migrations.Migration):
dependencies = [
('results', '0011_resultevent_post_new'),
]
operations = [
migrations.RunPython(migrate_remaining_fields_to_popolo_models),
]
|
Add a data migration to move more ResultEvent fields to Popolo models# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def migrate_remaining_fields_to_popolo_models(apps, schema_editor):
ResultEvent = apps.get_model('results', 'ResultEvent')
Organization = apps.get_model('popolo', 'Organization')
Post = apps.get_model('popolo', 'Post')
Election = apps.get_model('elections', 'Election')
for re in ResultEvent.objects.all():
# Get the post, if possible (some have been deleted), and the
# election based on the existing text-based fields:
if re.election == '2015':
election_name = '2015 General Election'
else:
election_name = re.election
post = Post.objects.filter(extra__slug=re.post_id).first()
if post:
election = post.extra.elections.filter(name=election_name).first()
if not election:
election = post.extra.elections.get(
name=election_name,
election_date__year=re.created.year,
)
else:
election = Election.objects.get(name=re.election)
re.election_new = election
re.post_new = post
# Now get the party of the winner:
re.winner_party_new = Organization.objects.get(
extra__slug=re.winner_party_id)
re.save()
class Migration(migrations.Migration):
dependencies = [
('results', '0011_resultevent_post_new'),
]
operations = [
migrations.RunPython(migrate_remaining_fields_to_popolo_models),
]
|
<commit_before><commit_msg>Add a data migration to move more ResultEvent fields to Popolo models<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def migrate_remaining_fields_to_popolo_models(apps, schema_editor):
ResultEvent = apps.get_model('results', 'ResultEvent')
Organization = apps.get_model('popolo', 'Organization')
Post = apps.get_model('popolo', 'Post')
Election = apps.get_model('elections', 'Election')
for re in ResultEvent.objects.all():
# Get the post, if possible (some have been deleted), and the
# election based on the existing text-based fields:
if re.election == '2015':
election_name = '2015 General Election'
else:
election_name = re.election
post = Post.objects.filter(extra__slug=re.post_id).first()
if post:
election = post.extra.elections.filter(name=election_name).first()
if not election:
election = post.extra.elections.get(
name=election_name,
election_date__year=re.created.year,
)
else:
election = Election.objects.get(name=re.election)
re.election_new = election
re.post_new = post
# Now get the party of the winner:
re.winner_party_new = Organization.objects.get(
extra__slug=re.winner_party_id)
re.save()
class Migration(migrations.Migration):
dependencies = [
('results', '0011_resultevent_post_new'),
]
operations = [
migrations.RunPython(migrate_remaining_fields_to_popolo_models),
]
|
|
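The data migration above only defines a forward function, so the migration cannot be unapplied past this point. Django's RunPython accepts a reverse callable; passing the built-in no-op keeps it reversible without writing real reverse logic. A sketch of that shape (the forwards body is elided; it would carry the same logic as the commit):
from django.db import migrations
def forwards(apps, schema_editor):
    ...  # populate election_new, post_new and winner_party_new as in the commit above
class Migration(migrations.Migration):
    dependencies = [('results', '0011_resultevent_post_new')]
    operations = [
        # The second argument makes the migration reversible as a no-op.
        migrations.RunPython(forwards, migrations.RunPython.noop),
    ]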
1ef3cda8915d22e6e862eab06796783af43fe0e5
|
app/timetables/migrations/0009_auto_20160919_1149.py
|
app/timetables/migrations/0009_auto_20160919_1149.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-19 11:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('timetables', '0008_auto_20160913_2203'),
]
operations = [
migrations.AddField(
model_name='course',
name='slug',
field=models.SlugField(editable=False, max_length=150, null=True, unique=True),
),
migrations.AddField(
model_name='dish',
name='slug',
field=models.SlugField(editable=False, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='meal',
name='slug',
field=models.SlugField(editable=False, max_length=60, null=True, unique=True),
),
migrations.AddField(
model_name='mealoption',
name='slug',
field=models.SlugField(editable=False, max_length=120, null=True, unique=True),
),
migrations.AddField(
model_name='timetable',
name='slug',
field=models.SlugField(editable=False, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='weekday',
name='slug',
field=models.SlugField(editable=False, max_length=60, null=True, unique=True),
),
migrations.AlterField(
model_name='weekday',
name='name',
field=models.CharField(max_length=60),
),
migrations.AlterUniqueTogether(
name='admin',
unique_together=set([('user', 'timetable')]),
),
]
|
Add migration for additional slug fields.
|
Add migration for additional slug fields.
|
Python
|
mit
|
teamtaverna/core
|
Add migration for additional slug fields.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-19 11:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('timetables', '0008_auto_20160913_2203'),
]
operations = [
migrations.AddField(
model_name='course',
name='slug',
field=models.SlugField(editable=False, max_length=150, null=True, unique=True),
),
migrations.AddField(
model_name='dish',
name='slug',
field=models.SlugField(editable=False, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='meal',
name='slug',
field=models.SlugField(editable=False, max_length=60, null=True, unique=True),
),
migrations.AddField(
model_name='mealoption',
name='slug',
field=models.SlugField(editable=False, max_length=120, null=True, unique=True),
),
migrations.AddField(
model_name='timetable',
name='slug',
field=models.SlugField(editable=False, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='weekday',
name='slug',
field=models.SlugField(editable=False, max_length=60, null=True, unique=True),
),
migrations.AlterField(
model_name='weekday',
name='name',
field=models.CharField(max_length=60),
),
migrations.AlterUniqueTogether(
name='admin',
unique_together=set([('user', 'timetable')]),
),
]
|
<commit_before><commit_msg>Add migration for additional slug fields.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-19 11:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('timetables', '0008_auto_20160913_2203'),
]
operations = [
migrations.AddField(
model_name='course',
name='slug',
field=models.SlugField(editable=False, max_length=150, null=True, unique=True),
),
migrations.AddField(
model_name='dish',
name='slug',
field=models.SlugField(editable=False, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='meal',
name='slug',
field=models.SlugField(editable=False, max_length=60, null=True, unique=True),
),
migrations.AddField(
model_name='mealoption',
name='slug',
field=models.SlugField(editable=False, max_length=120, null=True, unique=True),
),
migrations.AddField(
model_name='timetable',
name='slug',
field=models.SlugField(editable=False, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='weekday',
name='slug',
field=models.SlugField(editable=False, max_length=60, null=True, unique=True),
),
migrations.AlterField(
model_name='weekday',
name='name',
field=models.CharField(max_length=60),
),
migrations.AlterUniqueTogether(
name='admin',
unique_together=set([('user', 'timetable')]),
),
]
|
Add migration for additional slug fields.# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-19 11:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('timetables', '0008_auto_20160913_2203'),
]
operations = [
migrations.AddField(
model_name='course',
name='slug',
field=models.SlugField(editable=False, max_length=150, null=True, unique=True),
),
migrations.AddField(
model_name='dish',
name='slug',
field=models.SlugField(editable=False, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='meal',
name='slug',
field=models.SlugField(editable=False, max_length=60, null=True, unique=True),
),
migrations.AddField(
model_name='mealoption',
name='slug',
field=models.SlugField(editable=False, max_length=120, null=True, unique=True),
),
migrations.AddField(
model_name='timetable',
name='slug',
field=models.SlugField(editable=False, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='weekday',
name='slug',
field=models.SlugField(editable=False, max_length=60, null=True, unique=True),
),
migrations.AlterField(
model_name='weekday',
name='name',
field=models.CharField(max_length=60),
),
migrations.AlterUniqueTogether(
name='admin',
unique_together=set([('user', 'timetable')]),
),
]
|
<commit_before><commit_msg>Add migration for additional slug fields.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-19 11:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('timetables', '0008_auto_20160913_2203'),
]
operations = [
migrations.AddField(
model_name='course',
name='slug',
field=models.SlugField(editable=False, max_length=150, null=True, unique=True),
),
migrations.AddField(
model_name='dish',
name='slug',
field=models.SlugField(editable=False, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='meal',
name='slug',
field=models.SlugField(editable=False, max_length=60, null=True, unique=True),
),
migrations.AddField(
model_name='mealoption',
name='slug',
field=models.SlugField(editable=False, max_length=120, null=True, unique=True),
),
migrations.AddField(
model_name='timetable',
name='slug',
field=models.SlugField(editable=False, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='weekday',
name='slug',
field=models.SlugField(editable=False, max_length=60, null=True, unique=True),
),
migrations.AlterField(
model_name='weekday',
name='name',
field=models.CharField(max_length=60),
),
migrations.AlterUniqueTogether(
name='admin',
unique_together=set([('user', 'timetable')]),
),
]
|
|
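The schema migration above adds nullable, non-editable slug fields but does not populate them. A typical follow-up is a data migration that fills the slugs from an existing name field; the sketch below assumes a name field on Dish and ignores uniqueness collisions, so it is illustrative rather than a drop-in migration:
from django.db import migrations
from django.utils.text import slugify
def populate_slugs(apps, schema_editor):
    Dish = apps.get_model('timetables', 'Dish')  # repeat for the other models
    for dish in Dish.objects.filter(slug__isnull=True):
        dish.slug = slugify(dish.name)
        dish.save(update_fields=['slug'])
class Migration(migrations.Migration):
    dependencies = [('timetables', '0009_auto_20160919_1149')]
    operations = [migrations.RunPython(populate_slugs, migrations.RunPython.noop)]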
da8b184267d04ae8c95772b4cbfaef7603d4ed67
|
scripts/jenkins_console_log_search.py
|
scripts/jenkins_console_log_search.py
|
#!/usr/bin/env python3
"""
This short script uses HTTP requests to search the last 100 builds of
a jenkins job to find recurring errors, written in Python3.
It results in printing a list of links to builds that match the search
As the requests package is not included within kv, you will need to either
download this package yourself or reference the one included inside
couchbase-cli.
"""
import argparse
import json
import requests
import sys
import time
serverURL = 'http://cv.jenkins.couchbase.com/'
# Create argparser so the user can specify which job to search
argParser = argparse.ArgumentParser()
argParser.add_argument('--job', '-j', type=str,
help='The cv job to query. '
"Common jobs are: 'kv_engine-ASan-UBSan-master', "
"'kv_engine-clang_analyzer-master', "
"'kv_engine-linux-master', "
"'kv_engine-threadsanitizer-master', "
"'kv_engine-windows-master', "
"'kv_engine-clang_format', "
"'kv-engine-cv-perf'", required=True)
argParser.add_argument('--search', '-s', type=str,
help='The string to search the logs for', required=True)
argParser.add_argument('--build-no', '-b', type=int,
help='The build number of cv job to check backwards '
'from. 0 (default) fetches latest build number',
default=0)
argParser.add_argument('--no-of-builds', '-n', type=int,
help='The number of builds to check back', default=100)
args = argParser.parse_args()
job = 'job/' + args.job + '/'
consoleText = '/consoleText/'
resultURLs = []
if args.build_no == 0:
# need to fetch the latest build number
r = requests.get(serverURL + job + 'lastBuild/api/json')
j = r.json()
args.build_no = j['number']
print("Searching for:", ('"' + args.search + '"'), "in console logs of job:",
args.job, "between build", args.build_no - (args.no_of_builds - 1), "and",
args.build_no, file=sys.stderr)
start_time = time.time()
for i in range(0, args.no_of_builds):
print('\r >>> Current progress: {} '.format(str(i)), end='',
flush=True, file=sys.stderr)
r = requests.get(serverURL + job + str(args.build_no-i) + consoleText)
result = r.text.find(args.search)
if result != -1:
resultURLs.append(serverURL + job + str(args.build_no-i) + '/console/')
print('\r Completed search in', (time.time() - start_time), 's',
file=sys.stderr)
for url in resultURLs:
print(url)
|
Add utility script for searching Jenkins console logs
|
Add utility script for searching Jenkins console logs
This small python script can be used to quickly check the last
100 (or more if you're willing to edit and wait) to see if a
string is present within the console log. This can help find
instances of errors to help determine intermittent failures from
one off problems. The script requires requests which is not
included within kv itself (in couchbase-cli) so you cannot run
directly inside kv.
Change-Id: I1579b01bc3157587787249f34a2aaf3182de0a4e
Reviewed-on: http://review.couchbase.org/103291
Tested-by: Build Bot <80754af91bfb6d1073585b046fe0a474ce868509@couchbase.com>
Reviewed-by: Dave Rigby <a09264da4832c7ff1d3bf1608a19f4b870f93750@couchbase.com>
|
Python
|
bsd-3-clause
|
daverigby/kv_engine,daverigby/kv_engine,daverigby/kv_engine,daverigby/kv_engine
|
Add utility script for searching Jenkins console logs
This small python script can be used to quickly check the last
100 (or more if you're willing to edit and wait) to see if a
string is present within the console log. This can help find
instances of errors to help determine intermittent failures from
one off problems. The script requires requests which is not
included within kv itself (in couchbase-cli) so you cannot run
directly inside kv.
Change-Id: I1579b01bc3157587787249f34a2aaf3182de0a4e
Reviewed-on: http://review.couchbase.org/103291
Tested-by: Build Bot <80754af91bfb6d1073585b046fe0a474ce868509@couchbase.com>
Reviewed-by: Dave Rigby <a09264da4832c7ff1d3bf1608a19f4b870f93750@couchbase.com>
|
#!/usr/bin/env python3
"""
This short script uses HTTP requests to search the last 100 builds of
a jenkins job to find recurring errors, written in Python3.
It results in printing a list of links to builds that match the search
As the requests package is not included within kv, you will need to either
download this package yourself or reference the one included inside
couchbase-cli.
"""
import argparse
import json
import requests
import sys
import time
serverURL = 'http://cv.jenkins.couchbase.com/'
# Create argparser so the user can specify which job to search
argParser = argparse.ArgumentParser()
argParser.add_argument('--job', '-j', type=str,
help='The cv job to query. '
"Common jobs are: 'kv_engine-ASan-UBSan-master', "
"'kv_engine-clang_analyzer-master', "
"'kv_engine-linux-master', "
"'kv_engine-threadsanitizer-master', "
"'kv_engine-windows-master', "
"'kv_engine-clang_format', "
"'kv-engine-cv-perf'", required=True)
argParser.add_argument('--search', '-s', type=str,
help='The string to search the logs for', required=True)
argParser.add_argument('--build-no', '-b', type=int,
help='The build number of cv job to check backwards '
'from. 0 (default) fetches latest build number',
default=0)
argParser.add_argument('--no-of-builds', '-n', type=int,
help='The number of builds to check back', default=100)
args = argParser.parse_args()
job = 'job/' + args.job + '/'
consoleText = '/consoleText/'
resultURLs = []
if args.build_no == 0:
# need to fetch the latest build number
r = requests.get(serverURL + job + 'lastBuild/api/json')
j = r.json()
args.build_no = j['number']
print("Searching for:", ('"' + args.search + '"'), "in console logs of job:",
args.job, "between build", args.build_no - (args.no_of_builds - 1), "and",
args.build_no, file=sys.stderr)
start_time = time.time()
for i in range(0, args.no_of_builds):
print('\r >>> Current progress: {} '.format(str(i)), end='',
flush=True, file=sys.stderr)
r = requests.get(serverURL + job + str(args.build_no-i) + consoleText)
result = r.text.find(args.search)
if result != -1:
resultURLs.append(serverURL + job + str(args.build_no-i) + '/console/')
print('\r Completed search in', (time.time() - start_time), 's',
file=sys.stderr)
for url in resultURLs:
print(url)
|
<commit_before><commit_msg>Add utility script for searching Jenkins console logs
This small python script can be used to quickly check the last
100 (or more if you're willing to edit and wait) to see if a
string is present within the console log. This can help find
instances of errors to help determine intermittent failures from
one off problems. The script requires requests which is not
included within kv itself (in couchbase-cli) so you cannot run
directly inside kv.
Change-Id: I1579b01bc3157587787249f34a2aaf3182de0a4e
Reviewed-on: http://review.couchbase.org/103291
Tested-by: Build Bot <80754af91bfb6d1073585b046fe0a474ce868509@couchbase.com>
Reviewed-by: Dave Rigby <a09264da4832c7ff1d3bf1608a19f4b870f93750@couchbase.com><commit_after>
|
#!/usr/bin/env python3
"""
This short script uses HTTP requests to search the last 100 builds of
a jenkins job to find recurring errors, written in Python3.
It results in printing a list of links to builds that match the search
As the requests package is not included within kv, you will need to either
download this package yourself or reference the one included inside
couchbase-cli.
"""
import argparse
import json
import requests
import sys
import time
serverURL = 'http://cv.jenkins.couchbase.com/'
# Create argparser so the user can specify which job to search
argParser = argparse.ArgumentParser()
argParser.add_argument('--job', '-j', type=str,
help='The cv job to query. '
"Common jobs are: 'kv_engine-ASan-UBSan-master', "
"'kv_engine-clang_analyzer-master', "
"'kv_engine-linux-master', "
"'kv_engine-threadsanitizer-master', "
"'kv_engine-windows-master', "
"'kv_engine-clang_format', "
"'kv-engine-cv-perf'", required=True)
argParser.add_argument('--search', '-s', type=str,
help='The string to search the logs for', required=True)
argParser.add_argument('--build-no', '-b', type=int,
help='The build number of cv job to check backwards '
'from. 0 (default) fetches latest build number',
default=0)
argParser.add_argument('--no-of-builds', '-n', type=int,
help='The number of builds to check back', default=100)
args = argParser.parse_args()
job = 'job/' + args.job + '/'
consoleText = '/consoleText/'
resultURLs = []
if args.build_no == 0:
# need to fetch the latest build number
r = requests.get(serverURL + job + 'lastBuild/api/json')
j = r.json()
args.build_no = j['number']
print("Searching for:", ('"' + args.search + '"'), "in console logs of job:",
args.job, "between build", args.build_no - (args.no_of_builds - 1), "and",
args.build_no, file=sys.stderr)
start_time = time.time()
for i in range(0, args.no_of_builds):
print('\r >>> Current progress: {} '.format(str(i)), end='',
flush=True, file=sys.stderr)
r = requests.get(serverURL + job + str(args.build_no-i) + consoleText)
result = r.text.find(args.search)
if result != -1:
resultURLs.append(serverURL + job + str(args.build_no-i) + '/console/')
print('\r Completed search in', (time.time() - start_time), 's',
file=sys.stderr)
for url in resultURLs:
print(url)
|
Add utility script for searching Jenkins console logs
This small python script can be used to quickly check the last
100 (or more if you're willing to edit and wait) to see if a
string is present within the console log. This can help find
instances of errors to help determine intermittent failures from
one off problems. The script requires requests which is not
included within kv itself (in couchbase-cli) so you cannot run
directly inside kv.
Change-Id: I1579b01bc3157587787249f34a2aaf3182de0a4e
Reviewed-on: http://review.couchbase.org/103291
Tested-by: Build Bot <80754af91bfb6d1073585b046fe0a474ce868509@couchbase.com>
Reviewed-by: Dave Rigby <a09264da4832c7ff1d3bf1608a19f4b870f93750@couchbase.com>#!/usr/bin/env python3
"""
This short script uses HTTP requests to search the last 100 builds of
a jenkins job to find recurring errors, written in Python3.
It results in printing a list of links to builds that match the search
As the requests package is not included within kv, you will need to either
download this package yourself or reference the one included inside
couchbase-cli.
"""
import argparse
import json
import requests
import sys
import time
serverURL = 'http://cv.jenkins.couchbase.com/'
# Create argparser so the user can specify which job to search
argParser = argparse.ArgumentParser()
argParser.add_argument('--job', '-j', type=str,
help='The cv job to query. '
"Common jobs are: 'kv_engine-ASan-UBSan-master', "
"'kv_engine-clang_analyzer-master', "
"'kv_engine-linux-master', "
"'kv_engine-threadsanitizer-master', "
"'kv_engine-windows-master', "
"'kv_engine-clang_format', "
"'kv-engine-cv-perf'", required=True)
argParser.add_argument('--search', '-s', type=str,
help='The string to search the logs for', required=True)
argParser.add_argument('--build-no', '-b', type=int,
help='The build number of cv job to check backwards '
'from. 0 (default) fetches latest build number',
default=0)
argParser.add_argument('--no-of-builds', '-n', type=int,
help='The number of builds to check back', default=100)
args = argParser.parse_args()
job = 'job/' + args.job + '/'
consoleText = '/consoleText/'
resultURLs = []
if args.build_no == 0:
# need to fetch the latest build number
r = requests.get(serverURL + job + 'lastBuild/api/json')
j = r.json()
args.build_no = j['number']
print("Searching for:", ('"' + args.search + '"'), "in console logs of job:",
args.job, "between build", args.build_no - (args.no_of_builds - 1), "and",
args.build_no, file=sys.stderr)
start_time = time.time()
for i in range(0, args.no_of_builds):
print('\r >>> Current progress: {} '.format(str(i)), end='',
flush=True, file=sys.stderr)
r = requests.get(serverURL + job + str(args.build_no-i) + consoleText)
result = r.text.find(args.search)
if result != -1:
resultURLs.append(serverURL + job + str(args.build_no-i) + '/console/')
print('\r Completed search in', (time.time() - start_time), 's',
file=sys.stderr)
for url in resultURLs:
print(url)
|
<commit_before><commit_msg>Add utility script for searching Jenkins console logs
This small Python script can be used to quickly check the last
100 builds (or more, if you're willing to edit and wait) to see if a
string is present within the console log. This can help find
instances of errors and distinguish intermittent failures from
one-off problems. The script requires requests, which is not
included within kv itself (it is available in couchbase-cli), so you cannot
run it directly inside kv.
Change-Id: I1579b01bc3157587787249f34a2aaf3182de0a4e
Reviewed-on: http://review.couchbase.org/103291
Tested-by: Build Bot <80754af91bfb6d1073585b046fe0a474ce868509@couchbase.com>
Reviewed-by: Dave Rigby <a09264da4832c7ff1d3bf1608a19f4b870f93750@couchbase.com><commit_after>#!/usr/bin/env python3
"""
This short Python 3 script uses the requests library to search the last 100
builds of a Jenkins job for recurring errors.
It prints a list of links to the builds that match the search.
As the requests package is not included within kv, you will need to either
download this package yourself or reference the one included inside
couchbase-cli.
"""
import argparse
import json
import requests
import sys
import time
serverURL = 'http://cv.jenkins.couchbase.com/'
# Create argparser so the user can specify which job to search
argParser = argparse.ArgumentParser()
argParser.add_argument('--job', '-j', type=str,
help='The cv job to query. '
"Common jobs are: 'kv_engine-ASan-UBSan-master', "
"'kv_engine-clang_analyzer-master', "
"'kv_engine-linux-master', "
"'kv_engine-threadsanitizer-master', "
"'kv_engine-windows-master', "
"'kv_engine-clang_format', "
"'kv-engine-cv-perf'", required=True)
argParser.add_argument('--search', '-s', type=str,
help='The string to search the logs for', required=True)
argParser.add_argument('--build-no', '-b', type=int,
help='The build number of cv job to check backwards '
'from. 0 (default) fetches latest build number',
default=0)
argParser.add_argument('--no-of-builds', '-n', type=int,
help='The number of builds to check back', default=100)
args = argParser.parse_args()
job = 'job/' + args.job + '/'
consoleText = '/consoleText/'
resultURLs = []
if args.build_no == 0:
# need to fetch the latest build number
r = requests.get(serverURL + job + 'lastBuild/api/json')
j = r.json()
args.build_no = j['number']
print("Searching for:", ('"' + args.search + '"'), "in console logs of job:",
args.job, "between build", args.build_no - (args.no_of_builds - 1), "and",
args.build_no, file=sys.stderr)
start_time = time.time()
for i in range(0, args.no_of_builds):
print('\r >>> Current progress: {} '.format(str(i)), end='',
flush=True, file=sys.stderr)
r = requests.get(serverURL + job + str(args.build_no-i) + consoleText)
result = r.text.find(args.search)
if result != -1:
resultURLs.append(serverURL + job + str(args.build_no-i) + '/console/')
print('\r Completed search in', (time.time() - start_time), 's',
file=sys.stderr)
for url in resultURLs:
print(url)
|
|
1409d2276f5533c9704903197f491beae65c125a
|
familias/migrations/0036_merge_20170621_2219.py
|
familias/migrations/0036_merge_20170621_2219.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-06-21 22:19
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('familias', '0035_auto_20170621_2211'),
('familias', '0034_add_oficio_estudiante'),
]
operations = [
]
|
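Hypothetical sketch only: the referenced 0035_auto_20170621_2211 migration is not part of this record, but the rename and the new optional field described in the commit message would typically be expressed along these lines in Django 1.10 (the model name and field type here are assumptions):

```
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('familias', '0034_add_oficio_estudiante'),
    ]

    operations = [
        # Rename the existing column.
        migrations.RenameField(model_name='integrante', old_name='escuela',
                               new_name='plantel'),
        # Add the optional escuela value back to all members.
        migrations.AddField(model_name='integrante', name='escuela',
                            field=models.CharField(blank=True, null=True,
                                                   max_length=200)),
    ]
```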
Change the field escuela in the Integrante form to plantel. And add an optional value escuela to all members
|
Change the field escuela in the Integrante form to plantel. And add an
optional value escuela to all members
Merge migrations
|
Python
|
mit
|
erikiado/jp2_online,erikiado/jp2_online,erikiado/jp2_online
|
Change the field escuela in the Integrante form to plantel. And add an
optional value escuela to all members
Merge migrations
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-06-21 22:19
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('familias', '0035_auto_20170621_2211'),
('familias', '0034_add_oficio_estudiante'),
]
operations = [
]
|
<commit_before><commit_msg>Change the field escuela in the Integrante form to plantel. And add an
optional value escuela to all members
Merge migrations<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-06-21 22:19
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('familias', '0035_auto_20170621_2211'),
('familias', '0034_add_oficio_estudiante'),
]
operations = [
]
|
Change the field escuela in the Integrante form to plantel. And add an
optional value escuela to all members
Merge migrations
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-06-21 22:19
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('familias', '0035_auto_20170621_2211'),
('familias', '0034_add_oficio_estudiante'),
]
operations = [
]
|
<commit_before><commit_msg>Change the field escuela in the Integrante form to plantel. And add an
optional value escuela to all members
Merge migrations<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-06-21 22:19
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('familias', '0035_auto_20170621_2211'),
('familias', '0034_add_oficio_estudiante'),
]
operations = [
]
|
|
01b5b572149f19550d4fdec3fd7d1e40aee9b624
|
astroid/brain/brain_boto3.py
|
astroid/brain/brain_boto3.py
|
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""Astroid hooks for understanding boto3.ServiceRequest()"""
import astroid
from astroid import MANAGER, extract_node
BOTO_SERVICE_FACTORY_QUALIFIED_NAME = "boto3.resources.base.ServiceResource"
def service_request_transform(node):
"""Transform ServiceResource to look like dynamic classes"""
code = """
def __getattr__(self, attr):
return 0
"""
func_getattr = extract_node(code)
node.locals["__getattr__"] = [func_getattr]
return node
def _looks_like_boto3_service_request(node):
return node.qname() == BOTO_SERVICE_FACTORY_QUALIFIED_NAME
MANAGER.register_transform(
astroid.ClassDef, service_request_transform, _looks_like_boto3_service_request
)
|
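Illustrative user code (not from the patch, and it needs AWS configuration to actually run): before this transform, static analysis of the dynamically created attribute below reports E1101 (no-member), because `Topic` only exists once the resource factory has run.

```
import boto3

sns = boto3.resource("sns")  # ServiceResource is assembled by a factory at runtime
topic = sns.Topic("arn:aws:sns:us-east-1:123456789012:example")  # attribute unknown to static analysis
```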
Transform boto3.ServiceRequest to look like dynamic class
|
Transform boto3.ServiceRequest to look like dynamic class
`boto3.resource` creates resources dynamically via a resource factory.
Unfortunately that completely breaks static analysis leading to spurious
false positives since pylint cannot determine sanely that attributes
exist or not.
Here's an example of accessing the Topic class out of the `sns` resource.
As you can see, the class is created dynamically rather than existing
in the codebase itself:
```
In [2]: boto3.resource
Out[2]: <function boto3.resource(*args, **kwargs)>
In [3]: boto3.resource('sns')
Out[3]: sns.ServiceResource()
In [4]: boto3.resource('sns').Topic
Out[4]: <bound method ResourceFactory._create_class_partial.<locals>.create_resource of sns.ServiceResource()>
```
This patch adds a fake `__getattr__` method to `ServiceRequest`.
This will prevent `pylint` from emitting `no-member` at all for `ServiceRequest`
instances, but that is a good solution for now until we can load typeshed-like
annotation packages.
Close PyCQA/pylint#3134
|
Python
|
lgpl-2.1
|
PyCQA/astroid
|
Transform boto3.ServiceRequest to look like dynamic class
`boto3.resource` creates resources dynamically via a resource factory.
Unfortunately that completely breaks static analysis leading to spurious
false positives since pylint cannot determine sanely that attributes
exist or not.
Here's an example of accessing the Topic class out of the `sns` resource.
As you can see, the class is created dynamically rather than existing
in the codebase itself:
```
In [2]: boto3.resource
Out[2]: <function boto3.resource(*args, **kwargs)>
In [3]: boto3.resource('sns')
Out[3]: sns.ServiceResource()
In [4]: boto3.resource('sns').Topic
Out[4]: <bound method ResourceFactory._create_class_partial.<locals>.create_resource of sns.ServiceResource()>
```
This patch adds a fake `__getattr__` method to `ServiceRequest`.
This will prevent `pylint` from emitting `no-member` at all for `ServiceRequest`
instances, but that is a good solution for now until we can load typeshed-like
annotation packages.
Close PyCQA/pylint#3134
|
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""Astroid hooks for understanding boto3.ServiceRequest()"""
import astroid
from astroid import MANAGER, extract_node
BOTO_SERVICE_FACTORY_QUALIFIED_NAME = "boto3.resources.base.ServiceResource"
def service_request_transform(node):
"""Transform ServiceResource to look like dynamic classes"""
code = """
def __getattr__(self, attr):
return 0
"""
func_getattr = extract_node(code)
node.locals["__getattr__"] = [func_getattr]
return node
def _looks_like_boto3_service_request(node):
return node.qname() == BOTO_SERVICE_FACTORY_QUALIFIED_NAME
MANAGER.register_transform(
astroid.ClassDef, service_request_transform, _looks_like_boto3_service_request
)
|
<commit_before><commit_msg>Transform boto3.ServiceRequest to look like dynamic class
`boto3.resource` creates resources dynamically via a resource factory.
Unfortunately that completely breaks static analysis leading to spurious
false positives since pylint cannot determine sanely that attributes
exist or not.
Here's an example of accessing the Topic class out of the `sns` resource.
As you can see, the class is created dynamically rather than existing
in the codebase itself:
```
In [2]: boto3.resource
Out[2]: <function boto3.resource(*args, **kwargs)>
In [3]: boto3.resource('sns')
Out[3]: sns.ServiceResource()
In [4]: boto3.resource('sns').Topic
Out[4]: <bound method ResourceFactory._create_class_partial.<locals>.create_resource of sns.ServiceResource()>
```
This patch adds a fake `__getattr__` method to `ServiceRequest`.
This will prevent `pylint` from emitting `no-member` at all for `ServiceRequest`
instances, but that is a good solution for now until we can load typeshed-like
annotation packages.
Close PyCQA/pylint#3134<commit_after>
|
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""Astroid hooks for understanding boto3.ServiceRequest()"""
import astroid
from astroid import MANAGER, extract_node
BOTO_SERVICE_FACTORY_QUALIFIED_NAME = "boto3.resources.base.ServiceResource"
def service_request_transform(node):
"""Transform ServiceResource to look like dynamic classes"""
code = """
def __getattr__(self, attr):
return 0
"""
func_getattr = extract_node(code)
node.locals["__getattr__"] = [func_getattr]
return node
def _looks_like_boto3_service_request(node):
return node.qname() == BOTO_SERVICE_FACTORY_QUALIFIED_NAME
MANAGER.register_transform(
astroid.ClassDef, service_request_transform, _looks_like_boto3_service_request
)
|
Transform boto3.ServiceRequest to look like dynamic class
`boto3.resource` creates resources dynamically via a resource factory.
Unfortunately that completely breaks static analysis leading to spurious
false positives since pylint cannot determine sanely that attributes
exist or not.
Here's an example of accessing the Topic class out of the `sns` resource.
As you can see, the class is created dynamically rather than existing
in the codebase itself:
```
In [2]: boto3.resource
Out[2]: <function boto3.resource(*args, **kwargs)>
In [3]: boto3.resource('sns')
Out[3]: sns.ServiceResource()
In [4]: boto3.resource('sns').Topic
Out[4]: <bound method ResourceFactory._create_class_partial.<locals>.create_resource of sns.ServiceResource()>
```
This patch adds a fake `__getattr__` method to `ServiceRequest`.
This will prevent `pylint` from emitting `no-member` at all for `ServiceRequest`
instances, but that is a good solution for now until we can load typeshed-like
annotation packages.
Close PyCQA/pylint#3134
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""Astroid hooks for understanding boto3.ServiceRequest()"""
import astroid
from astroid import MANAGER, extract_node
BOTO_SERVICE_FACTORY_QUALIFIED_NAME = "boto3.resources.base.ServiceResource"
def service_request_transform(node):
"""Transform ServiceResource to look like dynamic classes"""
code = """
def __getattr__(self, attr):
return 0
"""
func_getattr = extract_node(code)
node.locals["__getattr__"] = [func_getattr]
return node
def _looks_like_boto3_service_request(node):
return node.qname() == BOTO_SERVICE_FACTORY_QUALIFIED_NAME
MANAGER.register_transform(
astroid.ClassDef, service_request_transform, _looks_like_boto3_service_request
)
|
<commit_before><commit_msg>Transform boto3.ServiceRequest to look like dynamic class
`boto3.resource` creates resources dynamically via a resource factory.
Unfortunately that completely breaks static analysis leading to spurious
false positives since pylint cannot determine sanely that attributes
exist or not.
Here's an example of accessing the Topic class out of the `sns` resource.
As you can see, the class is created dynamically rather than existing
in the codebase itself:
```
In [2]: boto3.resource
Out[2]: <function boto3.resource(*args, **kwargs)>
In [3]: boto3.resource('sns')
Out[3]: sns.ServiceResource()
In [4]: boto3.resource('sns').Topic
Out[4]: <bound method ResourceFactory._create_class_partial.<locals>.create_resource of sns.ServiceResource()>
```
This patch adds a fake `__getattr__` method to `ServiceRequest`.
This will prevent `pylint` from emitting `no-member` at all for `ServiceRequest`
instances, but that is a good solution for now until we can load typeshed-like
annotation packages.
Close PyCQA/pylint#3134<commit_after># Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""Astroid hooks for understanding boto3.ServiceRequest()"""
import astroid
from astroid import MANAGER, extract_node
BOTO_SERVICE_FACTORY_QUALIFIED_NAME = "boto3.resources.base.ServiceResource"
def service_request_transform(node):
"""Transform ServiceResource to look like dynamic classes"""
code = """
def __getattr__(self, attr):
return 0
"""
func_getattr = extract_node(code)
node.locals["__getattr__"] = [func_getattr]
return node
def _looks_like_boto3_service_request(node):
return node.qname() == BOTO_SERVICE_FACTORY_QUALIFIED_NAME
MANAGER.register_transform(
astroid.ClassDef, service_request_transform, _looks_like_boto3_service_request
)
|
|
b92e1548f3944465cc7ac112d4c88e9aae8f4ead
|
rowboat/views/webhooks.py
|
rowboat/views/webhooks.py
|
import subprocess
from flask import Blueprint, request, current_app
# from rowboat.redis import rdb
# from rowboat.util.decos import authed
from disco.types.message import MessageEmbed
from disco.types.webhook import Webhook
webhooks = Blueprint('webhooks', __name__, url_prefix='/webhooks')
@webhooks.route('/circle_ci', methods=['POST'])
def webhook_circle_ci():
data = request.json['payload']
embed = MessageEmbed()
if data['outcome'] == 'success':
embed.color = 0x42c88a
else:
embed.color = 0xed5c5c
embed.title = 'Build #{} - {} ({})'.format(
data['build_num'],
data['subject'],
data['commiter_name'],
)
embed.url = data['build_url']
steps = []
for step in data['steps']:
emoji = ':x:' if any(True for act in step['actions'] if act.get('failed', False)) else ':white_check_mark:'
steps.append('{} - {}'.format(
emoji,
step['name']
))
embed.description = '\n'.join(steps)
wh = Webhook.from_url(current_app.config.get('WEBHOOK_URL'))
wh.execute(embeds=[embed])
if data['outcome'] != 'success':
return
subprocess.Popen(['git', 'pull', 'origin', 'master'])
return '', 200
|
import subprocess
from flask import Blueprint, request, current_app
# from rowboat.redis import rdb
# from rowboat.util.decos import authed
from disco.types.message import MessageEmbed
from disco.types.webhook import Webhook
webhooks = Blueprint('webhooks', __name__, url_prefix='/webhooks')
@webhooks.route('/circle_ci', methods=['POST'])
def webhook_circle_ci():
data = request.json['payload']
embed = MessageEmbed()
if data['outcome'] == 'success':
embed.color = 0x42c88a
else:
embed.color = 0xed5c5c
embed.title = 'Build #{} - {} ({})'.format(
data['build_num'],
data['subject'],
data['author_name'],
)
embed.url = data['build_url']
steps = []
for step in data['steps']:
emoji = ':x:' if any(True for act in step['actions'] if act.get('failed', False)) else ':white_check_mark:'
steps.append('{} - {}'.format(
emoji,
step['name']
))
embed.description = '\n'.join(steps)
embed.description += '\n [View Diff]({})'.format(data['compare'])
wh = Webhook.from_url(current_app.config.get('WEBHOOK_URL'))
wh.execute(embeds=[embed])
if data['outcome'] != 'success':
return
subprocess.Popen(['git', 'pull', 'origin', 'master'])
return '', 200
|
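For reference, a minimal sketch of the payload shape this handler reads, which is handy when replaying the hook locally. The field names come from the code above; the values are made up.

```
sample = {
    "payload": {
        "outcome": "success",
        "build_num": 123,
        "subject": "Fix invalid key",
        "author_name": "jane",
        "build_url": "https://circleci.com/gh/org/repo/123",
        "compare": "https://github.com/org/repo/compare/abc123...def456",
        "steps": [
            {"name": "tests", "actions": [{"failed": False}]},
        ],
    }
}
```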
Fix invalid key (nice docs circleci)
|
Fix invalid key (nice docs circleci)
|
Python
|
mit
|
ThaTiemsz/jetski,aliasfalse/rowboat,aliasfalse/rowboat,aliasfalse/rowboat,ThaTiemsz/jetski,b1naryth1ef/rowboat,b1naryth1ef/rowboat,aliasfalse/rowboat,b1naryth1ef/rowboat,ThaTiemsz/jetski,b1naryth1ef/rowboat,ThaTiemsz/jetski
|
import subprocess
from flask import Blueprint, request, current_app
# from rowboat.redis import rdb
# from rowboat.util.decos import authed
from disco.types.message import MessageEmbed
from disco.types.webhook import Webhook
webhooks = Blueprint('webhooks', __name__, url_prefix='/webhooks')
@webhooks.route('/circle_ci', methods=['POST'])
def webhook_circle_ci():
data = request.json['payload']
embed = MessageEmbed()
if data['outcome'] == 'success':
embed.color = 0x42c88a
else:
embed.color = 0xed5c5c
embed.title = 'Build #{} - {} ({})'.format(
data['build_num'],
data['subject'],
data['commiter_name'],
)
embed.url = data['build_url']
steps = []
for step in data['steps']:
emoji = ':x:' if any(True for act in step['actions'] if act.get('failed', False)) else ':white_check_mark:'
steps.append('{} - {}'.format(
emoji,
step['name']
))
embed.description = '\n'.join(steps)
wh = Webhook.from_url(current_app.config.get('WEBHOOK_URL'))
wh.execute(embeds=[embed])
if data['outcome'] != 'success':
return
subprocess.Popen(['git', 'pull', 'origin', 'master'])
return '', 200
Fix invalid key (nice docs circleci)
|
import subprocess
from flask import Blueprint, request, current_app
# from rowboat.redis import rdb
# from rowboat.util.decos import authed
from disco.types.message import MessageEmbed
from disco.types.webhook import Webhook
webhooks = Blueprint('webhooks', __name__, url_prefix='/webhooks')
@webhooks.route('/circle_ci', methods=['POST'])
def webhook_circle_ci():
data = request.json['payload']
embed = MessageEmbed()
if data['outcome'] == 'success':
embed.color = 0x42c88a
else:
embed.color = 0xed5c5c
embed.title = 'Build #{} - {} ({})'.format(
data['build_num'],
data['subject'],
data['author_name'],
)
embed.url = data['build_url']
steps = []
for step in data['steps']:
emoji = ':x:' if any(True for act in step['actions'] if act.get('failed', False)) else ':white_check_mark:'
steps.append('{} - {}'.format(
emoji,
step['name']
))
embed.description = '\n'.join(steps)
embed.description += '\n [View Diff]({})'.format(data['compare'])
wh = Webhook.from_url(current_app.config.get('WEBHOOK_URL'))
wh.execute(embeds=[embed])
if data['outcome'] != 'success':
return
subprocess.Popen(['git', 'pull', 'origin', 'master'])
return '', 200
|
<commit_before>import subprocess
from flask import Blueprint, request, current_app
# from rowboat.redis import rdb
# from rowboat.util.decos import authed
from disco.types.message import MessageEmbed
from disco.types.webhook import Webhook
webhooks = Blueprint('webhooks', __name__, url_prefix='/webhooks')
@webhooks.route('/circle_ci', methods=['POST'])
def webhook_circle_ci():
data = request.json['payload']
embed = MessageEmbed()
if data['outcome'] == 'success':
embed.color = 0x42c88a
else:
embed.color = 0xed5c5c
embed.title = 'Build #{} - {} ({})'.format(
data['build_num'],
data['subject'],
data['commiter_name'],
)
embed.url = data['build_url']
steps = []
for step in data['steps']:
emoji = ':x:' if any(True for act in step['actions'] if act.get('failed', False)) else ':white_check_mark:'
steps.append('{} - {}'.format(
emoji,
step['name']
))
embed.description = '\n'.join(steps)
wh = Webhook.from_url(current_app.config.get('WEBHOOK_URL'))
wh.execute(embeds=[embed])
if data['outcome'] != 'success':
return
subprocess.Popen(['git', 'pull', 'origin', 'master'])
return '', 200
<commit_msg>Fix invalid key (nice docs circleci)<commit_after>
|
import subprocess
from flask import Blueprint, request, current_app
# from rowboat.redis import rdb
# from rowboat.util.decos import authed
from disco.types.message import MessageEmbed
from disco.types.webhook import Webhook
webhooks = Blueprint('webhooks', __name__, url_prefix='/webhooks')
@webhooks.route('/circle_ci', methods=['POST'])
def webhook_circle_ci():
data = request.json['payload']
embed = MessageEmbed()
if data['outcome'] == 'success':
embed.color = 0x42c88a
else:
embed.color = 0xed5c5c
embed.title = 'Build #{} - {} ({})'.format(
data['build_num'],
data['subject'],
data['author_name'],
)
embed.url = data['build_url']
steps = []
for step in data['steps']:
emoji = ':x:' if any(True for act in step['actions'] if act.get('failed', False)) else ':white_check_mark:'
steps.append('{} - {}'.format(
emoji,
step['name']
))
embed.description = '\n'.join(steps)
embed.description += '\n [View Diff]({})'.format(data['compare'])
wh = Webhook.from_url(current_app.config.get('WEBHOOK_URL'))
wh.execute(embeds=[embed])
if data['outcome'] != 'success':
return
subprocess.Popen(['git', 'pull', 'origin', 'master'])
return '', 200
|
import subprocess
from flask import Blueprint, request, current_app
# from rowboat.redis import rdb
# from rowboat.util.decos import authed
from disco.types.message import MessageEmbed
from disco.types.webhook import Webhook
webhooks = Blueprint('webhooks', __name__, url_prefix='/webhooks')
@webhooks.route('/circle_ci', methods=['POST'])
def webhook_circle_ci():
data = request.json['payload']
embed = MessageEmbed()
if data['outcome'] == 'success':
embed.color = 0x42c88a
else:
embed.color = 0xed5c5c
embed.title = 'Build #{} - {} ({})'.format(
data['build_num'],
data['subject'],
data['commiter_name'],
)
embed.url = data['build_url']
steps = []
for step in data['steps']:
emoji = ':x:' if any(True for act in step['actions'] if act.get('failed', False)) else ':white_check_mark:'
steps.append('{} - {}'.format(
emoji,
step['name']
))
embed.description = '\n'.join(steps)
wh = Webhook.from_url(current_app.config.get('WEBHOOK_URL'))
wh.execute(embeds=[embed])
if data['outcome'] != 'success':
return
subprocess.Popen(['git', 'pull', 'origin', 'master'])
return '', 200
Fix invalid key (nice docs circleci)
import subprocess
from flask import Blueprint, request, current_app
# from rowboat.redis import rdb
# from rowboat.util.decos import authed
from disco.types.message import MessageEmbed
from disco.types.webhook import Webhook
webhooks = Blueprint('webhooks', __name__, url_prefix='/webhooks')
@webhooks.route('/circle_ci', methods=['POST'])
def webhook_circle_ci():
data = request.json['payload']
embed = MessageEmbed()
if data['outcome'] == 'success':
embed.color = 0x42c88a
else:
embed.color = 0xed5c5c
embed.title = 'Build #{} - {} ({})'.format(
data['build_num'],
data['subject'],
data['author_name'],
)
embed.url = data['build_url']
steps = []
for step in data['steps']:
emoji = ':x:' if any(True for act in step['actions'] if act.get('failed', False)) else ':white_check_mark:'
steps.append('{} - {}'.format(
emoji,
step['name']
))
embed.description = '\n'.join(steps)
embed.description += '\n [View Diff]({})'.format(data['compare'])
wh = Webhook.from_url(current_app.config.get('WEBHOOK_URL'))
wh.execute(embeds=[embed])
if data['outcome'] != 'success':
return
subprocess.Popen(['git', 'pull', 'origin', 'master'])
return '', 200
|
<commit_before>import subprocess
from flask import Blueprint, request, current_app
# from rowboat.redis import rdb
# from rowboat.util.decos import authed
from disco.types.message import MessageEmbed
from disco.types.webhook import Webhook
webhooks = Blueprint('webhooks', __name__, url_prefix='/webhooks')
@webhooks.route('/circle_ci', methods=['POST'])
def webhook_circle_ci():
data = request.json['payload']
embed = MessageEmbed()
if data['outcome'] == 'success':
embed.color = 0x42c88a
else:
embed.color = 0xed5c5c
embed.title = 'Build #{} - {} ({})'.format(
data['build_num'],
data['subject'],
data['commiter_name'],
)
embed.url = data['build_url']
steps = []
for step in data['steps']:
emoji = ':x:' if any(True for act in step['actions'] if act.get('failed', False)) else ':white_check_mark:'
steps.append('{} - {}'.format(
emoji,
step['name']
))
embed.description = '\n'.join(steps)
wh = Webhook.from_url(current_app.config.get('WEBHOOK_URL'))
wh.execute(embeds=[embed])
if data['outcome'] != 'success':
return
subprocess.Popen(['git', 'pull', 'origin', 'master'])
return '', 200
<commit_msg>Fix invalid key (nice docs circleci)<commit_after>import subprocess
from flask import Blueprint, request, current_app
# from rowboat.redis import rdb
# from rowboat.util.decos import authed
from disco.types.message import MessageEmbed
from disco.types.webhook import Webhook
webhooks = Blueprint('webhooks', __name__, url_prefix='/webhooks')
@webhooks.route('/circle_ci', methods=['POST'])
def webhook_circle_ci():
data = request.json['payload']
embed = MessageEmbed()
if data['outcome'] == 'success':
embed.color = 0x42c88a
else:
embed.color = 0xed5c5c
embed.title = 'Build #{} - {} ({})'.format(
data['build_num'],
data['subject'],
data['author_name'],
)
embed.url = data['build_url']
steps = []
for step in data['steps']:
emoji = ':x:' if any(True for act in step['actions'] if act.get('failed', False)) else ':white_check_mark:'
steps.append('{} - {}'.format(
emoji,
step['name']
))
embed.description = '\n'.join(steps)
embed.description += '\n [View Diff]({})'.format(data['compare'])
wh = Webhook.from_url(current_app.config.get('WEBHOOK_URL'))
wh.execute(embeds=[embed])
if data['outcome'] != 'success':
return
subprocess.Popen(['git', 'pull', 'origin', 'master'])
return '', 200
|
17dc07a496d976b082189395498a476089d66108
|
chainer_datasets.py
|
chainer_datasets.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 23 13:09:27 2017
@author: sakurai
"""
import numpy as np
import matplotlib.pyplot as plt
from chainer.datasets import TupleDataset
import cars196_dataset
from my_iterators import SerialIterator
from indexes_samplers import NPairMCIndexesSampler
if __name__ == '__main__':
batch_size = 50
train = cars196_dataset.load_as_ndarray(['train'])[0]
x, labels = train
dataset = TupleDataset(x, labels)
num_batches = len(dataset) / batch_size
order_sampler = NPairMCIndexesSampler(labels, batch_size, num_batches)
it = SerialIterator(dataset, batch_size, True, order_sampler=order_sampler)
for i in range(num_batches):
batch = next(it)
l = np.ravel([pair[1] for pair in batch]).tolist()
print i
print l[:batch_size/2]
print l[batch_size/2:]
print
|
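The listing above is Python 2 (print statements, true division for `num_batches`). A Python 3 rendering of the same batch loop, assuming the same custom `SerialIterator`/`NPairMCIndexesSampler` modules and the variables defined above, would look like:

```
num_batches = len(dataset) // batch_size  # integer division in Python 3
for i in range(num_batches):
    batch = next(it)
    batch_labels = np.ravel([pair[1] for pair in batch]).tolist()
    print(i)
    print(batch_labels[:batch_size // 2])
    print(batch_labels[batch_size // 2:])
    print()
```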
Implement example codes to use Cars196 with the modified SerialIterator
|
Implement example codes to use Cars196 with the modified SerialIterator
|
Python
|
mit
|
ronekko/deep_metric_learning
|
Implement example codes to use Cars196 with the modified SerialIterator
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 23 13:09:27 2017
@author: sakurai
"""
import numpy as np
import matplotlib.pyplot as plt
from chainer.datasets import TupleDataset
import cars196_dataset
from my_iterators import SerialIterator
from indexes_samplers import NPairMCIndexesSampler
if __name__ == '__main__':
batch_size = 50
train = cars196_dataset.load_as_ndarray(['train'])[0]
x, labels = train
dataset = TupleDataset(x, labels)
num_batches = len(dataset) / batch_size
order_sampler = NPairMCIndexesSampler(labels, batch_size, num_batches)
it = SerialIterator(dataset, batch_size, True, order_sampler=order_sampler)
for i in range(num_batches):
batch = next(it)
l = np.ravel([pair[1] for pair in batch]).tolist()
print i
print l[:batch_size/2]
print l[batch_size/2:]
print
|
<commit_before><commit_msg>Implement example codes to use Cars196 with the modified SerialIterator<commit_after>
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 23 13:09:27 2017
@author: sakurai
"""
import numpy as np
import matplotlib.pyplot as plt
from chainer.datasets import TupleDataset
import cars196_dataset
from my_iterators import SerialIterator
from indexes_samplers import NPairMCIndexesSampler
if __name__ == '__main__':
batch_size = 50
train = cars196_dataset.load_as_ndarray(['train'])[0]
x, labels = train
dataset = TupleDataset(x, labels)
num_batches = len(dataset) / batch_size
order_sampler = NPairMCIndexesSampler(labels, batch_size, num_batches)
it = SerialIterator(dataset, batch_size, True, order_sampler=order_sampler)
for i in range(num_batches):
batch = next(it)
l = np.ravel([pair[1] for pair in batch]).tolist()
print i
print l[:batch_size/2]
print l[batch_size/2:]
print
|
Implement example codes to use Cars196 with the modified SerialIterator
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 23 13:09:27 2017
@author: sakurai
"""
import numpy as np
import matplotlib.pyplot as plt
from chainer.datasets import TupleDataset
import cars196_dataset
from my_iterators import SerialIterator
from indexes_samplers import NPairMCIndexesSampler
if __name__ == '__main__':
batch_size = 50
train = cars196_dataset.load_as_ndarray(['train'])[0]
x, labels = train
dataset = TupleDataset(x, labels)
num_batches = len(dataset) / batch_size
order_sampler = NPairMCIndexesSampler(labels, batch_size, num_batches)
it = SerialIterator(dataset, batch_size, True, order_sampler=order_sampler)
for i in range(num_batches):
batch = next(it)
l = np.ravel([pair[1] for pair in batch]).tolist()
print i
print l[:batch_size/2]
print l[batch_size/2:]
print
|
<commit_before><commit_msg>Implement example codes to use Cars196 with the modified SerialIterator<commit_after># -*- coding: utf-8 -*-
"""
Created on Mon Jan 23 13:09:27 2017
@author: sakurai
"""
import numpy as np
import matplotlib.pyplot as plt
from chainer.datasets import TupleDataset
import cars196_dataset
from my_iterators import SerialIterator
from indexes_samplers import NPairMCIndexesSampler
if __name__ == '__main__':
batch_size = 50
train = cars196_dataset.load_as_ndarray(['train'])[0]
x, labels = train
dataset = TupleDataset(x, labels)
num_batches = len(dataset) / batch_size
order_sampler = NPairMCIndexesSampler(labels, batch_size, num_batches)
it = SerialIterator(dataset, batch_size, True, order_sampler=order_sampler)
for i in range(num_batches):
batch = next(it)
l = np.ravel([pair[1] for pair in batch]).tolist()
print i
print l[:batch_size/2]
print l[batch_size/2:]
print
|
|
abe3f5489fb91102ea45b85ea6200542e7ca5f67
|
test/unit/test_smplayservice.py
|
test/unit/test_smplayservice.py
|
import unittest
from hamcrest import *
from service.smplayservice import SmPlayServiceStarter
from smcontext import SmContext, SmApplication
class TestSmPlayService(unittest.TestCase):
def setUp(self):
sm_application = SmApplication("test/conf/", features = {})
sm_context = SmContext(sm_application, "")
self.sm_play_service = SmPlayServiceStarter(sm_context, "PLAY_NEXUS_END_TO_END_TEST", "", "", 9000, "", "", "", "")
def test_closed_assets_config(self):
assert_that(self.sm_play_service._get_assets_version("test/testapps/basicplayapp"), has_item("2.149.0"))
def test_open_assets_config(self):
assert_that(self.sm_play_service._get_assets_version("test/testapps/openplayapp"), has_item("2.150.0"))
|
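A quick illustration of the PyHamcrest matcher used in these tests: `has_item()` passes when the expected element appears anywhere in the returned sequence.

```
from hamcrest import assert_that, has_item

assert_that(["2.148.0", "2.149.0"], has_item("2.149.0"))  # passes
assert_that(["2.148.0", "2.149.0"], has_item("9.9.9"))    # raises AssertionError
```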
Add tests for assets version extraction
|
Add tests for assets version extraction
|
Python
|
apache-2.0
|
hmrc/service-manager,hmrc/service-manager,hmrc/service-manager,hmrc/service-manager
|
Add tests for assets version extraction
|
import unittest
from hamcrest import *
from service.smplayservice import SmPlayServiceStarter
from smcontext import SmContext, SmApplication
class TestSmPlayService(unittest.TestCase):
def setUp(self):
sm_application = SmApplication("test/conf/", features = {})
sm_context = SmContext(sm_application, "")
self.sm_play_service = SmPlayServiceStarter(sm_context, "PLAY_NEXUS_END_TO_END_TEST", "", "", 9000, "", "", "", "")
def test_closed_assets_config(self):
assert_that(self.sm_play_service._get_assets_version("test/testapps/basicplayapp"), has_item("2.149.0"))
def test_open_assets_config(self):
assert_that(self.sm_play_service._get_assets_version("test/testapps/openplayapp"), has_item("2.150.0"))
|
<commit_before><commit_msg>Add tests for assets version extraction<commit_after>
|
import unittest
from hamcrest import *
from service.smplayservice import SmPlayServiceStarter
from smcontext import SmContext, SmApplication
class TestSmPlayService(unittest.TestCase):
def setUp(self):
sm_application = SmApplication("test/conf/", features = {})
sm_context = SmContext(sm_application, "")
self.sm_play_service = SmPlayServiceStarter(sm_context, "PLAY_NEXUS_END_TO_END_TEST", "", "", 9000, "", "", "", "")
def test_closed_assets_config(self):
assert_that(self.sm_play_service._get_assets_version("test/testapps/basicplayapp"), has_item("2.149.0"))
def test_open_assets_config(self):
assert_that(self.sm_play_service._get_assets_version("test/testapps/openplayapp"), has_item("2.150.0"))
|
Add tests for assets version extraction
import unittest
from hamcrest import *
from service.smplayservice import SmPlayServiceStarter
from smcontext import SmContext, SmApplication
class TestSmPlayService(unittest.TestCase):
def setUp(self):
sm_application = SmApplication("test/conf/", features = {})
sm_context = SmContext(sm_application, "")
self.sm_play_service = SmPlayServiceStarter(sm_context, "PLAY_NEXUS_END_TO_END_TEST", "", "", 9000, "", "", "", "")
def test_closed_assets_config(self):
assert_that(self.sm_play_service._get_assets_version("test/testapps/basicplayapp"), has_item("2.149.0"))
def test_open_assets_config(self):
assert_that(self.sm_play_service._get_assets_version("test/testapps/openplayapp"), has_item("2.150.0"))
|
<commit_before><commit_msg>Add tests for assets version extraction<commit_after>import unittest
from hamcrest import *
from service.smplayservice import SmPlayServiceStarter
from smcontext import SmContext, SmApplication
class TestSmPlayService(unittest.TestCase):
def setUp(self):
sm_application = SmApplication("test/conf/", features = {})
sm_context = SmContext(sm_application, "")
self.sm_play_service = SmPlayServiceStarter(sm_context, "PLAY_NEXUS_END_TO_END_TEST", "", "", 9000, "", "", "", "")
def test_closed_assets_config(self):
assert_that(self.sm_play_service._get_assets_version("test/testapps/basicplayapp"), has_item("2.149.0"))
def test_open_assets_config(self):
assert_that(self.sm_play_service._get_assets_version("test/testapps/openplayapp"), has_item("2.150.0"))
|
|
3624a1d989723f257020c7e93be3f05c4ca45561
|
tests/formats_test/misc_test.py
|
tests/formats_test/misc_test.py
|
#!/usr/bin/python
import unittest
from blivet.formats import device_formats
import blivet.formats.fs as fs
class MethodsTestCase(unittest.TestCase):
"""Test some methods that do not require actual images."""
def setUp(self):
self.fs = {}
for k, v in device_formats.items():
if issubclass(v, fs.FS) and not issubclass(v, fs.NFS):
self.fs[k] = v(device="/dev")
def testGetLabelArgs(self):
self.longMessage = True
# ReiserFS is currently backwards, needs the label after the l flag
for k, v in [(k, v) for k, v in self.fs.items() if isinstance(v, fs.ReiserFS)]:
self.assertEqual(v._getLabelArgs("myfs"), ["-l", "/dev", "myfs"], msg=k)
# JFS is backward as well
for k, v in [(k, v) for k, v in self.fs.items() if isinstance(v, fs.JFS)]:
self.assertEqual(v._getLabelArgs("myfs"), ["-L", "/dev", "myfs"], msg=k)
#XFS uses a -L label
for k, v in [(k, v) for k, v in self.fs.items() if isinstance(v, fs.XFS)]:
self.assertEqual(v._getLabelArgs("myfs"), ["-L", "myfs", "/dev"], msg=k)
# All NoDeviceFSs ignore the device argument passed and set device
# to the fs type
for k, v in [(k, v) for k, v in self.fs.items() if isinstance(v, fs.NoDevFS)]:
self.assertEqual(v._getLabelArgs("myfs"), [v.type, "myfs"], msg=k)
for k, v in [(k, v) for k, v in self.fs.items() if not (isinstance(v, fs.NoDevFS) or isinstance(v, fs.ReiserFS) or isinstance(v, fs.XFS) or isinstance(v, fs.JFS))]:
self.assertEqual(v._getLabelArgs("myfs"), ["/dev", "myfs"], msg=k)
def suite():
suite1 = unittest.TestLoader().loadTestsFromTestCase(MethodsTestCase)
return unittest.TestSuite(suite1)
if __name__ == "__main__":
unittest.main()
|
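Spelling out the generic branch of the test for one concrete class (assuming `Ext4FS` is exported by `blivet.formats.fs`, as it is upstream): for filesystems with no special-case handling, the device comes first and the label second.

```
import blivet.formats.fs as fs

ext4 = fs.Ext4FS(device="/dev")
assert ext4._getLabelArgs("myfs") == ["/dev", "myfs"]  # device, then label
```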
Add some simple tests for file formats.
|
Add some simple tests for file formats.
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com>
|
Python
|
lgpl-2.1
|
rhinstaller/blivet,jkonecny12/blivet,dwlehman/blivet,AdamWill/blivet,vojtechtrefny/blivet,vojtechtrefny/blivet,vpodzime/blivet,rvykydal/blivet,dwlehman/blivet,rhinstaller/blivet,rvykydal/blivet,AdamWill/blivet,vpodzime/blivet,jkonecny12/blivet
|
Add some simple tests for file formats.
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com>
|
#!/usr/bin/python
import unittest
from blivet.formats import device_formats
import blivet.formats.fs as fs
class MethodsTestCase(unittest.TestCase):
"""Test some methods that do not require actual images."""
def setUp(self):
self.fs = {}
for k, v in device_formats.items():
if issubclass(v, fs.FS) and not issubclass(v, fs.NFS):
self.fs[k] = v(device="/dev")
def testGetLabelArgs(self):
self.longMessage = True
# ReiserFS is currently backwards, needs the label after the l flag
for k, v in [(k, v) for k, v in self.fs.items() if isinstance(v, fs.ReiserFS)]:
self.assertEqual(v._getLabelArgs("myfs"), ["-l", "/dev", "myfs"], msg=k)
# JFS is backward as well
for k, v in [(k, v) for k, v in self.fs.items() if isinstance(v, fs.JFS)]:
self.assertEqual(v._getLabelArgs("myfs"), ["-L", "/dev", "myfs"], msg=k)
#XFS uses a -L label
for k, v in [(k, v) for k, v in self.fs.items() if isinstance(v, fs.XFS)]:
self.assertEqual(v._getLabelArgs("myfs"), ["-L", "myfs", "/dev"], msg=k)
# All NoDeviceFSs ignore the device argument passed and set device
# to the fs type
for k, v in [(k, v) for k, v in self.fs.items() if isinstance(v, fs.NoDevFS)]:
self.assertEqual(v._getLabelArgs("myfs"), [v.type, "myfs"], msg=k)
for k, v in [(k, v) for k, v in self.fs.items() if not (isinstance(v, fs.NoDevFS) or isinstance(v, fs.ReiserFS) or isinstance(v, fs.XFS) or isinstance(v, fs.JFS))]:
self.assertEqual(v._getLabelArgs("myfs"), ["/dev", "myfs"], msg=k)
def suite():
suite1 = unittest.TestLoader().loadTestsFromTestCase(MethodsTestCase)
return unittest.TestSuite(suite1)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add some simple tests for file formats.
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com><commit_after>
|
#!/usr/bin/python
import unittest
from blivet.formats import device_formats
import blivet.formats.fs as fs
class MethodsTestCase(unittest.TestCase):
"""Test some methods that do not require actual images."""
def setUp(self):
self.fs = {}
for k, v in device_formats.items():
if issubclass(v, fs.FS) and not issubclass(v, fs.NFS):
self.fs[k] = v(device="/dev")
def testGetLabelArgs(self):
self.longMessage = True
# ReiserFS is currently backwards, needs the label after the l flag
for k, v in [(k, v) for k, v in self.fs.items() if isinstance(v, fs.ReiserFS)]:
self.assertEqual(v._getLabelArgs("myfs"), ["-l", "/dev", "myfs"], msg=k)
# JFS is backward as well
for k, v in [(k, v) for k, v in self.fs.items() if isinstance(v, fs.JFS)]:
self.assertEqual(v._getLabelArgs("myfs"), ["-L", "/dev", "myfs"], msg=k)
#XFS uses a -L label
for k, v in [(k, v) for k, v in self.fs.items() if isinstance(v, fs.XFS)]:
self.assertEqual(v._getLabelArgs("myfs"), ["-L", "myfs", "/dev"], msg=k)
# All NoDeviceFSs ignore the device argument passed and set device
# to the fs type
for k, v in [(k, v) for k, v in self.fs.items() if isinstance(v, fs.NoDevFS)]:
self.assertEqual(v._getLabelArgs("myfs"), [v.type, "myfs"], msg=k)
for k, v in [(k, v) for k, v in self.fs.items() if not (isinstance(v, fs.NoDevFS) or isinstance(v, fs.ReiserFS) or isinstance(v, fs.XFS) or isinstance(v, fs.JFS))]:
self.assertEqual(v._getLabelArgs("myfs"), ["/dev", "myfs"], msg=k)
def suite():
suite1 = unittest.TestLoader().loadTestsFromTestCase(MethodsTestCase)
return unittest.TestSuite(suite1)
if __name__ == "__main__":
unittest.main()
|
Add some simple tests for file formats.
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com>
#!/usr/bin/python
import unittest
from blivet.formats import device_formats
import blivet.formats.fs as fs
class MethodsTestCase(unittest.TestCase):
"""Test some methods that do not require actual images."""
def setUp(self):
self.fs = {}
for k, v in device_formats.items():
if issubclass(v, fs.FS) and not issubclass(v, fs.NFS):
self.fs[k] = v(device="/dev")
def testGetLabelArgs(self):
self.longMessage = True
# ReiserFS is currently backwards, needs the label after the l flag
for k, v in [(k, v) for k, v in self.fs.items() if isinstance(v, fs.ReiserFS)]:
self.assertEqual(v._getLabelArgs("myfs"), ["-l", "/dev", "myfs"], msg=k)
# JFS is backward as well
for k, v in [(k, v) for k, v in self.fs.items() if isinstance(v, fs.JFS)]:
self.assertEqual(v._getLabelArgs("myfs"), ["-L", "/dev", "myfs"], msg=k)
#XFS uses a -L label
for k, v in [(k, v) for k, v in self.fs.items() if isinstance(v, fs.XFS)]:
self.assertEqual(v._getLabelArgs("myfs"), ["-L", "myfs", "/dev"], msg=k)
# All NoDeviceFSs ignore the device argument passed and set device
# to the fs type
for k, v in [(k, v) for k, v in self.fs.items() if isinstance(v, fs.NoDevFS)]:
self.assertEqual(v._getLabelArgs("myfs"), [v.type, "myfs"], msg=k)
for k, v in [(k, v) for k, v in self.fs.items() if not (isinstance(v, fs.NoDevFS) or isinstance(v, fs.ReiserFS) or isinstance(v, fs.XFS) or isinstance(v, fs.JFS))]:
self.assertEqual(v._getLabelArgs("myfs"), ["/dev", "myfs"], msg=k)
def suite():
suite1 = unittest.TestLoader().loadTestsFromTestCase(MethodsTestCase)
return unittest.TestSuite(suite1)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add some simple tests for file formats.
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com><commit_after>#!/usr/bin/python
import unittest
from blivet.formats import device_formats
import blivet.formats.fs as fs
class MethodsTestCase(unittest.TestCase):
"""Test some methods that do not require actual images."""
def setUp(self):
self.fs = {}
for k, v in device_formats.items():
if issubclass(v, fs.FS) and not issubclass(v, fs.NFS):
self.fs[k] = v(device="/dev")
def testGetLabelArgs(self):
self.longMessage = True
# ReiserFS is currently backwards, needs the label after the l flag
for k, v in [(k, v) for k, v in self.fs.items() if isinstance(v, fs.ReiserFS)]:
self.assertEqual(v._getLabelArgs("myfs"), ["-l", "/dev", "myfs"], msg=k)
# JFS is backward as well
for k, v in [(k, v) for k, v in self.fs.items() if isinstance(v, fs.JFS)]:
self.assertEqual(v._getLabelArgs("myfs"), ["-L", "/dev", "myfs"], msg=k)
#XFS uses a -L label
for k, v in [(k, v) for k, v in self.fs.items() if isinstance(v, fs.XFS)]:
self.assertEqual(v._getLabelArgs("myfs"), ["-L", "myfs", "/dev"], msg=k)
# All NoDeviceFSs ignore the device argument passed and set device
# to the fs type
for k, v in [(k, v) for k, v in self.fs.items() if isinstance(v, fs.NoDevFS)]:
self.assertEqual(v._getLabelArgs("myfs"), [v.type, "myfs"], msg=k)
for k, v in [(k, v) for k, v in self.fs.items() if not (isinstance(v, fs.NoDevFS) or isinstance(v, fs.ReiserFS) or isinstance(v, fs.XFS) or isinstance(v, fs.JFS))]:
self.assertEqual(v._getLabelArgs("myfs"), ["/dev", "myfs"], msg=k)
def suite():
suite1 = unittest.TestLoader().loadTestsFromTestCase(MethodsTestCase)
return unittest.TestSuite(suite1)
if __name__ == "__main__":
unittest.main()
|
|
a6f75a09d028a02681fd549f6841e8cce791fe56
|
stack_repeat_visit_spectra.py
|
stack_repeat_visit_spectra.py
|
"""
Stack RAVE spectra from repeat visits.
"""
import cPickle as pickle
import os
import numpy as np
from astropy.table import Table
parent_spectrum_dir = "/data/gaia-eso/arc/rave/pre-normalized-spectra-with-correct-errors"
stacked_spectrum_dir = os.path.join(parent_spectrum_dir, "stacked-spectra")
if not os.path.exists(stacked_spectrum_dir):
os.mkdir(stacked_spectrum_dir)
dr5 = Table.read("/data/gaia-eso/arc/rave-data-files/rave-dr5-positions.fits")
dr5 = dr5.filled()
def get_spectrum_path(rave_obs_id):
date, field, fibre = rave_obs_id.split("_")
year = date[:4]
return os.path.join(parent_spectrum_dir, year, date,
"{0}.rvsun.{1}.pkl".format(field, fibre.strip()))
for group in dr5.group_by("GroupID").groups:
if group["GroupID"][0] < 0 or group["GroupSize"][0] < 2: continue
group_id = group["GroupID"][0]
flux = []
ivar = []
subset = np.ones(len(group), dtype=bool)
for i, visit in enumerate(group):
spectrum_path = get_spectrum_path(visit["RAVE_OBS_ID"])
if not os.path.exists(spectrum_path):
print("Could not find {} in group {}".format(spectrum_path, group_id))
subset[i] = False
continue
with open(spectrum_path, "rb") as fp:
visit_flux, visit_ivar = pickle.load(fp)
flux.append(visit_flux)
ivar.append(visit_ivar)
flux = np.array(flux)
ivar = np.array(ivar)
if flux.shape[0] < 2:
print("Skipping group {} because only not enough spectra found".format(
group_id))
continue
# Produce a stacked spectrum.
stacked_ivar = np.sum(ivar, axis=0)
stacked_flux = np.sum(flux * ivar, axis=0)/stacked_ivar
assert np.any(np.isfinite(stacked_flux))
assert np.all(np.isfinite(stacked_ivar))
stacked_spectrum_path = os.path.join(
stacked_spectrum_dir, "{}.pkl".format(group["RAVEID"][0].strip()))
with open(stacked_spectrum_path, "wb") as fp:
pickle.dump((stacked_flux, stacked_ivar), fp, -1)
print("Created {}".format(stacked_spectrum_path))
|
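A toy check of the inverse-variance weighting used above: for two measurements of the same pixel, the visit with the larger ivar (smaller uncertainty) dominates the stack.

```
import numpy as np

flux = np.array([[1.0], [3.0]])   # two visits, one pixel
ivar = np.array([[4.0], [1.0]])   # the first visit is four times more precise
stacked_ivar = ivar.sum(axis=0)                          # [5.0]
stacked_flux = (flux * ivar).sum(axis=0) / stacked_ivar  # [1.4], pulled towards visit 1
```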
Add script to co-add repeat visits of stars
|
Add script to co-add repeat visits of stars
|
Python
|
mit
|
AnnieJumpCannon/RAVE,AnnieJumpCannon/RAVE
|
Add script to co-add repeat visits of stars
|
"""
Stack RAVE spectra from repeat visits.
"""
import cPickle as pickle
import os
import numpy as np
from astropy.table import Table
parent_spectrum_dir = "/data/gaia-eso/arc/rave/pre-normalized-spectra-with-correct-errors"
stacked_spectrum_dir = os.path.join(parent_spectrum_dir, "stacked-spectra")
if not os.path.exists(stacked_spectrum_dir):
os.mkdir(stacked_spectrum_dir)
dr5 = Table.read("/data/gaia-eso/arc/rave-data-files/rave-dr5-positions.fits")
dr5 = dr5.filled()
def get_spectrum_path(rave_obs_id):
date, field, fibre = rave_obs_id.split("_")
year = date[:4]
return os.path.join(parent_spectrum_dir, year, date,
"{0}.rvsun.{1}.pkl".format(field, fibre.strip()))
for group in dr5.group_by("GroupID").groups:
if group["GroupID"][0] < 0 or group["GroupSize"][0] < 2: continue
group_id = group["GroupID"][0]
flux = []
ivar = []
subset = np.ones(len(group), dtype=bool)
for i, visit in enumerate(group):
spectrum_path = get_spectrum_path(visit["RAVE_OBS_ID"])
if not os.path.exists(spectrum_path):
print("Could not find {} in group {}".format(spectrum_path, group_id))
subset[i] = False
continue
with open(spectrum_path, "rb") as fp:
visit_flux, visit_ivar = pickle.load(fp)
flux.append(visit_flux)
ivar.append(visit_ivar)
flux = np.array(flux)
ivar = np.array(ivar)
if flux.shape[0] < 2:
print("Skipping group {} because only not enough spectra found".format(
group_id))
continue
# Produce a stacked spectrum.
stacked_ivar = np.sum(ivar, axis=0)
stacked_flux = np.sum(flux * ivar, axis=0)/stacked_ivar
assert np.any(np.isfinite(stacked_flux))
assert np.all(np.isfinite(stacked_ivar))
stacked_spectrum_path = os.path.join(
stacked_spectrum_dir, "{}.pkl".format(group["RAVEID"][0].strip()))
with open(stacked_spectrum_path, "wb") as fp:
pickle.dump((stacked_flux, stacked_ivar), fp, -1)
print("Created {}".format(stacked_spectrum_path))
|
<commit_before><commit_msg>Add script to co-add repeat visits of stars<commit_after>
|
"""
Stack RAVE spectra from repeat visits.
"""
import cPickle as pickle
import os
import numpy as np
from astropy.table import Table
parent_spectrum_dir = "/data/gaia-eso/arc/rave/pre-normalized-spectra-with-correct-errors"
stacked_spectrum_dir = os.path.join(parent_spectrum_dir, "stacked-spectra")
if not os.path.exists(stacked_spectrum_dir):
os.mkdir(stacked_spectrum_dir)
dr5 = Table.read("/data/gaia-eso/arc/rave-data-files/rave-dr5-positions.fits")
dr5 = dr5.filled()
def get_spectrum_path(rave_obs_id):
date, field, fibre = rave_obs_id.split("_")
year = date[:4]
return os.path.join(parent_spectrum_dir, year, date,
"{0}.rvsun.{1}.pkl".format(field, fibre.strip()))
for group in dr5.group_by("GroupID").groups:
if group["GroupID"][0] < 0 or group["GroupSize"][0] < 2: continue
group_id = group["GroupID"][0]
flux = []
ivar = []
subset = np.ones(len(group), dtype=bool)
for i, visit in enumerate(group):
spectrum_path = get_spectrum_path(visit["RAVE_OBS_ID"])
if not os.path.exists(spectrum_path):
print("Could not find {} in group {}".format(spectrum_path, group_id))
subset[i] = False
continue
with open(spectrum_path, "rb") as fp:
visit_flux, visit_ivar = pickle.load(fp)
flux.append(visit_flux)
ivar.append(visit_ivar)
flux = np.array(flux)
ivar = np.array(ivar)
if flux.shape[0] < 2:
print("Skipping group {} because only not enough spectra found".format(
group_id))
continue
# Produce a stacked spectrum.
stacked_ivar = np.sum(ivar, axis=0)
stacked_flux = np.sum(flux * ivar, axis=0)/stacked_ivar
assert np.any(np.isfinite(stacked_flux))
assert np.all(np.isfinite(stacked_ivar))
stacked_spectrum_path = os.path.join(
stacked_spectrum_dir, "{}.pkl".format(group["RAVEID"][0].strip()))
with open(stacked_spectrum_path, "wb") as fp:
pickle.dump((stacked_flux, stacked_ivar), fp, -1)
print("Created {}".format(stacked_spectrum_path))
|
Add script to co-add repeat visits of stars
"""
Stack RAVE spectra from repeat visits.
"""
import cPickle as pickle
import os
import numpy as np
from astropy.table import Table
parent_spectrum_dir = "/data/gaia-eso/arc/rave/pre-normalized-spectra-with-correct-errors"
stacked_spectrum_dir = os.path.join(parent_spectrum_dir, "stacked-spectra")
if not os.path.exists(stacked_spectrum_dir):
os.mkdir(stacked_spectrum_dir)
dr5 = Table.read("/data/gaia-eso/arc/rave-data-files/rave-dr5-positions.fits")
dr5 = dr5.filled()
def get_spectrum_path(rave_obs_id):
date, field, fibre = rave_obs_id.split("_")
year = date[:4]
return os.path.join(parent_spectrum_dir, year, date,
"{0}.rvsun.{1}.pkl".format(field, fibre.strip()))
for group in dr5.group_by("GroupID").groups:
if group["GroupID"][0] < 0 or group["GroupSize"][0] < 2: continue
group_id = group["GroupID"][0]
flux = []
ivar = []
subset = np.ones(len(group), dtype=bool)
for i, visit in enumerate(group):
spectrum_path = get_spectrum_path(visit["RAVE_OBS_ID"])
if not os.path.exists(spectrum_path):
print("Could not find {} in group {}".format(spectrum_path, group_id))
subset[i] = False
continue
with open(spectrum_path, "rb") as fp:
visit_flux, visit_ivar = pickle.load(fp)
flux.append(visit_flux)
ivar.append(visit_ivar)
flux = np.array(flux)
ivar = np.array(ivar)
if flux.shape[0] < 2:
print("Skipping group {} because only not enough spectra found".format(
group_id))
continue
# Produce a stacked spectrum.
stacked_ivar = np.sum(ivar, axis=0)
stacked_flux = np.sum(flux * ivar, axis=0)/stacked_ivar
assert np.any(np.isfinite(stacked_flux))
assert np.all(np.isfinite(stacked_ivar))
stacked_spectrum_path = os.path.join(
stacked_spectrum_dir, "{}.pkl".format(group["RAVEID"][0].strip()))
with open(stacked_spectrum_path, "wb") as fp:
pickle.dump((stacked_flux, stacked_ivar), fp, -1)
print("Created {}".format(stacked_spectrum_path))
|
<commit_before><commit_msg>Add script to co-add repeat visits of stars<commit_after>
"""
Stack RAVE spectra from repeat visits.
"""
import cPickle as pickle
import os
import numpy as np
from astropy.table import Table
parent_spectrum_dir = "/data/gaia-eso/arc/rave/pre-normalized-spectra-with-correct-errors"
stacked_spectrum_dir = os.path.join(parent_spectrum_dir, "stacked-spectra")
if not os.path.exists(stacked_spectrum_dir):
os.mkdir(stacked_spectrum_dir)
dr5 = Table.read("/data/gaia-eso/arc/rave-data-files/rave-dr5-positions.fits")
dr5 = dr5.filled()
def get_spectrum_path(rave_obs_id):
date, field, fibre = rave_obs_id.split("_")
year = date[:4]
return os.path.join(parent_spectrum_dir, year, date,
"{0}.rvsun.{1}.pkl".format(field, fibre.strip()))
for group in dr5.group_by("GroupID").groups:
if group["GroupID"][0] < 0 or group["GroupSize"][0] < 2: continue
group_id = group["GroupID"][0]
flux = []
ivar = []
subset = np.ones(len(group), dtype=bool)
for i, visit in enumerate(group):
spectrum_path = get_spectrum_path(visit["RAVE_OBS_ID"])
if not os.path.exists(spectrum_path):
print("Could not find {} in group {}".format(spectrum_path, group_id))
subset[i] = False
continue
with open(spectrum_path, "rb") as fp:
visit_flux, visit_ivar = pickle.load(fp)
flux.append(visit_flux)
ivar.append(visit_ivar)
flux = np.array(flux)
ivar = np.array(ivar)
if flux.shape[0] < 2:
print("Skipping group {} because only not enough spectra found".format(
group_id))
continue
# Produce a stacked spectrum.
stacked_ivar = np.sum(ivar, axis=0)
stacked_flux = np.sum(flux * ivar, axis=0)/stacked_ivar
assert np.any(np.isfinite(stacked_flux))
assert np.all(np.isfinite(stacked_ivar))
stacked_spectrum_path = os.path.join(
stacked_spectrum_dir, "{}.pkl".format(group["RAVEID"][0].strip()))
with open(stacked_spectrum_path, "wb") as fp:
pickle.dump((stacked_flux, stacked_ivar), fp, -1)
print("Created {}".format(stacked_spectrum_path))
|
|
146119213c6d96301b475f49cedde830e0f8e84f
|
kubernetes/client/apis/__init__.py
|
kubernetes/client/apis/__init__.py
|
from __future__ import absolute_import
import warnings
# flake8: noqa
# alias kubernetes.client.api package and print deprecation warning
from kubernetes.client.api import *
warnings.filterwarnings('default', module='kubernetes.client.apis')
warnings.warn(
"The package kubernetes.client.apis is renamed and deprecated, use kubernetes.client.api instead (please note that the trailing s was removed).",
DeprecationWarning
)
|
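Downstream effect, sketched (assumes a client version that ships this shim is installed): the old package name keeps importing, but now emits the deprecation warning defined above. DeprecationWarnings are hidden by default, so run with `python -W default` (or configure the warnings module) to see it.

```
import kubernetes.client.apis  # noqa: F401  -- old name, now an alias for kubernetes.client.api
```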
Add kubernetes.client.apis as an alias to kubernetes.client.api
|
Add kubernetes.client.apis as an alias to kubernetes.client.api
Reference: https://github.com/kubernetes-client/python/issues/974
Signed-off-by: Nabarun Pal <46a782cbd1e9f752958998187886c2b51fda054c@gmail.com>
|
Python
|
apache-2.0
|
kubernetes-client/python,kubernetes-client/python
|
Add kubernetes.client.apis as an alias to kubernetes.client.api
Reference: https://github.com/kubernetes-client/python/issues/974
Signed-off-by: Nabarun Pal <46a782cbd1e9f752958998187886c2b51fda054c@gmail.com>
|
from __future__ import absolute_import
import warnings
# flake8: noqa
# alias kubernetes.client.api package and print deprecation warning
from kubernetes.client.api import *
warnings.filterwarnings('default', module='kubernetes.client.apis')
warnings.warn(
"The package kubernetes.client.apis is renamed and deprecated, use kubernetes.client.api instead (please note that the trailing s was removed).",
DeprecationWarning
)
|
<commit_before><commit_msg>Add kubernetes.client.apis as an alias to kubernetes.client.api
Reference: https://github.com/kubernetes-client/python/issues/974
Signed-off-by: Nabarun Pal <46a782cbd1e9f752958998187886c2b51fda054c@gmail.com><commit_after>
|
from __future__ import absolute_import
import warnings
# flake8: noqa
# alias kubernetes.client.api package and print deprecation warning
from kubernetes.client.api import *
warnings.filterwarnings('default', module='kubernetes.client.apis')
warnings.warn(
"The package kubernetes.client.apis is renamed and deprecated, use kubernetes.client.api instead (please note that the trailing s was removed).",
DeprecationWarning
)
|
Add kubernetes.client.apis as an alias to kubernetes.client.api
Reference: https://github.com/kubernetes-client/python/issues/974
Signed-off-by: Nabarun Pal <46a782cbd1e9f752958998187886c2b51fda054c@gmail.com>from __future__ import absolute_import
import warnings
# flake8: noqa
# alias kubernetes.client.api package and print deprecation warning
from kubernetes.client.api import *
warnings.filterwarnings('default', module='kubernetes.client.apis')
warnings.warn(
"The package kubernetes.client.apis is renamed and deprecated, use kubernetes.client.api instead (please note that the trailing s was removed).",
DeprecationWarning
)
|
<commit_before><commit_msg>Add kubernetes.client.apis as an alias to kubernetes.client.api
Reference: https://github.com/kubernetes-client/python/issues/974
Signed-off-by: Nabarun Pal <46a782cbd1e9f752958998187886c2b51fda054c@gmail.com><commit_after>from __future__ import absolute_import
import warnings
# flake8: noqa
# alias kubernetes.client.api package and print deprecation warning
from kubernetes.client.api import *
warnings.filterwarnings('default', module='kubernetes.client.apis')
warnings.warn(
"The package kubernetes.client.apis is renamed and deprecated, use kubernetes.client.api instead (please note that the trailing s was removed).",
DeprecationWarning
)
|
|
8d1a849026447e8bcf49d7e74dbd280f5b2a3e9e
|
string_fundamental/Anagram.py
|
string_fundamental/Anagram.py
|
"""
An anagram of a string is another string that contains the same characters; only the order of the characters can be different.
For example "qwerty" and "werqty" are anagrams of each other.
The methods below use two lists to store the count of every character in the respective strings, and the two counts are then
compared to determine whether the provided strings are anagrams or not.
"""
# Function to count occurrences of each character in a string
def count_chars(string_to_be_evaluated):
count=[0]*26
string_to_be_evaluated=string_to_be_evaluated.lower()
for char in string_to_be_evaluated:
value=97-ord(char)
count[value]=count[value]+1
return count
#Function to check anagram of a string
def check_anagram(string_one,string_two):
count1=count_chars(string_one)
count2=count_chars(string_two)
if count1==count2:
return True
return False
if __name__ == '__main__':
string_one=input("Enter first string:")
string_two=input("Enter second string:")
is_anagram=check_anagram(string_one,string_two)
if is_anagram:
        print("Provided strings are anagrams.")
    else:
        print("Provided strings are not anagrams.")
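An equivalent cross-check using collections.Counter (a sketch for comparison, not part of the original submission):
from collections import Counter
def check_anagram_counter(first, second):
    # Counter builds the same per-character tallies as the count lists above.
    return Counter(first.lower()) == Counter(second.lower())
assert check_anagram_counter("qwerty", "werqty")
assert not check_anagram_counter("listen", "silences")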
|
Add program for anagram in string_fundamental.
|
Add program for anagram in string_fundamental.
|
Python
|
cc0-1.0
|
ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms
|
Add program for anagram in string_fundamental.
|
"""
An anagram of a string is another string that contains the same characters; only the order of the characters can be different.
For example "qwerty" and "werqty" are anagrams of each other.
The methods below use two lists to store the count of every character in the respective strings, and the two counts are then
compared to determine whether the provided strings are anagrams or not.
"""
# Function to count occurrences of each character in a string
def count_chars(string_to_be_evaluated):
count=[0]*26
string_to_be_evaluated=string_to_be_evaluated.lower()
for char in string_to_be_evaluated:
value=97-ord(char)
count[value]=count[value]+1
return count
#Function to check anagram of a string
def check_anagram(string_one,string_two):
count1=count_chars(string_one)
count2=count_chars(string_two)
if count1==count2:
return True
return False
if __name__ == '__main__':
string_one=input("Enter first string:")
string_two=input("Enter second string:")
is_anagram=check_anagram(string_one,string_two)
if is_anagram:
        print("Provided strings are anagrams.")
    else:
        print("Provided strings are not anagrams.")
|
<commit_before><commit_msg>Add program for anagram in string_fundamental.<commit_after>
|
"""
An anagram of a string is another string that contains the same characters; only the order of the characters can be different.
For example "qwerty" and "werqty" are anagrams of each other.
The methods below use two lists to store the count of every character in the respective strings, and the two counts are then
compared to determine whether the provided strings are anagrams or not.
"""
# Function to count occurrences of each character in a string
def count_chars(string_to_be_evaluated):
count=[0]*26
string_to_be_evaluated=string_to_be_evaluated.lower()
for char in string_to_be_evaluated:
value=97-ord(char)
count[value]=count[value]+1
return count
#Function to check anagram of a string
def check_anagram(string_one,string_two):
count1=count_chars(string_one)
count2=count_chars(string_two)
if count1==count2:
return True
return False
if __name__ == '__main__':
string_one=input("Enter first string:")
string_two=input("Enter second string:")
is_anagram=check_anagram(string_one,string_two)
if is_anagram:
        print("Provided strings are anagrams.")
    else:
        print("Provided strings are not anagrams.")
|
Add program for anagram in string_fundamental."""
An anagram of a string is another string that contains the same characters; only the order of the characters can be different.
For example "qwerty" and "werqty" are anagrams of each other.
The methods below use two lists to store the count of every character in the respective strings, and the two counts are then
compared to determine whether the provided strings are anagrams or not.
"""
# Function to count occurrences of each character in a string
def count_chars(string_to_be_evaluated):
count=[0]*26
string_to_be_evaluated=string_to_be_evaluated.lower()
for char in string_to_be_evaluated:
value=97-ord(char)
count[value]=count[value]+1
return count
#Function to check anagram of a string
def check_anagram(string_one,string_two):
count1=count_chars(string_one)
count2=count_chars(string_two)
if count1==count2:
return True
return False
if __name__ == '__main__':
string_one=input("Enter first string:")
string_two=input("Enter second string:")
is_anagram=check_anagram(string_one,string_two)
if is_anagram:
        print("Provided strings are anagrams.")
    else:
        print("Provided strings are not anagrams.")
|
<commit_before><commit_msg>Add program for anagram in string_fundamental.<commit_after>"""
An anagram of a string is another string that contains the same characters; only the order of the characters can be different.
For example "qwerty" and "werqty" are anagrams of each other.
The methods below use two lists to store the count of every character in the respective strings, and the two counts are then
compared to determine whether the provided strings are anagrams or not.
"""
# Function to count occurrences of each character in a string
def count_chars(string_to_be_evaluated):
count=[0]*26
string_to_be_evaluated=string_to_be_evaluated.lower()
for char in string_to_be_evaluated:
value=97-ord(char)
count[value]=count[value]+1
return count
#Function to check anagram of a string
def check_anagram(string_one,string_two):
count1=count_chars(string_one)
count2=count_chars(string_two)
if count1==count2:
return True
return False
if __name__ == '__main__':
string_one=input("Enter first string:")
string_two=input("Enter second string:")
is_anagram=check_anagram(string_one,string_two)
if is_anagram:
        print("Provided strings are anagrams.")
    else:
        print("Provided strings are not anagrams.")
|
|
059f084da18bf5e27aa0369bc4658dbccc6789a4
|
python/equality_in_a_array.py
|
python/equality_in_a_array.py
|
from collections import Counter
size = int(input().strip())
counts = Counter((map(int, input().strip().split(' '))))
element, count = counts.most_common(1)[0]
print (size - count)
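The same idea as a self-contained sketch with made-up input (no stdin): the minimum number of deletions is the array length minus the count of the most common element.
from collections import Counter
arr = [3, 3, 2, 1, 3]
_, top_count = Counter(arr).most_common(1)[0]
print(len(arr) - top_count)  # 2 deletions leave [3, 3, 3]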
|
Solve equality in a array
|
Solve equality in a array
|
Python
|
mit
|
rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank
|
Solve equality in a array
|
from collections import Counter
size = int(input().strip())
counts = Counter((map(int, input().strip().split(' '))))
element, count = counts.most_common(1)[0]
print (size - count)
|
<commit_before><commit_msg>Solve equality in a array<commit_after>
|
from collections import Counter
size = int(input().strip())
counts = Counter((map(int, input().strip().split(' '))))
element, count = counts.most_common(1)[0]
print (size - count)
|
Solve equality in a arrayfrom collections import Counter
size = int(input().strip())
counts = Counter((map(int, input().strip().split(' '))))
element, count = counts.most_common(1)[0]
print (size - count)
|
<commit_before><commit_msg>Solve equality in a array<commit_after>from collections import Counter
size = int(input().strip())
counts = Counter((map(int, input().strip().split(' '))))
element, count = counts.most_common(1)[0]
print (size - count)
|
|
13332854f1d8a863a53504be6edac57f267b0569
|
tests/cpp/test_constraints.py
|
tests/cpp/test_constraints.py
|
import pytest
import toppra.cpp as tac
def test_linear_vel():
c = tac.LinearJointVelocity([-1, -1], [1, 1])
c.discretizationType = tac.DiscretizationType.Interpolation
assert c.hasUbounds()
assert c.hasXbounds()
assert not c.hasLinearInequalities()
def test_linear_accel():
c = tac.LinearJointAcceleration([-1, -1], [1, 1])
c.discretizationType = tac.DiscretizationType.Interpolation
assert not c.hasUbounds()
assert not c.hasXbounds()
assert c.hasLinearInequalities()
|
Add test for constraint bindings
|
Add test for constraint bindings
|
Python
|
mit
|
hungpham2511/toppra,hungpham2511/toppra,hungpham2511/toppra
|
Add test for constraint bindings
|
import pytest
import toppra.cpp as tac
def test_linear_vel():
c = tac.LinearJointVelocity([-1, -1], [1, 1])
c.discretizationType = tac.DiscretizationType.Interpolation
assert c.hasUbounds()
assert c.hasXbounds()
assert not c.hasLinearInequalities()
def test_linear_accel():
c = tac.LinearJointAcceleration([-1, -1], [1, 1])
c.discretizationType = tac.DiscretizationType.Interpolation
assert not c.hasUbounds()
assert not c.hasXbounds()
assert c.hasLinearInequalities()
|
<commit_before><commit_msg>Add test for constraint bindings<commit_after>
|
import pytest
import toppra.cpp as tac
def test_linear_vel():
c = tac.LinearJointVelocity([-1, -1], [1, 1])
c.discretizationType = tac.DiscretizationType.Interpolation
assert c.hasUbounds()
assert c.hasXbounds()
assert not c.hasLinearInequalities()
def test_linear_accel():
c = tac.LinearJointAcceleration([-1, -1], [1, 1])
c.discretizationType = tac.DiscretizationType.Interpolation
assert not c.hasUbounds()
assert not c.hasXbounds()
assert c.hasLinearInequalities()
|
Add test for constraint bindingsimport pytest
import toppra.cpp as tac
def test_linear_vel():
c = tac.LinearJointVelocity([-1, -1], [1, 1])
c.discretizationType = tac.DiscretizationType.Interpolation
assert c.hasUbounds()
assert c.hasXbounds()
assert not c.hasLinearInequalities()
def test_linear_accel():
c = tac.LinearJointAcceleration([-1, -1], [1, 1])
c.discretizationType = tac.DiscretizationType.Interpolation
assert not c.hasUbounds()
assert not c.hasXbounds()
assert c.hasLinearInequalities()
|
<commit_before><commit_msg>Add test for constraint bindings<commit_after>import pytest
import toppra.cpp as tac
def test_linear_vel():
c = tac.LinearJointVelocity([-1, -1], [1, 1])
c.discretizationType = tac.DiscretizationType.Interpolation
assert c.hasUbounds()
assert c.hasXbounds()
assert not c.hasLinearInequalities()
def test_linear_accel():
c = tac.LinearJointAcceleration([-1, -1], [1, 1])
c.discretizationType = tac.DiscretizationType.Interpolation
assert not c.hasUbounds()
assert not c.hasXbounds()
assert c.hasLinearInequalities()
|
|
a4efdbb5c0817080c3ad381110865ce08081761e
|
nanshe_workflow/_reg_joblib.py
|
nanshe_workflow/_reg_joblib.py
|
import dask
import dask.distributed
import distributed
try:
import dask.distributed.joblib
except ImportError:
pass
try:
import distributed.joblib
except ImportError:
pass
import sklearn
import sklearn.externals
import sklearn.externals.joblib
|
Add backwards compatible Distributed Joblib hook
|
Add backwards compatible Distributed Joblib hook
Depending on the versions of Distributed, Joblib, and scikit-learn,
there are different strategies for registering the Joblib backend. Try
going with the standard Distributed technique first, which may fail for
Distributed 1.24.0+. In other cases, import `joblib` and
`sklearn.externals.joblib`, which should handle this for Dask when
Joblib 0.12.2+ and scikit-learn 0.20.0+ are present. This should ensure
that all three are aware of each other and can work correctly.
This dummy module is only useful internally. So mark it as such. It can
be dropped at any time without explanation once convenient.
Using this design, we can comfortably use this in-place of the other
`import`s we were doing to achieve this effect.
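As a rough illustration of what the hook enables (a sketch only, assuming dask.distributed and a sufficiently recent joblib are installed; the exact registration path depends on the versions discussed above):
import joblib
from dask.distributed import Client
client = Client(processes=False)       # small in-process cluster for the sketch
with joblib.parallel_backend("dask"):  # route joblib work through Dask workers
    squares = joblib.Parallel(n_jobs=-1)(
        joblib.delayed(pow)(i, 2) for i in range(8)
    )
print(squares)                         # [0, 1, 4, 9, 16, 25, 36, 49]
client.close()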
|
Python
|
apache-2.0
|
DudLab/nanshe_workflow,nanshe-org/nanshe_workflow
|
Add backwards compatible Distributed Joblib hook
Depending on the versions of Distributed, Joblib, and scikit-learn,
there are different strategies for registering the Joblib backend. Try
going with the standard Distributed technique first, which may fail for
Distributed 1.24.0+. In other cases, import `joblib` and
`sklearn.externals.joblib`, which should handle this for Dask when
Joblib 0.12.2+ and scikit-learn 0.20.0+ are present. This should ensure
that all three are aware of each other and can work correctly.
This dummy module is only useful internally. So mark it as such. It can
be dropped at any time without explanation once convenient.
Using this design, we can comfortably use this in-place of the other
`import`s we were doing to achieve this effect.
|
import dask
import dask.distributed
import distributed
try:
import dask.distributed.joblib
except ImportError:
pass
try:
import distributed.joblib
except ImportError:
pass
import sklearn
import sklearn.externals
import sklearn.externals.joblib
|
<commit_before><commit_msg>Add backwards compatible Distributed Joblib hook
Depending on the versions of Distributed, Joblib, and scikit-learn,
there are different strategies for registering the Joblib backend. Try
going with the standard Distributed technique first, which may fail for
Distributed 1.24.0+. In other cases, import `joblib` and
`sklearn.externals.joblib`, which should handle this for Dask when
Joblib 0.12.2+ and scikit-learn 0.20.0+ are present. This should ensure
that all three are aware of each other and can work correctly.
This dummy module is only useful internally. So mark it as such. It can
be dropped at any time without explanation once convenient.
Using this design, we can comfortably use this in-place of the other
`import`s we were doing to achieve this effect.<commit_after>
|
import dask
import dask.distributed
import distributed
try:
import dask.distributed.joblib
except ImportError:
pass
try:
import distributed.joblib
except ImportError:
pass
import sklearn
import sklearn.externals
import sklearn.externals.joblib
|
Add backwards compatible Distributed Joblib hook
Depending on the versions of Distributed, Joblib, and scikit-learn,
there are different strategies for registering the Joblib backend. Try
going with the standard Distributed technique first, which may fail for
Distributed 1.24.0+. In other cases, import `joblib` and
`sklearn.externals.joblib`, which should handle this for Dask when
Joblib 0.12.2+ and scikit-learn 0.20.0+ are present. This should ensure
that all three are aware of each other and can work correctly.
This dummy module is only useful internally. So mark it as such. It can
be dropped at any time without explanation once convenient.
Using this design, we can comfortably use this in-place of the other
`import`s we were doing to achieve this effect.import dask
import dask.distributed
import distributed
try:
import dask.distributed.joblib
except ImportError:
pass
try:
import distributed.joblib
except ImportError:
pass
import sklearn
import sklearn.externals
import sklearn.externals.joblib
|
<commit_before><commit_msg>Add backwards compatible Distributed Joblib hook
Depending on the versions of Distributed, Joblib, and scikit-learn,
there are different strategies for registering the Joblib backend. Try
going with the standard Distributed technique first, which may fail for
Distributed 1.24.0+. In other cases, import `joblib` and
`sklearn.externals.joblib`, which should handle this for Dask when
Joblib 0.12.2+ and scikit-learn 0.20.0+ are present. This should ensure
that all three are aware of each other and can work correctly.
This dummy module is only useful internally. So mark it as such. It can
be dropped at any time without explanation once convenient.
Using this design, we can comfortably use this in-place of the other
`import`s we were doing to achieve this effect.<commit_after>import dask
import dask.distributed
import distributed
try:
import dask.distributed.joblib
except ImportError:
pass
try:
import distributed.joblib
except ImportError:
pass
import sklearn
import sklearn.externals
import sklearn.externals.joblib
|
|
05680d0d5a88adaae83d144558eeee5af1ce0c02
|
python/paddle/v2/framework/tests/test_seq_pool.py
|
python/paddle/v2/framework/tests/test_seq_pool.py
|
import unittest
import numpy as np
from op_test import OpTest
class TestSeqAvgPool1D(OpTest):
def setUp(self):
self.op_type = 'sequence_avg_pool'
# one level, batch size is 4
x = np.random.uniform(0.1, 1, [11, 23]).astype('float32')
lod = [[0, 4, 5, 8, 11]]
out = np.zeros((4, 23)).astype('float32')
for i in range(4):
sub_x = x[lod[0][i]:lod[0][i + 1], :]
out[i] = sub_x.mean(axis=0)
self.inputs = {'X': (x, lod)}
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Out")
class TestSeqAvgPool2D(OpTest):
def setUp(self):
self.op_type = 'sequence_avg_pool'
# one level, batch size is 4
x = np.random.uniform(0.1, 1, [13, 3, 17]).astype('float32')
lod = [[0, 4, 5, 8, 13]]
out = np.zeros((4, 3, 17)).astype('float32')
for i in range(4):
sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17))
out[i] = np.reshape(sub_x.mean(axis=0), (3, 17))
self.inputs = {'X': (x, lod)}
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Out")
if __name__ == '__main__':
unittest.main()
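For readers unfamiliar with LoD tensors, a plain NumPy sketch (independent of Paddle) of what the expected output encodes: the offsets [0, 4, 5, 8, 11] slice the batch into four variable-length sequences and each slice is averaged along axis 0.
import numpy as np
x = np.arange(22, dtype="float32").reshape(11, 2)
lod = [0, 4, 5, 8, 11]
pooled = np.stack([x[s:e].mean(axis=0) for s, e in zip(lod[:-1], lod[1:])])
print(pooled.shape)  # (4, 2): one averaged row per sequence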
|
Add unit testing for sequence average pool operator.
|
Add unit testing for sequence average pool operator.
|
Python
|
apache-2.0
|
luotao1/Paddle,hedaoyuan/Paddle,lcy-seso/Paddle,baidu/Paddle,PaddlePaddle/Paddle,reyoung/Paddle,reyoung/Paddle,pengli09/Paddle,hedaoyuan/Paddle,PaddlePaddle/Paddle,luotao1/Paddle,lcy-seso/Paddle,jacquesqiao/Paddle,hedaoyuan/Paddle,chengduoZH/Paddle,pengli09/Paddle,chengduoZH/Paddle,putcn/Paddle,tensor-tang/Paddle,pkuyym/Paddle,pengli09/Paddle,jacquesqiao/Paddle,QiJune/Paddle,lcy-seso/Paddle,PaddlePaddle/Paddle,reyoung/Paddle,pkuyym/Paddle,hedaoyuan/Paddle,tensor-tang/Paddle,QiJune/Paddle,lcy-seso/Paddle,QiJune/Paddle,lcy-seso/Paddle,jacquesqiao/Paddle,QiJune/Paddle,pkuyym/Paddle,hedaoyuan/Paddle,tensor-tang/Paddle,chengduoZH/Paddle,baidu/Paddle,lcy-seso/Paddle,jacquesqiao/Paddle,baidu/Paddle,pkuyym/Paddle,PaddlePaddle/Paddle,pengli09/Paddle,pkuyym/Paddle,Canpio/Paddle,baidu/Paddle,PaddlePaddle/Paddle,PaddlePaddle/Paddle,Canpio/Paddle,luotao1/Paddle,PaddlePaddle/Paddle,chengduoZH/Paddle,Canpio/Paddle,hedaoyuan/Paddle,pengli09/Paddle,baidu/Paddle,pengli09/Paddle,putcn/Paddle,chengduoZH/Paddle,Canpio/Paddle,Canpio/Paddle,tensor-tang/Paddle,jacquesqiao/Paddle,luotao1/Paddle,jacquesqiao/Paddle,putcn/Paddle,luotao1/Paddle,tensor-tang/Paddle,hedaoyuan/Paddle,Canpio/Paddle,reyoung/Paddle,QiJune/Paddle,reyoung/Paddle,pengli09/Paddle,Canpio/Paddle,Canpio/Paddle,hedaoyuan/Paddle,putcn/Paddle,putcn/Paddle,luotao1/Paddle,reyoung/Paddle,pengli09/Paddle,luotao1/Paddle,putcn/Paddle,pkuyym/Paddle,QiJune/Paddle
|
Add unit testing for sequence average pool operator.
|
import unittest
import numpy as np
from op_test import OpTest
class TestSeqAvgPool1D(OpTest):
def setUp(self):
self.op_type = 'sequence_avg_pool'
# one level, batch size is 4
x = np.random.uniform(0.1, 1, [11, 23]).astype('float32')
lod = [[0, 4, 5, 8, 11]]
out = np.zeros((4, 23)).astype('float32')
for i in range(4):
sub_x = x[lod[0][i]:lod[0][i + 1], :]
out[i] = sub_x.mean(axis=0)
self.inputs = {'X': (x, lod)}
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Out")
class TestSeqAvgPool2D(OpTest):
def setUp(self):
self.op_type = 'sequence_avg_pool'
# one level, batch size is 4
x = np.random.uniform(0.1, 1, [13, 3, 17]).astype('float32')
lod = [[0, 4, 5, 8, 13]]
out = np.zeros((4, 3, 17)).astype('float32')
for i in range(4):
sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17))
out[i] = np.reshape(sub_x.mean(axis=0), (3, 17))
self.inputs = {'X': (x, lod)}
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Out")
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit testing for sequence average pool operator.<commit_after>
|
import unittest
import numpy as np
from op_test import OpTest
class TestSeqAvgPool1D(OpTest):
def setUp(self):
self.op_type = 'sequence_avg_pool'
# one level, batch size is 4
x = np.random.uniform(0.1, 1, [11, 23]).astype('float32')
lod = [[0, 4, 5, 8, 11]]
out = np.zeros((4, 23)).astype('float32')
for i in range(4):
sub_x = x[lod[0][i]:lod[0][i + 1], :]
out[i] = sub_x.mean(axis=0)
self.inputs = {'X': (x, lod)}
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Out")
class TestSeqAvgPool2D(OpTest):
def setUp(self):
self.op_type = 'sequence_avg_pool'
# one level, batch size is 4
x = np.random.uniform(0.1, 1, [13, 3, 17]).astype('float32')
lod = [[0, 4, 5, 8, 13]]
out = np.zeros((4, 3, 17)).astype('float32')
for i in range(4):
sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17))
out[i] = np.reshape(sub_x.mean(axis=0), (3, 17))
self.inputs = {'X': (x, lod)}
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Out")
if __name__ == '__main__':
unittest.main()
|
Add unit testing for sequence average pool operator.import unittest
import numpy as np
from op_test import OpTest
class TestSeqAvgPool1D(OpTest):
def setUp(self):
self.op_type = 'sequence_avg_pool'
# one level, batch size is 4
x = np.random.uniform(0.1, 1, [11, 23]).astype('float32')
lod = [[0, 4, 5, 8, 11]]
out = np.zeros((4, 23)).astype('float32')
for i in range(4):
sub_x = x[lod[0][i]:lod[0][i + 1], :]
out[i] = sub_x.mean(axis=0)
self.inputs = {'X': (x, lod)}
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Out")
class TestSeqAvgPool2D(OpTest):
def setUp(self):
self.op_type = 'sequence_avg_pool'
# one level, batch size is 4
x = np.random.uniform(0.1, 1, [13, 3, 17]).astype('float32')
lod = [[0, 4, 5, 8, 13]]
out = np.zeros((4, 3, 17)).astype('float32')
for i in range(4):
sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17))
out[i] = np.reshape(sub_x.mean(axis=0), (3, 17))
self.inputs = {'X': (x, lod)}
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Out")
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit testing for sequence average pool operator.<commit_after>import unittest
import numpy as np
from op_test import OpTest
class TestSeqAvgPool1D(OpTest):
def setUp(self):
self.op_type = 'sequence_avg_pool'
# one level, batch size is 4
x = np.random.uniform(0.1, 1, [11, 23]).astype('float32')
lod = [[0, 4, 5, 8, 11]]
out = np.zeros((4, 23)).astype('float32')
for i in range(4):
sub_x = x[lod[0][i]:lod[0][i + 1], :]
out[i] = sub_x.mean(axis=0)
self.inputs = {'X': (x, lod)}
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Out")
class TestSeqAvgPool2D(OpTest):
def setUp(self):
self.op_type = 'sequence_avg_pool'
# one level, batch size is 4
x = np.random.uniform(0.1, 1, [13, 3, 17]).astype('float32')
lod = [[0, 4, 5, 8, 13]]
out = np.zeros((4, 3, 17)).astype('float32')
for i in range(4):
sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17))
out[i] = np.reshape(sub_x.mean(axis=0), (3, 17))
self.inputs = {'X': (x, lod)}
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Out")
if __name__ == '__main__':
unittest.main()
|
|
51760a4cf96074b9d2eb609451512b3fafff7aaa
|
make_test_data.py
|
make_test_data.py
|
import sqlite3
INSERT_SONG = '''
INSERT INTO jukebox_song_queue VALUES (?)
'''
TEST_URIS = [
'spotify:track:5lB3bZKPhng9s4hKB1sSIe',
'spotify:track:5MSfgtOBZkbxlcwsI9XNpf',
'spotify:track:1shuGbTnKx4AXjlx7IauM5'
]
if __name__ == '__main__':
conn = sqlite3.connect('jukebox.db')
cursor = conn.cursor()
for uri in TEST_URIS:
uri = (uri,)
cursor.execute(INSERT_SONG, uri)
conn.commit()
conn.close()
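A quick read-back sketch to verify the inserts (assumes jukebox.db and the jukebox_song_queue table already exist, for example created by the application's schema setup):
import sqlite3
conn = sqlite3.connect('jukebox.db')
print(conn.execute('SELECT * FROM jukebox_song_queue').fetchall())
conn.close()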
|
Add script to make some test data
|
Add script to make some test data
|
Python
|
mit
|
projectweekend/Pi-Jukebox,projectweekend/Pi-Jukebox,projectweekend/Pi-Jukebox
|
Add script to make some test data
|
import sqlite3
INSERT_SONG = '''
INSERT INTO jukebox_song_queue VALUES (?)
'''
TEST_URIS = [
'spotify:track:5lB3bZKPhng9s4hKB1sSIe',
'spotify:track:5MSfgtOBZkbxlcwsI9XNpf',
'spotify:track:1shuGbTnKx4AXjlx7IauM5'
]
if __name__ == '__main__':
conn = sqlite3.connect('jukebox.db')
cursor = conn.cursor()
for uri in TEST_URIS:
uri = (uri,)
cursor.execute(INSERT_SONG, uri)
conn.commit()
conn.close()
|
<commit_before><commit_msg>Add script to make some test data<commit_after>
|
import sqlite3
INSERT_SONG = '''
INSERT INTO jukebox_song_queue VALUES (?)
'''
TEST_URIS = [
'spotify:track:5lB3bZKPhng9s4hKB1sSIe',
'spotify:track:5MSfgtOBZkbxlcwsI9XNpf',
'spotify:track:1shuGbTnKx4AXjlx7IauM5'
]
if __name__ == '__main__':
conn = sqlite3.connect('jukebox.db')
cursor = conn.cursor()
for uri in TEST_URIS:
uri = (uri,)
cursor.execute(INSERT_SONG, uri)
conn.commit()
conn.close()
|
Add script to make some test dataimport sqlite3
INSERT_SONG = '''
INSERT INTO jukebox_song_queue VALUES (?)
'''
TEST_URIS = [
'spotify:track:5lB3bZKPhng9s4hKB1sSIe',
'spotify:track:5MSfgtOBZkbxlcwsI9XNpf',
'spotify:track:1shuGbTnKx4AXjlx7IauM5'
]
if __name__ == '__main__':
conn = sqlite3.connect('jukebox.db')
cursor = conn.cursor()
for uri in TEST_URIS:
uri = (uri,)
cursor.execute(INSERT_SONG, uri)
conn.commit()
conn.close()
|
<commit_before><commit_msg>Add script to make some test data<commit_after>import sqlite3
INSERT_SONG = '''
INSERT INTO jukebox_song_queue VALUES (?)
'''
TEST_URIS = [
'spotify:track:5lB3bZKPhng9s4hKB1sSIe',
'spotify:track:5MSfgtOBZkbxlcwsI9XNpf',
'spotify:track:1shuGbTnKx4AXjlx7IauM5'
]
if __name__ == '__main__':
conn = sqlite3.connect('jukebox.db')
cursor = conn.cursor()
for uri in TEST_URIS:
uri = (uri,)
cursor.execute(INSERT_SONG, uri)
conn.commit()
conn.close()
|
|
29d2f9f79917cebaf5e126b4ddac4f341ef2a2ae
|
etk/cli/ontodocgen.py
|
etk/cli/ontodocgen.py
|
from etk.ontology_api import Ontology
from etk.ontology_report_generator import OntologyReportGenerator
def add_arguments(parser):
parser.description = 'Generate HTML report for the input ontology files'
parser.add_argument('files', nargs='+', help='Input turtle files.')
parser.add_argument('--no-validation', action='store_false', dest='validation', default=True,
help='Don\'t perform domain and range validation.')
parser.add_argument('-o', '--output', dest='out', default='ontology-doc.html',
help='Location of generated HTML report.')
parser.add_argument('-i', '--include-undefined-classes', action='store_true',
dest='include_class', default=False, help='Include those undefined classes '
                        'that are referenced by others.')
    parser.add_argument('-t', '--include-turtle', action='store_true', dest='include_turtle',
                        default=False, help='Include turtle related to this entity. NOTE: this may '
                        'take longer.')
parser.add_argument('-q', '--quiet', action='store_true', dest='quiet', default=False,
help='Suppress warning.')
parser.add_argument('--exclude-warning', action='store_true', dest='exclude_warning',
default=False, help='Exclude warning messages in HTML report')
return parser
def run(args):
contents = [open(f).read() for f in args.files]
ontology = Ontology(contents, validation=args.validation, include_undefined_class=args.include_class,
quiet=args.quiet)
doc_content = OntologyReportGenerator(ontology).generate_html_report(include_turtle=args.include_turtle,
exclude_warning=args.exclude_warning)
with open(args.out, "w") as f:
f.write(doc_content)
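A hypothetical standalone wiring of this subcommand with argparse (the file names are placeholders; the real etk CLI performs the equivalent dispatch):
import argparse
parser = add_arguments(argparse.ArgumentParser())
args = parser.parse_args(['ontology.ttl', '-o', 'report.html', '-t', '-q'])
print(args.files, args.out, args.include_turtle, args.quiet)
# run(args)  # uncomment once real turtle files are available; writes report.html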
|
Add a cli for ontology report generator
|
Add a cli for ontology report generator
|
Python
|
mit
|
usc-isi-i2/etk,usc-isi-i2/etk,usc-isi-i2/etk
|
Add a cli for ontology report generator
|
from etk.ontology_api import Ontology
from etk.ontology_report_generator import OntologyReportGenerator
def add_arguments(parser):
parser.description = 'Generate HTML report for the input ontology files'
parser.add_argument('files', nargs='+', help='Input turtle files.')
parser.add_argument('--no-validation', action='store_false', dest='validation', default=True,
help='Don\'t perform domain and range validation.')
parser.add_argument('-o', '--output', dest='out', default='ontology-doc.html',
help='Location of generated HTML report.')
parser.add_argument('-i', '--include-undefined-classes', action='store_true',
dest='include_class', default=False, help='Include those undefined classes '
                        'that are referenced by others.')
    parser.add_argument('-t', '--include-turtle', action='store_true', dest='include_turtle',
                        default=False, help='Include turtle related to this entity. NOTE: this may '
                        'take longer.')
parser.add_argument('-q', '--quiet', action='store_true', dest='quiet', default=False,
help='Suppress warning.')
parser.add_argument('--exclude-warning', action='store_true', dest='exclude_warning',
default=False, help='Exclude warning messages in HTML report')
return parser
def run(args):
contents = [open(f).read() for f in args.files]
ontology = Ontology(contents, validation=args.validation, include_undefined_class=args.include_class,
quiet=args.quiet)
doc_content = OntologyReportGenerator(ontology).generate_html_report(include_turtle=args.include_turtle,
exclude_warning=args.exclude_warning)
with open(args.out, "w") as f:
f.write(doc_content)
|
<commit_before><commit_msg>Add a cli for ontology report generator<commit_after>
|
from etk.ontology_api import Ontology
from etk.ontology_report_generator import OntologyReportGenerator
def add_arguments(parser):
parser.description = 'Generate HTML report for the input ontology files'
parser.add_argument('files', nargs='+', help='Input turtle files.')
parser.add_argument('--no-validation', action='store_false', dest='validation', default=True,
help='Don\'t perform domain and range validation.')
parser.add_argument('-o', '--output', dest='out', default='ontology-doc.html',
help='Location of generated HTML report.')
parser.add_argument('-i', '--include-undefined-classes', action='store_true',
dest='include_class', default=False, help='Include those undefined classes '
                        'that are referenced by others.')
    parser.add_argument('-t', '--include-turtle', action='store_true', dest='include_turtle',
                        default=False, help='Include turtle related to this entity. NOTE: this may '
                        'take longer.')
parser.add_argument('-q', '--quiet', action='store_true', dest='quiet', default=False,
help='Suppress warning.')
parser.add_argument('--exclude-warning', action='store_true', dest='exclude_warning',
default=False, help='Exclude warning messages in HTML report')
return parser
def run(args):
contents = [open(f).read() for f in args.files]
ontology = Ontology(contents, validation=args.validation, include_undefined_class=args.include_class,
quiet=args.quiet)
doc_content = OntologyReportGenerator(ontology).generate_html_report(include_turtle=args.include_turtle,
exclude_warning=args.exclude_warning)
with open(args.out, "w") as f:
f.write(doc_content)
|
Add a cli for ontology report generatorfrom etk.ontology_api import Ontology
from etk.ontology_report_generator import OntologyReportGenerator
def add_arguments(parser):
parser.description = 'Generate HTML report for the input ontology files'
parser.add_argument('files', nargs='+', help='Input turtle files.')
parser.add_argument('--no-validation', action='store_false', dest='validation', default=True,
help='Don\'t perform domain and range validation.')
parser.add_argument('-o', '--output', dest='out', default='ontology-doc.html',
help='Location of generated HTML report.')
parser.add_argument('-i', '--include-undefined-classes', action='store_true',
dest='include_class', default=False, help='Include those undefined classes '
                        'that are referenced by others.')
    parser.add_argument('-t', '--include-turtle', action='store_true', dest='include_turtle',
                        default=False, help='Include turtle related to this entity. NOTE: this may '
                        'take longer.')
parser.add_argument('-q', '--quiet', action='store_true', dest='quiet', default=False,
help='Suppress warning.')
parser.add_argument('--exclude-warning', action='store_true', dest='exclude_warning',
default=False, help='Exclude warning messages in HTML report')
return parser
def run(args):
contents = [open(f).read() for f in args.files]
ontology = Ontology(contents, validation=args.validation, include_undefined_class=args.include_class,
quiet=args.quiet)
doc_content = OntologyReportGenerator(ontology).generate_html_report(include_turtle=args.include_turtle,
exclude_warning=args.exclude_warning)
with open(args.out, "w") as f:
f.write(doc_content)
|
<commit_before><commit_msg>Add a cli for ontology report generator<commit_after>from etk.ontology_api import Ontology
from etk.ontology_report_generator import OntologyReportGenerator
def add_arguments(parser):
parser.description = 'Generate HTML report for the input ontology files'
parser.add_argument('files', nargs='+', help='Input turtle files.')
parser.add_argument('--no-validation', action='store_false', dest='validation', default=True,
help='Don\'t perform domain and range validation.')
parser.add_argument('-o', '--output', dest='out', default='ontology-doc.html',
help='Location of generated HTML report.')
parser.add_argument('-i', '--include-undefined-classes', action='store_true',
dest='include_class', default=False, help='Include those undefined classes '
                        'that are referenced by others.')
    parser.add_argument('-t', '--include-turtle', action='store_true', dest='include_turtle',
                        default=False, help='Include turtle related to this entity. NOTE: this may '
                        'take longer.')
parser.add_argument('-q', '--quiet', action='store_true', dest='quiet', default=False,
help='Suppress warning.')
parser.add_argument('--exclude-warning', action='store_true', dest='exclude_warning',
default=False, help='Exclude warning messages in HTML report')
return parser
def run(args):
contents = [open(f).read() for f in args.files]
ontology = Ontology(contents, validation=args.validation, include_undefined_class=args.include_class,
quiet=args.quiet)
doc_content = OntologyReportGenerator(ontology).generate_html_report(include_turtle=args.include_turtle,
exclude_warning=args.exclude_warning)
with open(args.out, "w") as f:
f.write(doc_content)
|
|
9ff26b3e351e9bb686d59529454b00b52afde0af
|
us_ignite/maps/utils.py
|
us_ignite/maps/utils.py
|
def get_location_dict(item, location_type):
return {
'type': location_type,
'latitude': item.position.latitude,
'longitude': item.position.longitude,
'name': item.name,
'website': item.get_absolute_url(),
'category': '',
'image': '',
'content': item.name,
}
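A hypothetical caller sketch showing the shape of the returned dict (FakePosition and FakeItem are stand-ins, not real us_ignite models; run alongside the function above):
from collections import namedtuple
FakePosition = namedtuple('FakePosition', ['latitude', 'longitude'])
class FakeItem(object):
    name = 'Example Hub'
    position = FakePosition(35.0, -97.0)
    def get_absolute_url(self):
        return '/hubs/example/'
print(get_location_dict(FakeItem(), 'hub'))
# {'type': 'hub', 'latitude': 35.0, 'longitude': -97.0, 'name': 'Example Hub', ...}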
|
Relocate dict generator for the locations.
|
Relocate dict generator for the locations.
|
Python
|
bsd-3-clause
|
us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite
|
Relocate dict generator for the locations.
|
def get_location_dict(item, location_type):
return {
'type': location_type,
'latitude': item.position.latitude,
'longitude': item.position.longitude,
'name': item.name,
'website': item.get_absolute_url(),
'category': '',
'image': '',
'content': item.name,
}
|
<commit_before><commit_msg>Relocate dict generator for the locations.<commit_after>
|
def get_location_dict(item, location_type):
return {
'type': location_type,
'latitude': item.position.latitude,
'longitude': item.position.longitude,
'name': item.name,
'website': item.get_absolute_url(),
'category': '',
'image': '',
'content': item.name,
}
|
Relocate dict generator for the locations.def get_location_dict(item, location_type):
return {
'type': location_type,
'latitude': item.position.latitude,
'longitude': item.position.longitude,
'name': item.name,
'website': item.get_absolute_url(),
'category': '',
'image': '',
'content': item.name,
}
|
<commit_before><commit_msg>Relocate dict generator for the locations.<commit_after>def get_location_dict(item, location_type):
return {
'type': location_type,
'latitude': item.position.latitude,
'longitude': item.position.longitude,
'name': item.name,
'website': item.get_absolute_url(),
'category': '',
'image': '',
'content': item.name,
}
|
|
ee1343d8e1bbe0ad8533125b28956b150a41147a
|
Lib/test/test_gettext.py
|
Lib/test/test_gettext.py
|
import os
import gettext
def get_qualified_path(name):
"""Return a more qualified path to name"""
import sys
import os
path = sys.path
try:
path = [os.path.dirname(__file__)] + path
except NameError:
pass
for dir in path:
fullname = os.path.join(dir, name)
if os.path.exists(fullname):
return fullname
return name
# Test basic interface
os.environ['LANGUAGE'] = 'xx'
mofile = get_qualified_path('xx')
localedir = os.path.dirname(mofile)
print 'installing gettext'
gettext.install()
print _('calling bindtextdomain with localedir %s') % localedir
print gettext.bindtextdomain('gettext', localedir)
print gettext.bindtextdomain()
print gettext.textdomain('gettext')
print gettext.textdomain()
# test some translations
print _(u'mullusk')
print _(r'Raymond Luxury Yach-t')
print _(ur'nudge nudge')
# double quotes
print _(u"mullusk")
print _(r"Raymond Luxury Yach-t")
print _(ur"nudge nudge")
# triple single quotes
print _(u'''mullusk''')
print _(r'''Raymond Luxury Yach-t''')
print _(ur'''nudge nudge''')
# triple double quotes
print _(u"""mullusk""")
print _(r"""Raymond Luxury Yach-t""")
print _(ur"""nudge nudge""")
# multiline strings
print _('''This module provides internationalization and localization
support for your Python programs by providing an interface to the GNU
gettext message catalog library.''')
print gettext.dgettext('gettext', 'nudge nudge')
# dcgettext
##import locale
##if gettext.dcgettext('gettext', 'nudge nudge',
## locale.LC_MESSAGES) <> 'wink wink':
## print _('dcgettext failed')
# test the alternative interface
fp = open(os.path.join(mofile, 'LC_MESSAGES', 'gettext.mo'), 'rb')
t = gettext.GNUTranslations(fp)
fp.close()
gettext.set(t)
print t == gettext.get()
print _('nudge nudge')
|
Test suite for new gettext.py module.
|
Test suite for new gettext.py module.
|
Python
|
mit
|
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
|
Test suite for new gettext.py module.
|
import os
import gettext
def get_qualified_path(name):
"""Return a more qualified path to name"""
import sys
import os
path = sys.path
try:
path = [os.path.dirname(__file__)] + path
except NameError:
pass
for dir in path:
fullname = os.path.join(dir, name)
if os.path.exists(fullname):
return fullname
return name
# Test basic interface
os.environ['LANGUAGE'] = 'xx'
mofile = get_qualified_path('xx')
localedir = os.path.dirname(mofile)
print 'installing gettext'
gettext.install()
print _('calling bindtextdomain with localedir %s') % localedir
print gettext.bindtextdomain('gettext', localedir)
print gettext.bindtextdomain()
print gettext.textdomain('gettext')
print gettext.textdomain()
# test some translations
print _(u'mullusk')
print _(r'Raymond Luxury Yach-t')
print _(ur'nudge nudge')
# double quotes
print _(u"mullusk")
print _(r"Raymond Luxury Yach-t")
print _(ur"nudge nudge")
# triple single quotes
print _(u'''mullusk''')
print _(r'''Raymond Luxury Yach-t''')
print _(ur'''nudge nudge''')
# triple double quotes
print _(u"""mullusk""")
print _(r"""Raymond Luxury Yach-t""")
print _(ur"""nudge nudge""")
# multiline strings
print _('''This module provides internationalization and localization
support for your Python programs by providing an interface to the GNU
gettext message catalog library.''')
print gettext.dgettext('gettext', 'nudge nudge')
# dcgettext
##import locale
##if gettext.dcgettext('gettext', 'nudge nudge',
## locale.LC_MESSAGES) <> 'wink wink':
## print _('dcgettext failed')
# test the alternative interface
fp = open(os.path.join(mofile, 'LC_MESSAGES', 'gettext.mo'), 'rb')
t = gettext.GNUTranslations(fp)
fp.close()
gettext.set(t)
print t == gettext.get()
print _('nudge nudge')
|
<commit_before><commit_msg>Test suite for new gettext.py module.<commit_after>
|
import os
import gettext
def get_qualified_path(name):
"""Return a more qualified path to name"""
import sys
import os
path = sys.path
try:
path = [os.path.dirname(__file__)] + path
except NameError:
pass
for dir in path:
fullname = os.path.join(dir, name)
if os.path.exists(fullname):
return fullname
return name
# Test basic interface
os.environ['LANGUAGE'] = 'xx'
mofile = get_qualified_path('xx')
localedir = os.path.dirname(mofile)
print 'installing gettext'
gettext.install()
print _('calling bindtextdomain with localedir %s') % localedir
print gettext.bindtextdomain('gettext', localedir)
print gettext.bindtextdomain()
print gettext.textdomain('gettext')
print gettext.textdomain()
# test some translations
print _(u'mullusk')
print _(r'Raymond Luxury Yach-t')
print _(ur'nudge nudge')
# double quotes
print _(u"mullusk")
print _(r"Raymond Luxury Yach-t")
print _(ur"nudge nudge")
# triple single quotes
print _(u'''mullusk''')
print _(r'''Raymond Luxury Yach-t''')
print _(ur'''nudge nudge''')
# triple double quotes
print _(u"""mullusk""")
print _(r"""Raymond Luxury Yach-t""")
print _(ur"""nudge nudge""")
# multiline strings
print _('''This module provides internationalization and localization
support for your Python programs by providing an interface to the GNU
gettext message catalog library.''')
print gettext.dgettext('gettext', 'nudge nudge')
# dcgettext
##import locale
##if gettext.dcgettext('gettext', 'nudge nudge',
## locale.LC_MESSAGES) <> 'wink wink':
## print _('dcgettext failed')
# test the alternative interface
fp = open(os.path.join(mofile, 'LC_MESSAGES', 'gettext.mo'), 'rb')
t = gettext.GNUTranslations(fp)
fp.close()
gettext.set(t)
print t == gettext.get()
print _('nudge nudge')
|
Test suite for new gettext.py module.import os
import gettext
def get_qualified_path(name):
"""Return a more qualified path to name"""
import sys
import os
path = sys.path
try:
path = [os.path.dirname(__file__)] + path
except NameError:
pass
for dir in path:
fullname = os.path.join(dir, name)
if os.path.exists(fullname):
return fullname
return name
# Test basic interface
os.environ['LANGUAGE'] = 'xx'
mofile = get_qualified_path('xx')
localedir = os.path.dirname(mofile)
print 'installing gettext'
gettext.install()
print _('calling bindtextdomain with localedir %s') % localedir
print gettext.bindtextdomain('gettext', localedir)
print gettext.bindtextdomain()
print gettext.textdomain('gettext')
print gettext.textdomain()
# test some translations
print _(u'mullusk')
print _(r'Raymond Luxury Yach-t')
print _(ur'nudge nudge')
# double quotes
print _(u"mullusk")
print _(r"Raymond Luxury Yach-t")
print _(ur"nudge nudge")
# triple single quotes
print _(u'''mullusk''')
print _(r'''Raymond Luxury Yach-t''')
print _(ur'''nudge nudge''')
# triple double quotes
print _(u"""mullusk""")
print _(r"""Raymond Luxury Yach-t""")
print _(ur"""nudge nudge""")
# multiline strings
print _('''This module provides internationalization and localization
support for your Python programs by providing an interface to the GNU
gettext message catalog library.''')
print gettext.dgettext('gettext', 'nudge nudge')
# dcgettext
##import locale
##if gettext.dcgettext('gettext', 'nudge nudge',
## locale.LC_MESSAGES) <> 'wink wink':
## print _('dcgettext failed')
# test the alternative interface
fp = open(os.path.join(mofile, 'LC_MESSAGES', 'gettext.mo'), 'rb')
t = gettext.GNUTranslations(fp)
fp.close()
gettext.set(t)
print t == gettext.get()
print _('nudge nudge')
|
<commit_before><commit_msg>Test suite for new gettext.py module.<commit_after>import os
import gettext
def get_qualified_path(name):
"""Return a more qualified path to name"""
import sys
import os
path = sys.path
try:
path = [os.path.dirname(__file__)] + path
except NameError:
pass
for dir in path:
fullname = os.path.join(dir, name)
if os.path.exists(fullname):
return fullname
return name
# Test basic interface
os.environ['LANGUAGE'] = 'xx'
mofile = get_qualified_path('xx')
localedir = os.path.dirname(mofile)
print 'installing gettext'
gettext.install()
print _('calling bindtextdomain with localedir %s') % localedir
print gettext.bindtextdomain('gettext', localedir)
print gettext.bindtextdomain()
print gettext.textdomain('gettext')
print gettext.textdomain()
# test some translations
print _(u'mullusk')
print _(r'Raymond Luxury Yach-t')
print _(ur'nudge nudge')
# double quotes
print _(u"mullusk")
print _(r"Raymond Luxury Yach-t")
print _(ur"nudge nudge")
# triple single quotes
print _(u'''mullusk''')
print _(r'''Raymond Luxury Yach-t''')
print _(ur'''nudge nudge''')
# triple double quotes
print _(u"""mullusk""")
print _(r"""Raymond Luxury Yach-t""")
print _(ur"""nudge nudge""")
# multiline strings
print _('''This module provides internationalization and localization
support for your Python programs by providing an interface to the GNU
gettext message catalog library.''')
print gettext.dgettext('gettext', 'nudge nudge')
# dcgettext
##import locale
##if gettext.dcgettext('gettext', 'nudge nudge',
## locale.LC_MESSAGES) <> 'wink wink':
## print _('dcgettext failed')
# test the alternative interface
fp = open(os.path.join(mofile, 'LC_MESSAGES', 'gettext.mo'), 'rb')
t = gettext.GNUTranslations(fp)
fp.close()
gettext.set(t)
print t == gettext.get()
print _('nudge nudge')
|
|
2b5cad439ef59cf1aa1ebec396f02b1bec7cdaaa
|
sum_primes/sum_primes.py
|
sum_primes/sum_primes.py
|
def gen_primes():
# logic from Sieve of Eratosthenes
# by David Eppstein, UC Irvine, 28 Feb 2002
# http://code.activestate.com/recipes/117119/
# changed var names
sieve = {}
check_int = 2
while True:
if check_int not in sieve:
yield check_int
sieve[check_int * check_int] = [check_int]
else:
for number in sieve[check_int]:
sieve.setdefault(number + check_int, []).append(number)
del sieve[check_int]
check_int += 1
if __name__ == '__main__':
sum = 0
primes = gen_primes()
for i in xrange(1000):
sum += primes.next()
print sum
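The script above targets Python 2 (xrange, primes.next(), print statement); an equivalent Python 3 rendering of the same incremental sieve, shown as a sketch:
def gen_primes_py3():
    sieve = {}
    candidate = 2
    while True:
        if candidate not in sieve:
            yield candidate
            sieve[candidate * candidate] = [candidate]
        else:
            for prime in sieve[candidate]:
                sieve.setdefault(prime + candidate, []).append(prime)
            del sieve[candidate]
        candidate += 1
if __name__ == '__main__':
    gen = gen_primes_py3()
    print(sum(next(gen) for _ in range(1000)))  # sum of the first 1000 primes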
|
Implement solution for sum of first 1000 primes
|
Implement solution for sum of first 1000 primes
|
Python
|
mit
|
MikeDelaney/CodeEval
|
Implement solution for sum of first 1000 primes
|
def gen_primes():
# logic from Sieve of Eratosthenes
# by David Eppstein, UC Irvine, 28 Feb 2002
# http://code.activestate.com/recipes/117119/
# changed var names
sieve = {}
check_int = 2
while True:
if check_int not in sieve:
yield check_int
sieve[check_int * check_int] = [check_int]
else:
for number in sieve[check_int]:
sieve.setdefault(number + check_int, []).append(number)
del sieve[check_int]
check_int += 1
if __name__ == '__main__':
sum = 0
primes = gen_primes()
for i in xrange(1000):
sum += primes.next()
print sum
|
<commit_before><commit_msg>Implement solution for sum of first 1000 primes<commit_after>
|
def gen_primes():
# logic from Sieve of Eratosthenes
# by David Eppstein, UC Irvine, 28 Feb 2002
# http://code.activestate.com/recipes/117119/
# changed var names
sieve = {}
check_int = 2
while True:
if check_int not in sieve:
yield check_int
sieve[check_int * check_int] = [check_int]
else:
for number in sieve[check_int]:
sieve.setdefault(number + check_int, []).append(number)
del sieve[check_int]
check_int += 1
if __name__ == '__main__':
sum = 0
primes = gen_primes()
for i in xrange(1000):
sum += primes.next()
print sum
|
Implement solution for sum of first 1000 primes
def gen_primes():
# logic from Sieve of Eratosthenes
# by David Eppstein, UC Irvine, 28 Feb 2002
# http://code.activestate.com/recipes/117119/
# changed var names
sieve = {}
check_int = 2
while True:
if check_int not in sieve:
yield check_int
sieve[check_int * check_int] = [check_int]
else:
for number in sieve[check_int]:
sieve.setdefault(number + check_int, []).append(number)
del sieve[check_int]
check_int += 1
if __name__ == '__main__':
sum = 0
primes = gen_primes()
for i in xrange(1000):
sum += primes.next()
print sum
|
<commit_before><commit_msg>Implement solution for sum of first 1000 primes<commit_after>
def gen_primes():
# logic from Sieve of Eratosthenes
# by David Eppstein, UC Irvine, 28 Feb 2002
# http://code.activestate.com/recipes/117119/
# changed var names
sieve = {}
check_int = 2
while True:
if check_int not in sieve:
yield check_int
sieve[check_int * check_int] = [check_int]
else:
for number in sieve[check_int]:
sieve.setdefault(number + check_int, []).append(number)
del sieve[check_int]
check_int += 1
if __name__ == '__main__':
sum = 0
primes = gen_primes()
for i in xrange(1000):
sum += primes.next()
print sum
|
|
7b22421343cc29695f254c9334db0481c2c34f64
|
numba2/runtime/obj/intobject.py
|
numba2/runtime/obj/intobject.py
|
# -*- coding: utf-8 -*-
"""
int/long implementation.
"""
from __future__ import print_function, division, absolute_import
from ... import jit, implements, typeof
from ..interfaces import Number
@implements('Int[nbits]', Number)
class Int(object):
pass
@typeof.case((int, long))
def typeof(pyval):
return Int[32]
|
Add dummy int/long object implementing Number
|
Add dummy int/long object implementing Number
|
Python
|
bsd-2-clause
|
flypy/flypy,flypy/flypy
|
Add dummy int/long object implementing Number
|
# -*- coding: utf-8 -*-
"""
int/long implementation.
"""
from __future__ import print_function, division, absolute_import
from ... import jit, implements, typeof
from ..interfaces import Number
@implements('Int[nbits]', Number)
class Int(object):
pass
@typeof.case((int, long))
def typeof(pyval):
return Int[32]
|
<commit_before><commit_msg>Add dummy int/long object implementing Number<commit_after>
|
# -*- coding: utf-8 -*-
"""
int/long implementation.
"""
from __future__ import print_function, division, absolute_import
from ... import jit, implements, typeof
from ..interfaces import Number
@implements('Int[nbits]', Number)
class Int(object):
pass
@typeof.case((int, long))
def typeof(pyval):
return Int[32]
|
Add dummy int/long object implementing Number# -*- coding: utf-8 -*-
"""
int/long implementation.
"""
from __future__ import print_function, division, absolute_import
from ... import jit, implements, typeof
from ..interfaces import Number
@implements('Int[nbits]', Number)
class Int(object):
pass
@typeof.case((int, long))
def typeof(pyval):
return Int[32]
|
<commit_before><commit_msg>Add dummy int/long object implementing Number<commit_after># -*- coding: utf-8 -*-
"""
int/long implementation.
"""
from __future__ import print_function, division, absolute_import
from ... import jit, implements, typeof
from ..interfaces import Number
@implements('Int[nbits]', Number)
class Int(object):
pass
@typeof.case((int, long))
def typeof(pyval):
return Int[32]
|
|
284913283fa2340681bdfb5ca3ca3faef9d404b4
|
app/soc/modules/gci/views/helper/url_patterns.py
|
app/soc/modules/gci/views/helper/url_patterns.py
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for constructing GCI related URL patterns
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
'"Selwyn Jacob" <selwynjacob90@gmail.com>',
]
from django.conf.urls.defaults import url as django_url
from soc.views.helper import url_patterns
def url(regex, view, kwargs=None, name=None):
"""Constructs an url pattern prefixed with ^gci/.
Args: see django.conf.urls.defaults.url
"""
return django_url('^gci/%s' % regex, view, kwargs=kwargs, name=name)
TASK = url_patterns.namedLinkIdPattern(['sponsor', 'program',
'organization', 'task'])
|
Implement GCI url patterns module based on the core url patterns module.
|
Implement GCI url patterns module based on the core url patterns module.
This code is partly taken from Selwyn's patches during GSoC 2011. Thanks
to Selwyn for the patch.
--HG--
extra : rebase_source : a64b9d703c2e309084a77aec627b8c7160186c66
|
Python
|
apache-2.0
|
rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son
|
Implement GCI url patterns module based on the core url patterns module.
This code is partly taken from Selwyn's patches during GSoC 2011. Thanks
to Selwyn for the patch.
--HG--
extra : rebase_source : a64b9d703c2e309084a77aec627b8c7160186c66
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for constructing GCI related URL patterns
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
'"Selwyn Jacob" <selwynjacob90@gmail.com>',
]
from django.conf.urls.defaults import url as django_url
from soc.views.helper import url_patterns
def url(regex, view, kwargs=None, name=None):
"""Constructs an url pattern prefixed with ^gci/.
Args: see django.conf.urls.defaults.url
"""
return django_url('^gci/%s' % regex, view, kwargs=kwargs, name=name)
TASK = url_patterns.namedLinkIdPattern(['sponsor', 'program',
'organization', 'task'])
|
<commit_before><commit_msg>Implement GCI url patterns module based on the core url patterns module.
This code is partly taken from Selwyn's patches during GSoC 2011. Thanks
to Selwyn for the patch.
--HG--
extra : rebase_source : a64b9d703c2e309084a77aec627b8c7160186c66<commit_after>
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for constructing GCI related URL patterns
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
'"Selwyn Jacob" <selwynjacob90@gmail.com>',
]
from django.conf.urls.defaults import url as django_url
from soc.views.helper import url_patterns
def url(regex, view, kwargs=None, name=None):
"""Constructs an url pattern prefixed with ^gci/.
Args: see django.conf.urls.defaults.url
"""
return django_url('^gci/%s' % regex, view, kwargs=kwargs, name=name)
TASK = url_patterns.namedLinkIdPattern(['sponsor', 'program',
'organization', 'task'])
|
Implement GCI url patterns module based on the core url patterns module.
This code is partly taken from Selwyn's patches during GSoC 2011. Thanks
to Selwyn for the patch.
--HG--
extra : rebase_source : a64b9d703c2e309084a77aec627b8c7160186c66#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for constructing GCI related URL patterns
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
'"Selwyn Jacob" <selwynjacob90@gmail.com>',
]
from django.conf.urls.defaults import url as django_url
from soc.views.helper import url_patterns
def url(regex, view, kwargs=None, name=None):
"""Constructs an url pattern prefixed with ^gci/.
Args: see django.conf.urls.defaults.url
"""
return django_url('^gci/%s' % regex, view, kwargs=kwargs, name=name)
TASK = url_patterns.namedLinkIdPattern(['sponsor', 'program',
'organization', 'task'])
|
<commit_before><commit_msg>Implement GCI url patterns module based on the core url patterns module.
This code is partly taken from Selwyn's patches during GSoC 2011. Thanks
to Selwyn for the patch.
--HG--
extra : rebase_source : a64b9d703c2e309084a77aec627b8c7160186c66<commit_after>#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for constructing GCI related URL patterns
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
'"Selwyn Jacob" <selwynjacob90@gmail.com>',
]
from django.conf.urls.defaults import url as django_url
from soc.views.helper import url_patterns
def url(regex, view, kwargs=None, name=None):
"""Constructs an url pattern prefixed with ^gci/.
Args: see django.conf.urls.defaults.url
"""
return django_url('^gci/%s' % regex, view, kwargs=kwargs, name=name)
TASK = url_patterns.namedLinkIdPattern(['sponsor', 'program',
'organization', 'task'])
|
|
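A minimal usage sketch of the url() helper and TASK pattern above, assuming TASK is a plain regex fragment that can be interpolated and that the helper is importable from the GCI views package; the import path, view path and pattern name below are hypothetical, not part of the patch:
from django.conf.urls.defaults import patterns
from soc.modules.gci.views.helper.url_patterns import url, TASK  # assumed import path
urlpatterns = patterns(
    '',
    # Matches e.g. ^gci/task/show/<sponsor>/<program>/<organization>/<task>$
    url(r'task/show/%s$' % TASK, 'soc.modules.gci.views.task.show',
        name='show_gci_task'),
)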
58d0bd08b942f07f80ba3e4a0bd0a6849919e4c9
|
utils/simtel_to_astri_cropped_geom_json_file.py
|
utils/simtel_to_astri_cropped_geom_json_file.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# This script is provided under the terms and conditions of the MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
... TODO
"""
__all__ = ['simtel_to_geom_json_file']
import argparse
import numpy as np
from datapipe.io import geometry_converter
import ctapipe
# Old version
from ctapipe.io import camera
# New version
#from ctapipe.instrument import camera
def simtel_to_geom_json_file(output_json_file=None):
num_pixels_x = 40
num_pixels_y = 40
range_x = (-0.142555996776, 0.142555996776)
range_y = (-0.142555996776, 0.142555996776)
geom = camera.make_rectangular_camera_geometry(num_pixels_x,
num_pixels_y,
range_x,
range_y)
# Convert and write the geom object
if output_json_file is None:
output_json_file = "astri_cropped.geom.json"
geometry_converter.geom_to_json_file(geom, output_json_file)
def main():
# PARSE OPTIONS ###########################################################
    desc = "Generate a geom.json file from a simtel file."

parser = argparse.ArgumentParser(description=desc)
parser.add_argument("--output", "-o",
metavar="FILE",
help="The geom.json output file path")
args = parser.parse_args()
output_file = args.output
simtel_to_geom_json_file(output_file)
if __name__ == "__main__":
main()
|
Add a tool script to make geom.json files.
|
Add a tool script to make geom.json files.
|
Python
|
mit
|
jdhp-sap/data-pipeline-standalone-scripts,jdhp-sap/sap-cta-data-pipeline,jdhp-sap/data-pipeline-standalone-scripts,jdhp-sap/sap-cta-data-pipeline
|
Add a tool script to make geom.json files.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# This script is provided under the terms and conditions of the MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
... TODO
"""
__all__ = ['simtel_to_geom_json_file']
import argparse
import numpy as np
from datapipe.io import geometry_converter
import ctapipe
# Old version
from ctapipe.io import camera
# New version
#from ctapipe.instrument import camera
def simtel_to_geom_json_file(output_json_file=None):
num_pixels_x = 40
num_pixels_y = 40
range_x = (-0.142555996776, 0.142555996776)
range_y = (-0.142555996776, 0.142555996776)
geom = camera.make_rectangular_camera_geometry(num_pixels_x,
num_pixels_y,
range_x,
range_y)
# Convert and write the geom object
if output_json_file is None:
output_json_file = "astri_cropped.geom.json"
geometry_converter.geom_to_json_file(geom, output_json_file)
def main():
# PARSE OPTIONS ###########################################################
    desc = "Generate a geom.json file from a simtel file."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("--output", "-o",
metavar="FILE",
help="The geom.json output file path")
args = parser.parse_args()
output_file = args.output
simtel_to_geom_json_file(output_file)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a tool script to make geom.json files.<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# This script is provided under the terms and conditions of the MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
... TODO
"""
__all__ = ['simtel_to_geom_json_file']
import argparse
import numpy as np
from datapipe.io import geometry_converter
import ctapipe
# Old version
from ctapipe.io import camera
# New version
#from ctapipe.instrument import camera
def simtel_to_geom_json_file(output_json_file=None):
num_pixels_x = 40
num_pixels_y = 40
range_x = (-0.142555996776, 0.142555996776)
range_y = (-0.142555996776, 0.142555996776)
geom = camera.make_rectangular_camera_geometry(num_pixels_x,
num_pixels_y,
range_x,
range_y)
# Convert and write the geom object
if output_json_file is None:
output_json_file = "astri_cropped.geom.json"
geometry_converter.geom_to_json_file(geom, output_json_file)
def main():
# PARSE OPTIONS ###########################################################
    desc = "Generate a geom.json file from a simtel file."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("--output", "-o",
metavar="FILE",
help="The geom.json output file path")
args = parser.parse_args()
output_file = args.output
simtel_to_geom_json_file(output_file)
if __name__ == "__main__":
main()
|
Add a tool script to make geom.json files.#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# This script is provided under the terms and conditions of the MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
... TODO
"""
__all__ = ['simtel_to_geom_json_file']
import argparse
import numpy as np
from datapipe.io import geometry_converter
import ctapipe
# Old version
from ctapipe.io import camera
# New version
#from ctapipe.instrument import camera
def simtel_to_geom_json_file(output_json_file=None):
num_pixels_x = 40
num_pixels_y = 40
range_x = (-0.142555996776, 0.142555996776)
range_y = (-0.142555996776, 0.142555996776)
geom = camera.make_rectangular_camera_geometry(num_pixels_x,
num_pixels_y,
range_x,
range_y)
# Convert and write the geom object
if output_json_file is None:
output_json_file = "astri_cropped.geom.json"
geometry_converter.geom_to_json_file(geom, output_json_file)
def main():
# PARSE OPTIONS ###########################################################
    desc = "Generate a geom.json file from a simtel file."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("--output", "-o",
metavar="FILE",
help="The geom.json output file path")
args = parser.parse_args()
output_file = args.output
simtel_to_geom_json_file(output_file)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a tool script to make geom.json files.<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# This script is provided under the terms and conditions of the MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
... TODO
"""
__all__ = ['simtel_to_geom_json_file']
import argparse
import numpy as np
from datapipe.io import geometry_converter
import ctapipe
# Old version
from ctapipe.io import camera
# New version
#from ctapipe.instrument import camera
def simtel_to_geom_json_file(output_json_file=None):
num_pixels_x = 40
num_pixels_y = 40
range_x = (-0.142555996776, 0.142555996776)
range_y = (-0.142555996776, 0.142555996776)
geom = camera.make_rectangular_camera_geometry(num_pixels_x,
num_pixels_y,
range_x,
range_y)
# Convert and write the geom object
if output_json_file is None:
output_json_file = "astri_cropped.geom.json"
geometry_converter.geom_to_json_file(geom, output_json_file)
def main():
# PARSE OPTIONS ###########################################################
    desc = "Generate a geom.json file from a simtel file."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("--output", "-o",
metavar="FILE",
help="The geom.json output file path")
args = parser.parse_args()
output_file = args.output
simtel_to_geom_json_file(output_file)
if __name__ == "__main__":
main()
|
|
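A minimal usage sketch of the generator above, assuming utils/ is on the Python path; the alternative output file name is just an example:
from simtel_to_astri_cropped_geom_json_file import simtel_to_geom_json_file
# Default output name is astri_cropped.geom.json in the working directory.
simtel_to_geom_json_file()
# Equivalent to passing -o/--output on the command line.
simtel_to_geom_json_file("my_astri_cropped.geom.json")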
e0e7ccbea4a0c55533e27e595b59e442a9da263d
|
openstack/common/setup.py
|
openstack/common/setup.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import os
import re
import subprocess
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias] = canonical_email
return mapping
def str_dict_replace(s, mapping):
for s1, s2 in mapping.iteritems():
s = s.replace(s1, s2)
return s
|
Split functions to avoid eventlet import.
|
Split functions to avoid eventlet import.
Some of these functions are used in setup.py. In a virtualenv
based workflow, python setup.py sdist is called to create a
tarball which is then installed into the virtualenv. These
functions need to be in a separate file so that they can be
imported by setup.py without eventlet needing to be installed.
Change-Id: I6f7dc9614895b8c91135c62373b98afe55e1fc7d
|
Python
|
apache-2.0
|
markmc/oslo.packaging,markmc/oslo.packaging
|
Split functions to avoid eventlet import.
Some of these functions are used in setup.py. In a virtualenv
based workflow, python setup.py sdist is called to create a
tarball which is then installed into the virtualenv. These
functions need to be in a separate file so that they can be
imported by setup.py without eventlet needing to be installed.
Change-Id: I6f7dc9614895b8c91135c62373b98afe55e1fc7d
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import os
import re
import subprocess
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias] = canonical_email
return mapping
def str_dict_replace(s, mapping):
for s1, s2 in mapping.iteritems():
s = s.replace(s1, s2)
return s
|
<commit_before><commit_msg>Split functions to avoid eventlet import.
Some of these functions are used in setup.py. In a virtualenv
based workflow, python setup.py sdist is called to create a
tarball which is then installed into the virtualenv. These
functions need to be in a separate file so that they can be
imported by setup.py without eventlet needing to be installed.
Change-Id: I6f7dc9614895b8c91135c62373b98afe55e1fc7d<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import os
import re
import subprocess
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias] = canonical_email
return mapping
def str_dict_replace(s, mapping):
for s1, s2 in mapping.iteritems():
s = s.replace(s1, s2)
return s
|
Split functions to avoid eventlet import.
Some of these functions are used in setup.py. In a virtualenv
based workflow, python setup.py sdist is called to create a
tarball which is then installed into the virtualenv. These
functions need to be in a separate file so that they can be
imported by setup.py without eventlet needing to be installed.
Change-Id: I6f7dc9614895b8c91135c62373b98afe55e1fc7d# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import os
import re
import subprocess
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias] = canonical_email
return mapping
def str_dict_replace(s, mapping):
for s1, s2 in mapping.iteritems():
s = s.replace(s1, s2)
return s
|
<commit_before><commit_msg>Split functions to avoid eventlet import.
Some of these functions are used in setup.py. In a virtualenv
based workflow, python setup.py sdist is called to create a
tarball which is then installed into the virtualenv. These
functions need to be in a separate file so that they can be
imported by setup.py without eventlet needing to be installed.
Change-Id: I6f7dc9614895b8c91135c62373b98afe55e1fc7d<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import os
import re
import subprocess
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias] = canonical_email
return mapping
def str_dict_replace(s, mapping):
for s1, s2 in mapping.iteritems():
s = s.replace(s1, s2)
return s
|
|
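A minimal sketch of how the two helpers combine, assuming the module is importable as openstack.common.setup and a .mailmap whose lines are written as "canonical alias"; the file names and addresses below are made up:
from openstack.common.setup import parse_mailmap, str_dict_replace
# parse_mailmap builds {alias: canonical}, e.g. from the line
# "<jdoe@example.com> <john.doe@example.org>"
mapping = parse_mailmap('.mailmap')
authors_text = open('AUTHORS').read()
# Rewrites every alias address in the text to its canonical form.
print(str_dict_replace(authors_text, mapping))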
74e5e9e8d6dd117b91466f30b961593ef43dd618
|
dbaas/maintenance/scripts/compare_resource_id.py
|
dbaas/maintenance/scripts/compare_resource_id.py
|
from faasclient.client import Client
credential = get_credentials_for(Environment.objects.get(name='dev'), CredentialType.FAAS)
faas_client = Client(
authurl=credential.endpoint,
user=credential.user, key=credential.password,
tenant_name=credential.project,
insecure=False
)
for vol in Volume.objects.filter(host__instances__databaseinfra__environment__name='dev'):
p = VolumeProviderBase(vol.host.instances.first())
database_resource = p.get_volume(vol).get('resource_id')
faas_resource = faas_client.export_get(vol.identifier)[1].get('resource_id')
if database_resource != faas_resource:
print "volume {} com resource diferente dbaas: {} faas: {}".format(vol.identifier, database_resource, faas_resource)
else:
print "Volume {} OK".format(vol.identifier)
|
Create script to compare resource id of all volumes
|
Create script to compare resource id of all volumes
|
Python
|
bsd-3-clause
|
globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service
|
Create script to compare resource id of all volumes
|
from faasclient.client import Client
credential = get_credentials_for(Environment.objects.get(name='dev'), CredentialType.FAAS)
faas_client = Client(
authurl=credential.endpoint,
user=credential.user, key=credential.password,
tenant_name=credential.project,
insecure=False
)
for vol in Volume.objects.filter(host__instances__databaseinfra__environment__name='dev'):
p = VolumeProviderBase(vol.host.instances.first())
database_resource = p.get_volume(vol).get('resource_id')
faas_resource = faas_client.export_get(vol.identifier)[1].get('resource_id')
if database_resource != faas_resource:
print "volume {} com resource diferente dbaas: {} faas: {}".format(vol.identifier, database_resource, faas_resource)
else:
print "Volume {} OK".format(vol.identifier)
|
<commit_before><commit_msg>Create script to compare resource id of all volumes<commit_after>
|
from faasclient.client import Client
credential = get_credentials_for(Environment.objects.get(name='dev'), CredentialType.FAAS)
faas_client = Client(
authurl=credential.endpoint,
user=credential.user, key=credential.password,
tenant_name=credential.project,
insecure=False
)
for vol in Volume.objects.filter(host__instances__databaseinfra__environment__name='dev'):
p = VolumeProviderBase(vol.host.instances.first())
database_resource = p.get_volume(vol).get('resource_id')
faas_resource = faas_client.export_get(vol.identifier)[1].get('resource_id')
if database_resource != faas_resource:
print "volume {} com resource diferente dbaas: {} faas: {}".format(vol.identifier, database_resource, faas_resource)
else:
print "Volume {} OK".format(vol.identifier)
|
Create script to compare resource id of all volumesfrom faasclient.client import Client
credential = get_credentials_for(Environment.objects.get(name='dev'), CredentialType.FAAS)
faas_client = Client(
authurl=credential.endpoint,
user=credential.user, key=credential.password,
tenant_name=credential.project,
insecure=False
)
for vol in Volume.objects.filter(host__instances__databaseinfra__environment__name='dev'):
p = VolumeProviderBase(vol.host.instances.first())
database_resource = p.get_volume(vol).get('resource_id')
faas_resource = faas_client.export_get(vol.identifier)[1].get('resource_id')
if database_resource != faas_resource:
print "volume {} com resource diferente dbaas: {} faas: {}".format(vol.identifier, database_resource, faas_resource)
else:
print "Volume {} OK".format(vol.identifier)
|
<commit_before><commit_msg>Create script to compare resource id of all volumes<commit_after>from faasclient.client import Client
credential = get_credentials_for(Environment.objects.get(name='dev'), CredentialType.FAAS)
faas_client = Client(
authurl=credential.endpoint,
user=credential.user, key=credential.password,
tenant_name=credential.project,
insecure=False
)
for vol in Volume.objects.filter(host__instances__databaseinfra__environment__name='dev'):
p = VolumeProviderBase(vol.host.instances.first())
database_resource = p.get_volume(vol).get('resource_id')
faas_resource = faas_client.export_get(vol.identifier)[1].get('resource_id')
if database_resource != faas_resource:
print "volume {} com resource diferente dbaas: {} faas: {}".format(vol.identifier, database_resource, faas_resource)
else:
print "Volume {} OK".format(vol.identifier)
|
|
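The snippet relies on several names that are only importable elsewhere in the dbaas code base; a hedged guess at the imports it would need to run standalone (every module path below is an assumption and may not match the real project layout):
# All module paths here are assumptions about the dbaas layout, not verified.
from physical.models import Environment, Volume                      # assumed path
from dbaas_credentials.models import CredentialType                  # assumed path
from util import get_credentials_for                                 # assumed path
from workflow.steps.util.volume_provider import VolumeProviderBase   # assumed path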
4a3c77eb0e69b1bd79c411e2956c158f969bf04c
|
databroker/tests/test_v2/test_relative_root_map.py
|
databroker/tests/test_v2/test_relative_root_map.py
|
import pathlib
import shutil
from bluesky.plans import count
from databroker._drivers.jsonl import BlueskyJSONLCatalog
import intake
from ophyd.sim import img
import pytest
from suitcase.jsonl import Serializer
def test_relative_root_map(RE, tmpdir):
"""
When a Run has no RunStop document, whether because it does not exist yet
or because the Run was interrupted in a critical way and never completed,
we expect the field for 'stop' to contain None.
"""
directory = str(tmpdir)
serializer = Serializer(directory)
RE(count([img]), serializer)
serializer.close()
dest = shutil.copytree(img.save_path, pathlib.Path(directory, 'external_data'))
relative_d = str(pathlib.Path(dest.relative_to(directory)))
root_map = {img.save_path: relative_d}
# At this point root map maps the original absolute path to one relative to
    # the directory containing the catalog.
CATALOG_FILE = f"""
sources:
test_relative_root_map:
driver: bluesky-jsonl-catalog
args:
paths:
- {directory}/*.jsonl
root_map:
{img.save_path}: {relative_d}"""
catalog_path = str(pathlib.Path(directory, "catalog.yml"))
with open(catalog_path, "w") as file:
file.write(CATALOG_FILE)
catalog = intake.open_catalog(catalog_path)
subcatalog = catalog["test_relative_root_map"]()
# At init time, Broker should resolve the relative path to an absolute one.
assert subcatalog.root_map[img.save_path] == str(dest)
# But it can only do this if it has a catalog *file* to interpret the path
# relative to.
with pytest.raises(ValueError):
BlueskyJSONLCatalog(f'{directory}/*.jsonl', root_map=root_map)
|
Add test for relative root_map.
|
Add test for relative root_map.
|
Python
|
bsd-3-clause
|
ericdill/databroker,ericdill/databroker
|
Add test for relative root_map.
|
import pathlib
import shutil
from bluesky.plans import count
from databroker._drivers.jsonl import BlueskyJSONLCatalog
import intake
from ophyd.sim import img
import pytest
from suitcase.jsonl import Serializer
def test_relative_root_map(RE, tmpdir):
"""
When a Run has no RunStop document, whether because it does not exist yet
or because the Run was interrupted in a critical way and never completed,
we expect the field for 'stop' to contain None.
"""
directory = str(tmpdir)
serializer = Serializer(directory)
RE(count([img]), serializer)
serializer.close()
dest = shutil.copytree(img.save_path, pathlib.Path(directory, 'external_data'))
relative_d = str(pathlib.Path(dest.relative_to(directory)))
root_map = {img.save_path: relative_d}
# At this point root map maps the original absolute path to one relative to
    # the directory containing the catalog.
CATALOG_FILE = f"""
sources:
test_relative_root_map:
driver: bluesky-jsonl-catalog
args:
paths:
- {directory}/*.jsonl
root_map:
{img.save_path}: {relative_d}"""
catalog_path = str(pathlib.Path(directory, "catalog.yml"))
with open(catalog_path, "w") as file:
file.write(CATALOG_FILE)
catalog = intake.open_catalog(catalog_path)
subcatalog = catalog["test_relative_root_map"]()
# At init time, Broker should resolve the relative path to an absolute one.
assert subcatalog.root_map[img.save_path] == str(dest)
# But it can only do this if it has a catalog *file* to interpret the path
# relative to.
with pytest.raises(ValueError):
BlueskyJSONLCatalog(f'{directory}/*.jsonl', root_map=root_map)
|
<commit_before><commit_msg>Add test for relative root_map.<commit_after>
|
import pathlib
import shutil
from bluesky.plans import count
from databroker._drivers.jsonl import BlueskyJSONLCatalog
import intake
from ophyd.sim import img
import pytest
from suitcase.jsonl import Serializer
def test_relative_root_map(RE, tmpdir):
"""
When a Run has no RunStop document, whether because it does not exist yet
or because the Run was interrupted in a critical way and never completed,
we expect the field for 'stop' to contain None.
"""
directory = str(tmpdir)
serializer = Serializer(directory)
RE(count([img]), serializer)
serializer.close()
dest = shutil.copytree(img.save_path, pathlib.Path(directory, 'external_data'))
relative_d = str(pathlib.Path(dest.relative_to(directory)))
root_map = {img.save_path: relative_d}
# At this point root map maps the original absolute path to one relative to
    # the directory containing the catalog.
CATALOG_FILE = f"""
sources:
test_relative_root_map:
driver: bluesky-jsonl-catalog
args:
paths:
- {directory}/*.jsonl
root_map:
{img.save_path}: {relative_d}"""
catalog_path = str(pathlib.Path(directory, "catalog.yml"))
with open(catalog_path, "w") as file:
file.write(CATALOG_FILE)
catalog = intake.open_catalog(catalog_path)
subcatalog = catalog["test_relative_root_map"]()
# At init time, Broker should resolve the relative path to an absolute one.
assert subcatalog.root_map[img.save_path] == str(dest)
# But it can only do this if it has a catalog *file* to interpret the path
# relative to.
with pytest.raises(ValueError):
BlueskyJSONLCatalog(f'{directory}/*.jsonl', root_map=root_map)
|
Add test for relative root_map.import pathlib
import shutil
from bluesky.plans import count
from databroker._drivers.jsonl import BlueskyJSONLCatalog
import intake
from ophyd.sim import img
import pytest
from suitcase.jsonl import Serializer
def test_relative_root_map(RE, tmpdir):
"""
When a Run has no RunStop document, whether because it does not exist yet
or because the Run was interrupted in a critical way and never completed,
we expect the field for 'stop' to contain None.
"""
directory = str(tmpdir)
serializer = Serializer(directory)
RE(count([img]), serializer)
serializer.close()
dest = shutil.copytree(img.save_path, pathlib.Path(directory, 'external_data'))
relative_d = str(pathlib.Path(dest.relative_to(directory)))
root_map = {img.save_path: relative_d}
# At this point root map maps the original absolute path to one relative to
    # the directory containing the catalog.
CATALOG_FILE = f"""
sources:
test_relative_root_map:
driver: bluesky-jsonl-catalog
args:
paths:
- {directory}/*.jsonl
root_map:
{img.save_path}: {relative_d}"""
catalog_path = str(pathlib.Path(directory, "catalog.yml"))
with open(catalog_path, "w") as file:
file.write(CATALOG_FILE)
catalog = intake.open_catalog(catalog_path)
subcatalog = catalog["test_relative_root_map"]()
# At init time, Broker should resolve the relative path to an absolute one.
assert subcatalog.root_map[img.save_path] == str(dest)
# But it can only do this if it has a catalog *file* to interpret the path
# relative to.
with pytest.raises(ValueError):
BlueskyJSONLCatalog(f'{directory}/*.jsonl', root_map=root_map)
|
<commit_before><commit_msg>Add test for relative root_map.<commit_after>import pathlib
import shutil
from bluesky.plans import count
from databroker._drivers.jsonl import BlueskyJSONLCatalog
import intake
from ophyd.sim import img
import pytest
from suitcase.jsonl import Serializer
def test_relative_root_map(RE, tmpdir):
"""
When a Run has no RunStop document, whether because it does not exist yet
or because the Run was interrupted in a critical way and never completed,
we expect the field for 'stop' to contain None.
"""
directory = str(tmpdir)
serializer = Serializer(directory)
RE(count([img]), serializer)
serializer.close()
dest = shutil.copytree(img.save_path, pathlib.Path(directory, 'external_data'))
relative_d = str(pathlib.Path(dest.relative_to(directory)))
root_map = {img.save_path: relative_d}
# At this point root map maps the original absolute path to one relative to
    # the directory containing the catalog.
CATALOG_FILE = f"""
sources:
test_relative_root_map:
driver: bluesky-jsonl-catalog
args:
paths:
- {directory}/*.jsonl
root_map:
{img.save_path}: {relative_d}"""
catalog_path = str(pathlib.Path(directory, "catalog.yml"))
with open(catalog_path, "w") as file:
file.write(CATALOG_FILE)
catalog = intake.open_catalog(catalog_path)
subcatalog = catalog["test_relative_root_map"]()
# At init time, Broker should resolve the relative path to an absolute one.
assert subcatalog.root_map[img.save_path] == str(dest)
# But it can only do this if it has a catalog *file* to interpret the path
# relative to.
with pytest.raises(ValueError):
BlueskyJSONLCatalog(f'{directory}/*.jsonl', root_map=root_map)
|
|
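A conceptual sketch of what root_map does in the test above (not databroker's actual implementation): a root recorded in the documents is swapped for the mapped one, and a relative mapping only makes sense when there is a catalog file directory to resolve it against, which is why the glob-based constructor raises ValueError:
import pathlib
def resolve_resource(root, resource_path, root_map, catalog_dir=None):
    # Swap the recorded root for the mapped one, if any.
    new_root = pathlib.Path(root_map.get(root, root))
    if not new_root.is_absolute():
        if catalog_dir is None:
            # Mirrors the ValueError in the test: no catalog file, no base to resolve against.
            raise ValueError("relative root_map entry without a catalog file")
        new_root = pathlib.Path(catalog_dir) / new_root
    return new_root / resource_path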
53cfbf8ffd1485bf6a88f5d92fcb9fdaf9c6866a
|
venues/plugin_jacktherooster.py
|
venues/plugin_jacktherooster.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import List
import lxml.html
import re
import time
from venues.abstract_venue import AbstractVenue
class Jacktherooster(AbstractVenue):
def __init__(self):
super().__init__()
self.url = "http://www.meteli.net/jacktherooster"
self.name = "Jack the rooster"
self.city = "Tampere"
self.country = "Finland"
# Parsing patterns
self.monetaryp = re.compile("[0-9]+")
def parse_price(self, prices: List[str]) -> str:
found_prices = list()
for p in prices:
if (match := self.monetaryp.search(p)) is not None:
found_prices.append(match.group())
return "{}€".format("".join(found_prices))
def parse_date(self, tag: str):
this_month = int(time.strftime("%m"))
year = int(time.strftime("%Y"))
if len(tag) == 0:
return ""
day, month = tag.rstrip(".").split(".")
day = int(day)
month = int(month)
# Are we on the new year already?
if month < this_month:
year += 1
return f"{year:04d}-{month:02d}-{day:02d}"
def parse_event(self, tag: lxml.html.HtmlElement):
datedata = " ".join(tag.xpath('.//span[contains(@class, '
'"event-date")]/span/text()'))
date = self.parse_date(datedata)
artist = " ".join(tag.xpath('.//span[contains(@class, '
'"event-info")]/h2/text()'))
price = " ".join(tag.xpath('.//span[contains(@class, '
'"price")]/text()'))
price = self.parse_price(price.split(" "))
return {"venue": self.get_venue_name(),
"date": date,
"name": artist,
"price": price}
def parse_events(self, data: bytes):
doc = lxml.html.fromstring(data)
eventtags = doc.xpath('//div[@class="event-list"]')
for et in eventtags:
yield self.parse_event(et)
if __name__ == '__main__':
import requests
k = Jacktherooster()
r = requests.get(k.url)
for e in k.parse_events(r.content):
for k, v in e.items():
print(f"{k:>10s}: {v}")
print()
|
Add plugin for venue Jack the rooster.
|
Add plugin for venue Jack the rooster.
|
Python
|
isc
|
weezel/BandEventNotifier
|
Add plugin for venue Jack the rooster.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import List
import lxml.html
import re
import time
from venues.abstract_venue import AbstractVenue
class Jacktherooster(AbstractVenue):
def __init__(self):
super().__init__()
self.url = "http://www.meteli.net/jacktherooster"
self.name = "Jack the rooster"
self.city = "Tampere"
self.country = "Finland"
# Parsing patterns
self.monetaryp = re.compile("[0-9]+")
def parse_price(self, prices: List[str]) -> str:
found_prices = list()
for p in prices:
if (match := self.monetaryp.search(p)) is not None:
found_prices.append(match.group())
return "{}€".format("".join(found_prices))
def parse_date(self, tag: str):
this_month = int(time.strftime("%m"))
year = int(time.strftime("%Y"))
if len(tag) == 0:
return ""
day, month = tag.rstrip(".").split(".")
day = int(day)
month = int(month)
# Are we on the new year already?
if month < this_month:
year += 1
return f"{year:04d}-{month:02d}-{day:02d}"
def parse_event(self, tag: lxml.html.HtmlElement):
datedata = " ".join(tag.xpath('.//span[contains(@class, '
'"event-date")]/span/text()'))
date = self.parse_date(datedata)
artist = " ".join(tag.xpath('.//span[contains(@class, '
'"event-info")]/h2/text()'))
price = " ".join(tag.xpath('.//span[contains(@class, '
'"price")]/text()'))
price = self.parse_price(price.split(" "))
return {"venue": self.get_venue_name(),
"date": date,
"name": artist,
"price": price}
def parse_events(self, data: bytes):
doc = lxml.html.fromstring(data)
eventtags = doc.xpath('//div[@class="event-list"]')
for et in eventtags:
yield self.parse_event(et)
if __name__ == '__main__':
import requests
k = Jacktherooster()
r = requests.get(k.url)
for e in k.parse_events(r.content):
for k, v in e.items():
print(f"{k:>10s}: {v}")
print()
|
<commit_before><commit_msg>Add plugin for venue Jack the rooster.<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import List
import lxml.html
import re
import time
from venues.abstract_venue import AbstractVenue
class Jacktherooster(AbstractVenue):
def __init__(self):
super().__init__()
self.url = "http://www.meteli.net/jacktherooster"
self.name = "Jack the rooster"
self.city = "Tampere"
self.country = "Finland"
# Parsing patterns
self.monetaryp = re.compile("[0-9]+")
def parse_price(self, prices: List[str]) -> str:
found_prices = list()
for p in prices:
if (match := self.monetaryp.search(p)) is not None:
found_prices.append(match.group())
return "{}€".format("".join(found_prices))
def parse_date(self, tag: str):
this_month = int(time.strftime("%m"))
year = int(time.strftime("%Y"))
if len(tag) == 0:
return ""
day, month = tag.rstrip(".").split(".")
day = int(day)
month = int(month)
# Are we on the new year already?
if month < this_month:
year += 1
return f"{year:04d}-{month:02d}-{day:02d}"
def parse_event(self, tag: lxml.html.HtmlElement):
datedata = " ".join(tag.xpath('.//span[contains(@class, '
'"event-date")]/span/text()'))
date = self.parse_date(datedata)
artist = " ".join(tag.xpath('.//span[contains(@class, '
'"event-info")]/h2/text()'))
price = " ".join(tag.xpath('.//span[contains(@class, '
'"price")]/text()'))
price = self.parse_price(price.split(" "))
return {"venue": self.get_venue_name(),
"date": date,
"name": artist,
"price": price}
def parse_events(self, data: bytes):
doc = lxml.html.fromstring(data)
eventtags = doc.xpath('//div[@class="event-list"]')
for et in eventtags:
yield self.parse_event(et)
if __name__ == '__main__':
import requests
k = Jacktherooster()
r = requests.get(k.url)
for e in k.parse_events(r.content):
for k, v in e.items():
print(f"{k:>10s}: {v}")
print()
|
Add plugin for venue Jack the rooster.#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import List
import lxml.html
import re
import time
from venues.abstract_venue import AbstractVenue
class Jacktherooster(AbstractVenue):
def __init__(self):
super().__init__()
self.url = "http://www.meteli.net/jacktherooster"
self.name = "Jack the rooster"
self.city = "Tampere"
self.country = "Finland"
# Parsing patterns
self.monetaryp = re.compile("[0-9]+")
def parse_price(self, prices: List[str]) -> str:
found_prices = list()
for p in prices:
if (match := self.monetaryp.search(p)) is not None:
found_prices.append(match.group())
return "{}€".format("".join(found_prices))
def parse_date(self, tag: str):
this_month = int(time.strftime("%m"))
year = int(time.strftime("%Y"))
if len(tag) == 0:
return ""
day, month = tag.rstrip(".").split(".")
day = int(day)
month = int(month)
# Are we on the new year already?
if month < this_month:
year += 1
return f"{year:04d}-{month:02d}-{day:02d}"
def parse_event(self, tag: lxml.html.HtmlElement):
datedata = " ".join(tag.xpath('.//span[contains(@class, '
'"event-date")]/span/text()'))
date = self.parse_date(datedata)
artist = " ".join(tag.xpath('.//span[contains(@class, '
'"event-info")]/h2/text()'))
price = " ".join(tag.xpath('.//span[contains(@class, '
'"price")]/text()'))
price = self.parse_price(price.split(" "))
return {"venue": self.get_venue_name(),
"date": date,
"name": artist,
"price": price}
def parse_events(self, data: bytes):
doc = lxml.html.fromstring(data)
eventtags = doc.xpath('//div[@class="event-list"]')
for et in eventtags:
yield self.parse_event(et)
if __name__ == '__main__':
import requests
k = Jacktherooster()
r = requests.get(k.url)
for e in k.parse_events(r.content):
for k, v in e.items():
print(f"{k:>10s}: {v}")
print()
|
<commit_before><commit_msg>Add plugin for venue Jack the rooster.<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import List
import lxml.html
import re
import time
from venues.abstract_venue import AbstractVenue
class Jacktherooster(AbstractVenue):
def __init__(self):
super().__init__()
self.url = "http://www.meteli.net/jacktherooster"
self.name = "Jack the rooster"
self.city = "Tampere"
self.country = "Finland"
# Parsing patterns
self.monetaryp = re.compile("[0-9]+")
def parse_price(self, prices: List[str]) -> str:
found_prices = list()
for p in prices:
if (match := self.monetaryp.search(p)) is not None:
found_prices.append(match.group())
return "{}€".format("".join(found_prices))
def parse_date(self, tag: str):
this_month = int(time.strftime("%m"))
year = int(time.strftime("%Y"))
if len(tag) == 0:
return ""
day, month = tag.rstrip(".").split(".")
day = int(day)
month = int(month)
# Are we on the new year already?
if month < this_month:
year += 1
return f"{year:04d}-{month:02d}-{day:02d}"
def parse_event(self, tag: lxml.html.HtmlElement):
datedata = " ".join(tag.xpath('.//span[contains(@class, '
'"event-date")]/span/text()'))
date = self.parse_date(datedata)
artist = " ".join(tag.xpath('.//span[contains(@class, '
'"event-info")]/h2/text()'))
price = " ".join(tag.xpath('.//span[contains(@class, '
'"price")]/text()'))
price = self.parse_price(price.split(" "))
return {"venue": self.get_venue_name(),
"date": date,
"name": artist,
"price": price}
def parse_events(self, data: bytes):
doc = lxml.html.fromstring(data)
eventtags = doc.xpath('//div[@class="event-list"]')
for et in eventtags:
yield self.parse_event(et)
if __name__ == '__main__':
import requests
k = Jacktherooster()
r = requests.get(k.url)
for e in k.parse_events(r.content):
for k, v in e.items():
print(f"{k:>10s}: {v}")
print()
|
|
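A small check of the year-rollover rule in parse_date, assuming the module is importable as venues.plugin_jacktherooster; the expected strings hold when the snippet is run in November 2021:
from venues.plugin_jacktherooster import Jacktherooster
venue = Jacktherooster()
# A January date seen late in the year is pushed to the next year.
print(venue.parse_date("05.01."))  # -> "2022-01-05"
print(venue.parse_date("20.12."))  # -> "2021-12-20"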
d84f748f60542d91542ef2e623947f72d562ca35
|
junction/proposals/migrations/0028_auto_20200617_2337.py
|
junction/proposals/migrations/0028_auto_20200617_2337.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2020-06-17 18:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("proposals", "0027_auto_20200502_0540"),
]
operations = [
migrations.AlterField(
model_name="historicalproposal",
name="video_url",
field=models.URLField(
blank=True,
default="",
help_text="Short 1-2 min video describing your talk",
),
),
migrations.AlterField(
model_name="proposal",
name="video_url",
field=models.URLField(
blank=True,
default="",
help_text="Short 1-2 min video describing your talk",
),
),
]
|
Add missing migrations related to video URL
|
Add missing migrations related to video URL
Related to #668
Signed-off-by: Nabarun Pal <46a782cbd1e9f752958998187886c2b51fda054c@gmail.com>
|
Python
|
mit
|
pythonindia/junction,pythonindia/junction,pythonindia/junction,pythonindia/junction
|
Add missing migrations related to video URL
Related to #668
Signed-off-by: Nabarun Pal <46a782cbd1e9f752958998187886c2b51fda054c@gmail.com>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2020-06-17 18:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("proposals", "0027_auto_20200502_0540"),
]
operations = [
migrations.AlterField(
model_name="historicalproposal",
name="video_url",
field=models.URLField(
blank=True,
default="",
help_text="Short 1-2 min video describing your talk",
),
),
migrations.AlterField(
model_name="proposal",
name="video_url",
field=models.URLField(
blank=True,
default="",
help_text="Short 1-2 min video describing your talk",
),
),
]
|
<commit_before><commit_msg>Add missing migrations related to video URL
Related to #668
Signed-off-by: Nabarun Pal <46a782cbd1e9f752958998187886c2b51fda054c@gmail.com><commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2020-06-17 18:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("proposals", "0027_auto_20200502_0540"),
]
operations = [
migrations.AlterField(
model_name="historicalproposal",
name="video_url",
field=models.URLField(
blank=True,
default="",
help_text="Short 1-2 min video describing your talk",
),
),
migrations.AlterField(
model_name="proposal",
name="video_url",
field=models.URLField(
blank=True,
default="",
help_text="Short 1-2 min video describing your talk",
),
),
]
|
Add missing migrations related to video URL
Related to #668
Signed-off-by: Nabarun Pal <46a782cbd1e9f752958998187886c2b51fda054c@gmail.com># -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2020-06-17 18:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("proposals", "0027_auto_20200502_0540"),
]
operations = [
migrations.AlterField(
model_name="historicalproposal",
name="video_url",
field=models.URLField(
blank=True,
default="",
help_text="Short 1-2 min video describing your talk",
),
),
migrations.AlterField(
model_name="proposal",
name="video_url",
field=models.URLField(
blank=True,
default="",
help_text="Short 1-2 min video describing your talk",
),
),
]
|
<commit_before><commit_msg>Add missing migrations related to video URL
Related to #668
Signed-off-by: Nabarun Pal <46a782cbd1e9f752958998187886c2b51fda054c@gmail.com><commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2020-06-17 18:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("proposals", "0027_auto_20200502_0540"),
]
operations = [
migrations.AlterField(
model_name="historicalproposal",
name="video_url",
field=models.URLField(
blank=True,
default="",
help_text="Short 1-2 min video describing your talk",
),
),
migrations.AlterField(
model_name="proposal",
name="video_url",
field=models.URLField(
blank=True,
default="",
help_text="Short 1-2 min video describing your talk",
),
),
]
|
|
34968a1c507af6d5343e15e4c2da533fd90740da
|
py/sort-characters-by-frequency.py
|
py/sort-characters-by-frequency.py
|
from collections import Counter
class Solution(object):
def frequencySort(self, s):
"""
:type s: str
:rtype: str
"""
c = Counter(s)
return ''.join(a * b for a, b in sorted(c.iteritems(), key=lambda x:-x[1]))
|
Add py solution for 451. Sort Characters By Frequency
|
Add py solution for 451. Sort Characters By Frequency
451. Sort Characters By Frequency: https://leetcode.com/problems/sort-characters-by-frequency/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 451. Sort Characters By Frequency
451. Sort Characters By Frequency: https://leetcode.com/problems/sort-characters-by-frequency/
|
from collections import Counter
class Solution(object):
def frequencySort(self, s):
"""
:type s: str
:rtype: str
"""
c = Counter(s)
return ''.join(a * b for a, b in sorted(c.iteritems(), key=lambda x:-x[1]))
|
<commit_before><commit_msg>Add py solution for 451. Sort Characters By Frequency
451. Sort Characters By Frequency: https://leetcode.com/problems/sort-characters-by-frequency/<commit_after>
|
from collections import Counter
class Solution(object):
def frequencySort(self, s):
"""
:type s: str
:rtype: str
"""
c = Counter(s)
return ''.join(a * b for a, b in sorted(c.iteritems(), key=lambda x:-x[1]))
|
Add py solution for 451. Sort Characters By Frequency
451. Sort Characters By Frequency: https://leetcode.com/problems/sort-characters-by-frequency/from collections import Counter
class Solution(object):
def frequencySort(self, s):
"""
:type s: str
:rtype: str
"""
c = Counter(s)
return ''.join(a * b for a, b in sorted(c.iteritems(), key=lambda x:-x[1]))
|
<commit_before><commit_msg>Add py solution for 451. Sort Characters By Frequency
451. Sort Characters By Frequency: https://leetcode.com/problems/sort-characters-by-frequency/<commit_after>from collections import Counter
class Solution(object):
def frequencySort(self, s):
"""
:type s: str
:rtype: str
"""
c = Counter(s)
return ''.join(a * b for a, b in sorted(c.iteritems(), key=lambda x:-x[1]))
|
|
b7e0811fccbd76ee690c9e042ca86ad419b2d226
|
pyservice/extensions.py
|
pyservice/extensions.py
|
from pyservice.layer import Layer
class ClientAuthLayer(Layer):
def handle_request(self, context, next):
client = context["client"]
key = client._attr("authuser", None)
pw = client._attr("authpw", None)
if key is None or pw is None:
raise ValueError("Must provide authuser and authpw")
next.handle_request(context)
class ServiceAuthLayer(Layer):
def handle_request(self, context, next):
op_input = context["input"]
key = op_input.get("authuser", None)
pw = op_input.get("authpw", None)
if key is None or pw is None:
raise ValueError("Must provide authuser and authpw")
next.handle_request(context)
|
Add sample basic auth layers
|
Add sample basic auth layers
Not very flexible, these are more meant to be a sample for others
creating Layers. Given how similar the code is and how basic most
handle_request() methods will be, it might be worth creating something
similar to @contextmanager, so that it's trivial to create a layer from
a single method which only takes context:
@layer_decorator
def auth_layer(context):
# Auth here
yield # This is effectively next.handle_request(context)
# Post call
|
Python
|
mit
|
numberoverzero/pyservice
|
Add sample basic auth layers
Not very flexible, these are more meant to be a sample for others
creating Layers. Given how similar the code is and how basic most
handle_request() methods will be, it might be worth creating something
similar to @contextmanager, so that it's trivial to create a layer from
a single method which only takes context:
@layer_decorator
def auth_layer(context):
# Auth here
yield # This is effectively next.handle_request(context)
# Post call
|
from pyservice.layer import Layer
class ClientAuthLayer(Layer):
def handle_request(self, context, next):
client = context["client"]
key = client._attr("authuser", None)
pw = client._attr("authpw", None)
if key is None or pw is None:
raise ValueError("Must provide authuser and authpw")
next.handle_request(context)
class ServiceAuthLayer(Layer):
def handle_request(self, context, next):
op_input = context["input"]
key = op_input.get("authuser", None)
pw = op_input.get("authpw", None)
if key is None or pw is None:
raise ValueError("Must provide authuser and authpw")
next.handle_request(context)
|
<commit_before><commit_msg>Add sample basic auth layers
Not very flexible, these are more meant to be a sample for others
creating Layers. Given how similar the code is and how basic most
handle_request() methods will be, it might be worth creating something
similar to @contextmanager, so that it's trivial to create a layer from
a single method which only takes context:
@layer_decorator
def auth_layer(context):
# Auth here
yield # This is effectively next.handle_request(context)
# Post call<commit_after>
|
from pyservice.layer import Layer
class ClientAuthLayer(Layer):
def handle_request(self, context, next):
client = context["client"]
key = client._attr("authuser", None)
pw = client._attr("authpw", None)
if key is None or pw is None:
raise ValueError("Must provide authuser and authpw")
next.handle_request(context)
class ServiceAuthLayer(Layer):
def handle_request(self, context, next):
op_input = context["input"]
key = op_input.get("authuser", None)
pw = op_input.get("authpw", None)
if key is None or pw is None:
raise ValueError("Must provide authuser and authpw")
next.handle_request(context)
|
Add sample basic auth layers
Not very flexible, these are more meant to be a sample for others
creating Layers. Given how similar the code is and how basic most
handle_request() methods will be, it might be worth creating something
similar to @contextmanager, so that it's trivial to create a layer from
a single method which only takes context:
@layer_decorator
def auth_layer(context):
# Auth here
yield # This is effectively next.handle_request(context)
# Post callfrom pyservice.layer import Layer
class ClientAuthLayer(Layer):
def handle_request(self, context, next):
client = context["client"]
key = client._attr("authuser", None)
pw = client._attr("authpw", None)
if key is None or pw is None:
raise ValueError("Must provide authuser and authpw")
next.handle_request(context)
class ServiceAuthLayer(Layer):
def handle_request(self, context, next):
op_input = context["input"]
key = op_input.get("authuser", None)
pw = op_input.get("authpw", None)
if key is None or pw is None:
raise ValueError("Must provide authuser and authpw")
next.handle_request(context)
|
<commit_before><commit_msg>Add sample basic auth layers
Not very flexible, these are more meant to be a sample for others
creating Layers. Given how similar the code is and how basic most
handle_request() methods will be, it might be worth creating something
similar to @contextmanager, so that it's trivial to create a layer from
a single method which only takes context:
@layer_decorator
def auth_layer(context):
# Auth here
yield # This is effectively next.handle_request(context)
# Post call<commit_after>from pyservice.layer import Layer
class ClientAuthLayer(Layer):
def handle_request(self, context, next):
client = context["client"]
key = client._attr("authuser", None)
pw = client._attr("authpw", None)
if key is None or pw is None:
raise ValueError("Must provide authuser and authpw")
next.handle_request(context)
class ServiceAuthLayer(Layer):
def handle_request(self, context, next):
op_input = context["input"]
key = op_input.get("authuser", None)
pw = op_input.get("authpw", None)
if key is None or pw is None:
raise ValueError("Must provide authuser and authpw")
next.handle_request(context)
|
|
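The commit message in the record above floats the idea of a @contextmanager-style @layer_decorator. Below is a minimal sketch of what that could look like; it builds only on the Layer base class and the next.handle_request(context) chaining shown above, while the decorator itself, its generator protocol, and the auth_layer example are hypothetical and not part of pyservice.

from pyservice.layer import Layer


def layer_decorator(func):
    """Hypothetical helper: build a Layer subclass from a one-yield generator.

    The generator receives the request context, runs its pre-call code,
    yields once while the rest of the layer chain executes, and is then
    resumed for any post-call code (mirroring contextlib.contextmanager).
    """
    class GeneratorLayer(Layer):
        def handle_request(self, context, next):
            gen = func(context)
            gen.send(None)                 # pre-call code, up to the yield
            next.handle_request(context)   # rest of the chain runs here
            try:
                gen.send(None)             # post-call code after the yield
            except StopIteration:
                pass
    GeneratorLayer.__name__ = getattr(func, "__name__", "GeneratorLayer")
    return GeneratorLayer


@layer_decorator
def auth_layer(context):
    # Pre-call: key names are illustrative only.
    if not context.get("input", {}).get("authuser"):
        raise ValueError("Must provide authuser")
    yield  # downstream layers and the operation run here
    # Post-call: nothing extra to do in this sketch.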
6f632dc5b0f1ee35aaf0041d5bd09f3458dd9d42
|
enabled/_50_rackspace.py
|
enabled/_50_rackspace.py
|
DASHBOARD = 'rackspace'
ADD_INSTALLED_APPS = [
'rackspace',
]
ADD_ANGULAR_MODULES = ['horizon.dashboard.rackspace']
# If set to True, this dashboard will not be added to the settings.
DISABLED = False
|
Update enabled to be simpler and match rpc-openstack.
|
Update enabled to be simpler and match rpc-openstack.
|
Python
|
apache-2.0
|
BjoernT/rpc-openstack,jacobwagner/rpc-openstack,rcbops/rpc-openstack,robb-romans/rpc-openstack,galstrom21/rpc-openstack,xeregin/rpc-openstack,jacobwagner/rpc-openstack,darrenchan/rpc-openstack,xeregin/rpc-openstack,cloudnull/rpc-openstack,xeregin/rpc-openstack,cloudnull/rpc-openstack,sigmavirus24/rpc-openstack,darrenchan/rpc-openstack,BjoernT/rpc-openstack,hughsaunders/rpc-openstack,major/rpc-openstack,major/rpc-openstack,mancdaz/rpc-openstack,sigmavirus24/rpc-openstack,darrenchan/rpc-openstack,rcbops/rpc-openstack,hughsaunders/rpc-openstack,shannonmitchell/rpc-openstack,cfarquhar/rpc-openstack,cfarquhar/rpc-openstack,robb-romans/rpc-openstack,git-harry/rpc-openstack,git-harry/rpc-openstack,prometheanfire/rpc-openstack,shannonmitchell/rpc-openstack,mancdaz/rpc-openstack,darrenchan/rpc-openstack,xeregin/rpc-openstack,prometheanfire/rpc-openstack,sigmavirus24/rpc-openstack,sigmavirus24/rpc-openstack,galstrom21/rpc-openstack
|
Update enabled to be simpler and match rpc-openstack.
|
DASHBOARD = 'rackspace'
ADD_INSTALLED_APPS = [
'rackspace',
]
ADD_ANGULAR_MODULES = ['horizon.dashboard.rackspace']
# If set to True, this dashboard will not be added to the settings.
DISABLED = False
|
<commit_before><commit_msg>Update enabled to be simpler and match rpc-openstack.<commit_after>
|
DASHBOARD = 'rackspace'
ADD_INSTALLED_APPS = [
'rackspace',
]
ADD_ANGULAR_MODULES = ['horizon.dashboard.rackspace']
# If set to True, this dashboard will not be added to the settings.
DISABLED = False
|
Update enabled to be simpler and match rpc-openstack.DASHBOARD = 'rackspace'
ADD_INSTALLED_APPS = [
'rackspace',
]
ADD_ANGULAR_MODULES = ['horizon.dashboard.rackspace']
# If set to True, this dashboard will not be added to the settings.
DISABLED = False
|
<commit_before><commit_msg>Update enabled to be simpler and match rpc-openstack.<commit_after>DASHBOARD = 'rackspace'
ADD_INSTALLED_APPS = [
'rackspace',
]
ADD_ANGULAR_MODULES = ['horizon.dashboard.rackspace']
# If set to True, this dashboard will not be added to the settings.
DISABLED = False
|
|
964607453ea240f8f281a1d84c24fe3b98fda6f4
|
exp/alto/tools/find_nulls.py
|
exp/alto/tools/find_nulls.py
|
#!/usr/bin/env python
# *-* coding=utf-8 *-*
# Compares the nolarge.graphs file (the one without #-lines) and Alto's output to find sentences that cannot be parsed
import sys
from itertools import izip
import re
def find_nulls(fn1, fn2):
with open(sys.argv[1]) as nolarge_graphs, open (sys.argv[2]) as alto_output:
for x, y, z in izip(nolarge_graphs, alto_output, alto_output):
if re.search("^(<null>|null)", y):
x = x.strip()
z = y.strip()
print("{0}\n{1}\n{2}".format(x, z, ''))
find_nulls(sys.argv[1], sys.argv[2])
|
Add script for finding sentences that cannot be parsed with our IRTG
|
Add script for finding sentences that cannot be parsed with our IRTG
|
Python
|
mit
|
kornai/4lang,kornai/4lang,kornai/4lang,kornai/4lang
|
Add script for finding sentences that cannot be parsed with our IRTG
|
#!/usr/bin/env python
# *-* coding=utf-8 *-*
# Compares the nolarge.graphs file (the one without #-lines) and Alto's output to find sentences that cannot be parsed
import sys
from itertools import izip
import re
def find_nulls(fn1, fn2):
with open(sys.argv[1]) as nolarge_graphs, open (sys.argv[2]) as alto_output:
for x, y, z in izip(nolarge_graphs, alto_output, alto_output):
if re.search("^(<null>|null)", y):
x = x.strip()
z = y.strip()
print("{0}\n{1}\n{2}".format(x, z, ''))
find_nulls(sys.argv[1], sys.argv[2])
|
<commit_before><commit_msg>Add script for finding sentences that cannot be parsed with our IRTG<commit_after>
|
#!/usr/bin/env python
# *-* coding=utf-8 *-*
# Compares the nolarge.graphs file (the one without #-lines) and Alto's output to find sentences that cannot be parsed
import sys
from itertools import izip
import re
def find_nulls(fn1, fn2):
with open(sys.argv[1]) as nolarge_graphs, open (sys.argv[2]) as alto_output:
for x, y, z in izip(nolarge_graphs, alto_output, alto_output):
if re.search("^(<null>|null)", y):
x = x.strip()
z = y.strip()
print("{0}\n{1}\n{2}".format(x, z, ''))
find_nulls(sys.argv[1], sys.argv[2])
|
Add script for finding sentences that cannot be parsed with our IRTG#!/usr/bin/env python
# *-* coding=utf-8 *-*
# Compares the nolarge.graphs file (the one without #-lines) and Alto's output to find sentences that cannot be parsed
import sys
from itertools import izip
import re
def find_nulls(fn1, fn2):
with open(sys.argv[1]) as nolarge_graphs, open (sys.argv[2]) as alto_output:
for x, y, z in izip(nolarge_graphs, alto_output, alto_output):
if re.search("^(<null>|null)", y):
x = x.strip()
z = y.strip()
print("{0}\n{1}\n{2}".format(x, z, ''))
find_nulls(sys.argv[1], sys.argv[2])
|
<commit_before><commit_msg>Add script for finding sentences that cannot be parsed with our IRTG<commit_after>#!/usr/bin/env python
# *-* coding=utf-8 *-*
# Compares the nolarge.graphs file (the one without #-lines) and Alto's output to find sentences that cannot be parsed
import sys
from itertools import izip
import re
def find_nulls(fn1, fn2):
with open(sys.argv[1]) as nolarge_graphs, open (sys.argv[2]) as alto_output:
for x, y, z in izip(nolarge_graphs, alto_output, alto_output):
if re.search("^(<null>|null)", y):
x = x.strip()
z = y.strip()
print("{0}\n{1}\n{2}".format(x, z, ''))
find_nulls(sys.argv[1], sys.argv[2])
|
|
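One detail of find_nulls.py worth spelling out: because the same alto_output file object is passed to izip twice, each loop iteration consumes one line from nolarge.graphs and two consecutive lines from Alto's output. A tiny Python 3 sketch of that pairing (zip plays the role of Python 2's izip here; the sample lines and the two-lines-per-sentence layout are assumptions for illustration, not the real file formats):

import io

nolarge_graphs = io.StringIO("graph-1\ngraph-2\n")
alto_output = io.StringIO("<null>\nderiv-1\nparse-2\nderiv-2\n")

# Passing the same iterator twice makes zip pull two consecutive lines
# from it per iteration, alongside one line from the first iterator.
for x, y, z in zip(nolarge_graphs, alto_output, alto_output):
    print(x.strip(), y.strip(), z.strip())
# graph-1 <null> deriv-1
# graph-2 parse-2 deriv-2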
95c33d94d5efa5157c45c96cb7f2279c655568a7
|
dashboard/dashboard/services/google_sheets_service.py
|
dashboard/dashboard/services/google_sheets_service.py
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""An interface to the Google Spreadsheets API.
API documentation: https://developers.google.com/sheets/api/reference/rest/
This service uses the default application credentials, so it can only access
public spreadsheets.
"""
import logging
from apiclient import discovery
from oauth2client.client import GoogleCredentials
DISCOVERY_URL = 'https://sheets.googleapis.com/$discovery/rest?version=v4'
def GetRange(spreadsheet_id, sheet_name, range_in_sheet):
"""Gets the given range in the given spreadsheet.
Args:
spreadsheet_id: The id from Google Sheets, like
https://docs.google.com/spreadsheets/d/<THIS PART>/
sheet_name: The name of the sheet to get, from the bottom tab.
range_in_sheet: The range, such as "A1:F14"
"""
credentials = GoogleCredentials.get_application_default()
service = discovery.build(
'sheets',
'v4',
credentials=credentials,
discoveryServiceUrl=DISCOVERY_URL)
sheet_range = '%s!%s' % (sheet_name, range_in_sheet)
result = service.spreadsheets().values().get(
spreadsheetId=spreadsheet_id, range=sheet_range).execute()
values = result.get('values', [])
if not values:
# Error reporting is not spectacular. Looks like values will just be None.
# But they could be None if there wasn't any data, either. So log it
# and still return the None value.
logging.error('Could not get values for %s of %s', sheet_range, sheet_name)
return values
|
Add basic spreadsheet service which can get a range from a public spreadsheet.
|
Add basic spreadsheet service which can get a range from a public spreadsheet.
Justification: the owners and some other more minor details about
benchmarks are currently stored in a spreadsheet. I need to get that
data to make a solid benchmark health report, and I'd like to get the
report soon so we can use it to get rid of benchmarks that are not worth
the time. Ideally this data would come through data pipe and be stored
in the datastore, but I think it will still be quite a while before
that happens. We may also want to add functionality to create a
spreadsheet to the dashboard, either as an additional output format
for this report, or a different use case for CSV API.
BUG=catapult:#3327
Review-Url: https://codereview.chromium.org/2729953002
|
Python
|
bsd-3-clause
|
catapult-project/catapult,sahiljain/catapult,benschmaus/catapult,benschmaus/catapult,benschmaus/catapult,sahiljain/catapult,sahiljain/catapult,benschmaus/catapult,catapult-project/catapult-csm,benschmaus/catapult,catapult-project/catapult-csm,catapult-project/catapult-csm,catapult-project/catapult-csm,sahiljain/catapult,catapult-project/catapult-csm,sahiljain/catapult,catapult-project/catapult,catapult-project/catapult,catapult-project/catapult,catapult-project/catapult,catapult-project/catapult,catapult-project/catapult-csm,catapult-project/catapult,sahiljain/catapult,catapult-project/catapult-csm,benschmaus/catapult,benschmaus/catapult
|
Add basic spreadsheet service which can get a range from a public spreadsheet.
Justification: the owners and some other more minor details about
benchmarks are currently stored in a spreadsheet. I need to get that
data to make a solid benchmark health report, and I'd like to get the
report soon so we can use it to get rid of benchmarks that are not worth
the time. Ideally this data would come through data pipe and be stored
in the datastore, but I think it will still be quite a while before
that happens. We may also want to add functionality to create a
spreadsheet to the dashboard, either as an additional output format
for this report, or a different use case for CSV API.
BUG=catapult:#3327
Review-Url: https://codereview.chromium.org/2729953002
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""An interface to the Google Spreadsheets API.
API documentation: https://developers.google.com/sheets/api/reference/rest/
This service uses the default application credentials, so it can only access
public spreadsheets.
"""
import logging
from apiclient import discovery
from oauth2client.client import GoogleCredentials
DISCOVERY_URL = 'https://sheets.googleapis.com/$discovery/rest?version=v4'
def GetRange(spreadsheet_id, sheet_name, range_in_sheet):
"""Gets the given range in the given spreadsheet.
Args:
spreadsheet_id: The id from Google Sheets, like
https://docs.google.com/spreadsheets/d/<THIS PART>/
sheet_name: The name of the sheet to get, from the bottom tab.
range_in_sheet: The range, such as "A1:F14"
"""
credentials = GoogleCredentials.get_application_default()
service = discovery.build(
'sheets',
'v4',
credentials=credentials,
discoveryServiceUrl=DISCOVERY_URL)
sheet_range = '%s!%s' % (sheet_name, range_in_sheet)
result = service.spreadsheets().values().get(
spreadsheetId=spreadsheet_id, range=sheet_range).execute()
values = result.get('values', [])
if not values:
# Error reporting is not spectacular. Looks like values will just be None.
# But they could be None if there wasn't any data, either. So log it
# and still return the None value.
logging.error('Could not get values for %s of %s', sheet_range, sheet_name)
return values
|
<commit_before><commit_msg>Add basic spreadsheet service which can get a range from a public spreadsheet.
Justification: the owners and some other more minor details about
benchmarks are currently stored in a spreadsheet. I need to get that
data to make a solid benchmark health report, and I'd like to get the
report soon so we can use it to get rid of benchmarks that are not worth
the time. Ideally this data would come through data pipe and be stored
in the datastore, but I think it will still be quite a while before
that happens. We may also want to add functionality to create a
spreadsheet to the dashboard, either as an additional output format
for this report, or a different use case for CSV API.
BUG=catapult:#3327
Review-Url: https://codereview.chromium.org/2729953002<commit_after>
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""An interface to the Google Spreadsheets API.
API documentation: https://developers.google.com/sheets/api/reference/rest/
This service uses the default application credentials, so it can only access
public spreadsheets.
"""
import logging
from apiclient import discovery
from oauth2client.client import GoogleCredentials
DISCOVERY_URL = 'https://sheets.googleapis.com/$discovery/rest?version=v4'
def GetRange(spreadsheet_id, sheet_name, range_in_sheet):
"""Gets the given range in the given spreadsheet.
Args:
spreadsheet_id: The id from Google Sheets, like
https://docs.google.com/spreadsheets/d/<THIS PART>/
sheet_name: The name of the sheet to get, from the bottom tab.
range_in_sheet: The range, such as "A1:F14"
"""
credentials = GoogleCredentials.get_application_default()
service = discovery.build(
'sheets',
'v4',
credentials=credentials,
discoveryServiceUrl=DISCOVERY_URL)
sheet_range = '%s!%s' % (sheet_name, range_in_sheet)
result = service.spreadsheets().values().get(
spreadsheetId=spreadsheet_id, range=sheet_range).execute()
values = result.get('values', [])
if not values:
# Error reporting is not spectacular. Looks like values will just be None.
# But they could be None if there wasn't any data, either. So log it
# and still return the None value.
logging.error('Could not get values for %s of %s', sheet_range, sheet_name)
return values
|
Add basic spreadsheet service which can get a range from a public spreadsheet.
Justification: the owners and some other more minor details about
benchmarks are currently stored in a spreadsheet. I need to get that
data to make a solid benchmark health report, and I'd like to get the
report soon so we can use it to get rid of benchmarks that are not worth
the time. Ideally this data would come through data pipe and be stored
in the datastore, but I think it will still be quite a while before
that happens. We may also want to add functionality to create a
spreadsheet to the dashboard, either as an additional output format
for this report, or a different use case for CSV API.
BUG=catapult:#3327
Review-Url: https://codereview.chromium.org/2729953002# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""An interface to the Google Spreadsheets API.
API documentation: https://developers.google.com/sheets/api/reference/rest/
This service uses the default application credentials, so it can only access
public spreadsheets.
"""
import logging
from apiclient import discovery
from oauth2client.client import GoogleCredentials
DISCOVERY_URL = 'https://sheets.googleapis.com/$discovery/rest?version=v4'
def GetRange(spreadsheet_id, sheet_name, range_in_sheet):
"""Gets the given range in the given spreadsheet.
Args:
spreadsheet_id: The id from Google Sheets, like
https://docs.google.com/spreadsheets/d/<THIS PART>/
sheet_name: The name of the sheet to get, from the bottom tab.
range_in_sheet: The range, such as "A1:F14"
"""
credentials = GoogleCredentials.get_application_default()
service = discovery.build(
'sheets',
'v4',
credentials=credentials,
discoveryServiceUrl=DISCOVERY_URL)
sheet_range = '%s!%s' % (sheet_name, range_in_sheet)
result = service.spreadsheets().values().get(
spreadsheetId=spreadsheet_id, range=sheet_range).execute()
values = result.get('values', [])
if not values:
# Error reporting is not spectacular. Looks like values will just be None.
# But they could be None if there wasn't any data, either. So log it
# and still return the None value.
logging.error('Could not get values for %s of %s', sheet_range, sheet_name)
return values
|
<commit_before><commit_msg>Add basic spreadsheet service which can get a range from a public spreadsheet.
Justification: the owners and some other more minor details about
benchmarks are currently stored in a spreadsheet. I need to get that
data to make a solid benchmark health report, and I'd like to get the
report soon so we can use it to get rid of benchmarks that are not worth
the time. Ideally this data would come through data pipe and be stored
in the datastore, but I think it will still be quite a while before
that happens. We may also want to add functionality to create a
spreadsheet to the dashboard, either as an additional output format
for this report, or a different use case for CSV API.
BUG=catapult:#3327
Review-Url: https://codereview.chromium.org/2729953002<commit_after># Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""An interface to the Google Spreadsheets API.
API documentation: https://developers.google.com/sheets/api/reference/rest/
This service uses the default application credentials, so it can only access
public spreadsheets.
"""
import logging
from apiclient import discovery
from oauth2client.client import GoogleCredentials
DISCOVERY_URL = 'https://sheets.googleapis.com/$discovery/rest?version=v4'
def GetRange(spreadsheet_id, sheet_name, range_in_sheet):
"""Gets the given range in the given spreadsheet.
Args:
spreadsheet_id: The id from Google Sheets, like
https://docs.google.com/spreadsheets/d/<THIS PART>/
sheet_name: The name of the sheet to get, from the bottom tab.
range_in_sheet: The range, such as "A1:F14"
"""
credentials = GoogleCredentials.get_application_default()
service = discovery.build(
'sheets',
'v4',
credentials=credentials,
discoveryServiceUrl=DISCOVERY_URL)
sheet_range = '%s!%s' % (sheet_name, range_in_sheet)
result = service.spreadsheets().values().get(
spreadsheetId=spreadsheet_id, range=sheet_range).execute()
values = result.get('values', [])
if not values:
# Error reporting is not spectacular. Looks like values will just be None.
# But they could be None if there wasn't any data, either. So log it
# and still return the None value.
logging.error('Could not get values for %s of %s', sheet_range, sheet_name)
return values
|
|
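A hedged usage sketch for GetRange: the import path is inferred from the file location, and the spreadsheet id, sheet name, and cell range are placeholders. The call assumes Google application-default credentials are available and the spreadsheet is public, as the module docstring notes.

from dashboard.services import google_sheets_service

# Placeholder spreadsheet id (the part after /spreadsheets/d/ in the URL),
# sheet tab name, and cell range.
rows = google_sheets_service.GetRange('1AbCdEfGhIjK', 'Benchmarks', 'A1:F14')
for row in rows:
    # Each row is a list of cell values for the requested range.
    print(row)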
e1b62a5d39fd3a4adb7d783c131fd122ba09c3d5
|
support/biicode-build.py
|
support/biicode-build.py
|
#!/usr/bin/env python
# Build the project with Biicode.
import bootstrap, glob, os, shutil
from download import Downloader
from subprocess import check_call
os_name = os.environ['TRAVIS_OS_NAME']
if os_name == 'linux':
# Install newer version of CMake.
bootstrap.install_cmake(
'cmake-3.1.1-Linux-i386.tar.gz', check_installed=False, download_dir=None, install_dir='.')
with Downloader().download('http://www.biicode.com/downloads/latest/ubuntu64') as f:
check_call(['sudo', 'dpkg', '-i', f])
elif os_name == 'osx':
with Downloader().download('http://www.biicode.com/downloads/latest/macos') as f:
check_call(['sudo', 'installer', '-pkg', f, '-target', '/'])
project_dir = 'biicode_project'
check_call(['bii', 'init', project_dir])
cppformat_dir = os.path.join(project_dir, 'blocks/vitaut/cppformat')
shutil.copytree('.', cppformat_dir,
ignore=shutil.ignore_patterns('biicode_project'))
for f in glob.glob('support/biicode/*'):
shutil.copy(f, cppformat_dir)
check_call(['bii', 'cpp:build'], cwd=project_dir)
|
#!/usr/bin/env python
# Build the project with Biicode.
import bootstrap, glob, os, shutil
from download import Downloader
from subprocess import check_call
os_name = os.environ['TRAVIS_OS_NAME']
if os_name == 'linux':
# Install newer version of CMake.
bootstrap.install_cmake(
'cmake-3.1.1-Linux-i386.tar.gz', check_installed=False, download_dir=None)
with Downloader().download('http://www.biicode.com/downloads/latest/ubuntu64') as f:
check_call(['sudo', 'dpkg', '-i', f])
elif os_name == 'osx':
with Downloader().download('http://www.biicode.com/downloads/latest/macos') as f:
check_call(['sudo', 'installer', '-pkg', f, '-target', '/'])
project_dir = 'biicode_project'
check_call(['bii', 'init', project_dir])
cppformat_dir = os.path.join(project_dir, 'blocks/vitaut/cppformat')
shutil.copytree('.', cppformat_dir,
ignore=shutil.ignore_patterns('biicode_project'))
for f in glob.glob('support/biicode/*'):
shutil.copy(f, cppformat_dir)
check_call(['bii', 'cpp:build'], cwd=project_dir)
|
Install CMake in system dirs
|
Install CMake in system dirs
|
Python
|
bsd-2-clause
|
blaquee/cppformat,mojoBrendan/fmt,cppformat/cppformat,mojoBrendan/fmt,seungrye/cppformat,lightslife/cppformat,nelson4722/cppformat,alabuzhev/fmt,alabuzhev/fmt,lightslife/cppformat,lightslife/cppformat,cppformat/cppformat,alabuzhev/fmt,cppformat/cppformat,Jopie64/cppformat,blaquee/cppformat,mojoBrendan/fmt,dean0x7d/cppformat,seungrye/cppformat,nelson4722/cppformat,wangshijin/cppformat,dean0x7d/cppformat,nelson4722/cppformat,Jopie64/cppformat,blaquee/cppformat,wangshijin/cppformat,dean0x7d/cppformat,Jopie64/cppformat,seungrye/cppformat,wangshijin/cppformat
|
#!/usr/bin/env python
# Build the project with Biicode.
import bootstrap, glob, os, shutil
from download import Downloader
from subprocess import check_call
os_name = os.environ['TRAVIS_OS_NAME']
if os_name == 'linux':
# Install newer version of CMake.
bootstrap.install_cmake(
'cmake-3.1.1-Linux-i386.tar.gz', check_installed=False, download_dir=None, install_dir='.')
with Downloader().download('http://www.biicode.com/downloads/latest/ubuntu64') as f:
check_call(['sudo', 'dpkg', '-i', f])
elif os_name == 'osx':
with Downloader().download('http://www.biicode.com/downloads/latest/macos') as f:
check_call(['sudo', 'installer', '-pkg', f, '-target', '/'])
project_dir = 'biicode_project'
check_call(['bii', 'init', project_dir])
cppformat_dir = os.path.join(project_dir, 'blocks/vitaut/cppformat')
shutil.copytree('.', cppformat_dir,
ignore=shutil.ignore_patterns('biicode_project'))
for f in glob.glob('support/biicode/*'):
shutil.copy(f, cppformat_dir)
check_call(['bii', 'cpp:build'], cwd=project_dir)
Install CMake in system dirs
|
#!/usr/bin/env python
# Build the project with Biicode.
import bootstrap, glob, os, shutil
from download import Downloader
from subprocess import check_call
os_name = os.environ['TRAVIS_OS_NAME']
if os_name == 'linux':
# Install newer version of CMake.
bootstrap.install_cmake(
'cmake-3.1.1-Linux-i386.tar.gz', check_installed=False, download_dir=None)
with Downloader().download('http://www.biicode.com/downloads/latest/ubuntu64') as f:
check_call(['sudo', 'dpkg', '-i', f])
elif os_name == 'osx':
with Downloader().download('http://www.biicode.com/downloads/latest/macos') as f:
check_call(['sudo', 'installer', '-pkg', f, '-target', '/'])
project_dir = 'biicode_project'
check_call(['bii', 'init', project_dir])
cppformat_dir = os.path.join(project_dir, 'blocks/vitaut/cppformat')
shutil.copytree('.', cppformat_dir,
ignore=shutil.ignore_patterns('biicode_project'))
for f in glob.glob('support/biicode/*'):
shutil.copy(f, cppformat_dir)
check_call(['bii', 'cpp:build'], cwd=project_dir)
|
<commit_before>#!/usr/bin/env python
# Build the project with Biicode.
import bootstrap, glob, os, shutil
from download import Downloader
from subprocess import check_call
os_name = os.environ['TRAVIS_OS_NAME']
if os_name == 'linux':
# Install newer version of CMake.
bootstrap.install_cmake(
'cmake-3.1.1-Linux-i386.tar.gz', check_installed=False, download_dir=None, install_dir='.')
with Downloader().download('http://www.biicode.com/downloads/latest/ubuntu64') as f:
check_call(['sudo', 'dpkg', '-i', f])
elif os_name == 'osx':
with Downloader().download('http://www.biicode.com/downloads/latest/macos') as f:
check_call(['sudo', 'installer', '-pkg', f, '-target', '/'])
project_dir = 'biicode_project'
check_call(['bii', 'init', project_dir])
cppformat_dir = os.path.join(project_dir, 'blocks/vitaut/cppformat')
shutil.copytree('.', cppformat_dir,
ignore=shutil.ignore_patterns('biicode_project'))
for f in glob.glob('support/biicode/*'):
shutil.copy(f, cppformat_dir)
check_call(['bii', 'cpp:build'], cwd=project_dir)
<commit_msg>Install CMake in system dirs<commit_after>
|
#!/usr/bin/env python
# Build the project with Biicode.
import bootstrap, glob, os, shutil
from download import Downloader
from subprocess import check_call
os_name = os.environ['TRAVIS_OS_NAME']
if os_name == 'linux':
# Install newer version of CMake.
bootstrap.install_cmake(
'cmake-3.1.1-Linux-i386.tar.gz', check_installed=False, download_dir=None)
with Downloader().download('http://www.biicode.com/downloads/latest/ubuntu64') as f:
check_call(['sudo', 'dpkg', '-i', f])
elif os_name == 'osx':
with Downloader().download('http://www.biicode.com/downloads/latest/macos') as f:
check_call(['sudo', 'installer', '-pkg', f, '-target', '/'])
project_dir = 'biicode_project'
check_call(['bii', 'init', project_dir])
cppformat_dir = os.path.join(project_dir, 'blocks/vitaut/cppformat')
shutil.copytree('.', cppformat_dir,
ignore=shutil.ignore_patterns('biicode_project'))
for f in glob.glob('support/biicode/*'):
shutil.copy(f, cppformat_dir)
check_call(['bii', 'cpp:build'], cwd=project_dir)
|
#!/usr/bin/env python
# Build the project with Biicode.
import bootstrap, glob, os, shutil
from download import Downloader
from subprocess import check_call
os_name = os.environ['TRAVIS_OS_NAME']
if os_name == 'linux':
# Install newer version of CMake.
bootstrap.install_cmake(
'cmake-3.1.1-Linux-i386.tar.gz', check_installed=False, download_dir=None, install_dir='.')
with Downloader().download('http://www.biicode.com/downloads/latest/ubuntu64') as f:
check_call(['sudo', 'dpkg', '-i', f])
elif os_name == 'osx':
with Downloader().download('http://www.biicode.com/downloads/latest/macos') as f:
check_call(['sudo', 'installer', '-pkg', f, '-target', '/'])
project_dir = 'biicode_project'
check_call(['bii', 'init', project_dir])
cppformat_dir = os.path.join(project_dir, 'blocks/vitaut/cppformat')
shutil.copytree('.', cppformat_dir,
ignore=shutil.ignore_patterns('biicode_project'))
for f in glob.glob('support/biicode/*'):
shutil.copy(f, cppformat_dir)
check_call(['bii', 'cpp:build'], cwd=project_dir)
Install CMake in system dirs#!/usr/bin/env python
# Build the project with Biicode.
import bootstrap, glob, os, shutil
from download import Downloader
from subprocess import check_call
os_name = os.environ['TRAVIS_OS_NAME']
if os_name == 'linux':
# Install newer version of CMake.
bootstrap.install_cmake(
'cmake-3.1.1-Linux-i386.tar.gz', check_installed=False, download_dir=None)
with Downloader().download('http://www.biicode.com/downloads/latest/ubuntu64') as f:
check_call(['sudo', 'dpkg', '-i', f])
elif os_name == 'osx':
with Downloader().download('http://www.biicode.com/downloads/latest/macos') as f:
check_call(['sudo', 'installer', '-pkg', f, '-target', '/'])
project_dir = 'biicode_project'
check_call(['bii', 'init', project_dir])
cppformat_dir = os.path.join(project_dir, 'blocks/vitaut/cppformat')
shutil.copytree('.', cppformat_dir,
ignore=shutil.ignore_patterns('biicode_project'))
for f in glob.glob('support/biicode/*'):
shutil.copy(f, cppformat_dir)
check_call(['bii', 'cpp:build'], cwd=project_dir)
|
<commit_before>#!/usr/bin/env python
# Build the project with Biicode.
import bootstrap, glob, os, shutil
from download import Downloader
from subprocess import check_call
os_name = os.environ['TRAVIS_OS_NAME']
if os_name == 'linux':
# Install newer version of CMake.
bootstrap.install_cmake(
'cmake-3.1.1-Linux-i386.tar.gz', check_installed=False, download_dir=None, install_dir='.')
with Downloader().download('http://www.biicode.com/downloads/latest/ubuntu64') as f:
check_call(['sudo', 'dpkg', '-i', f])
elif os_name == 'osx':
with Downloader().download('http://www.biicode.com/downloads/latest/macos') as f:
check_call(['sudo', 'installer', '-pkg', f, '-target', '/'])
project_dir = 'biicode_project'
check_call(['bii', 'init', project_dir])
cppformat_dir = os.path.join(project_dir, 'blocks/vitaut/cppformat')
shutil.copytree('.', cppformat_dir,
ignore=shutil.ignore_patterns('biicode_project'))
for f in glob.glob('support/biicode/*'):
shutil.copy(f, cppformat_dir)
check_call(['bii', 'cpp:build'], cwd=project_dir)
<commit_msg>Install CMake in system dirs<commit_after>#!/usr/bin/env python
# Build the project with Biicode.
import bootstrap, glob, os, shutil
from download import Downloader
from subprocess import check_call
os_name = os.environ['TRAVIS_OS_NAME']
if os_name == 'linux':
# Install newer version of CMake.
bootstrap.install_cmake(
'cmake-3.1.1-Linux-i386.tar.gz', check_installed=False, download_dir=None)
with Downloader().download('http://www.biicode.com/downloads/latest/ubuntu64') as f:
check_call(['sudo', 'dpkg', '-i', f])
elif os_name == 'osx':
with Downloader().download('http://www.biicode.com/downloads/latest/macos') as f:
check_call(['sudo', 'installer', '-pkg', f, '-target', '/'])
project_dir = 'biicode_project'
check_call(['bii', 'init', project_dir])
cppformat_dir = os.path.join(project_dir, 'blocks/vitaut/cppformat')
shutil.copytree('.', cppformat_dir,
ignore=shutil.ignore_patterns('biicode_project'))
for f in glob.glob('support/biicode/*'):
shutil.copy(f, cppformat_dir)
check_call(['bii', 'cpp:build'], cwd=project_dir)
|
94f1a090214d13d6b2dc28576014c2b83d93d18d
|
sgt/accounts/migrations/0003_auto_20160507_1902.py
|
sgt/accounts/migrations/0003_auto_20160507_1902.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-07 19:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20150629_1908'),
]
operations = [
migrations.AlterField(
model_name='userdbv',
name='position',
field=models.CharField(choices=[('CSO', 'Conselheiro(a)'), ('CPT', 'Capit(ã)o'), ('SCT', 'Secretário(a)'), ('TSR', 'Tesoureiro(a)'), ('INS', 'Instrutor(a)'), ('DRT', 'Diretor(a)'), ('DRT_AS', 'Diretor(a) Associado(a)'), ('CPL', 'Capel(ã)o'), ('DBV', 'Desbravador(a)')], max_length=100),
),
]
|
Create migration file for accounts app
|
Create migration file for accounts app
|
Python
|
mit
|
mazulo/SGT,mazulo/SGT
|
Create migration file for accounts app
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-07 19:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20150629_1908'),
]
operations = [
migrations.AlterField(
model_name='userdbv',
name='position',
field=models.CharField(choices=[('CSO', 'Conselheiro(a)'), ('CPT', 'Capit(ã)o'), ('SCT', 'Secretário(a)'), ('TSR', 'Tesoureiro(a)'), ('INS', 'Instrutor(a)'), ('DRT', 'Diretor(a)'), ('DRT_AS', 'Diretor(a) Associado(a)'), ('CPL', 'Capel(ã)o'), ('DBV', 'Desbravador(a)')], max_length=100),
),
]
|
<commit_before><commit_msg>Create migration file for accounts app<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-07 19:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20150629_1908'),
]
operations = [
migrations.AlterField(
model_name='userdbv',
name='position',
field=models.CharField(choices=[('CSO', 'Conselheiro(a)'), ('CPT', 'Capit(ã)o'), ('SCT', 'Secretário(a)'), ('TSR', 'Tesoureiro(a)'), ('INS', 'Instrutor(a)'), ('DRT', 'Diretor(a)'), ('DRT_AS', 'Diretor(a) Associado(a)'), ('CPL', 'Capel(ã)o'), ('DBV', 'Desbravador(a)')], max_length=100),
),
]
|
Create migration file for accounts app# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-07 19:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20150629_1908'),
]
operations = [
migrations.AlterField(
model_name='userdbv',
name='position',
field=models.CharField(choices=[('CSO', 'Conselheiro(a)'), ('CPT', 'Capit(ã)o'), ('SCT', 'Secretário(a)'), ('TSR', 'Tesoureiro(a)'), ('INS', 'Instrutor(a)'), ('DRT', 'Diretor(a)'), ('DRT_AS', 'Diretor(a) Associado(a)'), ('CPL', 'Capel(ã)o'), ('DBV', 'Desbravador(a)')], max_length=100),
),
]
|
<commit_before><commit_msg>Create migration file for accounts app<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-07 19:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20150629_1908'),
]
operations = [
migrations.AlterField(
model_name='userdbv',
name='position',
field=models.CharField(choices=[('CSO', 'Conselheiro(a)'), ('CPT', 'Capit(ã)o'), ('SCT', 'Secretário(a)'), ('TSR', 'Tesoureiro(a)'), ('INS', 'Instrutor(a)'), ('DRT', 'Diretor(a)'), ('DRT_AS', 'Diretor(a) Associado(a)'), ('CPL', 'Capel(ã)o'), ('DBV', 'Desbravador(a)')], max_length=100),
),
]
|
|
8a2979ae72bcd691521e2694c974219edfe5dc3b
|
altair/examples/top_k_with_others.py
|
altair/examples/top_k_with_others.py
|
"""
Top-K plot with Others
----------------------
This example shows how to use aggregate, window, and calculate transformations
to display the top-k directors by average worldwide gross while grouping the
remaining directors as 'All Others'.
"""
# category: case studies
import altair as alt
from vega_datasets import data
source = data.movies()
alt.Chart(source).mark_bar().encode(
x=alt.X("aggregate_gross:Q", aggregate="mean", title=None),
y=alt.Y(
"ranked_director:N",
sort=alt.Sort(op="mean", field="aggregate_gross", order="descending"),
title=None,
),
).transform_aggregate(
aggregate=[
alt.AggregatedFieldDef(
**{"as": "aggregate_gross", "op": "mean", "field": "Worldwide_Gross"}
)
],
groupby=["Director"],
).transform_window(
window=[alt.WindowFieldDef(**{"as": "rank", "op": "row_number"})],
sort=[alt.SortField("aggregate_gross", order="descending")],
).transform_calculate(
as_="ranked_director", calculate="datum.rank < 10 ? datum.Director : 'All Others'"
).properties(
title="Top Directors by Average Worldwide Gross",
)
|
Add example for Top-K with Others.
|
DOC: Add example for Top-K with Others.
|
Python
|
bsd-3-clause
|
altair-viz/altair,jakevdp/altair
|
DOC: Add example for Top-K with Others.
|
"""
Top-K plot with Others
----------------------
This example shows how to use aggregate, window, and calculate transformations
to display the top-k directors by average worldwide gross while grouping the
remaining directors as 'All Others'.
"""
# category: case studies
import altair as alt
from vega_datasets import data
source = data.movies()
alt.Chart(source).mark_bar().encode(
x=alt.X("aggregate_gross:Q", aggregate="mean", title=None),
y=alt.Y(
"ranked_director:N",
sort=alt.Sort(op="mean", field="aggregate_gross", order="descending"),
title=None,
),
).transform_aggregate(
aggregate=[
alt.AggregatedFieldDef(
**{"as": "aggregate_gross", "op": "mean", "field": "Worldwide_Gross"}
)
],
groupby=["Director"],
).transform_window(
window=[alt.WindowFieldDef(**{"as": "rank", "op": "row_number"})],
sort=[alt.SortField("aggregate_gross", order="descending")],
).transform_calculate(
as_="ranked_director", calculate="datum.rank < 10 ? datum.Director : 'All Others'"
).properties(
title="Top Directors by Average Worldwide Gross",
)
|
<commit_before><commit_msg>DOC: Add example for Top-K with Others.<commit_after>
|
"""
Top-K plot with Others
----------------------
This example shows how to use aggregate, window, and calculate transformations
to display the top-k directors by average worldwide gross while grouping the
remaining directors as 'All Others'.
"""
# category: case studies
import altair as alt
from vega_datasets import data
source = data.movies()
alt.Chart(source).mark_bar().encode(
x=alt.X("aggregate_gross:Q", aggregate="mean", title=None),
y=alt.Y(
"ranked_director:N",
sort=alt.Sort(op="mean", field="aggregate_gross", order="descending"),
title=None,
),
).transform_aggregate(
aggregate=[
alt.AggregatedFieldDef(
**{"as": "aggregate_gross", "op": "mean", "field": "Worldwide_Gross"}
)
],
groupby=["Director"],
).transform_window(
window=[alt.WindowFieldDef(**{"as": "rank", "op": "row_number"})],
sort=[alt.SortField("aggregate_gross", order="descending")],
).transform_calculate(
as_="ranked_director", calculate="datum.rank < 10 ? datum.Director : 'All Others'"
).properties(
title="Top Directors by Average Worldwide Gross",
)
|
DOC: Add example for Top-K with Others."""
Top-K plot with Others
----------------------
This example shows how to use aggregate, window, and calculate transformations
to display the top-k directors by average worldwide gross while grouping the
remaining directors as 'All Others'.
"""
# category: case studies
import altair as alt
from vega_datasets import data
source = data.movies()
alt.Chart(source).mark_bar().encode(
x=alt.X("aggregate_gross:Q", aggregate="mean", title=None),
y=alt.Y(
"ranked_director:N",
sort=alt.Sort(op="mean", field="aggregate_gross", order="descending"),
title=None,
),
).transform_aggregate(
aggregate=[
alt.AggregatedFieldDef(
**{"as": "aggregate_gross", "op": "mean", "field": "Worldwide_Gross"}
)
],
groupby=["Director"],
).transform_window(
window=[alt.WindowFieldDef(**{"as": "rank", "op": "row_number"})],
sort=[alt.SortField("aggregate_gross", order="descending")],
).transform_calculate(
as_="ranked_director", calculate="datum.rank < 10 ? datum.Director : 'All Others'"
).properties(
title="Top Directors by Average Worldwide Gross",
)
|
<commit_before><commit_msg>DOC: Add example for Top-K with Others.<commit_after>"""
Top-K plot with Others
----------------------
This example shows how to use aggregate, window, and calculate transformations
to display the top-k directors by average worldwide gross while grouping the
remaining directors as 'All Others'.
"""
# category: case studies
import altair as alt
from vega_datasets import data
source = data.movies()
alt.Chart(source).mark_bar().encode(
x=alt.X("aggregate_gross:Q", aggregate="mean", title=None),
y=alt.Y(
"ranked_director:N",
sort=alt.Sort(op="mean", field="aggregate_gross", order="descending"),
title=None,
),
).transform_aggregate(
aggregate=[
alt.AggregatedFieldDef(
**{"as": "aggregate_gross", "op": "mean", "field": "Worldwide_Gross"}
)
],
groupby=["Director"],
).transform_window(
window=[alt.WindowFieldDef(**{"as": "rank", "op": "row_number"})],
sort=[alt.SortField("aggregate_gross", order="descending")],
).transform_calculate(
as_="ranked_director", calculate="datum.rank < 10 ? datum.Director : 'All Others'"
).properties(
title="Top Directors by Average Worldwide Gross",
)
|
|
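As a cross-check on what the aggregate/window/calculate pipeline above computes, here is the same top-k-with-others grouping done eagerly in pandas. This is only an illustrative equivalent, not part of the Altair example; handling of rows with a missing Director may differ slightly from the Vega-Lite transforms.

from vega_datasets import data

movies = data.movies()
# Mean worldwide gross per director (the transform_aggregate step).
per_director = (movies.groupby("Director")["Worldwide_Gross"]
                      .mean()
                      .sort_values(ascending=False)
                      .reset_index(name="aggregate_gross"))
# Rank directors by that mean (the transform_window row_number step).
per_director["rank"] = range(1, len(per_director) + 1)
# Fold everyone outside the top nine into 'All Others' (transform_calculate).
per_director["ranked_director"] = per_director.apply(
    lambda r: r["Director"] if r["rank"] < 10 else "All Others", axis=1)
print(per_director.groupby("ranked_director")["aggregate_gross"]
                  .mean()
                  .sort_values(ascending=False))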
fd6a777857bc55f1744d86d3756051b236b7f822
|
package_monitor/migrations/0004_auto_20160109_1339.py
|
package_monitor/migrations/0004_auto_20160109_1339.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('package_monitor', '0003_packageversion_next_version'),
]
operations = [
migrations.AlterField(
model_name='packageversion',
name='is_editable',
field=models.BooleanField(default=False, help_text=b"True if this requirement is specified with '-e' flag.", verbose_name=b'Editable (-e)'),
),
migrations.AlterField(
model_name='packageversion',
name='url',
field=models.URLField(help_text=b'The PyPI URL to check - (blank if editable).', null=True, blank=True),
),
]
|
Add missing migration, bump to version 0.3.2
|
Add missing migration, bump to version 0.3.2
It's only metadata (labels), so not a dealbreaker.
|
Python
|
mit
|
yunojuno/django-package-monitor,yunojuno/django-package-monitor
|
Add missing migration, bump to version 0.3.2
It's only metadata (labels), so not a dealbreaker.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('package_monitor', '0003_packageversion_next_version'),
]
operations = [
migrations.AlterField(
model_name='packageversion',
name='is_editable',
field=models.BooleanField(default=False, help_text=b"True if this requirement is specified with '-e' flag.", verbose_name=b'Editable (-e)'),
),
migrations.AlterField(
model_name='packageversion',
name='url',
field=models.URLField(help_text=b'The PyPI URL to check - (blank if editable).', null=True, blank=True),
),
]
|
<commit_before><commit_msg>Add missing migration, bump to version 0.3.2
It's only metadata (labels), so not a dealbreaker.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('package_monitor', '0003_packageversion_next_version'),
]
operations = [
migrations.AlterField(
model_name='packageversion',
name='is_editable',
field=models.BooleanField(default=False, help_text=b"True if this requirement is specified with '-e' flag.", verbose_name=b'Editable (-e)'),
),
migrations.AlterField(
model_name='packageversion',
name='url',
field=models.URLField(help_text=b'The PyPI URL to check - (blank if editable).', null=True, blank=True),
),
]
|
Add missing migration, bump to version 0.3.2
It's only metadata (labels), so not a dealbreaker.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('package_monitor', '0003_packageversion_next_version'),
]
operations = [
migrations.AlterField(
model_name='packageversion',
name='is_editable',
field=models.BooleanField(default=False, help_text=b"True if this requirement is specified with '-e' flag.", verbose_name=b'Editable (-e)'),
),
migrations.AlterField(
model_name='packageversion',
name='url',
field=models.URLField(help_text=b'The PyPI URL to check - (blank if editable).', null=True, blank=True),
),
]
|
<commit_before><commit_msg>Add missing migration, bump to version 0.3.2
It's only metadata (labels), so not a dealbreaker.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('package_monitor', '0003_packageversion_next_version'),
]
operations = [
migrations.AlterField(
model_name='packageversion',
name='is_editable',
field=models.BooleanField(default=False, help_text=b"True if this requirement is specified with '-e' flag.", verbose_name=b'Editable (-e)'),
),
migrations.AlterField(
model_name='packageversion',
name='url',
field=models.URLField(help_text=b'The PyPI URL to check - (blank if editable).', null=True, blank=True),
),
]
|
|
0534a6a8a59d16d189b6dc8f452a29b4dee4ff72
|
evelink/__init__.py
|
evelink/__init__.py
|
"""EVELink - Python bindings for the EVE API."""
import logging
from evelink import account
from evelink import api
from evelink import char
from evelink import constants
from evelink import corp
from evelink import eve
from evelink import map
from evelink import server
__version__ = "0.4.0"
# Implement NullHandler because it was only added in Python 2.7+.
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Create a logger, but by default, have it do nothing
_log = logging.getLogger('evelink')
_log.addHandler(NullHandler())
# Update the version number used in the user-agent
api._user_agent = 'evelink v%s' % __version__
__all__ = [
"account",
"api",
"char",
"constants",
"corp",
"eve",
"map",
"parsing",
"server",
]
|
"""EVELink - Python bindings for the EVE API."""
import logging
from evelink import account
from evelink import api
from evelink import char
from evelink import constants
from evelink import corp
from evelink import eve
from evelink import map
from evelink import server
__version__ = "0.4.1"
# Implement NullHandler because it was only added in Python 2.7+.
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Create a logger, but by default, have it do nothing
_log = logging.getLogger('evelink')
_log.addHandler(NullHandler())
# Update the version number used in the user-agent
api._user_agent = 'evelink v%s' % __version__
__all__ = [
"account",
"api",
"char",
"constants",
"corp",
"eve",
"map",
"parsing",
"server",
]
|
Update version to 0.4.1 for release
|
Update version to 0.4.1 for release
|
Python
|
mit
|
bastianh/evelink,zigdon/evelink,ayust/evelink,Morloth1274/EVE-Online-POCO-manager,FashtimeDotCom/evelink
|
"""EVELink - Python bindings for the EVE API."""
import logging
from evelink import account
from evelink import api
from evelink import char
from evelink import constants
from evelink import corp
from evelink import eve
from evelink import map
from evelink import server
__version__ = "0.4.0"
# Implement NullHandler because it was only added in Python 2.7+.
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Create a logger, but by default, have it do nothing
_log = logging.getLogger('evelink')
_log.addHandler(NullHandler())
# Update the version number used in the user-agent
api._user_agent = 'evelink v%s' % __version__
__all__ = [
"account",
"api",
"char",
"constants",
"corp",
"eve",
"map",
"parsing",
"server",
]
Update version to 0.4.1 for release
|
"""EVELink - Python bindings for the EVE API."""
import logging
from evelink import account
from evelink import api
from evelink import char
from evelink import constants
from evelink import corp
from evelink import eve
from evelink import map
from evelink import server
__version__ = "0.4.1"
# Implement NullHandler because it was only added in Python 2.7+.
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Create a logger, but by default, have it do nothing
_log = logging.getLogger('evelink')
_log.addHandler(NullHandler())
# Update the version number used in the user-agent
api._user_agent = 'evelink v%s' % __version__
__all__ = [
"account",
"api",
"char",
"constants",
"corp",
"eve",
"map",
"parsing",
"server",
]
|
<commit_before>"""EVELink - Python bindings for the EVE API."""
import logging
from evelink import account
from evelink import api
from evelink import char
from evelink import constants
from evelink import corp
from evelink import eve
from evelink import map
from evelink import server
__version__ = "0.4.0"
# Implement NullHandler because it was only added in Python 2.7+.
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Create a logger, but by default, have it do nothing
_log = logging.getLogger('evelink')
_log.addHandler(NullHandler())
# Update the version number used in the user-agent
api._user_agent = 'evelink v%s' % __version__
__all__ = [
"account",
"api",
"char",
"constants",
"corp",
"eve",
"map",
"parsing",
"server",
]
<commit_msg>Update version to 0.4.1 for release<commit_after>
|
"""EVELink - Python bindings for the EVE API."""
import logging
from evelink import account
from evelink import api
from evelink import char
from evelink import constants
from evelink import corp
from evelink import eve
from evelink import map
from evelink import server
__version__ = "0.4.1"
# Implement NullHandler because it was only added in Python 2.7+.
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Create a logger, but by default, have it do nothing
_log = logging.getLogger('evelink')
_log.addHandler(NullHandler())
# Update the version number used in the user-agent
api._user_agent = 'evelink v%s' % __version__
__all__ = [
"account",
"api",
"char",
"constants",
"corp",
"eve",
"map",
"parsing",
"server",
]
|
"""EVELink - Python bindings for the EVE API."""
import logging
from evelink import account
from evelink import api
from evelink import char
from evelink import constants
from evelink import corp
from evelink import eve
from evelink import map
from evelink import server
__version__ = "0.4.0"
# Implement NullHandler because it was only added in Python 2.7+.
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Create a logger, but by default, have it do nothing
_log = logging.getLogger('evelink')
_log.addHandler(NullHandler())
# Update the version number used in the user-agent
api._user_agent = 'evelink v%s' % __version__
__all__ = [
"account",
"api",
"char",
"constants",
"corp",
"eve",
"map",
"parsing",
"server",
]
Update version to 0.4.1 for release"""EVELink - Python bindings for the EVE API."""
import logging
from evelink import account
from evelink import api
from evelink import char
from evelink import constants
from evelink import corp
from evelink import eve
from evelink import map
from evelink import server
__version__ = "0.4.1"
# Implement NullHandler because it was only added in Python 2.7+.
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Create a logger, but by default, have it do nothing
_log = logging.getLogger('evelink')
_log.addHandler(NullHandler())
# Update the version number used in the user-agent
api._user_agent = 'evelink v%s' % __version__
__all__ = [
"account",
"api",
"char",
"constants",
"corp",
"eve",
"map",
"parsing",
"server",
]
|
<commit_before>"""EVELink - Python bindings for the EVE API."""
import logging
from evelink import account
from evelink import api
from evelink import char
from evelink import constants
from evelink import corp
from evelink import eve
from evelink import map
from evelink import server
__version__ = "0.4.0"
# Implement NullHandler because it was only added in Python 2.7+.
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Create a logger, but by default, have it do nothing
_log = logging.getLogger('evelink')
_log.addHandler(NullHandler())
# Update the version number used in the user-agent
api._user_agent = 'evelink v%s' % __version__
__all__ = [
"account",
"api",
"char",
"constants",
"corp",
"eve",
"map",
"parsing",
"server",
]
<commit_msg>Update version to 0.4.1 for release<commit_after>"""EVELink - Python bindings for the EVE API."""
import logging
from evelink import account
from evelink import api
from evelink import char
from evelink import constants
from evelink import corp
from evelink import eve
from evelink import map
from evelink import server
__version__ = "0.4.1"
# Implement NullHandler because it was only added in Python 2.7+.
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Create a logger, but by default, have it do nothing
_log = logging.getLogger('evelink')
_log.addHandler(NullHandler())
# Update the version number used in the user-agent
api._user_agent = 'evelink v%s' % __version__
__all__ = [
"account",
"api",
"char",
"constants",
"corp",
"eve",
"map",
"parsing",
"server",
]
|
221ce25961d1344acf04146c1006c5dc239649ac
|
anaconda-mode/0.1.1/anaconda_mode.py
|
anaconda-mode/0.1.1/anaconda_mode.py
|
"""
anaconda_mode
~~~~~~~~~~~~~
This is anaconda_mode autocompletion server.
:copyright: (c) 2013-2015 by Artem Malyshev.
:license: GPL3, see LICENSE for more details.
"""
from __future__ import (
absolute_import, unicode_literals, division, print_function)
import sys
from functools import wraps
from jedi import Script, NotFoundError
from service_factory import service_factory
def script_method(f):
"""Create jedi.Script instance and apply f to it."""
@wraps(f)
def wrapper(source, line, column, path):
try:
return f(Script(source, line, column, path))
except NotFoundError:
return []
return wrapper
def process_definitions(f):
"""Call f and convert it result into json dumpable format."""
@wraps(f)
def wrapper(script):
return [{'name': definition.name,
'type': definition.type,
'module-name': definition.module_name,
'module-path': definition.module_path,
'line': definition.line,
'column': definition.column,
'docstring': definition.docstring(),
'description': definition.description,
'full-name': definition.full_name}
for definition in f(script)]
return wrapper
@script_method
@process_definitions
def complete(script):
"""Select auto-complete candidates for source position."""
return script.completions()
@script_method
@process_definitions
def goto_definitions(script):
"""Get definitions for thing under cursor."""
return script.goto_definitions()
@script_method
@process_definitions
def goto_assignments(script):
"""Get assignments for thing under cursor."""
return script.goto_assignments()
@script_method
@process_definitions
def usages(script):
"""Get usage information for thing under cursor."""
return script.usages()
@script_method
def eldoc(script):
"""Return eldoc format documentation string or ''."""
signatures = script.call_signatures()
if len(signatures) == 1:
signature = signatures[0]
return {
'name': signature.name,
'index': signature.index,
'params': [param.description for param in signature.params]
}
app = [complete, goto_definitions, goto_assignments, usages, eldoc]
if __name__ == '__main__':
host = sys.argv[1] if len(sys.argv) == 2 else '127.0.0.1'
service_factory(app, host, 'auto', 'anaconda_mode port {port}')
|
Check in anaconda-mode.py so pip doesn't need to install it.
|
Check in anaconda-mode.py so pip doesn't need to install it.
|
Python
|
mit
|
Wilfred/.emacs.d,Wilfred/.emacs.d,Wilfred/.emacs.d,Wilfred/.emacs.d,Wilfred/.emacs.d,Wilfred/.emacs.d,Wilfred/.emacs.d
|
Check in anaconda-mode.py so pip doesn't need to install it.
|
"""
anaconda_mode
~~~~~~~~~~~~~
This is anaconda_mode autocompletion server.
:copyright: (c) 2013-2015 by Artem Malyshev.
:license: GPL3, see LICENSE for more details.
"""
from __future__ import (
absolute_import, unicode_literals, division, print_function)
import sys
from functools import wraps
from jedi import Script, NotFoundError
from service_factory import service_factory
def script_method(f):
"""Create jedi.Script instance and apply f to it."""
@wraps(f)
def wrapper(source, line, column, path):
try:
return f(Script(source, line, column, path))
except NotFoundError:
return []
return wrapper
def process_definitions(f):
"""Call f and convert it result into json dumpable format."""
@wraps(f)
def wrapper(script):
return [{'name': definition.name,
'type': definition.type,
'module-name': definition.module_name,
'module-path': definition.module_path,
'line': definition.line,
'column': definition.column,
'docstring': definition.docstring(),
'description': definition.description,
'full-name': definition.full_name}
for definition in f(script)]
return wrapper
@script_method
@process_definitions
def complete(script):
"""Select auto-complete candidates for source position."""
return script.completions()
@script_method
@process_definitions
def goto_definitions(script):
"""Get definitions for thing under cursor."""
return script.goto_definitions()
@script_method
@process_definitions
def goto_assignments(script):
"""Get assignments for thing under cursor."""
return script.goto_assignments()
@script_method
@process_definitions
def usages(script):
"""Get usage information for thing under cursor."""
return script.usages()
@script_method
def eldoc(script):
"""Return eldoc format documentation string or ''."""
signatures = script.call_signatures()
if len(signatures) == 1:
signature = signatures[0]
return {
'name': signature.name,
'index': signature.index,
'params': [param.description for param in signature.params]
}
app = [complete, goto_definitions, goto_assignments, usages, eldoc]
if __name__ == '__main__':
host = sys.argv[1] if len(sys.argv) == 2 else '127.0.0.1'
service_factory(app, host, 'auto', 'anaconda_mode port {port}')
|
<commit_before><commit_msg>Check in anaconda-mode.py so pip doesn't need to install it.<commit_after>
|
"""
anaconda_mode
~~~~~~~~~~~~~
This is anaconda_mode autocompletion server.
:copyright: (c) 2013-2015 by Artem Malyshev.
:license: GPL3, see LICENSE for more details.
"""
from __future__ import (
absolute_import, unicode_literals, division, print_function)
import sys
from functools import wraps
from jedi import Script, NotFoundError
from service_factory import service_factory
def script_method(f):
"""Create jedi.Script instance and apply f to it."""
@wraps(f)
def wrapper(source, line, column, path):
try:
return f(Script(source, line, column, path))
except NotFoundError:
return []
return wrapper
def process_definitions(f):
"""Call f and convert it result into json dumpable format."""
@wraps(f)
def wrapper(script):
return [{'name': definition.name,
'type': definition.type,
'module-name': definition.module_name,
'module-path': definition.module_path,
'line': definition.line,
'column': definition.column,
'docstring': definition.docstring(),
'description': definition.description,
'full-name': definition.full_name}
for definition in f(script)]
return wrapper
@script_method
@process_definitions
def complete(script):
"""Select auto-complete candidates for source position."""
return script.completions()
@script_method
@process_definitions
def goto_definitions(script):
"""Get definitions for thing under cursor."""
return script.goto_definitions()
@script_method
@process_definitions
def goto_assignments(script):
"""Get assignments for thing under cursor."""
return script.goto_assignments()
@script_method
@process_definitions
def usages(script):
"""Get usage information for thing under cursor."""
return script.usages()
@script_method
def eldoc(script):
"""Return eldoc format documentation string or ''."""
signatures = script.call_signatures()
if len(signatures) == 1:
signature = signatures[0]
return {
'name': signature.name,
'index': signature.index,
'params': [param.description for param in signature.params]
}
app = [complete, goto_definitions, goto_assignments, usages, eldoc]
if __name__ == '__main__':
host = sys.argv[1] if len(sys.argv) == 2 else '127.0.0.1'
service_factory(app, host, 'auto', 'anaconda_mode port {port}')
|
Check in anaconda-mode.py so pip doesn't need to install it."""
anaconda_mode
~~~~~~~~~~~~~
This is anaconda_mode autocompletion server.
:copyright: (c) 2013-2015 by Artem Malyshev.
:license: GPL3, see LICENSE for more details.
"""
from __future__ import (
absolute_import, unicode_literals, division, print_function)
import sys
from functools import wraps
from jedi import Script, NotFoundError
from service_factory import service_factory
def script_method(f):
"""Create jedi.Script instance and apply f to it."""
@wraps(f)
def wrapper(source, line, column, path):
try:
return f(Script(source, line, column, path))
except NotFoundError:
return []
return wrapper
def process_definitions(f):
"""Call f and convert its result into json dumpable format."""
@wraps(f)
def wrapper(script):
return [{'name': definition.name,
'type': definition.type,
'module-name': definition.module_name,
'module-path': definition.module_path,
'line': definition.line,
'column': definition.column,
'docstring': definition.docstring(),
'description': definition.description,
'full-name': definition.full_name}
for definition in f(script)]
return wrapper
@script_method
@process_definitions
def complete(script):
"""Select auto-complete candidates for source position."""
return script.completions()
@script_method
@process_definitions
def goto_definitions(script):
"""Get definitions for thing under cursor."""
return script.goto_definitions()
@script_method
@process_definitions
def goto_assignments(script):
"""Get assignments for thing under cursor."""
return script.goto_assignments()
@script_method
@process_definitions
def usages(script):
"""Get usage information for thing under cursor."""
return script.usages()
@script_method
def eldoc(script):
"""Return eldoc format documentation string or ''."""
signatures = script.call_signatures()
if len(signatures) == 1:
signature = signatures[0]
return {
'name': signature.name,
'index': signature.index,
'params': [param.description for param in signature.params]
}
app = [complete, goto_definitions, goto_assignments, usages, eldoc]
if __name__ == '__main__':
host = sys.argv[1] if len(sys.argv) == 2 else '127.0.0.1'
service_factory(app, host, 'auto', 'anaconda_mode port {port}')
|
<commit_before><commit_msg>Check in anaconda-mode.py so pip doesn't need to install it.<commit_after>"""
anaconda_mode
~~~~~~~~~~~~~
This is anaconda_mode autocompletion server.
:copyright: (c) 2013-2015 by Artem Malyshev.
:license: GPL3, see LICENSE for more details.
"""
from __future__ import (
absolute_import, unicode_literals, division, print_function)
import sys
from functools import wraps
from jedi import Script, NotFoundError
from service_factory import service_factory
def script_method(f):
"""Create jedi.Script instance and apply f to it."""
@wraps(f)
def wrapper(source, line, column, path):
try:
return f(Script(source, line, column, path))
except NotFoundError:
return []
return wrapper
def process_definitions(f):
"""Call f and convert its result into json dumpable format."""
@wraps(f)
def wrapper(script):
return [{'name': definition.name,
'type': definition.type,
'module-name': definition.module_name,
'module-path': definition.module_path,
'line': definition.line,
'column': definition.column,
'docstring': definition.docstring(),
'description': definition.description,
'full-name': definition.full_name}
for definition in f(script)]
return wrapper
@script_method
@process_definitions
def complete(script):
"""Select auto-complete candidates for source position."""
return script.completions()
@script_method
@process_definitions
def goto_definitions(script):
"""Get definitions for thing under cursor."""
return script.goto_definitions()
@script_method
@process_definitions
def goto_assignments(script):
"""Get assignments for thing under cursor."""
return script.goto_assignments()
@script_method
@process_definitions
def usages(script):
"""Get usage information for thing under cursor."""
return script.usages()
@script_method
def eldoc(script):
"""Return eldoc format documentation string or ''."""
signatures = script.call_signatures()
if len(signatures) == 1:
signature = signatures[0]
return {
'name': signature.name,
'index': signature.index,
'params': [param.description for param in signature.params]
}
app = [complete, goto_definitions, goto_assignments, usages, eldoc]
if __name__ == '__main__':
host = sys.argv[1] if len(sys.argv) == 2 else '127.0.0.1'
service_factory(app, host, 'auto', 'anaconda_mode port {port}')
|
|
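The server above exposes its jedi helpers over JSON-RPC 2.0 through service_factory, so any HTTP client can drive them. Below is a minimal client sketch that is not part of the record: it assumes the process was started as python anaconda_mode.py 127.0.0.1, that it printed "anaconda_mode port 9000" on startup (the port is chosen automatically, so substitute the real one), and that positional params matching the wrapper(source, line, column, path) signature are accepted.
# Hypothetical client for the JSON-RPC completion server above.
import requests

SOURCE = "import json\njson.du"  # ask for completions after "json.du" on line 2

payload = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "complete",
    # positional params mirror wrapper(source, line, column, path)
    "params": [SOURCE, 2, 7, None],
}

response = requests.post("http://127.0.0.1:9000", json=payload)  # port is an assumption
# each candidate is a dict built by process_definitions (name, type, docstring, ...)
for candidate in response.json().get("result", []):
    print(candidate["name"], "-", candidate["description"])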
96c76f7a73f247374f5f0113ca048a8970398009
|
tests/unit/utils/cache_test.py
|
tests/unit/utils/cache_test.py
|
# -*- coding: utf-8 -*-
'''
tests.unit.utils.cache_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test the salt cache objects
'''
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
from salt.utils import cache
import time
class CacheDictTestCase(TestCase):
def test_sanity(self):
'''
Make sure you can instantiate etc.
'''
cd = cache.CacheDict(5)
assert isinstance(cd, cache.CacheDict)
# do some tests to make sure it looks like a dict
assert 'foo' not in cd
cd['foo'] = 'bar'
assert cd['foo'] == 'bar'
del cd['foo']
assert 'foo' not in cd
def test_ttl(self):
cd = cache.CacheDict(0.1)
cd['foo'] = 'bar'
assert 'foo' in cd
assert cd['foo'] == 'bar'
time.sleep(0.1)
assert 'foo' not in cd
# make sure that a get would get a regular old key error
self.assertRaises(KeyError, cd.__getitem__, 'foo')
if __name__ == '__main__':
from integration import run_tests
run_tests(CacheDictTestCase, needs_daemon=False)
|
Add some unit tests for cache object
|
Add some unit tests for cache object
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add some unit tests for cache object
|
# -*- coding: utf-8 -*-
'''
tests.unit.utils.cache_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test the salt cache objects
'''
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
from salt.utils import cache
import time
class CacheDictTestCase(TestCase):
def test_sanity(self):
'''
Make sure you can instantiate etc.
'''
cd = cache.CacheDict(5)
assert isinstance(cd, cache.CacheDict)
# do some tests to make sure it looks like a dict
assert 'foo' not in cd
cd['foo'] = 'bar'
assert cd['foo'] == 'bar'
del cd['foo']
assert 'foo' not in cd
def test_ttl(self):
cd = cache.CacheDict(0.1)
cd['foo'] = 'bar'
assert 'foo' in cd
assert cd['foo'] == 'bar'
time.sleep(0.1)
assert 'foo' not in cd
# make sure that a get would get a regular old key error
self.assertRaises(KeyError, cd.__getitem__, 'foo')
if __name__ == '__main__':
from integration import run_tests
run_tests(CacheDictTestCase, needs_daemon=False)
|
<commit_before><commit_msg>Add some unit tests for cache object<commit_after>
|
# -*- coding: utf-8 -*-
'''
tests.unit.utils.cache_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test the salt cache objects
'''
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
from salt.utils import cache
import time
class CacheDictTestCase(TestCase):
def test_sanity(self):
'''
Make sure you can instantiate etc.
'''
cd = cache.CacheDict(5)
assert isinstance(cd, cache.CacheDict)
# do some tests to make sure it looks like a dict
assert 'foo' not in cd
cd['foo'] = 'bar'
assert cd['foo'] == 'bar'
del cd['foo']
assert 'foo' not in cd
def test_ttl(self):
cd = cache.CacheDict(0.1)
cd['foo'] = 'bar'
assert 'foo' in cd
assert cd['foo'] == 'bar'
time.sleep(0.1)
assert 'foo' not in cd
# make sure that a get would get a regular old key error
self.assertRaises(KeyError, cd.__getitem__, 'foo')
if __name__ == '__main__':
from integration import run_tests
run_tests(CacheDictTestCase, needs_daemon=False)
|
Add some unit tests for cache object# -*- coding: utf-8 -*-
'''
tests.unit.utils.cache_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test the salt cache objects
'''
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
from salt.utils import cache
import time
class CacheDictTestCase(TestCase):
def test_sanity(self):
'''
Make sure you can instantiate etc.
'''
cd = cache.CacheDict(5)
assert isinstance(cd, cache.CacheDict)
# do some tests to make sure it looks like a dict
assert 'foo' not in cd
cd['foo'] = 'bar'
assert cd['foo'] == 'bar'
del cd['foo']
assert 'foo' not in cd
def test_ttl(self):
cd = cache.CacheDict(0.1)
cd['foo'] = 'bar'
assert 'foo' in cd
assert cd['foo'] == 'bar'
time.sleep(0.1)
assert 'foo' not in cd
# make sure that a get would get a regular old key error
self.assertRaises(KeyError, cd.__getitem__, 'foo')
if __name__ == '__main__':
from integration import run_tests
run_tests(CacheDictTestCase, needs_daemon=False)
|
<commit_before><commit_msg>Add some unit tests for cache object<commit_after># -*- coding: utf-8 -*-
'''
tests.unit.utils.cache_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test the salt cache objects
'''
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
from salt.utils import cache
import time
class CacheDictTestCase(TestCase):
def test_sanity(self):
'''
Make sure you can instantiate etc.
'''
cd = cache.CacheDict(5)
assert isinstance(cd, cache.CacheDict)
# do some tests to make sure it looks like a dict
assert 'foo' not in cd
cd['foo'] = 'bar'
assert cd['foo'] == 'bar'
del cd['foo']
assert 'foo' not in cd
def test_ttl(self):
cd = cache.CacheDict(0.1)
cd['foo'] = 'bar'
assert 'foo' in cd
assert cd['foo'] == 'bar'
time.sleep(0.1)
assert 'foo' not in cd
# make sure that a get would get a regular old key error
self.assertRaises(KeyError, cd.__getitem__, 'foo')
if __name__ == '__main__':
from integration import run_tests
run_tests(CacheDictTestCase, needs_daemon=False)
|
|
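The tests above pin down the behaviour expected of a TTL-backed dict: entries act like normal dict items, expire after the configured lifetime, and an expired lookup raises a plain KeyError. The following self-contained sketch illustrates one way to satisfy that interface; it is an illustration only, not salt.utils.cache.CacheDict itself.
# Minimal TTL dict sketch exercising the same interface as the tests above.
import time


class TTLDict(object):
    def __init__(self, ttl):
        self._ttl = ttl
        self._data = {}  # key -> (timestamp, value)

    def _expire(self, key):
        # raises KeyError for missing keys, matching the behaviour the tests expect
        timestamp, _ = self._data[key]
        if time.time() - timestamp > self._ttl:
            del self._data[key]

    def __setitem__(self, key, value):
        self._data[key] = (time.time(), value)

    def __getitem__(self, key):
        self._expire(key)
        return self._data[key][1]

    def __delitem__(self, key):
        del self._data[key]

    def __contains__(self, key):
        if key in self._data:
            self._expire(key)
        return key in self._data


cd = TTLDict(0.1)
cd['foo'] = 'bar'
assert 'foo' in cd and cd['foo'] == 'bar'
time.sleep(0.2)
assert 'foo' not in cd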
7d6d73dab90be9d3c77cfd81d73e0b5e21c340fa
|
playlist-to-yaml.py
|
playlist-to-yaml.py
|
"""Convert Exportify csv Spotify playlists to yaml."""
import os
import glob
import pandas
import yaml
csv_files = glob.glob("*.csv")
for playlist_file in csv_files:
playlist_name, _ = os.path.splitext(playlist_file)
yaml_file = 'yaml/{}.yaml'.format(playlist_name)
print("- {}".format(yaml_file))
playlist = pandas.read_csv(playlist_file)
playlist_dict = playlist.to_dict('index')
with open(yaml_file, 'w') as fp:
fp.write('#\n# {}\n#\n'.format(playlist_name))
yaml.safe_dump(playlist_dict, fp)
|
Convert a Spotify playlist from Exportify to yaml
|
Convert a Spotify playlist from Exportify to yaml
|
Python
|
mit
|
mdpiper/wunderkammer,mdpiper/wunderkammer,mdpiper/wunderkammer,mdpiper/wunderkammer
|
Convert a Spotify playlist from Exportify to yaml
|
"""Convert Exportify csv Spotify playlists to yaml."""
import os
import glob
import pandas
import yaml
csv_files = glob.glob("*.csv")
for playlist_file in csv_files:
playlist_name, _ = os.path.splitext(playlist_file)
yaml_file = 'yaml/{}.yaml'.format(playlist_name)
print("- {}".format(yaml_file))
playlist = pandas.read_csv(playlist_file)
playlist_dict = playlist.to_dict('index')
with open(yaml_file, 'w') as fp:
fp.write('#\n# {}\n#\n'.format(playlist_name))
yaml.safe_dump(playlist_dict, fp)
|
<commit_before><commit_msg>Convert a Spotify playlist from Exportify to yaml<commit_after>
|
"""Convert Exportify csv Spotify playlists to yaml."""
import os
import glob
import pandas
import yaml
csv_files = glob.glob("*.csv")
for playlist_file in csv_files:
playlist_name, _ = os.path.splitext(playlist_file)
yaml_file = 'yaml/{}.yaml'.format(playlist_name)
print("- {}".format(yaml_file))
playlist = pandas.read_csv(playlist_file)
playlist_dict = playlist.to_dict('index')
with open(yaml_file, 'w') as fp:
fp.write('#\n# {}\n#\n'.format(playlist_name))
yaml.safe_dump(playlist_dict, fp)
|
Convert a Spotify playlist from Exportify to yaml"""Convert Exportify csv Spotify playlists to yaml."""
import os
import glob
import pandas
import yaml
csv_files = glob.glob("*.csv")
for playlist_file in csv_files:
playlist_name, _ = os.path.splitext(playlist_file)
yaml_file = 'yaml/{}.yaml'.format(playlist_name)
print("- {}".format(yaml_file))
playlist = pandas.read_csv(playlist_file)
playlist_dict = playlist.to_dict('index')
with open(yaml_file, 'w') as fp:
fp.write('#\n# {}\n#\n'.format(playlist_name))
yaml.safe_dump(playlist_dict, fp)
|
<commit_before><commit_msg>Convert a Spotify playlist from Exportify to yaml<commit_after>"""Convert Exportify csv Spotify playlists to yaml."""
import os
import glob
import pandas
import yaml
csv_files = glob.glob("*.csv")
for playlist_file in csv_files:
playlist_name, _ = os.path.splitext(playlist_file)
yaml_file = 'yaml/{}.yaml'.format(playlist_name)
print("- {}".format(yaml_file))
playlist = pandas.read_csv(playlist_file)
playlist_dict = playlist.to_dict('index')
with open(yaml_file, 'w') as fp:
fp.write('#\n# {}\n#\n'.format(playlist_name))
yaml.safe_dump(playlist_dict, fp)
|
|
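Because the script above serialises each playlist with pandas' 'index' dict orientation, the YAML round-trips cleanly back into a DataFrame. The sketch below shows the reverse direction; the file name is a placeholder for whichever file the conversion actually produced.
# Load a generated playlist back into pandas; "yaml/My Playlist.yaml" is hypothetical.
import pandas
import yaml

with open('yaml/My Playlist.yaml') as fp:
    playlist_dict = yaml.safe_load(fp)  # {row_index: {column: value, ...}, ...}

playlist = pandas.DataFrame.from_dict(playlist_dict, orient='index')
print(playlist.head())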
a414bf58797809c3d79251fe0c818b1496bb36a8
|
scripts/hmi.py
|
scripts/hmi.py
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from controller.msg import control
mux_pub = rospy.Publisher('pwm_signal_input', Int32, queue_size=10)
def controller_callback(data):
mux_pub.publish(data.strafe_X)
print(data.strafe_X)
if __name__ == '__main__':
rospy.init_node('mux_node', anonymous=True)
rate = rospy.Rate(10)
rospy.Subscriber('control', control, controller_callback)
rospy.spin()
|
Add human machine interface node
|
Add human machine interface node
|
Python
|
mit
|
vortexntnu/rov-control,vortexntnu/rov-control,vortexntnu/rov-control
|
Add human machine interface node
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from controller.msg import control
mux_pub = rospy.Publisher('pwm_signal_input', Int32, queue_size=10)
def controller_callback(data):
mux_pub.publish(data.strafe_X)
print(data.strafe_X)
if __name__ == '__main__':
rospy.init_node('mux_node', anonymous=True)
rate = rospy.Rate(10)
rospy.Subscriber('control', control, controller_callback)
rospy.spin()
|
<commit_before><commit_msg>Add human machine interface node<commit_after>
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from controller.msg import control
mux_pub = rospy.Publisher('pwm_signal_input', Int32, queue_size=10)
def controller_callback(data):
mux_pub.publish(data.strafe_X)
print(data.strafe_X)
if __name__ == '__main__':
rospy.init_node('mux_node', anonymous=True)
rate = rospy.Rate(10)
rospy.Subscriber('control', control, controller_callback)
rospy.spin()
|
Add human machine interface node#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from controller.msg import control
mux_pub = rospy.Publisher('pwm_signal_input', Int32, queue_size=10)
def controller_callback(data):
mux_pub.publish(data.strafe_X)
print(data.strafe_X)
if __name__ == '__main__':
rospy.init_node('mux_node', anonymous=True)
rate = rospy.Rate(10)
rospy.Subscriber('control', control, controller_callback)
rospy.spin()
|
<commit_before><commit_msg>Add human machine interface node<commit_after>#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from controller.msg import control
mux_pub = rospy.Publisher('pwm_signal_input', Int32, queue_size=10)
def controller_callback(data):
mux_pub.publish(data.strafe_X)
print(data.strafe_X)
if __name__ == '__main__':
rospy.init_node('mux_node', anonymous=True)
rate = rospy.Rate(10)
rospy.Subscriber('control', control, controller_callback)
rospy.spin()
|
|
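The node above forwards the controller's strafe_X field onto pwm_signal_input as a std_msgs/Int32. A small listener sketch for inspecting those values from another terminal follows; it is an illustration and not part of the record's package.
# Hypothetical listener node for the pwm_signal_input topic published above.
import rospy
from std_msgs.msg import Int32


def pwm_callback(msg):
    rospy.loginfo("pwm_signal_input: %d", msg.data)


if __name__ == '__main__':
    rospy.init_node('pwm_listener', anonymous=True)
    rospy.Subscriber('pwm_signal_input', Int32, pwm_callback)
    rospy.spin()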
4bc449e790bec8faf700162e3b9c98e001aa1ff2
|
python/grayscale.py
|
python/grayscale.py
|
import requests
import json
# Convert a given image to gray color model. A grayscale (or graylevel) image is simply one in which the only colors are shades of gray.
# https://pixlab.io/#/cmd?id=grayscale for additional information.
req = requests.get('https://api.pixlab.io/grayscale',params={'img':'https://www.allaboutbirds.org/guide/PHOTO/LARGE/blue_jay_8.jpg','key':'My_PixLab_Key'})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Link to the grayscaled picture: "+ reply['link'])
|
Convert a given image to gray color model
|
Convert a given image to gray color model
|
Python
|
bsd-2-clause
|
symisc/pixlab,symisc/pixlab,symisc/pixlab
|
Convert a given image to gray color model
|
import requests
import json
# Convert a given image to gray color model. A grayscale (or graylevel) image is simply one in which the only colors are shades of gray.
# https://pixlab.io/#/cmd?id=grayscale for additional information.
req = requests.get('https://api.pixlab.io/grayscale',params={'img':'https://www.allaboutbirds.org/guide/PHOTO/LARGE/blue_jay_8.jpg','key':'My_PixLab_Key'})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Link to the grayscaled picture: "+ reply['link'])
|
<commit_before><commit_msg>Convert a given image to gray color model<commit_after>
|
import requests
import json
# Convert a given image to gray color model. A grayscale (or graylevel) image is simply one in which the only colors are shades of gray.
# https://pixlab.io/#/cmd?id=grayscale for additional information.
req = requests.get('https://api.pixlab.io/grayscale',params={'img':'https://www.allaboutbirds.org/guide/PHOTO/LARGE/blue_jay_8.jpg','key':'My_PixLab_Key'})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Link to the grayscaled picture: "+ reply['link'])
|
Convert a given image to gray color modelimport requests
import json
# Convert a given image to gray color model. A grayscale (or graylevel) image is simply one in which the only colors are shades of gray.
# https://pixlab.io/#/cmd?id=grayscale for additional information.
req = requests.get('https://api.pixlab.io/grayscale',params={'img':'https://www.allaboutbirds.org/guide/PHOTO/LARGE/blue_jay_8.jpg','key':'My_PixLab_Key'})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Link to the grayscaled picture: "+ reply['link'])
|
<commit_before><commit_msg>Convert a given image to gray color model<commit_after>import requests
import json
# Convert a given image to gray color model. A grayscale (or graylevel) image is simply one in which the only colors are shades of gray.
# https://pixlab.io/#/cmd?id=grayscale for additional information.
req = requests.get('https://api.pixlab.io/grayscale',params={'img':'https://www.allaboutbirds.org/guide/PHOTO/LARGE/blue_jay_8.jpg','key':'My_PixLab_Key'})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Link to the grayscaled picture: "+ reply['link'])
|
|
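The grayscale endpoint returns a link to the processed picture rather than the image bytes, so saving the result locally takes a second request. A follow-up sketch using the same documented endpoint; the output file name is arbitrary.
# Fetch and save the grayscaled image returned by the API call above.
import requests

reply = requests.get(
    'https://api.pixlab.io/grayscale',
    params={'img': 'https://www.allaboutbirds.org/guide/PHOTO/LARGE/blue_jay_8.jpg',
            'key': 'My_PixLab_Key'}
).json()

if reply['status'] != 200:
    print(reply['error'])
else:
    image = requests.get(reply['link'])
    with open('blue_jay_grayscale.jpg', 'wb') as out:  # arbitrary local file name
        out.write(image.content)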
d3eaa685f788fc6dd19681175d9e0b45a6989fdf
|
caffe2/python/layers/add_bias.py
|
caffe2/python/layers/add_bias.py
|
## @package add_bias
# Module caffe2.python.layers.add_bias
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
ModelLayer,
LayerParameter
)
import math
import numpy as np
class AddBias(ModelLayer):
def __init__(self, model, input_record, bias_init=None,
bias_optim=None, name='add_bias'):
super(AddBias, self).__init__(model, name, input_record)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
assert len(input_record.field_type().shape) > 0, (
"AddBias expects limited dimensions of the input tensor")
input_dims = input_record.field_type().shape[0]
assert input_dims > 0, (
"AddBias expects input dimensions > 0, got {}".format(input_dims))
self.output_schema = schema.Scalar(
(input_record.field_type().base, (input_dims, )),
model.net.NextScopedBlob(name + '_output')
)
scale = math.sqrt(1.0 / input_dims)
bias_init = bias_init if bias_init else (
'UniformFill', {'min': -scale, 'max': scale})
self.b = model.net.NextScopedBlob(name + "_b")
self.params.append(
LayerParameter(
parameter=self.b,
initializer=core.CreateOperator(bias_init[0],
[],
self.b,
shape=[input_dims, ],
**bias_init[1]
),
optimizer=bias_optim))
def add_ops(self, net):
net.Add(self.input_record.field_blobs() + [self.b],
self.output_schema.field_blobs(), broadcast=1)
|
Add bias to cosine distance for two tower models
|
Add bias to cosine distance for two tower models
Summary: Currently, using two tower models with cosine distance results in bad calibration. Adding a bias to the output of the cosine term solves the problem.
Reviewed By: xianjiec
Differential Revision: D5132606
fbshipit-source-id: eb4fa75acf908db89954eeee67627b4a00572f61
|
Python
|
apache-2.0
|
pietern/caffe2,pietern/caffe2,xzturn/caffe2,pietern/caffe2,bwasti/caffe2,pietern/caffe2,davinwang/caffe2,pietern/caffe2,xzturn/caffe2,davinwang/caffe2,davinwang/caffe2,bwasti/caffe2,xzturn/caffe2,davinwang/caffe2,xzturn/caffe2,Yangqing/caffe2,xzturn/caffe2,davinwang/caffe2,bwasti/caffe2,bwasti/caffe2,Yangqing/caffe2,sf-wind/caffe2,sf-wind/caffe2,Yangqing/caffe2,sf-wind/caffe2,Yangqing/caffe2,sf-wind/caffe2,Yangqing/caffe2,bwasti/caffe2,sf-wind/caffe2,caffe2/caffe2
|
Add bias to cosine distance for two tower models
Summary: Currently, using two tower models with cosine distance results in bad calibration. Adding a bias to the output of the cosine term solves the problem.
Reviewed By: xianjiec
Differential Revision: D5132606
fbshipit-source-id: eb4fa75acf908db89954eeee67627b4a00572f61
|
## @package add_bias
# Module caffe2.python.layers.add_bias
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
ModelLayer,
LayerParameter
)
import math
import numpy as np
class AddBias(ModelLayer):
def __init__(self, model, input_record, bias_init=None,
bias_optim=None, name='add_bias'):
super(AddBias, self).__init__(model, name, input_record)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
assert len(input_record.field_type().shape) > 0, (
"AddBias expects limited dimensions of the input tensor")
input_dims = input_record.field_type().shape[0]
assert input_dims > 0, (
"AddBias expects input dimensions > 0, got {}".format(input_dims))
self.output_schema = schema.Scalar(
(input_record.field_type().base, (input_dims, )),
model.net.NextScopedBlob(name + '_output')
)
scale = math.sqrt(1.0 / input_dims)
bias_init = bias_init if bias_init else (
'UniformFill', {'min': -scale, 'max': scale})
self.b = model.net.NextScopedBlob(name + "_b")
self.params.append(
LayerParameter(
parameter=self.b,
initializer=core.CreateOperator(bias_init[0],
[],
self.b,
shape=[input_dims, ],
**bias_init[1]
),
optimizer=bias_optim))
def add_ops(self, net):
net.Add(self.input_record.field_blobs() + [self.b],
self.output_schema.field_blobs(), broadcast=1)
|
<commit_before><commit_msg>Add bias to cosine distance for two tower models
Summary: Currently, using two tower models with cosine distance results in bad calibration. Adding a bias to the output of the cosine term solves the problem.
Reviewed By: xianjiec
Differential Revision: D5132606
fbshipit-source-id: eb4fa75acf908db89954eeee67627b4a00572f61<commit_after>
|
## @package add_bias
# Module caffe2.python.layers.add_bias
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
ModelLayer,
LayerParameter
)
import math
import numpy as np
class AddBias(ModelLayer):
def __init__(self, model, input_record, bias_init=None,
bias_optim=None, name='add_bias'):
super(AddBias, self).__init__(model, name, input_record)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
assert len(input_record.field_type().shape) > 0, (
"AddBias expects limited dimensions of the input tensor")
input_dims = input_record.field_type().shape[0]
assert input_dims > 0, (
"AddBias expects input dimensions > 0, got {}".format(input_dims))
self.output_schema = schema.Scalar(
(input_record.field_type().base, (input_dims, )),
model.net.NextScopedBlob(name + '_output')
)
scale = math.sqrt(1.0 / input_dims)
bias_init = bias_init if bias_init else (
'UniformFill', {'min': -scale, 'max': scale})
self.b = model.net.NextScopedBlob(name + "_b")
self.params.append(
LayerParameter(
parameter=self.b,
initializer=core.CreateOperator(bias_init[0],
[],
self.b,
shape=[input_dims, ],
**bias_init[1]
),
optimizer=bias_optim))
def add_ops(self, net):
net.Add(self.input_record.field_blobs() + [self.b],
self.output_schema.field_blobs(), broadcast=1)
|
Add bias to cosine distance for two tower models
Summary: Currently, using two tower models with cosine distance results in bad calibration. Adding a bias to the output of the cosine term solves the problem.
Reviewed By: xianjiec
Differential Revision: D5132606
fbshipit-source-id: eb4fa75acf908db89954eeee67627b4a00572f61## @package add_bias
# Module caffe2.python.layers.add_bias
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
ModelLayer,
LayerParameter
)
import math
import numpy as np
class AddBias(ModelLayer):
def __init__(self, model, input_record, bias_init=None,
bias_optim=None, name='add_bias'):
super(AddBias, self).__init__(model, name, input_record)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
assert len(input_record.field_type().shape) > 0, (
"AddBias expects limited dimensions of the input tensor")
input_dims = input_record.field_type().shape[0]
assert input_dims > 0, (
"AddBias expects input dimensions > 0, got {}".format(input_dims))
self.output_schema = schema.Scalar(
(input_record.field_type().base, (input_dims, )),
model.net.NextScopedBlob(name + '_output')
)
scale = math.sqrt(1.0 / input_dims)
bias_init = bias_init if bias_init else (
'UniformFill', {'min': -scale, 'max': scale})
self.b = model.net.NextScopedBlob(name + "_b")
self.params.append(
LayerParameter(
parameter=self.b,
initializer=core.CreateOperator(bias_init[0],
[],
self.b,
shape=[input_dims, ],
**bias_init[1]
),
optimizer=bias_optim))
def add_ops(self, net):
net.Add(self.input_record.field_blobs() + [self.b],
self.output_schema.field_blobs(), broadcast=1)
|
<commit_before><commit_msg>Add bias to cosine distance for two tower models
Summary: Currently, using two tower models with cosine distance results in bad calibration. Adding a bias to the output of the cosine term solves the problem.
Reviewed By: xianjiec
Differential Revision: D5132606
fbshipit-source-id: eb4fa75acf908db89954eeee67627b4a00572f61<commit_after>## @package add_bias
# Module caffe2.python.layers.add_bias
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
ModelLayer,
LayerParameter
)
import math
import numpy as np
class AddBias(ModelLayer):
def __init__(self, model, input_record, bias_init=None,
bias_optim=None, name='add_bias'):
super(AddBias, self).__init__(model, name, input_record)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
assert len(input_record.field_type().shape) > 0, (
"AddBias expects limited dimensions of the input tensor")
input_dims = input_record.field_type().shape[0]
assert input_dims > 0, (
"AddBias expects input dimensions > 0, got {}".format(input_dims))
self.output_schema = schema.Scalar(
(input_record.field_type().base, (input_dims, )),
model.net.NextScopedBlob(name + '_output')
)
scale = math.sqrt(1.0 / input_dims)
bias_init = bias_init if bias_init else (
'UniformFill', {'min': -scale, 'max': scale})
self.b = model.net.NextScopedBlob(name + "_b")
self.params.append(
LayerParameter(
parameter=self.b,
initializer=core.CreateOperator(bias_init[0],
[],
self.b,
shape=[input_dims, ],
**bias_init[1]
),
optimizer=bias_optim))
def add_ops(self, net):
net.Add(self.input_record.field_blobs() + [self.b],
self.output_schema.field_blobs(), broadcast=1)
|
|
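The layer above fills a bias vector of length input_dims from UniformFill over [-scale, scale] with scale = sqrt(1 / input_dims), and add_ops emits a broadcasted Add against the (batch, input_dims) input. A plain numpy sketch of that computation, using arbitrary example shapes rather than anything from a real net:
# Numpy illustration of the broadcasted bias add wired up by AddBias.add_ops.
import math
import numpy as np

batch_size, input_dims = 4, 8
scale = math.sqrt(1.0 / input_dims)

x = np.random.randn(batch_size, input_dims).astype(np.float32)
b = np.random.uniform(-scale, scale, size=(input_dims,)).astype(np.float32)  # UniformFill analogue

output = x + b  # net.Add(..., broadcast=1) broadcasts b over the batch dimension
assert output.shape == (batch_size, input_dims)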
437b334588ce05bb94d489e51cb43af76cd5180c
|
localtv/management/commands/clear_tiers_state.py
|
localtv/management/commands/clear_tiers_state.py
|
# This file is part of Miro Community.
# Copyright (C) 2011 Participatory Culture Foundation
#
# Miro Community is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Miro Community is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Miro Community. If not, see <http://www.gnu.org/licenses/>.
import traceback
from django.core.files.storage import default_storage
from django.core.management.base import NoArgsCommand
from django.db.models import Q
import localtv.models
class Command(NoArgsCommand):
args = ''
def handle_noargs(self, verbosity=0, **options):
### Reset the tiers state to:
# - tier is basic
# - all TierData is blank
### If you want a proper simulation of deployed sites, you should
### make sure to set settings.LOCALTV_DISABLE_TIERS_ENFORCEMENT to True
sitelocation = localtv.models.SiteLocation.objects.get_current()
tier_info = localtv.models.TierInfo.objects.get_current()
sitelocation.tier_name = 'basic'
sitelocation.save()
tier_info.payment_due_date = None
tier_info.free_trial_available = True
tier_info.free_trial_started_on = None
tier_info.in_free_trial = False
tier_info.payment_secret = ''
tier_info.get_payment_secret() # fill the payment secret field
tier_info.current_paypal_profile_id = ''
tier_info.video_allotment_warning_sent = False
tier_info.free_trial_warning_sent = False
tier_info.already_sent_welcome_email = False
tier_info.inactive_site_warning_sent = False
tier_info.user_has_successfully_performed_a_paypal_transaction = False
tier_info.already_sent_tiers_compliance_email = False
tier_info.save()
|
Add a management command that resets tiers state
|
Add a management command that resets tiers state
|
Python
|
agpl-3.0
|
pculture/mirocommunity,pculture/mirocommunity,pculture/mirocommunity,pculture/mirocommunity
|
Add a management command that resets tiers state
|
# This file is part of Miro Community.
# Copyright (C) 2011 Participatory Culture Foundation
#
# Miro Community is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Miro Community is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Miro Community. If not, see <http://www.gnu.org/licenses/>.
import traceback
from django.core.files.storage import default_storage
from django.core.management.base import NoArgsCommand
from django.db.models import Q
import localtv.models
class Command(NoArgsCommand):
args = ''
def handle_noargs(self, verbosity=0, **options):
### Reset the tiers state to:
# - tier is basic
# - all TierData is blank
### If you want a proper simulation of deployed sites, you should
### make sure to set settings.LOCALTV_DISABLE_TIERS_ENFORCEMENT to True
sitelocation = localtv.models.SiteLocation.objects.get_current()
tier_info = localtv.models.TierInfo.objects.get_current()
sitelocation.tier_name = 'basic'
sitelocation.save()
tier_info.payment_due_date = None
tier_info.free_trial_available = True
tier_info.free_trial_started_on = None
tier_info.in_free_trial = False
tier_info.payment_secret = ''
tier_info.get_payment_secret() # fill the payment secret field
tier_info.current_paypal_profile_id = ''
tier_info.video_allotment_warning_sent = False
tier_info.free_trial_warning_sent = False
tier_info.already_sent_welcome_email = False
tier_info.inactive_site_warning_sent = False
tier_info.user_has_successfully_performed_a_paypal_transaction = False
tier_info.already_sent_tiers_compliance_email = False
tier_info.save()
|
<commit_before><commit_msg>Add a management command that resets tiers state<commit_after>
|
# This file is part of Miro Community.
# Copyright (C) 2011 Participatory Culture Foundation
#
# Miro Community is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Miro Community is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Miro Community. If not, see <http://www.gnu.org/licenses/>.
import traceback
from django.core.files.storage import default_storage
from django.core.management.base import NoArgsCommand
from django.db.models import Q
import localtv.models
class Command(NoArgsCommand):
args = ''
def handle_noargs(self, verbosity=0, **options):
### Reset the tiers state to:
# - tier is basic
# - all TierData is blank
### If you want a proper simulation of deployed sites, you should
### make sure to set settings.LOCALTV_DISABLE_TIERS_ENFORCEMENT to True
sitelocation = localtv.models.SiteLocation.objects.get_current()
tier_info = localtv.models.TierInfo.objects.get_current()
sitelocation.tier_name = 'basic'
sitelocation.save()
tier_info.payment_due_date = None
tier_info.free_trial_available = True
tier_info.free_trial_started_on = None
tier_info.in_free_trial = False
tier_info.payment_secret = ''
tier_info.get_payment_secret() # fill the payment secret field
tier_info.current_paypal_profile_id = ''
tier_info.video_allotment_warning_sent = False
tier_info.free_trial_warning_sent = False
tier_info.already_sent_welcome_email = False
tier_info.inactive_site_warning_sent = False
tier_info.user_has_successfully_performed_a_paypal_transaction = False
tier_info.already_sent_tiers_compliance_email = False
tier_info.save()
|
Add a management command that resets tiers state# This file is part of Miro Community.
# Copyright (C) 2011 Participatory Culture Foundation
#
# Miro Community is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Miro Community is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Miro Community. If not, see <http://www.gnu.org/licenses/>.
import traceback
from django.core.files.storage import default_storage
from django.core.management.base import NoArgsCommand
from django.db.models import Q
import localtv.models
class Command(NoArgsCommand):
args = ''
def handle_noargs(self, verbosity=0, **options):
### Reset the tiers state to:
# - tier is basic
# - all TierData is blank
### If you want a proper simulation of deployed sites, you should
### make sure to set settings.LOCALTV_DISABLE_TIERS_ENFORCEMENT to True
sitelocation = localtv.models.SiteLocation.objects.get_current()
tier_info = localtv.models.TierInfo.objects.get_current()
sitelocation.tier_name = 'basic'
sitelocation.save()
tier_info.payment_due_date = None
tier_info.free_trial_available = True
tier_info.free_trial_started_on = None
tier_info.in_free_trial = False
tier_info.payment_secret = ''
tier_info.get_payment_secret() # fill the payment secret field
tier_info.current_paypal_profile_id = ''
tier_info.video_allotment_warning_sent = False
tier_info.free_trial_warning_sent = False
tier_info.already_sent_welcome_email = False
tier_info.inactive_site_warning_sent = False
tier_info.user_has_successfully_performed_a_paypal_transaction = False
tier_info.already_sent_tiers_compliance_email = False
tier_info.save()
|
<commit_before><commit_msg>Add a management command that resets tiers state<commit_after># This file is part of Miro Community.
# Copyright (C) 2011 Participatory Culture Foundation
#
# Miro Community is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Miro Community is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Miro Community. If not, see <http://www.gnu.org/licenses/>.
import traceback
from django.core.files.storage import default_storage
from django.core.management.base import NoArgsCommand
from django.db.models import Q
import localtv.models
class Command(NoArgsCommand):
args = ''
def handle_noargs(self, verbosity=0, **options):
### Reset the tiers state to:
# - tier is basic
# - all TierData is blank
### If you want a proper simulation of deployed sites, you should
### make sure to set settings.LOCALTV_DISABLE_TIERS_ENFORCEMENT to True
sitelocation = localtv.models.SiteLocation.objects.get_current()
tier_info = localtv.models.TierInfo.objects.get_current()
sitelocation.tier_name = 'basic'
sitelocation.save()
tier_info.payment_due_date = None
tier_info.free_trial_available = True
tier_info.free_trial_started_on = None
tier_info.in_free_trial = False
tier_info.payment_secret = ''
tier_info.get_payment_secret() # fill the payment secret field
tier_info.current_paypal_profile_id = ''
tier_info.video_allotment_warning_sent = False
tier_info.free_trial_warning_sent = False
tier_info.already_sent_welcome_email = False
tier_info.inactive_site_warning_sent = False
tier_info.user_has_successfully_performed_a_paypal_transaction = False
tier_info.already_sent_tiers_compliance_email = False
tier_info.save()
|
|
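Besides manage.py, the command above can be invoked programmatically, which is convenient in test fixtures that need a clean tiers state. A sketch assuming a configured Django project with the localtv app installed:
# Equivalent to: python manage.py clear_tiers_state
from django.core.management import call_command

import localtv.models

call_command('clear_tiers_state')
assert localtv.models.SiteLocation.objects.get_current().tier_name == 'basic'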
e2be62a7c4e5184851b4f0383a324ffefa1e3b48
|
tools/touch_all_files.py
|
tools/touch_all_files.py
|
#!/usr/bin/python
"""
This script touches all files known to the database, creating a skeletal
mirror for local development.
"""
import sys, os
import store
def get_paths(cursor, prefix=None):
store.safe_execute(cursor, "SELECT python_version, name, filename FROM release_files")
for type, name, filename in cursor.fetchall():
yield os.path.join(prefix, type, name[0], name, filename)
if __name__ == '__main__':
import config
try:
config = config.Config(sys.argv[1])
except IndexError:
print "Usage: touch_all_files.py config.ini"
raise SystemExit
datastore = store.Store(config)
datastore.open()
cursor = datastore.get_cursor()
prefix = config.database_files_dir
for path in get_paths(cursor, prefix):
dir = os.path.dirname(path)
if not os.path.exists(dir):
print "Creating directory %s" % dir
os.makedirs(dir)
if not os.path.exists(path):
print "Creating file %s" % path
open(path, "a")
|
Add script to synthesize all uploaded files. Patch by Dan Callahan.
|
Add script to synthesize all uploaded files.
Patch by Dan Callahan.
git-svn-id: 757818eefc3e095bf4f5c16d67ad3f55b5150c3d@608 072f9a9a-8cf7-0310-8ca5-bf92c90cb7c1
|
Python
|
bsd-3-clause
|
ericholscher/pypi,ericholscher/pypi
|
Add script to synthesize all uploaded files.
Patch by Dan Callahan.
git-svn-id: 757818eefc3e095bf4f5c16d67ad3f55b5150c3d@608 072f9a9a-8cf7-0310-8ca5-bf92c90cb7c1
|
#!/usr/bin/python
"""
This script touches all files known to the database, creating a skeletal
mirror for local development.
"""
import sys, os
import store
def get_paths(cursor, prefix=None):
store.safe_execute(cursor, "SELECT python_version, name, filename FROM release_files")
for type, name, filename in cursor.fetchall():
yield os.path.join(prefix, type, name[0], name, filename)
if __name__ == '__main__':
import config
try:
config = config.Config(sys.argv[1])
except IndexError:
print "Usage: touch_all_files.py config.ini"
raise SystemExit
datastore = store.Store(config)
datastore.open()
cursor = datastore.get_cursor()
prefix = config.database_files_dir
for path in get_paths(cursor, prefix):
dir = os.path.dirname(path)
if not os.path.exists(dir):
print "Creating directory %s" % dir
os.makedirs(dir)
if not os.path.exists(path):
print "Creating file %s" % path
open(path, "a")
|
<commit_before><commit_msg>Add script to synthesize all uploaded files.
Patch by Dan Callahan.
git-svn-id: 757818eefc3e095bf4f5c16d67ad3f55b5150c3d@608 072f9a9a-8cf7-0310-8ca5-bf92c90cb7c1<commit_after>
|
#!/usr/bin/python
"""
This script touches all files known to the database, creating a skeletal
mirror for local development.
"""
import sys, os
import store
def get_paths(cursor, prefix=None):
store.safe_execute(cursor, "SELECT python_version, name, filename FROM release_files")
for type, name, filename in cursor.fetchall():
yield os.path.join(prefix, type, name[0], name, filename)
if __name__ == '__main__':
import config
try:
config = config.Config(sys.argv[1])
except IndexError:
print "Usage: touch_all_files.py config.ini"
raise SystemExit
datastore = store.Store(config)
datastore.open()
cursor = datastore.get_cursor()
prefix = config.database_files_dir
for path in get_paths(cursor, prefix):
dir = os.path.dirname(path)
if not os.path.exists(dir):
print "Creating directory %s" % dir
os.makedirs(dir)
if not os.path.exists(path):
print "Creating file %s" % path
open(path, "a")
|
Add script to synthesize all uploaded files.
Patch by Dan Callahan.
git-svn-id: 757818eefc3e095bf4f5c16d67ad3f55b5150c3d@608 072f9a9a-8cf7-0310-8ca5-bf92c90cb7c1#!/usr/bin/python
"""
This script touches all files known to the database, creating a skeletal
mirror for local development.
"""
import sys, os
import store
def get_paths(cursor, prefix=None):
store.safe_execute(cursor, "SELECT python_version, name, filename FROM release_files")
for type, name, filename in cursor.fetchall():
yield os.path.join(prefix, type, name[0], name, filename)
if __name__ == '__main__':
import config
try:
config = config.Config(sys.argv[1])
except IndexError:
print "Usage: touch_all_files.py config.ini"
raise SystemExit
datastore = store.Store(config)
datastore.open()
cursor = datastore.get_cursor()
prefix = config.database_files_dir
for path in get_paths(cursor, prefix):
dir = os.path.dirname(path)
if not os.path.exists(dir):
print "Creating directory %s" % dir
os.makedirs(dir)
if not os.path.exists(path):
print "Creating file %s" % path
open(path, "a")
|
<commit_before><commit_msg>Add script to synthesize all uploaded files.
Patch by Dan Callahan.
git-svn-id: 757818eefc3e095bf4f5c16d67ad3f55b5150c3d@608 072f9a9a-8cf7-0310-8ca5-bf92c90cb7c1<commit_after>#!/usr/bin/python
"""
This script touches all files known to the database, creating a skeletal
mirror for local development.
"""
import sys, os
import store
def get_paths(cursor, prefix=None):
store.safe_execute(cursor, "SELECT python_version, name, filename FROM release_files")
for type, name, filename in cursor.fetchall():
yield os.path.join(prefix, type, name[0], name, filename)
if __name__ == '__main__':
import config
try:
config = config.Config(sys.argv[1])
except IndexError:
print "Usage: touch_all_files.py config.ini"
raise SystemExit
datastore = store.Store(config)
datastore.open()
cursor = datastore.get_cursor()
prefix = config.database_files_dir
for path in get_paths(cursor, prefix):
dir = os.path.dirname(path)
if not os.path.exists(dir):
print "Creating directory %s" % dir
os.makedirs(dir)
if not os.path.exists(path):
print "Creating file %s" % path
open(path, "a")
|
|
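After the script above runs, the mirror consists of empty placeholder files laid out as python_version/first-letter/name/filename under the configured files directory. A quick stdlib sanity check follows; the path below stands in for config.database_files_dir.
# Count the zero-byte placeholders created by touch_all_files.py.
import os

prefix = "/var/pypi/files"  # placeholder for config.database_files_dir
placeholders = 0
for root, dirs, files in os.walk(prefix):
    for name in files:
        if os.path.getsize(os.path.join(root, name)) == 0:
            placeholders += 1
print("%d placeholder files under %s" % (placeholders, prefix))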
592786370f21730bcf574b2ce483550ee176ca20
|
python/chapter_4.py
|
python/chapter_4.py
|
import math
class Opt:
def __init__(self, v = None):
self.valid = (v != None)
self.value = v
def safe_rezi(x):
if x == 0:
return Opt()
else:
return Opt(1/x)
def safe_root(x):
if x < 0:
return Opt()
else:
return Opt(math.sqrt(x))
def compose(f,g):
def h(x):
y = g(x)
if y.valid:
return f(y.value)
else:
return Opt()
return h
h = compose(safe_root, safe_rezi)
|
Add Python Chapter 4 Challenges
|
Add Python Chapter 4 Challenges
|
Python
|
mit
|
stefanheyder/ReadingCourseCategoryTheroy,stefanheyder/ReadingCourseCategoryTheroy
|
Add Python Chapter 4 Challenges
|
import math
class Opt:
def __init__(self, v = None):
self.valid = (v != None)
self.value = v
def safe_rezi(x):
if x == 0:
return Opt()
else:
return Opt(1/x)
def safe_root(x):
if x < 0:
return Opt()
else:
return Opt(math.sqrt(x))
def compose(f,g):
def h(x):
y = g(x)
if y.valid:
return f(y.value)
else:
return Opt()
return h
h = compose(safe_root, safe_rezi)
|
<commit_before><commit_msg>Add Python Chapter 4 Challenges<commit_after>
|
import math
class Opt:
def __init__(self, v = None):
self.valid = (v != None)
self.value = v
def safe_rezi(x):
if x == 0:
return Opt()
else:
return Opt(1/x)
def safe_root(x):
if x < 0:
return Opt()
else:
return Opt(math.sqrt(x))
def compose(f,g):
def h(x):
y = g(x)
if y.valid:
return f(y.value)
else:
return Opt()
return h
h = compose(safe_root, safe_rezi)
|
Add Python Chapter 4 Challengesimport math
class Opt:
def __init__(self, v = None):
self.valid = (v != None)
self.value = v
def safe_rezi(x):
if x == 0:
return Opt()
else:
return Opt(1/x)
def safe_root(x):
if x < 0:
return Opt()
else:
return Opt(math.sqrt(x))
def compose(f,g):
def h(x):
y = g(x)
if y.valid:
return f(y.value)
else:
return Opt()
return h
h = compose(safe_root, safe_rezi)
|
<commit_before><commit_msg>Add Python Chapter 4 Challenges<commit_after>import math
class Opt:
def __init__(self, v = None):
self.valid = (v != None)
self.value = v
def safe_rezi(x):
if x == 0:
return Opt()
else:
return Opt(1/x)
def safe_root(x):
if x < 0:
return Opt()
else:
return Opt(math.sqrt(x))
def compose(f,g):
def h(x):
y = g(x)
if y.valid:
return f(y.value)
else:
return Opt()
return h
h = compose(safe_root, safe_rezi)
|
|
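compose above chains two Opt-returning functions and short-circuits as soon as either step fails, i.e. Kleisli-style composition for an Option/Maybe value. A usage sketch, assuming the definitions are importable (for example saved as chapter_4.py on the path); float arguments avoid Python 2 integer division inside safe_rezi.
# Hypothetical usage of the composed function h = safe_root after safe_rezi.
from chapter_4 import compose, safe_rezi, safe_root

h = compose(safe_root, safe_rezi)  # h(x) = sqrt(1/x) when both steps succeed

print(h(4.0).value)   # 0.5
print(h(0).valid)     # False: the reciprocal of zero fails, so the chain short-circuits
print(h(-2.0).valid)  # False: sqrt of a negative reciprocal fails at the second step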
28bb129931e14d5681ba717f6c949e2305fd2e03
|
django/website/main/tests/test_merge_coverage_handling.py
|
django/website/main/tests/test_merge_coverage_handling.py
|
from mock import Mock
from main.management.commands.merge_coverage_files import Command
from main.tests.helper_methods import mock_out_unwanted_methods
def test_merge_coverage_handle_calls_parse_options():
merge_coverage_files_command = Command()
# We don't want these methods to run
mock_out_unwanted_methods(merge_coverage_files_command, ['prepare_packagefilters', 'write_filtered_data', 'write_merged_data'])
merge_coverage_files_command.parse_options = Mock()
merge_coverage_files_command.handle()
assert merge_coverage_files_command.parse_options.called
def test_merge_coverage_handle_calls_prepare_packagefilters():
merge_coverage_files_command = Command()
# We don't want these methods to run
mock_out_unwanted_methods(merge_coverage_files_command, ['parse_options', 'write_filtered_data', 'write_merged_data'])
merge_coverage_files_command.prepare_packagefilters = Mock()
merge_coverage_files_command.handle()
assert merge_coverage_files_command.prepare_packagefilters.called
def test_write_filtered_data_called_when_filteronly_is_true():
merge_coverage_files_command = Command()
merge_coverage_files_command.filteronly = True
# We don't want these methods to run
mock_out_unwanted_methods(merge_coverage_files_command, ['parse_options', 'prepare_packagefilters', 'write_merged_data'])
merge_coverage_files_command.write_filtered_data = Mock()
merge_coverage_files_command.handle()
assert merge_coverage_files_command.write_filtered_data.called
def test_write_merged_data_called_when_filteronly_is_false():
merge_coverage_files_command = Command()
merge_coverage_files_command.filteronly = False
# We don't want these methods to run
mock_out_unwanted_methods(merge_coverage_files_command, ['parse_options', 'prepare_packagefilters', 'write_filtered_data'])
merge_coverage_files_command.write_merged_data = Mock()
merge_coverage_files_command.handle()
assert merge_coverage_files_command.write_merged_data.called
|
Add tests to run command to merge content
|
Add tests to run command to merge content
|
Python
|
agpl-3.0
|
daniell/kashana,aptivate/alfie,daniell/kashana,aptivate/alfie,daniell/kashana,daniell/kashana,aptivate/kashana,aptivate/kashana,aptivate/kashana,aptivate/alfie,aptivate/alfie,aptivate/kashana
|
Add tests to run command to merge content
|
from mock import Mock
from main.management.commands.merge_coverage_files import Command
from main.tests.helper_methods import mock_out_unwanted_methods
def test_merge_coverage_handle_calls_parse_options():
merge_coverage_files_command = Command()
# We don't want these methods to run
mock_out_unwanted_methods(merge_coverage_files_command, ['prepare_packagefilters', 'write_filtered_data', 'write_merged_data'])
merge_coverage_files_command.parse_options = Mock()
merge_coverage_files_command.handle()
assert merge_coverage_files_command.parse_options.called
def test_merge_coverage_handle_calls_prepare_packagefilters():
merge_coverage_files_command = Command()
# We don't want these methods to run
mock_out_unwanted_methods(merge_coverage_files_command, ['parse_options', 'write_filtered_data', 'write_merged_data'])
merge_coverage_files_command.prepare_packagefilters = Mock()
merge_coverage_files_command.handle()
assert merge_coverage_files_command.prepare_packagefilters.called
def test_write_filtered_data_called_when_filteronly_is_true():
merge_coverage_files_command = Command()
merge_coverage_files_command.filteronly = True
# We don't want these methods to run
mock_out_unwanted_methods(merge_coverage_files_command, ['parse_options', 'prepare_packagefilters', 'write_merged_data'])
merge_coverage_files_command.write_filtered_data = Mock()
merge_coverage_files_command.handle()
assert merge_coverage_files_command.write_filtered_data.called
def test_write_merged_data_called_when_filteronly_is_false():
merge_coverage_files_command = Command()
merge_coverage_files_command.filteronly = False
# We don't want these methods to run
mock_out_unwanted_methods(merge_coverage_files_command, ['parse_options', 'prepare_packagefilters', 'write_filtered_data'])
merge_coverage_files_command.write_merged_data = Mock()
merge_coverage_files_command.handle()
assert merge_coverage_files_command.write_merged_data.called
|
<commit_before><commit_msg>Add tests to run command to merge content<commit_after>
|
from mock import Mock
from main.management.commands.merge_coverage_files import Command
from main.tests.helper_methods import mock_out_unwanted_methods
def test_merge_coverage_handle_calls_parse_options():
merge_coverage_files_command = Command()
# We don't want these methods to run
mock_out_unwanted_methods(merge_coverage_files_command, ['prepare_packagefilters', 'write_filtered_data', 'write_merged_data'])
merge_coverage_files_command.parse_options = Mock()
merge_coverage_files_command.handle()
assert merge_coverage_files_command.parse_options.called
def test_merge_coverage_handle_calls_prepare_packagefilters():
merge_coverage_files_command = Command()
# We don't want these methods to run
mock_out_unwanted_methods(merge_coverage_files_command, ['parse_options', 'write_filtered_data', 'write_merged_data'])
merge_coverage_files_command.prepare_packagefilters = Mock()
merge_coverage_files_command.handle()
assert merge_coverage_files_command.prepare_packagefilters.called
def test_write_filtered_data_called_when_filteronly_is_true():
merge_coverage_files_command = Command()
merge_coverage_files_command.filteronly = True
# We don't want these methods to run
mock_out_unwanted_methods(merge_coverage_files_command, ['parse_options', 'prepare_packagefilters', 'write_merged_data'])
merge_coverage_files_command.write_filtered_data = Mock()
merge_coverage_files_command.handle()
assert merge_coverage_files_command.write_filtered_data.called
def test_write_merged_data_called_when_filteronly_is_false():
merge_coverage_files_command = Command()
merge_coverage_files_command.filteronly = False
# We don't want these methods to run
mock_out_unwanted_methods(merge_coverage_files_command, ['parse_options', 'prepare_packagefilters', 'write_filtered_data'])
merge_coverage_files_command.write_merged_data = Mock()
merge_coverage_files_command.handle()
assert merge_coverage_files_command.write_merged_data.called
|
Add tests to run command to merge contentfrom mock import Mock
from main.management.commands.merge_coverage_files import Command
from main.tests.helper_methods import mock_out_unwanted_methods
def test_merge_coverage_handle_calls_parse_options():
merge_coverage_files_command = Command()
# We don't want these methods to run
mock_out_unwanted_methods(merge_coverage_files_command, ['prepare_packagefilters', 'write_filtered_data', 'write_merged_data'])
merge_coverage_files_command.parse_options = Mock()
merge_coverage_files_command.handle()
assert merge_coverage_files_command.parse_options.called
def test_merge_coverage_handle_calls_prepare_packagefilters():
merge_coverage_files_command = Command()
# We don't want these methods to run
mock_out_unwanted_methods(merge_coverage_files_command, ['parse_options', 'write_filtered_data', 'write_merged_data'])
merge_coverage_files_command.prepare_packagefilters = Mock()
merge_coverage_files_command.handle()
assert merge_coverage_files_command.prepare_packagefilters.called
def test_write_filtered_data_called_when_filteronly_is_true():
merge_coverage_files_command = Command()
merge_coverage_files_command.filteronly = True
# We don't want these methods to run
mock_out_unwanted_methods(merge_coverage_files_command, ['parse_options', 'prepare_packagefilters', 'write_merged_data'])
merge_coverage_files_command.write_filtered_data = Mock()
merge_coverage_files_command.handle()
assert merge_coverage_files_command.write_filtered_data.called
def test_write_merged_data_called_when_filteronly_is_false():
merge_coverage_files_command = Command()
merge_coverage_files_command.filteronly = False
# We don't want these methods to run
mock_out_unwanted_methods(merge_coverage_files_command, ['parse_options', 'prepare_packagefilters', 'write_filtered_data'])
merge_coverage_files_command.write_merged_data = Mock()
merge_coverage_files_command.handle()
assert merge_coverage_files_command.write_merged_data.called
|
<commit_before><commit_msg>Add tests to run command to merge content<commit_after>from mock import Mock
from main.management.commands.merge_coverage_files import Command
from main.tests.helper_methods import mock_out_unwanted_methods
def test_merge_coverage_handle_calls_parse_options():
merge_coverage_files_command = Command()
# We don't want these methods to run
mock_out_unwanted_methods(merge_coverage_files_command, ['prepare_packagefilters', 'write_filtered_data', 'write_merged_data'])
merge_coverage_files_command.parse_options = Mock()
merge_coverage_files_command.handle()
assert merge_coverage_files_command.parse_options.called
def test_merge_coverage_handle_calls_prepare_packagefilters():
merge_coverage_files_command = Command()
# We don't want these methods to run
mock_out_unwanted_methods(merge_coverage_files_command, ['parse_options', 'write_filtered_data', 'write_merged_data'])
merge_coverage_files_command.prepare_packagefilters = Mock()
merge_coverage_files_command.handle()
assert merge_coverage_files_command.prepare_packagefilters.called
def test_write_filtered_data_called_when_filteronly_is_true():
merge_coverage_files_command = Command()
merge_coverage_files_command.filteronly = True
# We don't want these methods to run
mock_out_unwanted_methods(merge_coverage_files_command, ['parse_options', 'prepare_packagefilters', 'write_merged_data'])
merge_coverage_files_command.write_filtered_data = Mock()
merge_coverage_files_command.handle()
assert merge_coverage_files_command.write_filtered_data.called
def test_write_merged_data_called_when_filteronly_is_false():
merge_coverage_files_command = Command()
merge_coverage_files_command.filteronly = False
# We don't want these methods to run
mock_out_unwanted_methods(merge_coverage_files_command, ['parse_options', 'prepare_packagefilters', 'write_filtered_data'])
merge_coverage_files_command.write_merged_data = Mock()
merge_coverage_files_command.handle()
assert merge_coverage_files_command.write_merged_data.called
|
|
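The merge_coverage_files tests in the record above import a mock_out_unwanted_methods helper from main.tests.helper_methods that is not included here, so its behaviour can only be inferred from how it is called. A minimal sketch consistent with that usage (replace each named method on the command object with a Mock so handle() can run in isolation); this is an assumption, not the project's actual helper:

from mock import Mock


def mock_out_unwanted_methods(obj, method_names):
    """Replace each named method on obj with a Mock so it does nothing when called."""
    for name in method_names:
        setattr(obj, name, Mock())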
9a56b447aaf546814e4e87bc6d60855b33fdf3ff
|
tests/test_pgbackup.py
|
tests/test_pgbackup.py
|
# coding: utf-8
"""
Unit tests for essential functions in postgresql backup.
"""
from unittest.mock import MagicMock, mock_open, patch
import pytest
import smdba.postgresqlgate
class TestPgBackup:
"""
Test suite for postgresql backup.
"""
@patch("smdba.postgresqlgate.os.path.exists", MagicMock(return_value=False))
def test_init_pkbackup_checks_archivecleaup(self):
"""
Test constructor of pkgbackup pg_archivecleanup installed
:return:
"""
with pytest.raises(Exception) as exc:
smdba.postgresqlgate.PgBackup("/target")
assert "The utility pg_archivecleanup was not found on the path." in str(exc)
|
Add unit test suite for PgBackup
|
Add unit test suite for PgBackup
|
Python
|
mit
|
SUSE/smdba,SUSE/smdba
|
Add unit test suite for PgBackup
|
# coding: utf-8
"""
Unit tests for essential functions in postgresql backup.
"""
from unittest.mock import MagicMock, mock_open, patch
import pytest
import smdba.postgresqlgate
class TestPgBackup:
"""
Test suite for postgresql backup.
"""
@patch("smdba.postgresqlgate.os.path.exists", MagicMock(return_value=False))
def test_init_pkbackup_checks_archivecleaup(self):
"""
Test constructor of pkgbackup pg_archivecleanup installed
:return:
"""
with pytest.raises(Exception) as exc:
smdba.postgresqlgate.PgBackup("/target")
assert "The utility pg_archivecleanup was not found on the path." in str(exc)
|
<commit_before><commit_msg>Add unit test suite for PgBackup<commit_after>
|
# coding: utf-8
"""
Unit tests for essential functions in postgresql backup.
"""
from unittest.mock import MagicMock, mock_open, patch
import pytest
import smdba.postgresqlgate
class TestPgBackup:
"""
Test suite for postgresql backup.
"""
@patch("smdba.postgresqlgate.os.path.exists", MagicMock(return_value=False))
def test_init_pkbackup_checks_archivecleaup(self):
"""
Test constructor of pkgbackup pg_archivecleanup installed
:return:
"""
with pytest.raises(Exception) as exc:
smdba.postgresqlgate.PgBackup("/target")
assert "The utility pg_archivecleanup was not found on the path." in str(exc)
|
Add unit test suite for PgBackup# coding: utf-8
"""
Unit tests for essential functions in postgresql backup.
"""
from unittest.mock import MagicMock, mock_open, patch
import pytest
import smdba.postgresqlgate
class TestPgBackup:
"""
Test suite for postgresql backup.
"""
@patch("smdba.postgresqlgate.os.path.exists", MagicMock(return_value=False))
def test_init_pkbackup_checks_archivecleaup(self):
"""
Test constructor of pkgbackup pg_archivecleanup installed
:return:
"""
with pytest.raises(Exception) as exc:
smdba.postgresqlgate.PgBackup("/target")
assert "The utility pg_archivecleanup was not found on the path." in str(exc)
|
<commit_before><commit_msg>Add unit test suite for PgBackup<commit_after># coding: utf-8
"""
Unit tests for essential functions in postgresql backup.
"""
from unittest.mock import MagicMock, mock_open, patch
import pytest
import smdba.postgresqlgate
class TestPgBackup:
"""
Test suite for postgresql backup.
"""
@patch("smdba.postgresqlgate.os.path.exists", MagicMock(return_value=False))
def test_init_pkbackup_checks_archivecleaup(self):
"""
Test constructor of pkgbackup pg_archivecleanup installed
:return:
"""
with pytest.raises(Exception) as exc:
smdba.postgresqlgate.PgBackup("/target")
assert "The utility pg_archivecleanup was not found on the path." in str(exc)
|
|
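The PgBackup test above combines a patch of os.path.exists with pytest.raises to assert that the constructor fails when pg_archivecleanup is missing. A self-contained illustration of the same pattern, with a hypothetical Backup class standing in for smdba.postgresqlgate.PgBackup; note that str(exc.value) is the explicit way to read the exception message, while str(exc) on the ExceptionInfo object also happens to contain it in older pytest versions:

import os
from unittest.mock import MagicMock, patch

import pytest


class Backup:
    """Hypothetical stand-in that, like PgBackup, refuses to start without pg_archivecleanup."""
    def __init__(self, target):
        if not os.path.exists("/usr/bin/pg_archivecleanup"):
            raise Exception("The utility pg_archivecleanup was not found on the path.")
        self.target = target


@patch("os.path.exists", MagicMock(return_value=False))
def test_backup_requires_archivecleanup():
    with pytest.raises(Exception) as exc:
        Backup("/target")
    assert "pg_archivecleanup" in str(exc.value)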
eee3d08ae24174ae0c44853a688e30d3adaf13c5
|
cryptex/pl_calculator.py
|
cryptex/pl_calculator.py
|
from decimal import Decimal
from cryptex.transaction import Deposit, Withdrawal
from cryptex.trade import Buy, Sell
from functools import partial
class PLCalculator(object):
def __init__(self, exchange):
self.exchange = exchange
@staticmethod
def convert_transaction(market, tx):
"""
Convert to buy orders or sell trades of 0 cost and 0 price
"""
base, counter = market
if isinstance(tx, Deposit):
trade_cls = Buy
else:
trade_cls = Sell
return trade_cls(None, base, counter, tx.datetime, None,
tx.amount, Decimal('0'))
def _get_trades(self, market):
"""
Returns all trades in a particular market along with
transaction of the base currency, sorted by time.
"""
base, counter = market
trades = [t for t in self.exchange.get_my_trades()
if t.base_currency == base and
t.counter_currency == counter]
txs = [t for t in self.exchange.get_my_transactions()
if t.currency == base]
tx_trades = map(partial(PLCalculator.convert_transaction, market), txs)
all_trades = sorted(trades + tx_trades, key=lambda x: x.datetime)
return all_trades
def unrealized_pl(self, market):
base, counter = market
trades = self._get_trades(market)
def merge_trades(acc, trade):
if isinstance(trade, Buy):
new_trade = Buy(None, base, counter, trade.datetime, None,
trade.amount, trade.price)
acc.append(new_trade)
else:
oldest_buy = None
total_amount = Decimal('0')
while total_amount < trade.amount:
oldest_buy = acc.pop()
total_amount += oldest_buy.amount
buy_amount = trade.amount - total_amount
if buy_amount != Decimal('0'):
new_trade = Buy(None, base, counter, oldest_buy.datetime,
None, buy_amount, oldest_buy.price)
acc.append(new_trade)
return acc
return reduce(merge_trades, trades, [])
|
Add PL Calculator for unrealized profit and loss calculation
|
Add PL Calculator for unrealized profit and loss calculation
|
Python
|
mit
|
coink/cryptex
|
Add PL Calculator for unrealized profit and loss calculation
|
from decimal import Decimal
from cryptex.transaction import Deposit, Withdrawal
from cryptex.trade import Buy, Sell
from functools import partial
class PLCalculator(object):
def __init__(self, exchange):
self.exchange = exchange
@staticmethod
def convert_transaction(market, tx):
"""
Convert to buy orders or sell trades of 0 cost and 0 price
"""
base, counter = market
if isinstance(tx, Deposit):
trade_cls = Buy
else:
trade_cls = Sell
return trade_cls(None, base, counter, tx.datetime, None,
tx.amount, Decimal('0'))
def _get_trades(self, market):
"""
Returns all trades in a particular market along with
transaction of the base currency, sorted by time.
"""
base, counter = market
trades = [t for t in self.exchange.get_my_trades()
if t.base_currency == base and
t.counter_currency == counter]
txs = [t for t in self.exchange.get_my_transactions()
if t.currency == base]
tx_trades = map(partial(PLCalculator.convert_transaction, market), txs)
all_trades = sorted(trades + tx_trades, key=lambda x: x.datetime)
return all_trades
def unrealized_pl(self, market):
base, counter = market
trades = self._get_trades(market)
def merge_trades(acc, trade):
if isinstance(trade, Buy):
new_trade = Buy(None, base, counter, trade.datetime, None,
trade.amount, trade.price)
acc.append(new_trade)
else:
oldest_buy = None
total_amount = Decimal('0')
while total_amount < trade.amount:
oldest_buy = acc.pop()
total_amount += oldest_buy.amount
buy_amount = trade.amount - total_amount
if buy_amount != Decimal('0'):
new_trade = Buy(None, base, counter, oldest_buy.datetime,
None, buy_amount, oldest_buy.price)
acc.append(new_trade)
return acc
return reduce(merge_trades, trades, [])
|
<commit_before><commit_msg>Add PL Calculator for unrealized profit and loss calculation<commit_after>
|
from decimal import Decimal
from cryptex.transaction import Deposit, Withdrawal
from cryptex.trade import Buy, Sell
from functools import partial
class PLCalculator(object):
def __init__(self, exchange):
self.exchange = exchange
@staticmethod
def convert_transaction(market, tx):
"""
Convert to buy orders or sell trades of 0 cost and 0 price
"""
base, counter = market
if isinstance(tx, Deposit):
trade_cls = Buy
else:
trade_cls = Sell
return trade_cls(None, base, counter, tx.datetime, None,
tx.amount, Decimal('0'))
def _get_trades(self, market):
"""
Returns all trades in a particular market along with
transaction of the base currency, sorted by time.
"""
base, counter = market
trades = [t for t in self.exchange.get_my_trades()
if t.base_currency == base and
t.counter_currency == counter]
txs = [t for t in self.exchange.get_my_transactions()
if t.currency == base]
tx_trades = map(partial(PLCalculator.convert_transaction, market), txs)
all_trades = sorted(trades + tx_trades, key=lambda x: x.datetime)
return all_trades
def unrealized_pl(self, market):
base, counter = market
trades = self._get_trades(market)
def merge_trades(acc, trade):
if isinstance(trade, Buy):
new_trade = Buy(None, base, counter, trade.datetime, None,
trade.amount, trade.price)
acc.append(new_trade)
else:
oldest_buy = None
total_amount = Decimal('0')
while total_amount < trade.amount:
oldest_buy = acc.pop()
total_amount += oldest_buy.amount
buy_amount = trade.amount - total_amount
if buy_amount != Decimal('0'):
new_trade = Buy(None, base, counter, oldest_buy.datetime,
None, buy_amount, oldest_buy.price)
acc.append(new_trade)
return acc
return reduce(merge_trades, trades, [])
|
Add PL Calculator for unrealized profit and loss calculationfrom decimal import Decimal
from cryptex.transaction import Deposit, Withdrawal
from cryptex.trade import Buy, Sell
from functools import partial
class PLCalculator(object):
def __init__(self, exchange):
self.exchange = exchange
@staticmethod
def convert_transaction(market, tx):
"""
Convert to buy orders or sell trades of 0 cost and 0 price
"""
base, counter = market
if isinstance(tx, Deposit):
trade_cls = Buy
else:
trade_cls = Sell
return trade_cls(None, base, counter, tx.datetime, None,
tx.amount, Decimal('0'))
def _get_trades(self, market):
"""
Returns all trades in a particular market along with
transaction of the base currency, sorted by time.
"""
base, counter = market
trades = [t for t in self.exchange.get_my_trades()
if t.base_currency == base and
t.counter_currency == counter]
txs = [t for t in self.exchange.get_my_transactions()
if t.currency == base]
tx_trades = map(partial(PLCalculator.convert_transaction, market), txs)
all_trades = sorted(trades + tx_trades, key=lambda x: x.datetime)
return all_trades
def unrealized_pl(self, market):
base, counter = market
trades = self._get_trades(market)
def merge_trades(acc, trade):
if isinstance(trade, Buy):
new_trade = Buy(None, base, counter, trade.datetime, None,
trade.amount, trade.price)
acc.append(new_trade)
else:
oldest_buy = None
total_amount = Decimal('0')
while total_amount < trade.amount:
oldest_buy = acc.pop()
total_amount += oldest_buy.amount
buy_amount = trade.amount - total_amount
if buy_amount != Decimal('0'):
new_trade = Buy(None, base, counter, oldest_buy.datetime,
None, buy_amount, oldest_buy.price)
acc.append(new_trade)
return acc
return reduce(merge_trades, trades, [])
|
<commit_before><commit_msg>Add PL Calculator for unrealized profit and loss calculation<commit_after>from decimal import Decimal
from cryptex.transaction import Deposit, Withdrawal
from cryptex.trade import Buy, Sell
from functools import partial
class PLCalculator(object):
def __init__(self, exchange):
self.exchange = exchange
@staticmethod
def convert_transaction(market, tx):
"""
Convert to buy orders or sell trades of 0 cost and 0 price
"""
base, counter = market
if isinstance(tx, Deposit):
trade_cls = Buy
else:
trade_cls = Sell
return trade_cls(None, base, counter, tx.datetime, None,
tx.amount, Decimal('0'))
def _get_trades(self, market):
"""
Returns all trades in a particular market along with
transaction of the base currency, sorted by time.
"""
base, counter = market
trades = [t for t in self.exchange.get_my_trades()
if t.base_currency == base and
t.counter_currency == counter]
txs = [t for t in self.exchange.get_my_transactions()
if t.currency == base]
tx_trades = map(partial(PLCalculator.convert_transaction, market), txs)
all_trades = sorted(trades + tx_trades, key=lambda x: x.datetime)
return all_trades
def unrealized_pl(self, market):
base, counter = market
trades = self._get_trades(market)
def merge_trades(acc, trade):
if isinstance(trade, Buy):
new_trade = Buy(None, base, counter, trade.datetime, None,
trade.amount, trade.price)
acc.append(new_trade)
else:
oldest_buy = None
total_amount = Decimal('0')
while total_amount < trade.amount:
oldest_buy = acc.pop()
total_amount += oldest_buy.amount
buy_amount = trade.amount - total_amount
if buy_amount != Decimal('0'):
new_trade = Buy(None, base, counter, oldest_buy.datetime,
None, buy_amount, oldest_buy.price)
acc.append(new_trade)
return acc
return reduce(merge_trades, trades, [])
|
|
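The unrealized_pl reducer above matches sells against earlier buys and keeps whatever buy volume remains as the open position. Two details worth flagging: acc.pop() removes the most recently appended buy, so despite the oldest_buy name it consumes lots newest-first, and on Python 3 reduce also needs "from functools import reduce". Below is a standalone sketch of the same lot-matching idea under an explicit FIFO convention, with purely illustrative names and no dependency on the exchange classes:

from decimal import Decimal
from functools import reduce


def remaining_lots(trades):
    """trades: ('buy'|'sell', amount, price) tuples in time order.
    Returns the open buy lots after matching sells against the oldest buys first."""
    def step(lots, trade):
        side, amount, price = trade
        if side == 'buy':
            lots.append([amount, price])
            return lots
        remaining = amount
        while remaining > 0 and lots:
            lot_amount, lot_price = lots.pop(0)   # oldest lot first (FIFO)
            if lot_amount > remaining:
                lots.insert(0, [lot_amount - remaining, lot_price])
                remaining = Decimal('0')
            else:
                remaining -= lot_amount
        return lots
    return reduce(step, trades, [])


print(remaining_lots([
    ('buy', Decimal('2'), Decimal('100')),
    ('buy', Decimal('1'), Decimal('120')),
    ('sell', Decimal('2.5'), Decimal('130')),
]))  # [[Decimal('0.5'), Decimal('120')]]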
a4e5a2d0efa013847f0b1e90954739fe7224e30d
|
tests/test_resources.py
|
tests/test_resources.py
|
import pytest
from mock import patch, Mock
from spanky.resources import package
class TestPackageResource(object):
def test_package_install(self):
pack = package.Package('nginx')
pack._ok = Mock()
pack._ok.return_value = 'install ok installed'
pack.installed = Mock()
pack.installed.return_value = True
pack.install()
expected = [
'apt-get', 'install', '-y', 'nginx'
]
pack._ok.assert_called_with(expected)
def test_package_remove(self):
pack = package.Package('nginx')
pack._ok = Mock()
pack._ok.return_value = 'is not installed'
pack.installed = Mock()
pack.installed.return_value = False
pack.remove()
expected = [
'apt-get', 'remove', '-y', 'nginx'
]
pack._ok.assert_called_with(expected)
def test_package_installed(self):
pack = package.Package('nginx')
pack._ok = Mock()
pack._ok.return_value = 'install ok installed'
assert pack.installed()
expected = [
'dpkg', '-s', 'nginx'
]
pack._ok.assert_called_with(expected)
pack._ok.return_value = 'nginx is not installed'
assert not pack.installed()
expected = [
'dpkg', '-s', 'nginx'
]
pack._ok.assert_called_with(expected)
|
Add unit tests for package resource
|
Add unit tests for package resource
|
Python
|
bsd-3-clause
|
pglbutt/spanky,pglbutt/spanky,pglbutt/spanky
|
Add unit tests for package resource
|
import pytest
from mock import patch, Mock
from spanky.resources import package
class TestPackageResource(object):
def test_package_install(self):
pack = package.Package('nginx')
pack._ok = Mock()
pack._ok.return_value = 'install ok installed'
pack.installed = Mock()
pack.installed.return_value = True
pack.install()
expected = [
'apt-get', 'install', '-y', 'nginx'
]
pack._ok.assert_called_with(expected)
def test_package_remove(self):
pack = package.Package('nginx')
pack._ok = Mock()
pack._ok.return_value = 'is not installed'
pack.installed = Mock()
pack.installed.return_value = False
pack.remove()
expected = [
'apt-get', 'remove', '-y', 'nginx'
]
pack._ok.assert_called_with(expected)
def test_package_installed(self):
pack = package.Package('nginx')
pack._ok = Mock()
pack._ok.return_value = 'install ok installed'
assert pack.installed()
expected = [
'dpkg', '-s', 'nginx'
]
pack._ok.assert_called_with(expected)
pack._ok.return_value = 'nginx is not installed'
assert not pack.installed()
expected = [
'dpkg', '-s', 'nginx'
]
pack._ok.assert_called_with(expected)
|
<commit_before><commit_msg>Add unit tests for package resource<commit_after>
|
import pytest
from mock import patch, Mock
from spanky.resources import package
class TestPackageResource(object):
def test_package_install(self):
pack = package.Package('nginx')
pack._ok = Mock()
pack._ok.return_value = 'install ok installed'
pack.installed = Mock()
pack.installed.return_value = True
pack.install()
expected = [
'apt-get', 'install', '-y', 'nginx'
]
pack._ok.assert_called_with(expected)
def test_package_remove(self):
pack = package.Package('nginx')
pack._ok = Mock()
pack._ok.return_value = 'is not installed'
pack.installed = Mock()
pack.installed.return_value = False
pack.remove()
expected = [
'apt-get', 'remove', '-y', 'nginx'
]
pack._ok.assert_called_with(expected)
def test_package_installed(self):
pack = package.Package('nginx')
pack._ok = Mock()
pack._ok.return_value = 'install ok installed'
assert pack.installed()
expected = [
'dpkg', '-s', 'nginx'
]
pack._ok.assert_called_with(expected)
pack._ok.return_value = 'nginx is not installed'
assert not pack.installed()
expected = [
'dpkg', '-s', 'nginx'
]
pack._ok.assert_called_with(expected)
|
Add unit tests for package resourceimport pytest
from mock import patch, Mock
from spanky.resources import package
class TestPackageResource(object):
def test_package_install(self):
pack = package.Package('nginx')
pack._ok = Mock()
pack._ok.return_value = 'install ok installed'
pack.installed = Mock()
pack.installed.return_value = True
pack.install()
expected = [
'apt-get', 'install', '-y', 'nginx'
]
pack._ok.assert_called_with(expected)
def test_package_remove(self):
pack = package.Package('nginx')
pack._ok = Mock()
pack._ok.return_value = 'is not installed'
pack.installed = Mock()
pack.installed.return_value = False
pack.remove()
expected = [
'apt-get', 'remove', '-y', 'nginx'
]
pack._ok.assert_called_with(expected)
def test_package_installed(self):
pack = package.Package('nginx')
pack._ok = Mock()
pack._ok.return_value = 'install ok installed'
assert pack.installed()
expected = [
'dpkg', '-s', 'nginx'
]
pack._ok.assert_called_with(expected)
pack._ok.return_value = 'nginx is not installed'
assert not pack.installed()
expected = [
'dpkg', '-s', 'nginx'
]
pack._ok.assert_called_with(expected)
|
<commit_before><commit_msg>Add unit tests for package resource<commit_after>import pytest
from mock import patch, Mock
from spanky.resources import package
class TestPackageResource(object):
def test_package_install(self):
pack = package.Package('nginx')
pack._ok = Mock()
pack._ok.return_value = 'install ok installed'
pack.installed = Mock()
pack.installed.return_value = True
pack.install()
expected = [
'apt-get', 'install', '-y', 'nginx'
]
pack._ok.assert_called_with(expected)
def test_package_remove(self):
pack = package.Package('nginx')
pack._ok = Mock()
pack._ok.return_value = 'is not installed'
pack.installed = Mock()
pack.installed.return_value = False
pack.remove()
expected = [
'apt-get', 'remove', '-y', 'nginx'
]
pack._ok.assert_called_with(expected)
def test_package_installed(self):
pack = package.Package('nginx')
pack._ok = Mock()
pack._ok.return_value = 'install ok installed'
assert pack.installed()
expected = [
'dpkg', '-s', 'nginx'
]
pack._ok.assert_called_with(expected)
pack._ok.return_value = 'nginx is not installed'
assert not pack.installed()
expected = [
'dpkg', '-s', 'nginx'
]
pack._ok.assert_called_with(expected)
|
|
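The spanky.resources.package.Package class under test is not part of this record, so its implementation can only be inferred from the asserted calls: installed() runs "dpkg -s" and looks for 'install ok installed', while install() and remove() shell out to apt-get and then verify the resulting state. A minimal sketch consistent with those expectations; the method bodies are assumptions, not the project's code:

import subprocess


class Package(object):
    def __init__(self, name):
        self.name = name

    def _ok(self, cmd):
        # Presumably a thin wrapper that runs the command and returns its output.
        return subprocess.check_output(cmd).decode()

    def installed(self):
        return 'install ok installed' in self._ok(['dpkg', '-s', self.name])

    def install(self):
        out = self._ok(['apt-get', 'install', '-y', self.name])
        if not self.installed():
            raise RuntimeError('install of %s failed: %s' % (self.name, out))

    def remove(self):
        out = self._ok(['apt-get', 'remove', '-y', self.name])
        if self.installed():
            raise RuntimeError('removal of %s failed: %s' % (self.name, out))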
855972f9e8b59732a65111fb69f1388272727849
|
yatsm/regression/cran.py
|
yatsm/regression/cran.py
|
""" Regression or prediction methods from R
"""
import numpy as np
from rpy2.robjects.packages import importr
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
Rstats = importr('stats')
def CRAN_spline(x, y, spar=0.55):
""" Return a prediction function for a smoothing spline from R
Use `rpy2` package to fit a smoothing spline using "smooth.spline".
Args:
x (np.ndarray): independent variable
y (np.ndarray): dependent variable
spar (float): smoothing parameter
Returns:
callable: prediction function of smoothing spline that provides
smoothed estimates of the dependent variable given an input
independent variable array
Example:
Fit a smoothing spline for y ~ x and predict for days in year:
.. code-block:: python
pred_spl = CRAN_spline(x, y)
y_smooth = pred_spl(np.arange(1, 366))
"""
spl = Rstats.smooth_spline(x, y, spar=spar)
return lambda _x: np.array(Rstats.predict_smooth_spline(spl, _x)[1])
|
Move R's spline to regression module
|
Move R's spline to regression module
|
Python
|
mit
|
c11/yatsm,c11/yatsm
|
Move R's spline to regression module
|
""" Regression or prediction methods from R
"""
import numpy as np
from rpy2.robjects.packages import importr
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
Rstats = importr('stats')
def CRAN_spline(x, y, spar=0.55):
""" Return a prediction function for a smoothing spline from R
Use `rpy2` package to fit a smoothing spline using "smooth.spline".
Args:
x (np.ndarray): independent variable
y (np.ndarray): dependent variable
spar (float): smoothing parameter
Returns:
callable: prediction function of smoothing spline that provides
smoothed estimates of the dependent variable given an input
independent variable array
Example:
Fit a smoothing spline for y ~ x and predict for days in year:
.. code-block:: python
pred_spl = CRAN_spline(x, y)
y_smooth = pred_spl(np.arange(1, 366))
"""
spl = Rstats.smooth_spline(x, y, spar=spar)
return lambda _x: np.array(Rstats.predict_smooth_spline(spl, _x)[1])
|
<commit_before><commit_msg>Move R's spline to regression module<commit_after>
|
""" Regression or prediction methods from R
"""
import numpy as np
from rpy2.robjects.packages import importr
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
Rstats = importr('stats')
def CRAN_spline(x, y, spar=0.55):
""" Return a prediction function for a smoothing spline from R
Use `rpy2` package to fit a smoothing spline using "smooth.spline".
Args:
x (np.ndarray): independent variable
y (np.ndarray): dependent variable
spar (float): smoothing parameter
Returns:
callable: prediction function of smoothing spline that provides
smoothed estimates of the dependent variable given an input
independent variable array
Example:
Fit a smoothing spline for y ~ x and predict for days in year:
.. code-block:: python
pred_spl = CRAN_spline(x, y)
y_smooth = pred_spl(np.arange(1, 366))
"""
spl = Rstats.smooth_spline(x, y, spar=spar)
return lambda _x: np.array(Rstats.predict_smooth_spline(spl, _x)[1])
|
Move R's spline to regression module""" Regression or prediction methods from R
"""
import numpy as np
from rpy2.robjects.packages import importr
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
Rstats = importr('stats')
def CRAN_spline(x, y, spar=0.55):
""" Return a prediction function for a smoothing spline from R
Use `rpy2` package to fit a smoothing spline using "smooth.spline".
Args:
x (np.ndarray): independent variable
y (np.ndarray): dependent variable
spar (float): smoothing parameter
Returns:
callable: prediction function of smoothing spline that provides
smoothed estimates of the dependent variable given an input
independent variable array
Example:
Fit a smoothing spline for y ~ x and predict for days in year:
.. code-block:: python
pred_spl = CRAN_spline(x, y)
y_smooth = pred_spl(np.arange(1, 366))
"""
spl = Rstats.smooth_spline(x, y, spar=spar)
return lambda _x: np.array(Rstats.predict_smooth_spline(spl, _x)[1])
|
<commit_before><commit_msg>Move R's spline to regression module<commit_after>""" Regression or prediction methods from R
"""
import numpy as np
from rpy2.robjects.packages import importr
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
Rstats = importr('stats')
def CRAN_spline(x, y, spar=0.55):
""" Return a prediction function for a smoothing spline from R
Use `rpy2` package to fit a smoothing spline using "smooth.spline".
Args:
x (np.ndarray): independent variable
y (np.ndarray): dependent variable
spar (float): smoothing parameter
Returns:
callable: prediction function of smoothing spline that provides
smoothed estimates of the dependent variable given an input
independent variable array
Example:
Fit a smoothing spline for y ~ x and predict for days in year:
.. code-block:: python
pred_spl = CRAN_spline(x, y)
y_smooth = pred_spl(np.arange(1, 366))
"""
spl = Rstats.smooth_spline(x, y, spar=spar)
return lambda _x: np.array(Rstats.predict_smooth_spline(spl, _x)[1])
|
|
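CRAN_spline delegates the actual fit to R's smooth.spline through rpy2. Where an R runtime is not available, a roughly similar smoother can be built on scipy.interpolate.UnivariateSpline; this is not a drop-in replacement (scipy's s smoothing factor is not R's spar), just a sketch of the same predict-function shape:

import numpy as np
from scipy.interpolate import UnivariateSpline


def scipy_spline(x, y, s=None):
    """Return a prediction callable, mirroring the CRAN_spline interface."""
    order = np.argsort(x)                      # UnivariateSpline requires increasing x
    spl = UnivariateSpline(np.asarray(x)[order], np.asarray(y)[order], s=s)
    return lambda new_x: spl(np.asarray(new_x))


x = np.linspace(1, 365, 60)
y = np.sin(2 * np.pi * x / 365) + np.random.normal(0, 0.1, x.size)
pred_spl = scipy_spline(x, y)
y_smooth = pred_spl(np.arange(1, 366))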
bc1f2adee99e2c20c897e6ec012e4014aecba26b
|
tests/test_models.py
|
tests/test_models.py
|
import unittest
from app import create_app, db
from app.models import User
class TestUtils(unittest.TestCase):
def setUp(self):
self.app = create_app("testing")
self.app_ctx = self.app.app_context()
self.app_ctx.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_ctx.pop()
def test_user_initialization(self):
user = User(name="foo", social_id="bar")
db.session.add(user)
db.session.commit()
dictionary = user.dictionary
self.assertIsNotNone(user)
self.assertIsNotNone(dictionary)
|
Add test for User table initialization
|
Add test for User table initialization
|
Python
|
mit
|
Encrylize/MyDictionary,Encrylize/MyDictionary,Encrylize/MyDictionary
|
Add test for User table initialization
|
import unittest
from app import create_app, db
from app.models import User
class TestUtils(unittest.TestCase):
def setUp(self):
self.app = create_app("testing")
self.app_ctx = self.app.app_context()
self.app_ctx.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_ctx.pop()
def test_user_initialization(self):
user = User(name="foo", social_id="bar")
db.session.add(user)
db.session.commit()
dictionary = user.dictionary
self.assertIsNotNone(user)
self.assertIsNotNone(dictionary)
|
<commit_before><commit_msg>Add test for User table initialization<commit_after>
|
import unittest
from app import create_app, db
from app.models import User
class TestUtils(unittest.TestCase):
def setUp(self):
self.app = create_app("testing")
self.app_ctx = self.app.app_context()
self.app_ctx.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_ctx.pop()
def test_user_initialization(self):
user = User(name="foo", social_id="bar")
db.session.add(user)
db.session.commit()
dictionary = user.dictionary
self.assertIsNotNone(user)
self.assertIsNotNone(dictionary)
|
Add test for User table initializationimport unittest
from app import create_app, db
from app.models import User
class TestUtils(unittest.TestCase):
def setUp(self):
self.app = create_app("testing")
self.app_ctx = self.app.app_context()
self.app_ctx.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_ctx.pop()
def test_user_initialization(self):
user = User(name="foo", social_id="bar")
db.session.add(user)
db.session.commit()
dictionary = user.dictionary
self.assertIsNotNone(user)
self.assertIsNotNone(dictionary)
|
<commit_before><commit_msg>Add test for User table initialization<commit_after>import unittest
from app import create_app, db
from app.models import User
class TestUtils(unittest.TestCase):
def setUp(self):
self.app = create_app("testing")
self.app_ctx = self.app.app_context()
self.app_ctx.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_ctx.pop()
def test_user_initialization(self):
user = User(name="foo", social_id="bar")
db.session.add(user)
db.session.commit()
dictionary = user.dictionary
self.assertIsNotNone(user)
self.assertIsNotNone(dictionary)
|
|
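The setUp/tearDown pair above (push an app context, create the tables, then drop everything) is the usual Flask-SQLAlchemy test lifecycle. The same lifecycle written as a pytest fixture, assuming the same create_app/db/User interface as in the record:

import pytest

from app import create_app, db
from app.models import User


@pytest.fixture
def app_ctx():
    app = create_app("testing")
    ctx = app.app_context()
    ctx.push()
    db.create_all()
    yield app
    db.session.remove()
    db.drop_all()
    ctx.pop()


def test_user_initialization(app_ctx):
    user = User(name="foo", social_id="bar")
    db.session.add(user)
    db.session.commit()
    assert user.dictionary is not None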
604b24cfdf03b657e4a60d3f8dd9e2cf1c10cf5a
|
baseline_knn_mi_webKB.py
|
baseline_knn_mi_webKB.py
|
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import chi2, SelectKBest
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from sklearn.model_selection import cross_val_score
data = pd.read_table("data/WebKB/webkb-train-stemmed.txt")
data.columns=["Y","X"]
classes = {'project':0, 'faculty':1, 'course':2, 'student':3}
Y = np.array([classes[i0] for i0 in data["Y"]])
X = data["X"]
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(X.values.astype('U')).toarray()
x_train = SelectKBest(chi2, k=0.1*len(X[1])).fit_transform(X, Y)
y_train = Y
# data = pd.read_table("data/WebKB/webkb-test-stemmed.txt")
# data.columns=["Y","X"]
# classes = {'project':0, 'faculty':1, 'course':2, 'student':3}
# Y = np.array([classes[i0] for i0 in data["Y"]])
# X = data["X"]
# vectorizer = TfidfVectorizer()
# X = vectorizer.fit_transform(X.values.astype('U')).toarray()
# x_test = SelectKBest(chi2, k=0.1*len(X[1]).fit_transform(X, y))
# y_test = Y
knn_model = KNeighborsClassifier(n_neighbors=3, weights='distance', n_jobs=1) # Minkowski distance with p=2
psum = rsum = fsum = 0
# scoring=metrics.make_scorer(metrics.precision_recall_fscore_support)
cv_scores = cross_val_score(knn_model, x_train, y_train, scoring='f1_weighted', cv=5, n_jobs=5)
print(sum(cv_scores)/5)
|
Add initial MI feature selection code
|
Add initial MI feature selection code
|
Python
|
mit
|
achyudhk/Ensemble-Text-Classification
|
Add initial MI feature selection code
|
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import chi2, SelectKBest
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from sklearn.model_selection import cross_val_score
data = pd.read_table("data/WebKB/webkb-train-stemmed.txt")
data.columns=["Y","X"]
classes = {'project':0, 'faculty':1, 'course':2, 'student':3}
Y = np.array([classes[i0] for i0 in data["Y"]])
X = data["X"]
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(X.values.astype('U')).toarray()
x_train = SelectKBest(chi2, k=0.1*len(X[1])).fit_transform(X, Y)
y_train = Y
# data = pd.read_table("data/WebKB/webkb-test-stemmed.txt")
# data.columns=["Y","X"]
# classes = {'project':0, 'faculty':1, 'course':2, 'student':3}
# Y = np.array([classes[i0] for i0 in data["Y"]])
# X = data["X"]
# vectorizer = TfidfVectorizer()
# X = vectorizer.fit_transform(X.values.astype('U')).toarray()
# x_test = SelectKBest(chi2, k=0.1*len(X[1]).fit_transform(X, y))
# y_test = Y
knn_model = KNeighborsClassifier(n_neighbors=3, weights='distance', n_jobs=1) # Minkowski distance with p=2
psum = rsum = fsum = 0
# scoring=metrics.make_scorer(metrics.precision_recall_fscore_support)
cv_scores = cross_val_score(knn_model, x_train, y_train, scoring='f1_weighted', cv=5, n_jobs=5)
print(sum(cv_scores)/5)
|
<commit_before><commit_msg>Add initial MI feature selection code<commit_after>
|
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import chi2, SelectKBest
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from sklearn.model_selection import cross_val_score
data = pd.read_table("data/WebKB/webkb-train-stemmed.txt")
data.columns=["Y","X"]
classes = {'project':0, 'faculty':1, 'course':2, 'student':3}
Y = np.array([classes[i0] for i0 in data["Y"]])
X = data["X"]
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(X.values.astype('U')).toarray()
x_train = SelectKBest(chi2, k=0.1*len(X[1])).fit_transform(X, Y)
y_train = Y
# data = pd.read_table("data/WebKB/webkb-test-stemmed.txt")
# data.columns=["Y","X"]
# classes = {'project':0, 'faculty':1, 'course':2, 'student':3}
# Y = np.array([classes[i0] for i0 in data["Y"]])
# X = data["X"]
# vectorizer = TfidfVectorizer()
# X = vectorizer.fit_transform(X.values.astype('U')).toarray()
# x_test = SelectKBest(chi2, k=0.1*len(X[1]).fit_transform(X, y))
# y_test = Y
knn_model = KNeighborsClassifier(n_neighbors=3, weights='distance', n_jobs=1) # Minkowski distance with p=2
psum = rsum = fsum = 0
# scoring=metrics.make_scorer(metrics.precision_recall_fscore_support)
cv_scores = cross_val_score(knn_model, x_train, y_train, scoring='f1_weighted', cv=5, n_jobs=5)
print(sum(cv_scores)/5)
|
Add initial MI feature selection codeimport numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import chi2, SelectKBest
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from sklearn.model_selection import cross_val_score
data = pd.read_table("data/WebKB/webkb-train-stemmed.txt")
data.columns=["Y","X"]
classes = {'project':0, 'faculty':1, 'course':2, 'student':3}
Y = np.array([classes[i0] for i0 in data["Y"]])
X = data["X"]
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(X.values.astype('U')).toarray()
x_train = SelectKBest(chi2, k=0.1*len(X[1])).fit_transform(X, Y)
y_train = Y
# data = pd.read_table("data/WebKB/webkb-test-stemmed.txt")
# data.columns=["Y","X"]
# classes = {'project':0, 'faculty':1, 'course':2, 'student':3}
# Y = np.array([classes[i0] for i0 in data["Y"]])
# X = data["X"]
# vectorizer = TfidfVectorizer()
# X = vectorizer.fit_transform(X.values.astype('U')).toarray()
# x_test = SelectKBest(chi2, k=0.1*len(X[1]).fit_transform(X, y))
# y_test = Y
knn_model = KNeighborsClassifier(n_neighbors=3, weights='distance', n_jobs=1) # Minkowski distance with p=2
psum = rsum = fsum = 0
# scoring=metrics.make_scorer(metrics.precision_recall_fscore_support)
cv_scores = cross_val_score(knn_model, x_train, y_train, scoring='f1_weighted', cv=5, n_jobs=5)
print(sum(cv_scores)/5)
|
<commit_before><commit_msg>Add initial MI feature selection code<commit_after>import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import chi2, SelectKBest
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from sklearn.model_selection import cross_val_score
data = pd.read_table("data/WebKB/webkb-train-stemmed.txt")
data.columns=["Y","X"]
classes = {'project':0, 'faculty':1, 'course':2, 'student':3}
Y = np.array([classes[i0] for i0 in data["Y"]])
X = data["X"]
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(X.values.astype('U')).toarray()
x_train = SelectKBest(chi2, k=0.1*len(X[1])).fit_transform(X, Y)
y_train = Y
# data = pd.read_table("data/WebKB/webkb-test-stemmed.txt")
# data.columns=["Y","X"]
# classes = {'project':0, 'faculty':1, 'course':2, 'student':3}
# Y = np.array([classes[i0] for i0 in data["Y"]])
# X = data["X"]
# vectorizer = TfidfVectorizer()
# X = vectorizer.fit_transform(X.values.astype('U')).toarray()
# x_test = SelectKBest(chi2, k=0.1*len(X[1]).fit_transform(X, y))
# y_test = Y
knn_model = KNeighborsClassifier(n_neighbors=3, weights='distance', n_jobs=1) # Minkowski distance with p=2
psum = rsum = fsum = 0
# scoring=metrics.make_scorer(metrics.precision_recall_fscore_support)
cv_scores = cross_val_score(knn_model, x_train, y_train, scoring='f1_weighted', cv=5, n_jobs=5)
print(sum(cv_scores)/5)
|
|
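Two details in the WebKB baseline above are worth flagging: the commit message talks about mutual information while the selector actually passed is chi2, and SelectKBest expects an integer k, whereas 0.1*len(X[1]) is a float (the commented-out test block also has an unbalanced parenthesis). A sketch of the mutual-information variant with an integer k, assuming the same WebKB file layout:

import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier

data = pd.read_table("data/WebKB/webkb-train-stemmed.txt")
data.columns = ["Y", "X"]
classes = {'project': 0, 'faculty': 1, 'course': 2, 'student': 3}
y_train = np.array([classes[label] for label in data["Y"]])

X = TfidfVectorizer().fit_transform(data["X"].values.astype('U'))
k = max(1, int(0.1 * X.shape[1]))              # keep the top 10% of features, as an int
x_train = SelectKBest(mutual_info_classif, k=k).fit_transform(X, y_train)

knn_model = KNeighborsClassifier(n_neighbors=3, weights='distance')
cv_scores = cross_val_score(knn_model, x_train, y_train, scoring='f1_weighted', cv=5)
print(cv_scores.mean())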
c50700f935c81d8fde838192c6201a513d3ab1cf
|
src/draw_mosaic.py
|
src/draw_mosaic.py
|
import optparse
import fractions
import cairo
import turkshead
width = 8
height = 6
size = 200
img = cairo.ImageSurface( cairo.FORMAT_RGB24, width * size, height * size )
ctx = cairo.Context( img )
ctx.set_source_rgb( 1, 1, 0xBF / 255. )
ctx.paint()
ctx.translate( -size / 2, -size / 2 )
ctx.set_source_rgb( 0, 0, 0 )
for leads in range( 1, width + 1 ):
for bights in range( 1, height + 1 ):
if fractions.gcd( leads, bights ) == 1:
ctx.save()
ctx.translate( size * leads, size * bights )
t = turkshead.TurksHead( leads, bights, size / 8 * 0.9, size / 2 * 0.9, size / 15 )
t.draw( ctx )
ctx.restore()
img.write_to_png( "mosaic.png" )
|
Add a mosaic drawer in Python
|
Add a mosaic drawer in Python
|
Python
|
mit
|
jacquev6/DrawTurksHead,jacquev6/DrawTurksHead,jacquev6/DrawTurksHead
|
Add a mosaic drawer in Python
|
import optparse
import fractions
import cairo
import turkshead
width = 8
height = 6
size = 200
img = cairo.ImageSurface( cairo.FORMAT_RGB24, width * size, height * size )
ctx = cairo.Context( img )
ctx.set_source_rgb( 1, 1, 0xBF / 255. )
ctx.paint()
ctx.translate( -size / 2, -size / 2 )
ctx.set_source_rgb( 0, 0, 0 )
for leads in range( 1, width + 1 ):
for bights in range( 1, height + 1 ):
if fractions.gcd( leads, bights ) == 1:
ctx.save()
ctx.translate( size * leads, size * bights )
t = turkshead.TurksHead( leads, bights, size / 8 * 0.9, size / 2 * 0.9, size / 15 )
t.draw( ctx )
ctx.restore()
img.write_to_png( "mosaic.png" )
|
<commit_before><commit_msg>Add a mosaic drawer in Python<commit_after>
|
import optparse
import fractions
import cairo
import turkshead
width = 8
height = 6
size = 200
img = cairo.ImageSurface( cairo.FORMAT_RGB24, width * size, height * size )
ctx = cairo.Context( img )
ctx.set_source_rgb( 1, 1, 0xBF / 255. )
ctx.paint()
ctx.translate( -size / 2, -size / 2 )
ctx.set_source_rgb( 0, 0, 0 )
for leads in range( 1, width + 1 ):
for bights in range( 1, height + 1 ):
if fractions.gcd( leads, bights ) == 1:
ctx.save()
ctx.translate( size * leads, size * bights )
t = turkshead.TurksHead( leads, bights, size / 8 * 0.9, size / 2 * 0.9, size / 15 )
t.draw( ctx )
ctx.restore()
img.write_to_png( "mosaic.png" )
|
Add a mosaic drawer in Pythonimport optparse
import fractions
import cairo
import turkshead
width = 8
height = 6
size = 200
img = cairo.ImageSurface( cairo.FORMAT_RGB24, width * size, height * size )
ctx = cairo.Context( img )
ctx.set_source_rgb( 1, 1, 0xBF / 255. )
ctx.paint()
ctx.translate( -size / 2, -size / 2 )
ctx.set_source_rgb( 0, 0, 0 )
for leads in range( 1, width + 1 ):
for bights in range( 1, height + 1 ):
if fractions.gcd( leads, bights ) == 1:
ctx.save()
ctx.translate( size * leads, size * bights )
t = turkshead.TurksHead( leads, bights, size / 8 * 0.9, size / 2 * 0.9, size / 15 )
t.draw( ctx )
ctx.restore()
img.write_to_png( "mosaic.png" )
|
<commit_before><commit_msg>Add a mosaic drawer in Python<commit_after>import optparse
import fractions
import cairo
import turkshead
width = 8
height = 6
size = 200
img = cairo.ImageSurface( cairo.FORMAT_RGB24, width * size, height * size )
ctx = cairo.Context( img )
ctx.set_source_rgb( 1, 1, 0xBF / 255. )
ctx.paint()
ctx.translate( -size / 2, -size / 2 )
ctx.set_source_rgb( 0, 0, 0 )
for leads in range( 1, width + 1 ):
for bights in range( 1, height + 1 ):
if fractions.gcd( leads, bights ) == 1:
ctx.save()
ctx.translate( size * leads, size * bights )
t = turkshead.TurksHead( leads, bights, size / 8 * 0.9, size / 2 * 0.9, size / 15 )
t.draw( ctx )
ctx.restore()
img.write_to_png( "mosaic.png" )
|
|
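The mosaic script above filters (leads, bights) pairs with fractions.gcd, which was deprecated in Python 3.5 and removed in 3.9 (the optparse import is also unused). On current Python the coprimality check would use math.gcd; a minimal sketch of just that selection step, without the cairo/turkshead drawing:

import math

width, height = 8, 6
coprime_pairs = [
    (leads, bights)
    for leads in range(1, width + 1)
    for bights in range(1, height + 1)
    if math.gcd(leads, bights) == 1
]
print(coprime_pairs)   # only these cells of the mosaic get a knot drawn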
90c6293ef1f719d53e02bd4c6f613cb205fa9497
|
test/test_connection.py
|
test/test_connection.py
|
""" Tests for the Connection module """
import unittest
from src import connection
class TestConnection(unittest.TestCase):
""" Tests for the Connection module """
def test_default_instantiation(self):
""" Test a known default instantiation """
single_connection = connection.Connection()
self.assertFalse(single_connection.startX)
self.assertFalse(single_connection.startY)
self.assertFalse(single_connection.direction)
self.assertFalse(single_connection.endX)
self.assertFalse(single_connection.endY)
def test_setstartX_good(self):
""" Check that startX is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setstartX(123)
self.assertEqual(single_connection.startX, 123)
def test_setstartX_bad(self):
""" Check that setstartX raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setstartX, "abc")
def test_setstartY_good(self):
""" Check that startY is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setstartY(123)
self.assertEqual(single_connection.startY, 123)
def test_setstartY_bad(self):
""" Check that setstartY raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setstartY, "abc")
def test_setdirection_good(self):
""" Check that direction is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setdirection(123)
self.assertEqual(single_connection.direction, 123)
def test_setdirection_bad(self):
""" Check that setdirection raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setdirection, "abc")
def test_setendX_good(self):
""" Check that endX is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setendX(123)
self.assertEqual(single_connection.endX, 123)
def test_setendX_bad(self):
""" Check that setendX raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setendX, "abc")
def test_setendY_good(self):
""" Check that endY is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setendY(123)
self.assertEqual(single_connection.endY, 123)
def test_setendY_bad(self):
""" Check that setendY raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setendY, "abc")
|
Add tests for connection class
|
Add tests for connection class
|
Python
|
mit
|
blairck/jaeger
|
Add tests for connection class
|
""" Tests for the Connection module """
import unittest
from src import connection
class TestConnection(unittest.TestCase):
""" Tests for the Connection module """
def test_default_instantiation(self):
""" Test a known default instantiation """
single_connection = connection.Connection()
self.assertFalse(single_connection.startX)
self.assertFalse(single_connection.startY)
self.assertFalse(single_connection.direction)
self.assertFalse(single_connection.endX)
self.assertFalse(single_connection.endY)
def test_setstartX_good(self):
""" Check that startX is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setstartX(123)
self.assertEqual(single_connection.startX, 123)
def test_setstartX_bad(self):
""" Check that setstartX raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setstartX, "abc")
def test_setstartY_good(self):
""" Check that startY is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setstartY(123)
self.assertEqual(single_connection.startY, 123)
def test_setstartY_bad(self):
""" Check that setstartY raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setstartY, "abc")
def test_setdirection_good(self):
""" Check that direction is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setdirection(123)
self.assertEqual(single_connection.direction, 123)
def test_setdirection_bad(self):
""" Check that setdirection raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setdirection, "abc")
def test_setendX_good(self):
""" Check that endX is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setendX(123)
self.assertEqual(single_connection.endX, 123)
def test_setendX_bad(self):
""" Check that setendX raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setendX, "abc")
def test_setendY_good(self):
""" Check that endY is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setendY(123)
self.assertEqual(single_connection.endY, 123)
def test_setendY_bad(self):
""" Check that setendY raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setendY, "abc")
|
<commit_before><commit_msg>Add tests for connection class<commit_after>
|
""" Tests for the Connection module """
import unittest
from src import connection
class TestConnection(unittest.TestCase):
""" Tests for the Connection module """
def test_default_instantiation(self):
""" Test a known default instantiation """
single_connection = connection.Connection()
self.assertFalse(single_connection.startX)
self.assertFalse(single_connection.startY)
self.assertFalse(single_connection.direction)
self.assertFalse(single_connection.endX)
self.assertFalse(single_connection.endY)
def test_setstartX_good(self):
""" Check that startX is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setstartX(123)
self.assertEqual(single_connection.startX, 123)
def test_setstartX_bad(self):
""" Check that setstartX raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setstartX, "abc")
def test_setstartY_good(self):
""" Check that startY is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setstartY(123)
self.assertEqual(single_connection.startY, 123)
def test_setstartY_bad(self):
""" Check that setstartY raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setstartY, "abc")
def test_setdirection_good(self):
""" Check that direction is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setdirection(123)
self.assertEqual(single_connection.direction, 123)
def test_setdirection_bad(self):
""" Check that setdirection raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setdirection, "abc")
def test_setendX_good(self):
""" Check that endX is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setendX(123)
self.assertEqual(single_connection.endX, 123)
def test_setendX_bad(self):
""" Check that setendX raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setendX, "abc")
def test_setendY_good(self):
""" Check that endY is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setendY(123)
self.assertEqual(single_connection.endY, 123)
def test_setendY_bad(self):
""" Check that setendY raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setendY, "abc")
|
Add tests for connection class""" Tests for the Connection module """
import unittest
from src import connection
class TestConnection(unittest.TestCase):
""" Tests for the Connection module """
def test_default_instantiation(self):
""" Test a known default instantiation """
single_connection = connection.Connection()
self.assertFalse(single_connection.startX)
self.assertFalse(single_connection.startY)
self.assertFalse(single_connection.direction)
self.assertFalse(single_connection.endX)
self.assertFalse(single_connection.endY)
def test_setstartX_good(self):
""" Check that startX is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setstartX(123)
self.assertEqual(single_connection.startX, 123)
def test_setstartX_bad(self):
""" Check that setstartX raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setstartX, "abc")
def test_setstartY_good(self):
""" Check that startY is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setstartY(123)
self.assertEqual(single_connection.startY, 123)
def test_setstartY_bad(self):
""" Check that setstartY raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setstartY, "abc")
def test_setdirection_good(self):
""" Check that direction is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setdirection(123)
self.assertEqual(single_connection.direction, 123)
def test_setdirection_bad(self):
""" Check that setdirection raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setdirection, "abc")
def test_setendX_good(self):
""" Check that endX is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setendX(123)
self.assertEqual(single_connection.endX, 123)
def test_setendX_bad(self):
""" Check that setendX raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setendX, "abc")
def test_setendY_good(self):
""" Check that endY is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setendY(123)
self.assertEqual(single_connection.endY, 123)
def test_setendY_bad(self):
""" Check that setendY raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setendY, "abc")
|
<commit_before><commit_msg>Add tests for connection class<commit_after>""" Tests for the Connection module """
import unittest
from src import connection
class TestConnection(unittest.TestCase):
""" Tests for the Connection module """
def test_default_instantiation(self):
""" Test a known default instantiation """
single_connection = connection.Connection()
self.assertFalse(single_connection.startX)
self.assertFalse(single_connection.startY)
self.assertFalse(single_connection.direction)
self.assertFalse(single_connection.endX)
self.assertFalse(single_connection.endY)
def test_setstartX_good(self):
""" Check that startX is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setstartX(123)
self.assertEqual(single_connection.startX, 123)
def test_setstartX_bad(self):
""" Check that setstartX raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setstartX, "abc")
def test_setstartY_good(self):
""" Check that startY is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setstartY(123)
self.assertEqual(single_connection.startY, 123)
def test_setstartY_bad(self):
""" Check that setstartY raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setstartY, "abc")
def test_setdirection_good(self):
""" Check that direction is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setdirection(123)
self.assertEqual(single_connection.direction, 123)
def test_setdirection_bad(self):
""" Check that setdirection raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setdirection, "abc")
def test_setendX_good(self):
""" Check that endX is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setendX(123)
self.assertEqual(single_connection.endX, 123)
def test_setendX_bad(self):
""" Check that setendX raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setendX, "abc")
def test_setendY_good(self):
""" Check that endY is set correctly with valid input """
single_connection = connection.Connection()
single_connection.setendY(123)
self.assertEqual(single_connection.endY, 123)
def test_setendY_bad(self):
""" Check that setendY raises an error with non-int input """
single_connection = connection.Connection()
self.assertRaises(TypeError, single_connection.setendY, "abc")
|
|
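The src.connection.Connection class exercised above is not included in this record. From the assertions alone (attributes default to falsy values, each setter accepts an int and raises TypeError otherwise) a minimal sketch consistent with the tests could look like the following; the validation logic is an inference, not the project's implementation:

class Connection(object):
    def __init__(self):
        self.startX = None
        self.startY = None
        self.direction = None
        self.endX = None
        self.endY = None

    def _set_int(self, name, value):
        # Every setter in the tests rejects non-int input with a TypeError.
        if not isinstance(value, int):
            raise TypeError("%s must be an int, got %r" % (name, value))
        setattr(self, name, value)

    def setstartX(self, value):
        self._set_int("startX", value)

    def setstartY(self, value):
        self._set_int("startY", value)

    def setdirection(self, value):
        self._set_int("direction", value)

    def setendX(self, value):
        self._set_int("endX", value)

    def setendY(self, value):
        self._set_int("endY", value)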
641ebde8628ee9bb85ab0bd675bcecc88c06d171
|
split_fasta_file.py
|
split_fasta_file.py
|
#!/usr/bin/env python
__description__ = "Split a fasta file and generate a file for each entry"
__author__ = "Konrad Foerstner <konrad@foerstner.org>"
__copyright__ = "2013 by Konrad Foerstner <konrad@foerstner.org>"
__license__ = "ISC license"
__email__ = "konrad@foerstner.org"
__version__ = "0.1"
import argparse
import sys
from Bio import SeqIO
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument("input_fasta")
parser.add_argument("--output_folder", default=".")
parser.add_argument("--prefix", default="",
help="A prefix that is put before the sequence id in the "
"entry and the file name.")
args = parser.parse_args()
seen_id = {}
for record in SeqIO.parse(args.input_fasta, "fasta"):
seq_id = args.prefix + record.id
if seq_id in seen_id:
sys.stderr.write("Error! ID '%s' used before. Stopped.\n" % seq_id)
sys.exit(1)
with open("%s/%s.fa" % (args.output_folder, seq_id), "w") as output_fh:
output_fh.write(">%s\n%s\n" % (seq_id, record.seq))
seen_id[seq_id] = 1
|
Add script to split fasta files
|
Add script to split fasta files
|
Python
|
isc
|
konrad/kuf_bio_scripts
|
Add script to split fasta files
|
#!/usr/bin/env python
__description__ = "Split a fasta file and generate a file for each entry"
__author__ = "Konrad Foerstner <konrad@foerstner.org>"
__copyright__ = "2013 by Konrad Foerstner <konrad@foerstner.org>"
__license__ = "ISC license"
__email__ = "konrad@foerstner.org"
__version__ = "0.1"
import argparse
import sys
from Bio import SeqIO
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument("input_fasta")
parser.add_argument("--output_folder", default=".")
parser.add_argument("--prefix", default="",
help="A prefix that is put before the sequence id in the "
"entry and the file name.")
args = parser.parse_args()
seen_id = {}
for record in SeqIO.parse(args.input_fasta, "fasta"):
seq_id = args.prefix + record.id
if seq_id in seen_id:
sys.stderr.write("Error! ID '%s' used before. Stopped.\n" % seq_id)
sys.exit(1)
with open("%s/%s.fa" % (args.output_folder, seq_id), "w") as output_fh:
output_fh.write(">%s\n%s\n" % (seq_id, record.seq))
seen_id[seq_id] = 1
|
<commit_before><commit_msg>Add script to split fasta files<commit_after>
|
#!/usr/bin/env python
__description__ = "Split a fasta file and generate a file for each entry"
__author__ = "Konrad Foerstner <konrad@foerstner.org>"
__copyright__ = "2013 by Konrad Foerstner <konrad@foerstner.org>"
__license__ = "ISC license"
__email__ = "konrad@foerstner.org"
__version__ = "0.1"
import argparse
import sys
from Bio import SeqIO
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument("input_fasta")
parser.add_argument("--output_folder", default=".")
parser.add_argument("--prefix", default="",
help="A prefix that is put before the sequence id in the "
"entry and the file name.")
args = parser.parse_args()
seen_id = {}
for record in SeqIO.parse(args.input_fasta, "fasta"):
seq_id = args.prefix + record.id
if seq_id in seen_id:
sys.stderr.write("Error! ID '%s' used before. Stopped.\n" % seq_id)
sys.exit(1)
with open("%s/%s.fa" % (args.output_folder, seq_id), "w") as output_fh:
output_fh.write(">%s\n%s\n" % (seq_id, record.seq))
seen_id[seq_id] = 1
|
Add script to split fasta files#!/usr/bin/env python
__description__ = "Split a fasta file and generate a file for each entry"
__author__ = "Konrad Foerstner <konrad@foerstner.org>"
__copyright__ = "2013 by Konrad Foerstner <konrad@foerstner.org>"
__license__ = "ISC license"
__email__ = "konrad@foerstner.org"
__version__ = "0.1"
import argparse
import sys
from Bio import SeqIO
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument("input_fasta")
parser.add_argument("--output_folder", default=".")
parser.add_argument("--prefix", default="",
help="A prefix that is put before the sequence id in the "
"entry and the file name.")
args = parser.parse_args()
seen_id = {}
for record in SeqIO.parse(args.input_fasta, "fasta"):
seq_id = args.prefix + record.id
if seq_id in seen_id:
sys.stderr.write("Error! ID '%s' used before. Stopped.\n" % seq_id)
sys.exit(1)
with open("%s/%s.fa" % (args.output_folder, seq_id), "w") as output_fh:
output_fh.write(">%s\n%s\n" % (seq_id, record.seq))
seen_id[seq_id] = 1
|
<commit_before><commit_msg>Add script to split fasta files<commit_after>#!/usr/bin/env python
__description__ = "Split a fasta file and generate a file for each entry"
__author__ = "Konrad Foerstner <konrad@foerstner.org>"
__copyright__ = "2013 by Konrad Foerstner <konrad@foerstner.org>"
__license__ = "ISC license"
__email__ = "konrad@foerstner.org"
__version__ = "0.1"
import argparse
import sys
from Bio import SeqIO
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument("input_fasta")
parser.add_argument("--output_folder", default=".")
parser.add_argument("--prefix", default="",
help="A prefix that is put before the sequence id in the "
"entry and the file name.")
args = parser.parse_args()
seen_id = {}
for record in SeqIO.parse(args.input_fasta, "fasta"):
seq_id = args.prefix + record.id
if seq_id in seen_id:
sys.stderr.write("Error! ID '%s' used before. Stopped.\n" % seq_id)
sys.exit(1)
with open("%s/%s.fa" % (args.output_folder, seq_id), "w") as output_fh:
output_fh.write(">%s\n%s\n" % (seq_id, record.seq))
seen_id[seq_id] = 1
|
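A minimal usage sketch for the splitter above; the FASTA text, the prefix, and the resulting file names are illustrative assumptions, and Biopython must be installed. It demonstrates the "<output_folder>/<prefix><record id>.fa" naming the script produces, without writing any files.
from io import StringIO
from Bio import SeqIO

fasta_text = ">seq1\nACGT\n>seq2\nGGCC\n"
prefix = "sample1_"
for record in SeqIO.parse(StringIO(fasta_text), "fasta"):
    # The script above would write this entry to <output_folder>/<prefix><id>.fa
    print("%s%s.fa" % (prefix, record.id))  # sample1_seq1.fa, then sample1_seq2.fa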
|
b500e4b0e8ac0088fbff906dfb42e9e604fbc397
|
cardinal/test_exceptions.py
|
cardinal/test_exceptions.py
|
import pytest
import exceptions
def test_exceptions():
# testing exception inheritance
with pytest.raises(Exception):
raise exceptions.CardinalException
with pytest.raises(exceptions.CardinalException):
raise exceptions.InternalError
with pytest.raises(exceptions.CardinalException):
raise exceptions.PluginError
with pytest.raises(exceptions.CardinalException):
raise exceptions.CommandNotFoundError
with pytest.raises(exceptions.CardinalException):
raise exceptions.ConfigNotFoundError
with pytest.raises(exceptions.CardinalException):
raise exceptions.AmbiguousConfigError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventAlreadyExistsError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventDoesNotExistError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventCallbackError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventRejectedMessage
|
Add simple unit tests for exceptions
|
Add simple unit tests for exceptions
|
Python
|
mit
|
BiohZn/Cardinal,JohnMaguire/Cardinal
|
Add simple unit tests for exceptions
|
import pytest
import exceptions
def test_exceptions():
# testing exception inheritance
with pytest.raises(Exception):
raise exceptions.CardinalException
with pytest.raises(exceptions.CardinalException):
raise exceptions.InternalError
with pytest.raises(exceptions.CardinalException):
raise exceptions.PluginError
with pytest.raises(exceptions.CardinalException):
raise exceptions.CommandNotFoundError
with pytest.raises(exceptions.CardinalException):
raise exceptions.ConfigNotFoundError
with pytest.raises(exceptions.CardinalException):
raise exceptions.AmbiguousConfigError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventAlreadyExistsError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventDoesNotExistError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventCallbackError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventRejectedMessage
|
<commit_before><commit_msg>Add simple unit tests for exceptions<commit_after>
|
import pytest
import exceptions
def test_exceptions():
# testing exception inheritance
with pytest.raises(Exception):
raise exceptions.CardinalException
with pytest.raises(exceptions.CardinalException):
raise exceptions.InternalError
with pytest.raises(exceptions.CardinalException):
raise exceptions.PluginError
with pytest.raises(exceptions.CardinalException):
raise exceptions.CommandNotFoundError
with pytest.raises(exceptions.CardinalException):
raise exceptions.ConfigNotFoundError
with pytest.raises(exceptions.CardinalException):
raise exceptions.AmbiguousConfigError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventAlreadyExistsError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventDoesNotExistError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventCallbackError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventRejectedMessage
|
Add simple unit tests for exceptionsimport pytest
import exceptions
def test_exceptions():
# testing exception inheritance
with pytest.raises(Exception):
raise exceptions.CardinalException
with pytest.raises(exceptions.CardinalException):
raise exceptions.InternalError
with pytest.raises(exceptions.CardinalException):
raise exceptions.PluginError
with pytest.raises(exceptions.CardinalException):
raise exceptions.CommandNotFoundError
with pytest.raises(exceptions.CardinalException):
raise exceptions.ConfigNotFoundError
with pytest.raises(exceptions.CardinalException):
raise exceptions.AmbiguousConfigError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventAlreadyExistsError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventDoesNotExistError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventCallbackError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventRejectedMessage
|
<commit_before><commit_msg>Add simple unit tests for exceptions<commit_after>import pytest
import exceptions
def test_exceptions():
# testing exception inheritance
with pytest.raises(Exception):
raise exceptions.CardinalException
with pytest.raises(exceptions.CardinalException):
raise exceptions.InternalError
with pytest.raises(exceptions.CardinalException):
raise exceptions.PluginError
with pytest.raises(exceptions.CardinalException):
raise exceptions.CommandNotFoundError
with pytest.raises(exceptions.CardinalException):
raise exceptions.ConfigNotFoundError
with pytest.raises(exceptions.CardinalException):
raise exceptions.AmbiguousConfigError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventAlreadyExistsError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventDoesNotExistError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventCallbackError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventRejectedMessage
|
|
fc5c95e7480e38cbd3156b1e6b968e50fa98954d
|
Problems/towersOfHanoi.py
|
Problems/towersOfHanoi.py
|
#!/Applications/anaconda/envs/Python3/bin
def main():
for i in range(1, 8):
print("============================")
print("Towers of Hanoi: {} Disks".format(i))
towers_of_hanoi(i)
print("Number of moves: {}".format(2**i - 1))
print("============================")
return 0
def towers_of_hanoi(n, s="source", t="target", b="buffer"):
# n is number of disks, smaller disk must always be on top of larger one
assert n > 0
if n == 1:
print("Move {} to {}".format(s, t))
return
else:
# Recursively move n-1 disks from source to buffer
towers_of_hanoi(n-1, s, b, t)
# Move largest disk from source to target
towers_of_hanoi(1, s, t, b)
# Recursively move n-1 disks from buffer to target
towers_of_hanoi(n-1, b, t, s)
if __name__ == '__main__':
main()
|
Add Towers of Hanoi algorithm and tests
|
Add Towers of Hanoi algorithm and tests
|
Python
|
mit
|
HKuz/Test_Code
|
Add Towers of Hanoi algorithm and tests
|
#!/Applications/anaconda/envs/Python3/bin
def main():
for i in range(1, 8):
print("============================")
print("Towers of Hanoi: {} Disks".format(i))
towers_of_hanoi(i)
print("Number of moves: {}".format(2**i - 1))
print("============================")
return 0
def towers_of_hanoi(n, s="source", t="target", b="buffer"):
# n is number of disks, smaller disk must always be on top of larger one
assert n > 0
if n == 1:
print("Move {} to {}".format(s, t))
return
else:
# Recursively move n-1 disks from source to buffer
towers_of_hanoi(n-1, s, b, t)
# Move largest disk from source to target
towers_of_hanoi(1, s, t, b)
# Recursively move n-1 disks from buffer to target
towers_of_hanoi(n-1, b, t, s)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add Towers of Hanoi algorithm and tests<commit_after>
|
#!/Applications/anaconda/envs/Python3/bin
def main():
for i in range(1, 8):
print("============================")
print("Towers of Hanoi: {} Disks".format(i))
towers_of_hanoi(i)
print("Number of moves: {}".format(2**i - 1))
print("============================")
return 0
def towers_of_hanoi(n, s="source", t="target", b="buffer"):
# n is number of disks, smaller disk must always be on top of larger one
assert n > 0
if n == 1:
print("Move {} to {}".format(s, t))
return
else:
# Recursively move n-1 disks from source to buffer
towers_of_hanoi(n-1, s, b, t)
# Move largest disk from source to target
towers_of_hanoi(1, s, t, b)
# Recursively move n-1 disks from buffer to target
towers_of_hanoi(n-1, b, t, s)
if __name__ == '__main__':
main()
|
Add Towers of Hanoi algorithm and tests#!/Applications/anaconda/envs/Python3/bin
def main():
for i in range(1, 8):
print("============================")
print("Towers of Hanoi: {} Disks".format(i))
towers_of_hanoi(i)
print("Number of moves: {}".format(2**i - 1))
print("============================")
return 0
def towers_of_hanoi(n, s="source", t="target", b="buffer"):
# n is number of disks, smaller disk must always be on top of larger one
assert n > 0
if n == 1:
print("Move {} to {}".format(s, t))
return
else:
# Recursively move n-1 disks from source to buffer
towers_of_hanoi(n-1, s, b, t)
# Move largest disk from source to target
towers_of_hanoi(1, s, t, b)
# Recursively move n-1 disks from buffer to target
towers_of_hanoi(n-1, b, t, s)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add Towers of Hanoi algorithm and tests<commit_after>#!/Applications/anaconda/envs/Python3/bin
def main():
for i in range(1, 8):
print("============================")
print("Towers of Hanoi: {} Disks".format(i))
towers_of_hanoi(i)
print("Number of moves: {}".format(2**i - 1))
print("============================")
return 0
def towers_of_hanoi(n, s="source", t="target", b="buffer"):
# n is number of disks, smaller disk must always be on top of larger one
assert n > 0
if n == 1:
print("Move {} to {}".format(s, t))
return
else:
# Recursively move n-1 disks from source to buffer
towers_of_hanoi(n-1, s, b, t)
# Move largest disk from source to target
towers_of_hanoi(1, s, t, b)
# Recursively move n-1 disks from buffer to target
towers_of_hanoi(n-1, b, t, s)
if __name__ == '__main__':
main()
|
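A small companion sketch, independent of the script above, that confirms the 2**n - 1 move total printed by main(): transferring n disks is two transfers of n-1 disks plus one single-disk move.
def count_moves(n):
    # Base case: a single disk needs exactly one move.
    if n == 1:
        return 1
    # Move n-1 disks aside, move the largest disk, move n-1 disks back on top.
    return 2 * count_moves(n - 1) + 1

for n in range(1, 8):
    assert count_moves(n) == 2 ** n - 1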
|
017840d4b6986ea1b0c43791b76f724e1f4aaf22
|
004.py
|
004.py
|
"""
Project Euler Problem 4
=======================
A palindromic number reads the same both ways. The largest palindrome made
from the product of two 2-digit numbers is 9009 = 91 * 99.
Find the largest palindrome made from the product of two 3-digit numbers.
"""
def largest_palindrome(start, end):
"""
Takes start and stop integers and returns the largest palindrome
product out of all the factors in the range.
"""
palindromes = []
for i in range(start, end + 1):
for j in range(start, end + 1):
m = i * j
if is_palindrome(m):
palindromes.append(m)
return max(palindromes)
def is_palindrome(number):
"""
    Takes an integer and returns True if it's palindromic, otherwise
returns False.
"""
return str(number) == str(number)[::-1]
def test_largest_palindrome():
assert largest_palindrome(10, 99) == 9009
def test_is_palindrome():
assert is_palindrome(9009)
assert not is_palindrome(12345)
print(largest_palindrome(100, 999))
|
Add solution and unit tests for problem 4
|
Add solution and unit tests for problem 4
|
Python
|
mit
|
BeataBak/project-euler-problems
|
Add solution and unit tests for problem 4
|
"""
Project Euler Problem 4
=======================
A palindromic number reads the same both ways. The largest palindrome made
from the product of two 2-digit numbers is 9009 = 91 * 99.
Find the largest palindrome made from the product of two 3-digit numbers.
"""
def largest_palindrome(start, end):
"""
Takes start and stop integers and returns the largest palindrome
product out of all the factors in the range.
"""
palindromes = []
for i in range(start, end + 1):
for j in range(start, end + 1):
m = i * j
if is_palindrome(m):
palindromes.append(m)
return max(palindromes)
def is_palindrome(number):
"""
    Takes an integer and returns True if it's palindromic, otherwise
returns False.
"""
return str(number) == str(number)[::-1]
def test_largest_palindrome():
assert largest_palindrome(10, 99) == 9009
def test_is_palindrome():
assert is_palindrome(9009)
assert not is_palindrome(12345)
print(largest_palindrome(100, 999))
|
<commit_before><commit_msg>Add solution and unit tests for problem 4<commit_after>
|
"""
Project Euler Problem 4
=======================
A palindromic number reads the same both ways. The largest palindrome made
from the product of two 2-digit numbers is 9009 = 91 * 99.
Find the largest palindrome made from the product of two 3-digit numbers.
"""
def largest_palindrome(start, end):
"""
Takes start and stop integers and returns the largest palindrome
product out of all the factors in the range.
"""
palindromes = []
for i in range(start, end + 1):
for j in range(start, end + 1):
m = i * j
if is_palindrome(m):
palindromes.append(m)
return max(palindromes)
def is_palindrome(number):
"""
    Takes an integer and returns True if it's palindromic, otherwise
returns False.
"""
return str(number) == str(number)[::-1]
def test_largest_palindrome():
assert largest_palindrome(10, 99) == 9009
def test_is_palindrome():
assert is_palindrome(9009)
assert not is_palindrome(12345)
print(largest_palindrome(100, 999))
|
Add solution and unit tests for problem 4"""
Project Euler Problem 4
=======================
A palindromic number reads the same both ways. The largest palindrome made
from the product of two 2-digit numbers is 9009 = 91 * 99.
Find the largest palindrome made from the product of two 3-digit numbers.
"""
def largest_palindrome(start, end):
"""
Takes start and stop integers and returns the largest palindrome
product out of all the factors in the range.
"""
palindromes = []
for i in range(start, end + 1):
for j in range(start, end + 1):
m = i * j
if is_palindrome(m):
palindromes.append(m)
return max(palindromes)
def is_palindrome(number):
"""
    Takes an integer and returns True if it's palindromic, otherwise
returns False.
"""
return str(number) == str(number)[::-1]
def test_largest_palindrome():
assert largest_palindrome(10, 99) == 9009
def test_is_palindrome():
assert is_palindrome(9009)
assert not is_palindrome(12345)
print(largest_palindrome(100, 999))
|
<commit_before><commit_msg>Add solution and unit tests for problem 4<commit_after>"""
Project Euler Problem 4
=======================
A palindromic number reads the same both ways. The largest palindrome made
from the product of two 2-digit numbers is 9009 = 91 * 99.
Find the largest palindrome made from the product of two 3-digit numbers.
"""
def largest_palindrome(start, end):
"""
Takes start and stop integers and returns the largest palindrome
product out of all the factors in the range.
"""
palindromes = []
for i in range(start, end + 1):
for j in range(start, end + 1):
m = i * j
if is_palindrome(m):
palindromes.append(m)
return max(palindromes)
def is_palindrome(number):
"""
    Takes an integer and returns True if it's palindromic, otherwise
returns False.
"""
return str(number) == str(number)[::-1]
def test_largest_palindrome():
assert largest_palindrome(10, 99) == 9009
def test_is_palindrome():
assert is_palindrome(9009)
assert not is_palindrome(12345)
print(largest_palindrome(100, 999))
|
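A quick sanity check, assuming largest_palindrome from the file above is in scope: 9009 = 91 * 99 comes from the problem statement, and 906609 = 913 * 993 is the known result for two 3-digit factors. The brute-force double loop makes the second call noticeably slower, but it still finishes in seconds.
assert largest_palindrome(10, 99) == 9009
assert largest_palindrome(100, 999) == 906609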
|
6c8136ff03250b41418a415efcfc4717ad73c82c
|
app/twitterappengine.py
|
app/twitterappengine.py
|
import time
from django.utils import simplejson
from google.appengine.ext import db
class _DbCacheEntry(db.Model):
value = db.TextProperty(required=True)
timestamp = db.DateTimeProperty(required=True, auto_now=True)
class DbCache(object):
'''Simple cache on top of Google App engine's datastore'''
def Get(self, key):
entry = _DbCacheEntry.get_by_key_name(key)
if entry:
return entry.value
else:
return None
def Set(self, key, data):
entry = _DbCacheEntry.get_by_key_name(key)
if not entry:
entry = _DbCacheEntry(
key_name = key,
value = data)
else:
entry.value = data
entry.put()
def GetCachedTime(self, key):
entry = _DbCacheEntry.get_by_key_name(key)
if entry:
try:
# All cached data must be valid JSON, and if we mistakenly cache
# error response, we should ignore them
data = simplejson.loads(entry.value)
if isinstance(data, dict) and data.has_key('error'):
return None
except:
return None
return time.mktime(entry.timestamp.utctimetuple())
else:
return None
return None
|
Add helper classes for using python-twitter on App Engine (currently just a cache implementation on top of the App Engine datastore).
|
Add helper classes for using python-twitter on App Engine (currently just a
cache implementation on top of the App Engine datastore).
|
Python
|
apache-2.0
|
mihaip/streamspigot,mihaip/streamspigot,mihaip/streamspigot,mihaip/streamspigot
|
Add helper classes for using python-twitter on App Engine (currently just a
cache implementation on top of the App Engine datastore).
|
import time
from django.utils import simplejson
from google.appengine.ext import db
class _DbCacheEntry(db.Model):
value = db.TextProperty(required=True)
timestamp = db.DateTimeProperty(required=True, auto_now=True)
class DbCache(object):
'''Simple cache on top of Google App engine's datastore'''
def Get(self, key):
entry = _DbCacheEntry.get_by_key_name(key)
if entry:
return entry.value
else:
return None
def Set(self, key, data):
entry = _DbCacheEntry.get_by_key_name(key)
if not entry:
entry = _DbCacheEntry(
key_name = key,
value = data)
else:
entry.value = data
entry.put()
def GetCachedTime(self, key):
entry = _DbCacheEntry.get_by_key_name(key)
if entry:
try:
# All cached data must be valid JSON, and if we mistakenly cache
# error response, we should ignore them
data = simplejson.loads(entry.value)
if isinstance(data, dict) and data.has_key('error'):
return None
except:
return None
return time.mktime(entry.timestamp.utctimetuple())
else:
return None
return None
|
<commit_before><commit_msg>Add helper classes for using python-twitter on App Engine (currently just a
cache implementation on top of the App Engine datastore).<commit_after>
|
import time
from django.utils import simplejson
from google.appengine.ext import db
class _DbCacheEntry(db.Model):
value = db.TextProperty(required=True)
timestamp = db.DateTimeProperty(required=True, auto_now=True)
class DbCache(object):
'''Simple cache on top of Google App engine's datastore'''
def Get(self, key):
entry = _DbCacheEntry.get_by_key_name(key)
if entry:
return entry.value
else:
return None
def Set(self, key, data):
entry = _DbCacheEntry.get_by_key_name(key)
if not entry:
entry = _DbCacheEntry(
key_name = key,
value = data)
else:
entry.value = data
entry.put()
def GetCachedTime(self, key):
entry = _DbCacheEntry.get_by_key_name(key)
if entry:
try:
# All cached data must be valid JSON, and if we mistakenly cache
# error response, we should ignore them
data = simplejson.loads(entry.value)
if isinstance(data, dict) and data.has_key('error'):
return None
except:
return None
return time.mktime(entry.timestamp.utctimetuple())
else:
return None
return None
|
Add helper classes for using python-twitter on App Engine (currently just a
cache implementation on top of the App Engine datastore).import time
from django.utils import simplejson
from google.appengine.ext import db
class _DbCacheEntry(db.Model):
value = db.TextProperty(required=True)
timestamp = db.DateTimeProperty(required=True, auto_now=True)
class DbCache(object):
'''Simple cache on top of Google App engine's datastore'''
def Get(self, key):
entry = _DbCacheEntry.get_by_key_name(key)
if entry:
return entry.value
else:
return None
def Set(self, key, data):
entry = _DbCacheEntry.get_by_key_name(key)
if not entry:
entry = _DbCacheEntry(
key_name = key,
value = data)
else:
entry.value = data
entry.put()
def GetCachedTime(self, key):
entry = _DbCacheEntry.get_by_key_name(key)
if entry:
try:
# All cached data must be valid JSON, and if we mistakenly cache
# error response, we should ignore them
data = simplejson.loads(entry.value)
if isinstance(data, dict) and data.has_key('error'):
return None
except:
return None
return time.mktime(entry.timestamp.utctimetuple())
else:
return None
return None
|
<commit_before><commit_msg>Add helper classes for using python-twitter on App Engine (currently just a
cache implementation on top of the App Engine datastore).<commit_after>import time
from django.utils import simplejson
from google.appengine.ext import db
class _DbCacheEntry(db.Model):
value = db.TextProperty(required=True)
timestamp = db.DateTimeProperty(required=True, auto_now=True)
class DbCache(object):
'''Simple cache on top of Google App engine's datastore'''
def Get(self, key):
entry = _DbCacheEntry.get_by_key_name(key)
if entry:
return entry.value
else:
return None
def Set(self, key, data):
entry = _DbCacheEntry.get_by_key_name(key)
if not entry:
entry = _DbCacheEntry(
key_name = key,
value = data)
else:
entry.value = data
entry.put()
def GetCachedTime(self, key):
entry = _DbCacheEntry.get_by_key_name(key)
if entry:
try:
# All cached data must be valid JSON, and if we mistakenly cache
# error response, we should ignore them
data = simplejson.loads(entry.value)
if isinstance(data, dict) and data.has_key('error'):
return None
except:
return None
return time.mktime(entry.timestamp.utctimetuple())
else:
return None
return None
|
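A hedged usage sketch: it assumes an App Engine runtime (so google.appengine.ext.db resolves) and that the module above is importable as twitterappengine. The Get/Set/GetCachedTime trio mirrors the cache interface python-twitter expects, which is presumably why the class exposes exactly those methods; the key and payload below are made up.
from twitterappengine import DbCache

cache = DbCache()
cache.Set('timeline/example_user', '{"statuses": []}')   # stored values must be JSON text
print(cache.Get('timeline/example_user'))                # the cached JSON string
print(cache.GetCachedTime('timeline/example_user'))      # write time as epoch seconds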
|
d6a8b8a518727a50f9dbe41f5874061b5ae6bc33
|
hyperion/model/tests/test_fortran.py
|
hyperion/model/tests/test_fortran.py
|
import pytest
import numpy as np
from .. import Model
from .test_helpers import random_filename, get_test_dust
def test_point_source_outside_grid():
dust = get_test_dust()
m = Model()
m.set_cartesian_grid([-1., 1.], [-1., 1.], [-1., 1.])
m.add_density_grid(np.array([[[1.]]]), dust)
m.set_n_photons(initial=100, imaging=0)
s = m.add_point_source()
s.position = (-1.5, 0., 0.)
s.temperature = 5000.
s.luminosity = 1.
m.write(random_filename())
log_file = random_filename()
with pytest.raises(SystemExit) as exc:
m.run(random_filename(), logfile=log_file)
assert exc.value.args[0] == 'An error occurred, and the run did not ' + \
'complete'
assert 'photon was not emitted inside a cell' in open(log_file).read()
|
Add test to check that error is properly raised when a photon is emitted outside the grid.
|
Add test to check that error is properly raised when a photon is emitted outside the grid.
|
Python
|
bsd-2-clause
|
bluescarni/hyperion,hyperion-rt/hyperion,astrofrog/hyperion,hyperion-rt/hyperion,hyperion-rt/hyperion,bluescarni/hyperion,astrofrog/hyperion
|
Add test to check that error is properly raised when a photon is emitted outside the grid.
|
import pytest
import numpy as np
from .. import Model
from .test_helpers import random_filename, get_test_dust
def test_point_source_outside_grid():
dust = get_test_dust()
m = Model()
m.set_cartesian_grid([-1., 1.], [-1., 1.], [-1., 1.])
m.add_density_grid(np.array([[[1.]]]), dust)
m.set_n_photons(initial=100, imaging=0)
s = m.add_point_source()
s.position = (-1.5, 0., 0.)
s.temperature = 5000.
s.luminosity = 1.
m.write(random_filename())
log_file = random_filename()
with pytest.raises(SystemExit) as exc:
m.run(random_filename(), logfile=log_file)
assert exc.value.args[0] == 'An error occurred, and the run did not ' + \
'complete'
assert 'photon was not emitted inside a cell' in open(log_file).read()
|
<commit_before><commit_msg>Add test to check that error is properly raised when a photon is emitted outside the grid.<commit_after>
|
import pytest
import numpy as np
from .. import Model
from .test_helpers import random_filename, get_test_dust
def test_point_source_outside_grid():
dust = get_test_dust()
m = Model()
m.set_cartesian_grid([-1., 1.], [-1., 1.], [-1., 1.])
m.add_density_grid(np.array([[[1.]]]), dust)
m.set_n_photons(initial=100, imaging=0)
s = m.add_point_source()
s.position = (-1.5, 0., 0.)
s.temperature = 5000.
s.luminosity = 1.
m.write(random_filename())
log_file = random_filename()
with pytest.raises(SystemExit) as exc:
m.run(random_filename(), logfile=log_file)
assert exc.value.args[0] == 'An error occurred, and the run did not ' + \
'complete'
assert 'photon was not emitted inside a cell' in open(log_file).read()
|
Add test to check that error is properly raised when a photon is emitted outside the grid.import pytest
import numpy as np
from .. import Model
from .test_helpers import random_filename, get_test_dust
def test_point_source_outside_grid():
dust = get_test_dust()
m = Model()
m.set_cartesian_grid([-1., 1.], [-1., 1.], [-1., 1.])
m.add_density_grid(np.array([[[1.]]]), dust)
m.set_n_photons(initial=100, imaging=0)
s = m.add_point_source()
s.position = (-1.5, 0., 0.)
s.temperature = 5000.
s.luminosity = 1.
m.write(random_filename())
log_file = random_filename()
with pytest.raises(SystemExit) as exc:
m.run(random_filename(), logfile=log_file)
assert exc.value.args[0] == 'An error occurred, and the run did not ' + \
'complete'
assert 'photon was not emitted inside a cell' in open(log_file).read()
|
<commit_before><commit_msg>Add test to check that error is properly raised when a photon is emitted outside the grid.<commit_after>import pytest
import numpy as np
from .. import Model
from .test_helpers import random_filename, get_test_dust
def test_point_source_outside_grid():
dust = get_test_dust()
m = Model()
m.set_cartesian_grid([-1., 1.], [-1., 1.], [-1., 1.])
m.add_density_grid(np.array([[[1.]]]), dust)
m.set_n_photons(initial=100, imaging=0)
s = m.add_point_source()
s.position = (-1.5, 0., 0.)
s.temperature = 5000.
s.luminosity = 1.
m.write(random_filename())
log_file = random_filename()
with pytest.raises(SystemExit) as exc:
m.run(random_filename(), logfile=log_file)
assert exc.value.args[0] == 'An error occurred, and the run did not ' + \
'complete'
assert 'photon was not emitted inside a cell' in open(log_file).read()
|
|
274d6a95cafb882ef422a46c2e5f91739b368c6f
|
sugar/context_processors.py
|
sugar/context_processors.py
|
# encoding: utf-8
from django.conf import settings
from django.contrib.sites.models import Site
def site_settings(request):
"""Expose common Django settings to templates"""
context = {
'CURRENT_SITE': Site.objects.get_current(),
}
for k in ('DEBUG', 'LOCAL_DEV', 'VERSION', 'MEDIA_URL', 'STATIC_URL', 'MEDIA_KEY'):
if hasattr(settings, k):
context[k] = getattr(settings, k)
return context
|
Add a basic context processor which exposes common settings
|
Add a basic context processor which exposes common settings
|
Python
|
bsd-3-clause
|
acdha/django-sugar
|
Add a basic context processor which exposes common settings
|
# encoding: utf-8
from django.conf import settings
from django.contrib.sites.models import Site
def site_settings(request):
"""Expose common Django settings to templates"""
context = {
'CURRENT_SITE': Site.objects.get_current(),
}
for k in ('DEBUG', 'LOCAL_DEV', 'VERSION', 'MEDIA_URL', 'STATIC_URL', 'MEDIA_KEY'):
if hasattr(settings, k):
context[k] = getattr(settings, k)
return context
|
<commit_before><commit_msg>Add a basic context processor which exposes common settings<commit_after>
|
# encoding: utf-8
from django.conf import settings
from django.contrib.sites.models import Site
def site_settings(request):
"""Expose common Django settings to templates"""
context = {
'CURRENT_SITE': Site.objects.get_current(),
}
for k in ('DEBUG', 'LOCAL_DEV', 'VERSION', 'MEDIA_URL', 'STATIC_URL', 'MEDIA_KEY'):
if hasattr(settings, k):
context[k] = getattr(settings, k)
return context
|
Add a basic context processor which exposes common settings# encoding: utf-8
from django.conf import settings
from django.contrib.sites.models import Site
def site_settings(request):
"""Expose common Django settings to templates"""
context = {
'CURRENT_SITE': Site.objects.get_current(),
}
for k in ('DEBUG', 'LOCAL_DEV', 'VERSION', 'MEDIA_URL', 'STATIC_URL', 'MEDIA_KEY'):
if hasattr(settings, k):
context[k] = getattr(settings, k)
return context
|
<commit_before><commit_msg>Add a basic context processor which exposes common settings<commit_after># encoding: utf-8
from django.conf import settings
from django.contrib.sites.models import Site
def site_settings(request):
"""Expose common Django settings to templates"""
context = {
'CURRENT_SITE': Site.objects.get_current(),
}
for k in ('DEBUG', 'LOCAL_DEV', 'VERSION', 'MEDIA_URL', 'STATIC_URL', 'MEDIA_KEY'):
if hasattr(settings, k):
context[k] = getattr(settings, k)
return context
|
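A hedged configuration sketch for enabling the processor above (Django settings of that era; the neighbouring entries in the tuple are assumptions). Once listed, {{ CURRENT_SITE.domain }}, {{ MEDIA_URL }} and the other exposed settings become available in any template rendered with a RequestContext.
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.request',
    'sugar.context_processors.site_settings',
)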
|
e8688d666ae2ccbd06fc5e2811789886cbd1d5f2
|
harvester/post_processing/fix_repeated_displayDate.py
|
harvester/post_processing/fix_repeated_displayDate.py
|
# -*- coding: utf-8 -*-
import re
r=re.compile(r'(\d\d\d\d)-\1')
def fix_repeated_date(doc):
dates = doc['sourceResource'].get('date', None)
if dates:
if isinstance(dates, list):
new_dates = []
for d in dates:
disp_date = d.get('displayDate', '')
if r.match(disp_date):
new_dd = r.match(disp_date).group(1)
d['displayDate'] = new_dd
new_dates.append(d)
doc['sourceResource']['date'] = new_dates
return doc
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
Fix for display date repeat of YYYY
|
Fix for display date repeat of YYYY
|
Python
|
bsd-3-clause
|
mredar/harvester,barbarahui/harvester,barbarahui/harvester,ucldc/harvester,ucldc/harvester,mredar/harvester
|
Fix for display date repeat of YYYY
|
# -*- coding: utf-8 -*-
import re
r=re.compile(r'(\d\d\d\d)-\1')
def fix_repeated_date(doc):
dates = doc['sourceResource'].get('date', None)
if dates:
if isinstance(dates, list):
new_dates = []
for d in dates:
disp_date = d.get('displayDate', '')
if r.match(disp_date):
new_dd = r.match(disp_date).group(1)
d['displayDate'] = new_dd
new_dates.append(d)
doc['sourceResource']['date'] = new_dates
return doc
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
<commit_before><commit_msg>Fix for display date repeat of YYYY<commit_after>
|
# -*- coding: utf-8 -*-
import re
r=re.compile(r'(\d\d\d\d)-\1')
def fix_repeated_date(doc):
dates = doc['sourceResource'].get('date', None)
if dates:
if isinstance(dates, list):
new_dates = []
for d in dates:
disp_date = d.get('displayDate', '')
if r.match(disp_date):
new_dd = r.match(disp_date).group(1)
d['displayDate'] = new_dd
new_dates.append(d)
doc['sourceResource']['date'] = new_dates
return doc
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
Fix for display date repeat of YYYY# -*- coding: utf-8 -*-
import re
r=re.compile(r'(\d\d\d\d)-\1')
def fix_repeated_date(doc):
dates = doc['sourceResource'].get('date', None)
if dates:
if isinstance(dates, list):
new_dates = []
for d in dates:
disp_date = d.get('displayDate', '')
if r.match(disp_date):
new_dd = r.match(disp_date).group(1)
d['displayDate'] = new_dd
new_dates.append(d)
doc['sourceResource']['date'] = new_dates
return doc
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
<commit_before><commit_msg>Fix for display date repeat of YYYY<commit_after># -*- coding: utf-8 -*-
import re
r=re.compile(r'(\d\d\d\d)-\1')
def fix_repeated_date(doc):
dates = doc['sourceResource'].get('date', None)
if dates:
if isinstance(dates, list):
new_dates = []
for d in dates:
disp_date = d.get('displayDate', '')
if r.match(disp_date):
new_dd = r.match(disp_date).group(1)
d['displayDate'] = new_dd
new_dates.append(d)
doc['sourceResource']['date'] = new_dates
return doc
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
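A minimal worked example, assuming fix_repeated_date above is in scope; the doc layout is inferred from the fields the function reads, so treat it as illustrative. A displayDate of the form "YYYY-YYYY" collapses to "YYYY", while any other value passes through unchanged.
doc = {'sourceResource': {'date': [{'displayDate': '1950-1950'},
                                   {'displayDate': '1950-1960'}]}}
fixed = fix_repeated_date(doc)
print(fixed['sourceResource']['date'])
# -> [{'displayDate': '1950'}, {'displayDate': '1950-1960'}]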
|
77f15c5b055bc9ba04b0857f5a34b2e537d2c158
|
test/6785/norequire_test.py
|
test/6785/norequire_test.py
|
import unittest
import checksieve
class TestNoRequire(unittest.TestCase):
def test_no_require(self):
sieve = '''
require ["copy", "environment"];
if anyof (environment :is "imap.cause" "APPEND",
environment :is "imap.cause" "COPY") {
if environment :is "imap.mailbox" "ActionItems" {
redirect :copy "actionitems@example.com";
}
}
'''
self.assertTrue(checksieve.parse_string(sieve, True))
if __name__ == '__main__':
unittest.main()
|
Add failing test validating missing imapsieve capability.
|
Add failing test validating missing imapsieve capability.
Issue #41
|
Python
|
mit
|
dburkart/check-sieve,dburkart/check-sieve,dburkart/check-sieve
|
Add failing test validating missing imapsieve capability.
Issue #41
|
import unittest
import checksieve
class TestNoRequire(unittest.TestCase):
def test_no_require(self):
sieve = '''
require ["copy", "environment"];
if anyof (environment :is "imap.cause" "APPEND",
environment :is "imap.cause" "COPY") {
if environment :is "imap.mailbox" "ActionItems" {
redirect :copy "actionitems@example.com";
}
}
'''
self.assertTrue(checksieve.parse_string(sieve, True))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add failing test validating missing imapsieve capability.
Issue #41<commit_after>
|
import unittest
import checksieve
class TestNoRequire(unittest.TestCase):
def test_no_require(self):
sieve = '''
require ["copy", "environment"];
if anyof (environment :is "imap.cause" "APPEND",
environment :is "imap.cause" "COPY") {
if environment :is "imap.mailbox" "ActionItems" {
redirect :copy "actionitems@example.com";
}
}
'''
self.assertTrue(checksieve.parse_string(sieve, True))
if __name__ == '__main__':
unittest.main()
|
Add failing test validating missing imapsieve capability.
Issue #41import unittest
import checksieve
class TestNoRequire(unittest.TestCase):
def test_no_require(self):
sieve = '''
require ["copy", "environment"];
if anyof (environment :is "imap.cause" "APPEND",
environment :is "imap.cause" "COPY") {
if environment :is "imap.mailbox" "ActionItems" {
redirect :copy "actionitems@example.com";
}
}
'''
self.assertTrue(checksieve.parse_string(sieve, True))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add failing test validating missing imapsieve capability.
Issue #41<commit_after>import unittest
import checksieve
class TestNoRequire(unittest.TestCase):
def test_no_require(self):
sieve = '''
require ["copy", "environment"];
if anyof (environment :is "imap.cause" "APPEND",
environment :is "imap.cause" "COPY") {
if environment :is "imap.mailbox" "ActionItems" {
redirect :copy "actionitems@example.com";
}
}
'''
self.assertTrue(checksieve.parse_string(sieve, True))
if __name__ == '__main__':
unittest.main()
|
|
827dbf8a97cf42d1571fa5eca0b3726027d80d53
|
lsv_compassion/model/invoice_line.py
|
lsv_compassion/model/invoice_line.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp.osv import orm, fields
class invoice_line(orm.Model):
_inherit = 'account.invoice.line'
def _get_child_name(self, cr, uid, ids, name, dict, context=None):
res = {}
for line in self.browse(cr, uid, ids, context):
child_name = ''
if line.contract_id and line.contract_id.child_id:
child_name = line.contract_id.child_id.name
res[line.id] = child_name
return res
_columns = {
'child_name': fields.function(
_get_child_name, string='Child name', type='char')
}
|
Add missing file in previous commit.
|
Add missing file in previous commit.
|
Python
|
agpl-3.0
|
MickSandoz/compassion-switzerland,eicher31/compassion-switzerland,ndtran/compassion-switzerland,eicher31/compassion-switzerland,MickSandoz/compassion-switzerland,Secheron/compassion-switzerland,ecino/compassion-switzerland,ecino/compassion-switzerland,CompassionCH/compassion-switzerland,ecino/compassion-switzerland,Secheron/compassion-switzerland,ndtran/compassion-switzerland,eicher31/compassion-switzerland,CompassionCH/compassion-switzerland,CompassionCH/compassion-switzerland
|
Add missing file in previous commit.
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp.osv import orm, fields
class invoice_line(orm.Model):
_inherit = 'account.invoice.line'
def _get_child_name(self, cr, uid, ids, name, dict, context=None):
res = {}
for line in self.browse(cr, uid, ids, context):
child_name = ''
if line.contract_id and line.contract_id.child_id:
child_name = line.contract_id.child_id.name
res[line.id] = child_name
return res
_columns = {
'child_name': fields.function(
_get_child_name, string='Child name', type='char')
}
|
<commit_before><commit_msg>Add missing file in previous commit.<commit_after>
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp.osv import orm, fields
class invoice_line(orm.Model):
_inherit = 'account.invoice.line'
def _get_child_name(self, cr, uid, ids, name, dict, context=None):
res = {}
for line in self.browse(cr, uid, ids, context):
child_name = ''
if line.contract_id and line.contract_id.child_id:
child_name = line.contract_id.child_id.name
res[line.id] = child_name
return res
_columns = {
'child_name': fields.function(
_get_child_name, string='Child name', type='char')
}
|
Add missing file in previous commit.# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp.osv import orm, fields
class invoice_line(orm.Model):
_inherit = 'account.invoice.line'
def _get_child_name(self, cr, uid, ids, name, dict, context=None):
res = {}
for line in self.browse(cr, uid, ids, context):
child_name = ''
if line.contract_id and line.contract_id.child_id:
child_name = line.contract_id.child_id.name
res[line.id] = child_name
return res
_columns = {
'child_name': fields.function(
_get_child_name, string='Child name', type='char')
}
|
<commit_before><commit_msg>Add missing file in previous commit.<commit_after># -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp.osv import orm, fields
class invoice_line(orm.Model):
_inherit = 'account.invoice.line'
def _get_child_name(self, cr, uid, ids, name, dict, context=None):
res = {}
for line in self.browse(cr, uid, ids, context):
child_name = ''
if line.contract_id and line.contract_id.child_id:
child_name = line.contract_id.child_id.name
res[line.id] = child_name
return res
_columns = {
'child_name': fields.function(
_get_child_name, string='Child name', type='char')
}
|
|
06b1344fe949cf0d7c53e56a1f42e281571dbfea
|
get_fib.py
|
get_fib.py
|
"""Implement a function recursivly to get the desired
Fibonacci sequence value.
Your code should have the same input/output as the
iterative code in the instructions."""
def get_fib(position):
""" Algorithm:
1. If position matches with base cases(0 and 1) then
return the position. i.e.
1.1 If position == 0
return 0
1.2 If position == 1
return 1
2. Else return addition of previous two values
"""
if position == 0 or position == 1:
return position
else:
return get_fib(position - 1) + get_fib(position - 2)
return -1
# Test cases
print get_fib(9)
print get_fib(11)
print get_fib(0)
|
Add recursive also for Fibonacci
|
Add recursive also for Fibonacci
|
Python
|
mit
|
rav1n/basic-algorithms
|
Add recursive also for Fibonacci
|
"""Implement a function recursivly to get the desired
Fibonacci sequence value.
Your code should have the same input/output as the
iterative code in the instructions."""
def get_fib(position):
""" Algorithm:
1. If position matches with base cases(0 and 1) then
return the position. i.e.
1.1 If position == 0
return 0
1.2 If position == 1
return 1
2. Else return addition of previous two values
"""
if position == 0 or position == 1:
return position
else:
return get_fib(position - 1) + get_fib(position - 2)
return -1
# Test cases
print get_fib(9)
print get_fib(11)
print get_fib(0)
|
<commit_before><commit_msg>Add recursive also for Fibonacci<commit_after>
|
"""Implement a function recursivly to get the desired
Fibonacci sequence value.
Your code should have the same input/output as the
iterative code in the instructions."""
def get_fib(position):
""" Algorithm:
1. If position matches with base cases(0 and 1) then
return the position. i.e.
1.1 If position == 0
return 0
1.2 If position == 1
return 1
2. Else return addition of previous two values
"""
if position == 0 or position == 1:
return position
else:
return get_fib(position - 1) + get_fib(position - 2)
return -1
# Test cases
print get_fib(9)
print get_fib(11)
print get_fib(0)
|
Add recursive also for fibonnaci"""Implement a function recursivly to get the desired
Fibonacci sequence value.
Your code should have the same input/output as the
iterative code in the instructions."""
def get_fib(position):
""" Algorithm:
1. If position matches with base cases(0 and 1) then
return the position. i.e.
1.1 If position == 0
return 0
1.2 If position == 1
return 1
2. Else return addition of previous two values
"""
if position == 0 or position == 1:
return position
else:
return get_fib(position - 1) + get_fib(position - 2)
return -1
# Test cases
print get_fib(9)
print get_fib(11)
print get_fib(0)
|
<commit_before><commit_msg>Add recursive also for fibonnaci<commit_after>"""Implement a function recursivly to get the desired
Fibonacci sequence value.
Your code should have the same input/output as the
iterative code in the instructions."""
def get_fib(position):
""" Algorithm:
1. If position matches with base cases(0 and 1) then
return the position. i.e.
1.1 If position == 0
return 0
1.2 If position == 1
return 1
2. Else return addition of previous two values
"""
if position == 0 or position == 1:
return position
else:
return get_fib(position - 1) + get_fib(position - 2)
return -1
# Test cases
print get_fib(9)
print get_fib(11)
print get_fib(0)
|
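A quick check of the values the three print statements above should produce, assuming get_fib is in scope and the usual F(0) = 0, F(1) = 1 convention: F(9) = 34, F(11) = 89, F(0) = 0.
assert get_fib(9) == 34
assert get_fib(11) == 89
assert get_fib(0) == 0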
|
083549a49dfe242efdc631de3b698b2db1bf5119
|
robot/robot/tests/autonomous_test.py
|
robot/robot/tests/autonomous_test.py
|
'''
Runs all of the autonomous modes in a basic way
'''
def test_all_autonomous(robot, wpilib, fake_time):
autonomous_chooser = wpilib.SmartDashboard._table.data['Autonomous Mode']
auto_tm = 10
tm_limit = auto_tm
for choice in autonomous_chooser.choices.keys():
# set the mode
autonomous_chooser.selected = choice
# run autonomous mode for 10 seconds
wpilib.internal.enabled = True
wpilib.internal.on_IsAutonomous = lambda tm: tm < tm_limit
robot.Autonomous()
# make sure autonomous mode ran for the entire time, and
# didn't exit early
assert int(fake_time.Get()) == tm_limit
tm_limit += auto_tm
|
Add simplistic test to run through all autonomous modes
|
Add simplistic test to run through all autonomous modes
|
Python
|
bsd-3-clause
|
frc1418/2014
|
Add simplistic test to run through all autonomous modes
|
'''
Runs all of the autonomous modes in a basic way
'''
def test_all_autonomous(robot, wpilib, fake_time):
autonomous_chooser = wpilib.SmartDashboard._table.data['Autonomous Mode']
auto_tm = 10
tm_limit = auto_tm
for choice in autonomous_chooser.choices.keys():
# set the mode
autonomous_chooser.selected = choice
# run autonomous mode for 10 seconds
wpilib.internal.enabled = True
wpilib.internal.on_IsAutonomous = lambda tm: tm < tm_limit
robot.Autonomous()
# make sure autonomous mode ran for the entire time, and
# didn't exit early
assert int(fake_time.Get()) == tm_limit
tm_limit += auto_tm
|
<commit_before><commit_msg>Add simplistic test to run through all autonomous modes<commit_after>
|
'''
Runs all of the autonomous modes in a basic way
'''
def test_all_autonomous(robot, wpilib, fake_time):
autonomous_chooser = wpilib.SmartDashboard._table.data['Autonomous Mode']
auto_tm = 10
tm_limit = auto_tm
for choice in autonomous_chooser.choices.keys():
# set the mode
autonomous_chooser.selected = choice
# run autonomous mode for 10 seconds
wpilib.internal.enabled = True
wpilib.internal.on_IsAutonomous = lambda tm: tm < tm_limit
robot.Autonomous()
# make sure autonomous mode ran for the entire time, and
# didn't exit early
assert int(fake_time.Get()) == tm_limit
tm_limit += auto_tm
|
Add simplistic test to run through all autonomous modes'''
Runs all of the autonomous modes in a basic way
'''
def test_all_autonomous(robot, wpilib, fake_time):
autonomous_chooser = wpilib.SmartDashboard._table.data['Autonomous Mode']
auto_tm = 10
tm_limit = auto_tm
for choice in autonomous_chooser.choices.keys():
# set the mode
autonomous_chooser.selected = choice
# run autonomous mode for 10 seconds
wpilib.internal.enabled = True
wpilib.internal.on_IsAutonomous = lambda tm: tm < tm_limit
robot.Autonomous()
# make sure autonomous mode ran for the entire time, and
# didn't exit early
assert int(fake_time.Get()) == tm_limit
tm_limit += auto_tm
|
<commit_before><commit_msg>Add simplistic test to run through all autonomous modes<commit_after>'''
Runs all of the autonomous modes in a basic way
'''
def test_all_autonomous(robot, wpilib, fake_time):
autonomous_chooser = wpilib.SmartDashboard._table.data['Autonomous Mode']
auto_tm = 10
tm_limit = auto_tm
for choice in autonomous_chooser.choices.keys():
# set the mode
autonomous_chooser.selected = choice
# run autonomous mode for 10 seconds
wpilib.internal.enabled = True
wpilib.internal.on_IsAutonomous = lambda tm: tm < tm_limit
robot.Autonomous()
# make sure autonomous mode ran for the entire time, and
# didn't exit early
assert int(fake_time.Get()) == tm_limit
tm_limit += auto_tm
|
|
e7c48871ee7d7132155697ac44fc31a29dcb00e1
|
CodeFights/twinsScore.py
|
CodeFights/twinsScore.py
|
#!/usr/local/bin/python
# Code Fights Twins Score Problem
def twinsScore(b, m):
return [sum(item) for item in zip(b, m)]
def main():
tests = [
[[22, 13, 45, 32], [28, 41, 13, 32], [50, 54, 58, 64]],
[[0, 0, 0], [100, 100, 100], [100, 100, 100]],
[[42], [42], [84]],
[[46, 22, 2, 83, 15, 46, 98], [28, 33, 91, 71, 77, 35, 5],
[74, 55, 93, 154, 92, 81, 103]],
[[73, 5, 69, 88, 53, 8, 25, 52, 18, 61],
[97, 61, 69, 10, 11, 13, 72, 3, 57, 47],
[170, 66, 138, 98, 64, 21, 97, 55, 75, 108]]
]
for t in tests:
res = twinsScore(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: twinsScore({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: twinsScore({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights twins score problem
|
Solve Code Fights twins score problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights twins score problem
|
#!/usr/local/bin/python
# Code Fights Twins Score Problem
def twinsScore(b, m):
return [sum(item) for item in zip(b, m)]
def main():
tests = [
[[22, 13, 45, 32], [28, 41, 13, 32], [50, 54, 58, 64]],
[[0, 0, 0], [100, 100, 100], [100, 100, 100]],
[[42], [42], [84]],
[[46, 22, 2, 83, 15, 46, 98], [28, 33, 91, 71, 77, 35, 5],
[74, 55, 93, 154, 92, 81, 103]],
[[73, 5, 69, 88, 53, 8, 25, 52, 18, 61],
[97, 61, 69, 10, 11, 13, 72, 3, 57, 47],
[170, 66, 138, 98, 64, 21, 97, 55, 75, 108]]
]
for t in tests:
res = twinsScore(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: twinsScore({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: twinsScore({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights twins score problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Twins Score Problem
def twinsScore(b, m):
return [sum(item) for item in zip(b, m)]
def main():
tests = [
[[22, 13, 45, 32], [28, 41, 13, 32], [50, 54, 58, 64]],
[[0, 0, 0], [100, 100, 100], [100, 100, 100]],
[[42], [42], [84]],
[[46, 22, 2, 83, 15, 46, 98], [28, 33, 91, 71, 77, 35, 5],
[74, 55, 93, 154, 92, 81, 103]],
[[73, 5, 69, 88, 53, 8, 25, 52, 18, 61],
[97, 61, 69, 10, 11, 13, 72, 3, 57, 47],
[170, 66, 138, 98, 64, 21, 97, 55, 75, 108]]
]
for t in tests:
res = twinsScore(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: twinsScore({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: twinsScore({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights twins score problem#!/usr/local/bin/python
# Code Fights Twins Score Problem
def twinsScore(b, m):
return [sum(item) for item in zip(b, m)]
def main():
tests = [
[[22, 13, 45, 32], [28, 41, 13, 32], [50, 54, 58, 64]],
[[0, 0, 0], [100, 100, 100], [100, 100, 100]],
[[42], [42], [84]],
[[46, 22, 2, 83, 15, 46, 98], [28, 33, 91, 71, 77, 35, 5],
[74, 55, 93, 154, 92, 81, 103]],
[[73, 5, 69, 88, 53, 8, 25, 52, 18, 61],
[97, 61, 69, 10, 11, 13, 72, 3, 57, 47],
[170, 66, 138, 98, 64, 21, 97, 55, 75, 108]]
]
for t in tests:
res = twinsScore(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: twinsScore({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: twinsScore({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights twins score problem<commit_after>#!/usr/local/bin/python
# Code Fights Twins Score Problem
def twinsScore(b, m):
return [sum(item) for item in zip(b, m)]
def main():
tests = [
[[22, 13, 45, 32], [28, 41, 13, 32], [50, 54, 58, 64]],
[[0, 0, 0], [100, 100, 100], [100, 100, 100]],
[[42], [42], [84]],
[[46, 22, 2, 83, 15, 46, 98], [28, 33, 91, 71, 77, 35, 5],
[74, 55, 93, 154, 92, 81, 103]],
[[73, 5, 69, 88, 53, 8, 25, 52, 18, 61],
[97, 61, 69, 10, 11, 13, 72, 3, 57, 47],
[170, 66, 138, 98, 64, 21, 97, 55, 75, 108]]
]
for t in tests:
res = twinsScore(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: twinsScore({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: twinsScore({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
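A side note on the pattern used in twinsScore above: zip silently truncates to the shorter input, so unequal score lists would lose trailing entries. The sketch below is not part of the recorded commit, and the helper name twins_score_strict is made up for illustration.

# Sketch only: an elementwise sum that fails loudly on mismatched lengths
# instead of silently truncating like zip() does.
def twins_score_strict(b, m):
    if len(b) != len(m):
        raise ValueError("score lists must have equal length")
    return [x + y for x, y in zip(b, m)]

print(twins_score_strict([22, 13], [28, 41]))  # [50, 54]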
|
d38ea61fc5ffa7c526094900a0dbb8c7405aac7b
|
indra/db/get_db_stats.py
|
indra/db/get_db_stats.py
|
import boto3
from indra.db.util import get_db_statistics
from datetime import datetime
def main():
utcnow = datetime.utcnow()
fname = "Primary_Database_Status_Report_%s.txt" % utcnow.strftime("%Y%m%d")
print("Creating report in: %s." % fname)
print("\nBegin Report============\n")
get_db_statistics(fname)
print("\nEnd Report==============\n")
print("Saving record to s3.")
s3 = boto3.client('s3')
with open(fname, 'rb') as f:
        s3.put_object(Body=f, Bucket='bigmech', Key='indra-db/reports/%s' % fname)
return
if __name__ == '__main__':
main()
|
Create script to run stats on aws.
|
Create script to run stats on aws.
|
Python
|
bsd-2-clause
|
pvtodorov/indra,johnbachman/indra,sorgerlab/indra,sorgerlab/indra,pvtodorov/indra,pvtodorov/indra,sorgerlab/belpy,sorgerlab/indra,bgyori/indra,sorgerlab/belpy,johnbachman/belpy,pvtodorov/indra,johnbachman/indra,sorgerlab/belpy,johnbachman/belpy,johnbachman/belpy,bgyori/indra,bgyori/indra,johnbachman/indra
|
Create script to run stats on aws.
|
import boto3
from indra.db.util import get_db_statistics
from datetime import datetime
def main():
utcnow = datetime.utcnow()
fname = "Primary_Database_Status_Report_%s.txt" % utcnow.strftime("%Y%m%d")
print("Creating report in: %s." % fname)
print("\nBegin Report============\n")
get_db_statistics(fname)
print("\nEnd Report==============\n")
print("Saving record to s3.")
s3 = boto3.client('s3')
with open(fname, 'rb') as f:
        s3.put_object(Body=f, Bucket='bigmech', Key='indra-db/reports/%s' % fname)
return
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Create script to run stats on aws.<commit_after>
|
import boto3
from indra.db.util import get_db_statistics
from datetime import datetime
def main():
utcnow = datetime.utcnow()
fname = "Primary_Database_Status_Report_%s.txt" % utcnow.strftime("%Y%m%d")
print("Creating report in: %s." % fname)
print("\nBegin Report============\n")
get_db_statistics(fname)
print("\nEnd Report==============\n")
print("Saving record to s3.")
s3 = boto3.client('s3')
with open(fname, 'rb') as f:
        s3.put_object(Body=f, Bucket='bigmech', Key='indra-db/reports/%s' % fname)
return
if __name__ == '__main__':
main()
|
Create script to run stats on aws.import boto3
from indra.db.util import get_db_statistics
from datetime import datetime
def main():
utcnow = datetime.utcnow()
fname = "Primary_Database_Status_Report_%s.txt" % utcnow.strftime("%Y%m%d")
print("Creating report in: %s." % fname)
print("\nBegin Report============\n")
get_db_statistics(fname)
print("\nEnd Report==============\n")
print("Saving record to s3.")
s3 = boto3.client('s3')
with open(fname, 'rb') as f:
        s3.put_object(Body=f, Bucket='bigmech', Key='indra-db/reports/%s' % fname)
return
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Create script to run stats on aws.<commit_after>import boto3
from indra.db.util import get_db_statistics
from datetime import datetime
def main():
utcnow = datetime.utcnow()
fname = "Primary_Database_Status_Report_%s.txt" % utcnow.strftime("%Y%m%d")
print("Creating report in: %s." % fname)
print("\nBegin Report============\n")
get_db_statistics(fname)
print("\nEnd Report==============\n")
print("Saving record to s3.")
s3 = boto3.client('s3')
with open(fname, 'rb') as f:
        s3.put_object(Body=f, Bucket='bigmech', Key='indra-db/reports/%s' % fname)
return
if __name__ == '__main__':
main()
|
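For reference, the S3 step in the script above can also be written with boto3's higher-level upload_file helper, which opens the file and handles multipart uploads itself. The bucket name and key prefix below are the ones hard-coded in the script; this sketch is illustrative only, not part of the recorded commit.

import boto3

def upload_report(fname):
    # Sketch: same destination as the script above, using upload_file
    # instead of put_object with an explicit file handle.
    s3 = boto3.client('s3')
    s3.upload_file(fname, 'bigmech', 'indra-db/reports/%s' % fname)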
|
a4dd889a44cf7b4ea4e2e85880343ede234ec60c
|
geotrek/core/migrations/0017_remove_path_from_factories.py
|
geotrek/core/migrations/0017_remove_path_from_factories.py
|
# Generated by Django 2.0.13 on 2020-04-06 13:40
from django.conf import settings
from django.contrib.gis.geos import Point, LineString
from django.db import migrations
def remove_generated_paths_factories(apps, schema_editor):
PathModel = apps.get_model('core', 'Path')
PathModel.objects.filter(geom=LineString(Point(700000, 6600000), Point(700100, 6600100), srid=settings.SRID)).delete()
class Migration(migrations.Migration):
dependencies = [
('core', '0016_auto_20200406_1340'),
]
operations = [
migrations.RunPython(remove_generated_paths_factories)
]
|
Add migration remove generated paths
|
Add migration remove generated paths
|
Python
|
bsd-2-clause
|
GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek
|
Add migration remove generated paths
|
# Generated by Django 2.0.13 on 2020-04-06 13:40
from django.conf import settings
from django.contrib.gis.geos import Point, LineString
from django.db import migrations
def remove_generated_paths_factories(apps, schema_editor):
PathModel = apps.get_model('core', 'Path')
PathModel.objects.filter(geom=LineString(Point(700000, 6600000), Point(700100, 6600100), srid=settings.SRID)).delete()
class Migration(migrations.Migration):
dependencies = [
('core', '0016_auto_20200406_1340'),
]
operations = [
migrations.RunPython(remove_generated_paths_factories)
]
|
<commit_before><commit_msg>Add migration remove generated paths<commit_after>
|
# Generated by Django 2.0.13 on 2020-04-06 13:40
from django.conf import settings
from django.contrib.gis.geos import Point, LineString
from django.db import migrations
def remove_generated_paths_factories(apps, schema_editor):
PathModel = apps.get_model('core', 'Path')
PathModel.objects.filter(geom=LineString(Point(700000, 6600000), Point(700100, 6600100), srid=settings.SRID)).delete()
class Migration(migrations.Migration):
dependencies = [
('core', '0016_auto_20200406_1340'),
]
operations = [
migrations.RunPython(remove_generated_paths_factories)
]
|
Add migration remove generated paths# Generated by Django 2.0.13 on 2020-04-06 13:40
from django.conf import settings
from django.contrib.gis.geos import Point, LineString
from django.db import migrations
def remove_generated_paths_factories(apps, schema_editor):
PathModel = apps.get_model('core', 'Path')
PathModel.objects.filter(geom=LineString(Point(700000, 6600000), Point(700100, 6600100), srid=settings.SRID)).delete()
class Migration(migrations.Migration):
dependencies = [
('core', '0016_auto_20200406_1340'),
]
operations = [
migrations.RunPython(remove_generated_paths_factories)
]
|
<commit_before><commit_msg>Add migration remove generated paths<commit_after># Generated by Django 2.0.13 on 2020-04-06 13:40
from django.conf import settings
from django.contrib.gis.geos import Point, LineString
from django.db import migrations
def remove_generated_paths_factories(apps, schema_editor):
PathModel = apps.get_model('core', 'Path')
PathModel.objects.filter(geom=LineString(Point(700000, 6600000), Point(700100, 6600100), srid=settings.SRID)).delete()
class Migration(migrations.Migration):
dependencies = [
('core', '0016_auto_20200406_1340'),
]
operations = [
migrations.RunPython(remove_generated_paths_factories)
]
|
|
64141d4e195000289b005028f7368452698c701c
|
tests/test_tests.py
|
tests/test_tests.py
|
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dtest import *
from dtest.util import *
def test_nothing():
# Do-nothing test for the attribute access test
pass
def test_attribute_missing():
# Verify that missing attributes on tests raise the correct
# exception
with assert_raises(AttributeError):
dummy = test_nothing._dt_dtest.missing_attr
|
Add a test to ensure access to missing attributes raises the correct exception
|
Add a test to ensure access to missing attributes raises the correct
exception
|
Python
|
apache-2.0
|
klmitch/dtest,klmitch/dtest
|
Add a test to ensure access to missing attributes raises the correct
exception
|
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dtest import *
from dtest.util import *
def test_nothing():
# Do-nothing test for the attribute access test
pass
def test_attribute_missing():
# Verify that missing attributes on tests raise the correct
# exception
with assert_raises(AttributeError):
dummy = test_nothing._dt_dtest.missing_attr
|
<commit_before><commit_msg>Add a test to ensure access to missing attributes raises the correct
exception<commit_after>
|
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dtest import *
from dtest.util import *
def test_nothing():
# Do-nothing test for the attribute access test
pass
def test_attribute_missing():
# Verify that missing attributes on tests raise the correct
# exception
with assert_raises(AttributeError):
dummy = test_nothing._dt_dtest.missing_attr
|
Add a test to ensure access to missing attributes raises the correct
exception# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dtest import *
from dtest.util import *
def test_nothing():
# Do-nothing test for the attribute access test
pass
def test_attribute_missing():
# Verify that missing attributes on tests raise the correct
# exception
with assert_raises(AttributeError):
dummy = test_nothing._dt_dtest.missing_attr
|
<commit_before><commit_msg>Add a test to ensure access to missing attributes raises the correct
exception<commit_after># Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dtest import *
from dtest.util import *
def test_nothing():
# Do-nothing test for the attribute access test
pass
def test_attribute_missing():
# Verify that missing attributes on tests raise the correct
# exception
with assert_raises(AttributeError):
dummy = test_nothing._dt_dtest.missing_attr
|
|
74217c0a7db82fa0df153e68e22395ee1e820071
|
PyGdbUtil.py
|
PyGdbUtil.py
|
# coding=utf-8
import os
"""
公用方法
"""
"""
日志
level: 级别
msg: 信息
"""
log_limit_level = 2
def log(level, msg, limit_level=log_limit_level):
if 2-level > log_limit_level:
return
if level == 0: # [info]
print "[info] " + str(msg)
elif level == 1:
print "[warn] " + str(msg)
elif level == 2:
print "[error] " + str(msg)
exit()
"""
编译 C 程序
file_path: 文件路径
compile_args: 编译参数
"""
def compile(file_path, compile_args):
log(0, '正在编译: ' + file_path)
"""
判断文件是否存在
"""
def exist_file(file_path):
return os.path.exists(file_path)
|
Add util & lots of functions
|
Add util & lots of functions
|
Python
|
mit
|
Jecvay/PyGDB
|
Add util & lots of functions
|
# coding=utf-8
import os
"""
公用方法
"""
"""
日志
level: 级别
msg: 信息
"""
log_limit_level = 2
def log(level, msg, limit_level=log_limit_level):
if 2-level > log_limit_level:
return
if level == 0: # [info]
print "[info] " + str(msg)
elif level == 1:
print "[warn] " + str(msg)
elif level == 2:
print "[error] " + str(msg)
exit()
"""
编译 C 程序
file_path: 文件路径
compile_args: 编译参数
"""
def compile(file_path, compile_args):
log(0, '正在编译: ' + file_path)
"""
判断文件是否存在
"""
def exist_file(file_path):
return os.path.exists(file_path)
|
<commit_before><commit_msg>Add util & lots of functions<commit_after>
|
# coding=utf-8
import os
"""
公用方法
"""
"""
日志
level: 级别
msg: 信息
"""
log_limit_level = 2
def log(level, msg, limit_level=log_limit_level):
if 2-level > log_limit_level:
return
if level == 0: # [info]
print "[info] " + str(msg)
elif level == 1:
print "[warn] " + str(msg)
elif level == 2:
print "[error] " + str(msg)
exit()
"""
编译 C 程序
file_path: 文件路径
compile_args: 编译参数
"""
def compile(file_path, compile_args):
log(0, '正在编译: ' + file_path)
"""
判断文件是否存在
"""
def exist_file(file_path):
return os.path.exists(file_path)
|
Add util & lots of functions# coding=utf-8
import os
"""
公用方法
"""
"""
日志
level: 级别
msg: 信息
"""
log_limit_level = 2
def log(level, msg, limit_level=log_limit_level):
if 2-level > log_limit_level:
return
if level == 0: # [info]
print "[info] " + str(msg)
elif level == 1:
print "[warn] " + str(msg)
elif level == 2:
print "[error] " + str(msg)
exit()
"""
编译 C 程序
file_path: 文件路径
compile_args: 编译参数
"""
def compile(file_path, compile_args):
log(0, '正在编译: ' + file_path)
"""
判断文件是否存在
"""
def exist_file(file_path):
return os.path.exists(file_path)
|
<commit_before><commit_msg>Add util & lots of functions<commit_after># coding=utf-8
import os
"""
公用方法
"""
"""
日志
level: 级别
msg: 信息
"""
log_limit_level = 2
def log(level, msg, limit_level=log_limit_level):
if 2-level > log_limit_level:
return
if level == 0: # [info]
print "[info] " + str(msg)
elif level == 1:
print "[warn] " + str(msg)
elif level == 2:
print "[error] " + str(msg)
exit()
"""
编译 C 程序
file_path: 文件路径
compile_args: 编译参数
"""
def compile(file_path, compile_args):
log(0, '正在编译: ' + file_path)
"""
判断文件是否存在
"""
def exist_file(file_path):
return os.path.exists(file_path)
|
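A quick usage illustration of the log helper above (a hypothetical sketch; the module itself is written for Python 2).

# Hypothetical usage of PyGdbUtil.log; levels 0 and 1 print tagged lines,
# level 2 prints an [error] line and exits the process.
import PyGdbUtil

PyGdbUtil.log(0, "starting compile step")    # prints "[info] starting compile step"
PyGdbUtil.log(1, "missing optional header")  # prints "[warn] missing optional header"
# PyGdbUtil.log(2, "gdb not found")          # would print "[error] ..." and call exit()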
|
194bf3f4ba0b3488a5cfbb536247ac9f1bc7a006
|
tests/test_vcs_prompt.py
|
tests/test_vcs_prompt.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_vcs_prompt
---------------
"""
import os
import pytest
from cookiecutter import utils
@pytest.fixture
def clean_cookiecutter_dirs(request):
if os.path.isdir('cookiecutter-pypackage'):
utils.rmtree('cookiecutter-pypackage')
os.mkdir('cookiecutter-pypackage/')
if os.path.isdir('cookiecutter-trytonmodule'):
utils.rmtree('cookiecutter-trytonmodule')
os.mkdir('cookiecutter-trytonmodule/')
def remove_cookiecutter_dirs():
if os.path.isdir('cookiecutter-pypackage'):
utils.rmtree('cookiecutter-pypackage')
if os.path.isdir('cookiecutter-trytonmodule'):
utils.rmtree('cookiecutter-trytonmodule')
request.addfinalizer(remove_cookiecutter_dirs)
|
Reimplement setup and teardown of TestVCSPrompt
|
Reimplement setup and teardown of TestVCSPrompt
|
Python
|
bsd-3-clause
|
tylerdave/cookiecutter,Vauxoo/cookiecutter,sp1rs/cookiecutter,cichm/cookiecutter,audreyr/cookiecutter,vintasoftware/cookiecutter,pjbull/cookiecutter,Springerle/cookiecutter,cichm/cookiecutter,kkujawinski/cookiecutter,Springerle/cookiecutter,moi65/cookiecutter,ramiroluz/cookiecutter,janusnic/cookiecutter,venumech/cookiecutter,0k/cookiecutter,terryjbates/cookiecutter,lgp171188/cookiecutter,cguardia/cookiecutter,kkujawinski/cookiecutter,christabor/cookiecutter,lucius-feng/cookiecutter,benthomasson/cookiecutter,cguardia/cookiecutter,agconti/cookiecutter,lucius-feng/cookiecutter,pjbull/cookiecutter,janusnic/cookiecutter,willingc/cookiecutter,willingc/cookiecutter,ionelmc/cookiecutter,stevepiercy/cookiecutter,luzfcb/cookiecutter,takeflight/cookiecutter,stevepiercy/cookiecutter,nhomar/cookiecutter,tylerdave/cookiecutter,jhermann/cookiecutter,lgp171188/cookiecutter,dajose/cookiecutter,drgarcia1986/cookiecutter,michaeljoseph/cookiecutter,nhomar/cookiecutter,hackebrot/cookiecutter,venumech/cookiecutter,foodszhang/cookiecutter,0k/cookiecutter,jhermann/cookiecutter,michaeljoseph/cookiecutter,vincentbernat/cookiecutter,moi65/cookiecutter,takeflight/cookiecutter,atlassian/cookiecutter,audreyr/cookiecutter,vintasoftware/cookiecutter,christabor/cookiecutter,foodszhang/cookiecutter,dajose/cookiecutter,atlassian/cookiecutter,ionelmc/cookiecutter,terryjbates/cookiecutter,hackebrot/cookiecutter,vincentbernat/cookiecutter,benthomasson/cookiecutter,sp1rs/cookiecutter,drgarcia1986/cookiecutter,agconti/cookiecutter,luzfcb/cookiecutter,Vauxoo/cookiecutter,ramiroluz/cookiecutter
|
Reimplement setup and teardown of TestVCSPrompt
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_vcs_prompt
---------------
"""
import os
import pytest
from cookiecutter import utils
@pytest.fixture
def clean_cookiecutter_dirs(request):
if os.path.isdir('cookiecutter-pypackage'):
utils.rmtree('cookiecutter-pypackage')
os.mkdir('cookiecutter-pypackage/')
if os.path.isdir('cookiecutter-trytonmodule'):
utils.rmtree('cookiecutter-trytonmodule')
os.mkdir('cookiecutter-trytonmodule/')
def remove_cookiecutter_dirs():
if os.path.isdir('cookiecutter-pypackage'):
utils.rmtree('cookiecutter-pypackage')
if os.path.isdir('cookiecutter-trytonmodule'):
utils.rmtree('cookiecutter-trytonmodule')
request.addfinalizer(remove_cookiecutter_dirs)
|
<commit_before><commit_msg>Reimplement setup and teardown of TestVCSPrompt<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_vcs_prompt
---------------
"""
import os
import pytest
from cookiecutter import utils
@pytest.fixture
def clean_cookiecutter_dirs(request):
if os.path.isdir('cookiecutter-pypackage'):
utils.rmtree('cookiecutter-pypackage')
os.mkdir('cookiecutter-pypackage/')
if os.path.isdir('cookiecutter-trytonmodule'):
utils.rmtree('cookiecutter-trytonmodule')
os.mkdir('cookiecutter-trytonmodule/')
def remove_cookiecutter_dirs():
if os.path.isdir('cookiecutter-pypackage'):
utils.rmtree('cookiecutter-pypackage')
if os.path.isdir('cookiecutter-trytonmodule'):
utils.rmtree('cookiecutter-trytonmodule')
request.addfinalizer(remove_cookiecutter_dirs)
|
Reimplement setup and teardown of TestVCSPrompt#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_vcs_prompt
---------------
"""
import os
import pytest
from cookiecutter import utils
@pytest.fixture
def clean_cookiecutter_dirs(request):
if os.path.isdir('cookiecutter-pypackage'):
utils.rmtree('cookiecutter-pypackage')
os.mkdir('cookiecutter-pypackage/')
if os.path.isdir('cookiecutter-trytonmodule'):
utils.rmtree('cookiecutter-trytonmodule')
os.mkdir('cookiecutter-trytonmodule/')
def remove_cookiecutter_dirs():
if os.path.isdir('cookiecutter-pypackage'):
utils.rmtree('cookiecutter-pypackage')
if os.path.isdir('cookiecutter-trytonmodule'):
utils.rmtree('cookiecutter-trytonmodule')
request.addfinalizer(remove_cookiecutter_dirs)
|
<commit_before><commit_msg>Reimplement setup and teardown of TestVCSPrompt<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_vcs_prompt
---------------
"""
import os
import pytest
from cookiecutter import utils
@pytest.fixture
def clean_cookiecutter_dirs(request):
if os.path.isdir('cookiecutter-pypackage'):
utils.rmtree('cookiecutter-pypackage')
os.mkdir('cookiecutter-pypackage/')
if os.path.isdir('cookiecutter-trytonmodule'):
utils.rmtree('cookiecutter-trytonmodule')
os.mkdir('cookiecutter-trytonmodule/')
def remove_cookiecutter_dirs():
if os.path.isdir('cookiecutter-pypackage'):
utils.rmtree('cookiecutter-pypackage')
if os.path.isdir('cookiecutter-trytonmodule'):
utils.rmtree('cookiecutter-trytonmodule')
request.addfinalizer(remove_cookiecutter_dirs)
|
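For context, a pytest fixture like clean_cookiecutter_dirs above is consumed simply by naming it as a test argument; the test below is a hypothetical illustration, not part of the recorded commit.

import os

# Hypothetical test: requesting the fixture lets it reset the working
# directories before the test body runs.
def test_starts_with_clean_pypackage_dir(clean_cookiecutter_dirs):
    assert os.path.isdir('cookiecutter-pypackage')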
|
ad706e2a98e564159aa5b505d7522fa39bfcceb5
|
python/video-tools/rename-thetvdb.py
|
python/video-tools/rename-thetvdb.py
|
#!/usr/bin/env python
'''
TODO:
- Get episode names, numbers, seasons from URL
- Get path to rename from stdin
- Get thetvdb url from stdin
'''
import os
import os.path
PATH_TO_RENAME = '/path/to/episodes'
INFILE_NAME = '/path/to/episodes.txt'
def main():
episodes = {}
# Get episode names, numbers, seasons
with open(INFILE_NAME) as infile:
for line in infile:
episode_name = line.split('\t')[1]
season = line.split('\t')[0].split()[0]
episode_number = line.split('\t')[0].split()[2]
episodes[episode_name] = {}
episodes[episode_name]['season'] = season
episodes[episode_name]['episode_number'] = episode_number
# Rename local episodes and move them into season subfolders
for (dirpath, dirnames, filenames) in os.walk(PATH_TO_RENAME):
if filenames != []:
for filename in filenames:
for episode_name in episodes:
if filename.lower().find(episode_name.lower()) != -1:
basename, extension = os.path.splitext(filename)
newpath = os.path.join(
PATH_TO_RENAME,
'Season {:02d}'.format(
int(episodes[episode_name]['season'])
)
)
if not os.path.exists(newpath):
os.makedirs(newpath)
os.rename(
os.path.join(
dirpath,
filename
),
os.path.join(
newpath,
                                'S{:02d}E{:02d} - {}{}'.format(
int(episodes[episode_name]['season']),
int(episodes[episode_name]['episode_number']),
episode_name,
extension
)
)
)
if __name__ == '__main__':
main()
|
Add simple script for renaming files using thetvdb.com
|
Add simple script for renaming files using thetvdb.com
|
Python
|
mit
|
bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile
|
Add simple script for renaming files using thetvdb.com
|
#!/usr/bin/env python
'''
TODO:
- Get episode names, numbers, seasons from URL
- Get path to rename from stdin
- Get thetvdb url from stdin
'''
import os
import os.path
PATH_TO_RENAME = '/path/to/episodes'
INFILE_NAME = '/path/to/episodes.txt'
def main():
episodes = {}
# Get episode names, numbers, seasons
with open(INFILE_NAME) as infile:
for line in infile:
episode_name = line.split('\t')[1]
season = line.split('\t')[0].split()[0]
episode_number = line.split('\t')[0].split()[2]
episodes[episode_name] = {}
episodes[episode_name]['season'] = season
episodes[episode_name]['episode_number'] = episode_number
# Rename local episodes and move them into season subfolders
for (dirpath, dirnames, filenames) in os.walk(PATH_TO_RENAME):
if filenames != []:
for filename in filenames:
for episode_name in episodes:
if filename.lower().find(episode_name.lower()) != -1:
basename, extension = os.path.splitext(filename)
newpath = os.path.join(
PATH_TO_RENAME,
'Season {:02d}'.format(
int(episodes[episode_name]['season'])
)
)
if not os.path.exists(newpath):
os.makedirs(newpath)
os.rename(
os.path.join(
dirpath,
filename
),
os.path.join(
newpath,
                                'S{:02d}E{:02d} - {}{}'.format(
int(episodes[episode_name]['season']),
int(episodes[episode_name]['episode_number']),
episode_name,
extension
)
)
)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add simple script for renaming files using thetvdb.com<commit_after>
|
#!/usr/bin/env python
'''
TODO:
- Get episode names, numbers, seasons from URL
- Get path to rename from stdin
- Get thetvdb url from stdin
'''
import os
import os.path
PATH_TO_RENAME = '/path/to/episodes'
INFILE_NAME = '/path/to/episodes.txt'
def main():
episodes = {}
# Get episode names, numbers, seasons
with open(INFILE_NAME) as infile:
for line in infile:
episode_name = line.split('\t')[1]
season = line.split('\t')[0].split()[0]
episode_number = line.split('\t')[0].split()[2]
episodes[episode_name] = {}
episodes[episode_name]['season'] = season
episodes[episode_name]['episode_number'] = episode_number
# Rename local episodes and move them into season subfolders
for (dirpath, dirnames, filenames) in os.walk(PATH_TO_RENAME):
if filenames != []:
for filename in filenames:
for episode_name in episodes:
if filename.lower().find(episode_name.lower()) != -1:
basename, extension = os.path.splitext(filename)
newpath = os.path.join(
PATH_TO_RENAME,
'Season {:02d}'.format(
int(episodes[episode_name]['season'])
)
)
if not os.path.exists(newpath):
os.makedirs(newpath)
os.rename(
os.path.join(
dirpath,
filename
),
os.path.join(
newpath,
                                'S{:02d}E{:02d} - {}{}'.format(
int(episodes[episode_name]['season']),
int(episodes[episode_name]['episode_number']),
episode_name,
extension
)
)
)
if __name__ == '__main__':
main()
|
Add simple script for renaming files using thetvdb.com#!/usr/bin/env python
'''
TODO:
- Get episode names, numbers, seasons from URL
- Get path to rename from stdin
- Get thetvdb url from stdin
'''
import os
import os.path
PATH_TO_RENAME = '/path/to/episodes'
INFILE_NAME = '/path/to/episodes.txt'
def main():
episodes = {}
# Get episode names, numbers, seasons
with open(INFILE_NAME) as infile:
for line in infile:
episode_name = line.split('\t')[1]
season = line.split('\t')[0].split()[0]
episode_number = line.split('\t')[0].split()[2]
episodes[episode_name] = {}
episodes[episode_name]['season'] = season
episodes[episode_name]['episode_number'] = episode_number
# Rename local episodes and move them into season subfolders
for (dirpath, dirnames, filenames) in os.walk(PATH_TO_RENAME):
if filenames != []:
for filename in filenames:
for episode_name in episodes:
if filename.lower().find(episode_name.lower()) != -1:
basename, extension = os.path.splitext(filename)
newpath = os.path.join(
PATH_TO_RENAME,
'Season {:02d}'.format(
int(episodes[episode_name]['season'])
)
)
if not os.path.exists(newpath):
os.makedirs(newpath)
os.rename(
os.path.join(
dirpath,
filename
),
os.path.join(
newpath,
                                'S{:02d}E{:02d} - {}{}'.format(
int(episodes[episode_name]['season']),
int(episodes[episode_name]['episode_number']),
episode_name,
extension
)
)
)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add simple script for renaming files using thetvdb.com<commit_after>#!/usr/bin/env python
'''
TODO:
- Get episode names, numbers, seasons from URL
- Get path to rename from stdin
- Get thetvdb url from stdin
'''
import os
import os.path
PATH_TO_RENAME = '/path/to/episodes'
INFILE_NAME = '/path/to/episodes.txt'
def main():
episodes = {}
# Get episode names, numbers, seasons
with open(INFILE_NAME) as infile:
for line in infile:
episode_name = line.split('\t')[1]
season = line.split('\t')[0].split()[0]
episode_number = line.split('\t')[0].split()[2]
episodes[episode_name] = {}
episodes[episode_name]['season'] = season
episodes[episode_name]['episode_number'] = episode_number
# Rename local episodes and move them into season subfolders
for (dirpath, dirnames, filenames) in os.walk(PATH_TO_RENAME):
if filenames != []:
for filename in filenames:
for episode_name in episodes:
if filename.lower().find(episode_name.lower()) != -1:
basename, extension = os.path.splitext(filename)
newpath = os.path.join(
PATH_TO_RENAME,
'Season {:02d}'.format(
int(episodes[episode_name]['season'])
)
)
if not os.path.exists(newpath):
os.makedirs(newpath)
os.rename(
os.path.join(
dirpath,
filename
),
os.path.join(
newpath,
                                'S{:02d}E{:02d} - {}{}'.format(
int(episodes[episode_name]['season']),
int(episodes[episode_name]['episode_number']),
episode_name,
extension
)
)
)
if __name__ == '__main__':
main()
|
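One standard-library detail relevant to the filename formatting above: os.path.splitext returns the extension with its leading dot, so it should be concatenated directly rather than after another '.'. A small sketch:

import os.path

basename, extension = os.path.splitext("Pilot.mkv")
print(basename)   # 'Pilot'
print(extension)  # '.mkv' (note the leading dot)
print("S{:02d}E{:02d} - {}{}".format(1, 1, basename, extension))  # 'S01E01 - Pilot.mkv'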
|
d36f7dbdc6b25e85ca857074edda0b2d33a4b833
|
bluebottle/files/migrations/0003_auto_20191111_1533.py
|
bluebottle/files/migrations/0003_auto_20191111_1533.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-11 12:19
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Staff': {
'perms': (
'add_image',
'change_image',
'delete_image',
'add_document',
'change_document',
'delete_document',
)
},
}
update_group_permissions('files', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('files', '0002_relatedimage'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
|
Add document / image permissions
|
Add document / image permissions
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
Add document / image permissions
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-11 12:19
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Staff': {
'perms': (
'add_image',
'change_image',
'delete_image',
'add_document',
'change_document',
'delete_document',
)
},
}
update_group_permissions('files', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('files', '0002_relatedimage'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
|
<commit_before><commit_msg>Add document / image permissions<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-11 12:19
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Staff': {
'perms': (
'add_image',
'change_image',
'delete_image',
'add_document',
'change_document',
'delete_document',
)
},
}
update_group_permissions('files', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('files', '0002_relatedimage'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
|
Add document / image permissions# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-11 12:19
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Staff': {
'perms': (
'add_image',
'change_image',
'delete_image',
'add_document',
'change_document',
'delete_document',
)
},
}
update_group_permissions('files', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('files', '0002_relatedimage'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
|
<commit_before><commit_msg>Add document / image permissions<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-11 12:19
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Staff': {
'perms': (
'add_image',
'change_image',
'delete_image',
'add_document',
'change_document',
'delete_document',
)
},
}
update_group_permissions('files', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('files', '0002_relatedimage'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
|
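Data migrations like the one above are often given an explicit no-op reverse so migrate can roll backwards past them; the sketch below reuses the add_group_permissions function defined in the migration and is illustrative only, not part of the recorded commit.

from django.db import migrations

def add_group_permissions(apps, schema_editor):
    ...  # body as in the migration above

class Migration(migrations.Migration):
    dependencies = [('files', '0002_relatedimage')]
    operations = [
        # The second argument makes the data migration reversible as a no-op.
        migrations.RunPython(add_group_permissions, migrations.RunPython.noop),
    ]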
|
89221866255a8191938346b59ea32bc805fa6be9
|
tests/ExperimentTest.py
|
tests/ExperimentTest.py
|
import sys
sys.path.insert(0,".")
import unittest
import neuroml
import neuroml.writers as writers
import PyOpenWorm
from PyOpenWorm import *
import networkx
import rdflib
import rdflib as R
import pint as Q
import os
import subprocess as SP
import subprocess
import tempfile
import doctest
from glob import glob
from GraphDBInit import *
from DataTestTemplate import _DataTest
class ExperimentTest(_DataTest):
def test_DataUser(self):
do = Experiment('', conf=self.config)
self.assertTrue(isinstance(do, DataUser))
|
Add one test for Experiment object
|
Add one test for Experiment object
This was copied over from the tests for related dataobjects.
Maybe all of these types of test could be factored out into the
DataObjectTest.py testfile?
|
Python
|
mit
|
gsarma/PyOpenWorm,openworm/PyOpenWorm,openworm/PyOpenWorm,gsarma/PyOpenWorm
|
Add one test for Experiment object
This was copied over from the tests for related dataobjects.
Maybe all of these types of test could be factored out into the
DataObjectTest.py testfile?
|
import sys
sys.path.insert(0,".")
import unittest
import neuroml
import neuroml.writers as writers
import PyOpenWorm
from PyOpenWorm import *
import networkx
import rdflib
import rdflib as R
import pint as Q
import os
import subprocess as SP
import subprocess
import tempfile
import doctest
from glob import glob
from GraphDBInit import *
from DataTestTemplate import _DataTest
class ExperimentTest(_DataTest):
def test_DataUser(self):
do = Experiment('', conf=self.config)
self.assertTrue(isinstance(do, DataUser))
|
<commit_before><commit_msg>Add one test for Experiment object
This was copied over from the tests for related dataobjects.
Maybe all of these types of test could be factored out into the
DataObjectTest.py testfile?<commit_after>
|
import sys
sys.path.insert(0,".")
import unittest
import neuroml
import neuroml.writers as writers
import PyOpenWorm
from PyOpenWorm import *
import networkx
import rdflib
import rdflib as R
import pint as Q
import os
import subprocess as SP
import subprocess
import tempfile
import doctest
from glob import glob
from GraphDBInit import *
from DataTestTemplate import _DataTest
class ExperimentTest(_DataTest):
def test_DataUser(self):
do = Experiment('', conf=self.config)
self.assertTrue(isinstance(do, DataUser))
|
Add one test for Experiment object
This was copied over from the tests for related dataobjects.
Maybe all of these types of test could be factored out into the
DataObjectTest.py testfile?import sys
sys.path.insert(0,".")
import unittest
import neuroml
import neuroml.writers as writers
import PyOpenWorm
from PyOpenWorm import *
import networkx
import rdflib
import rdflib as R
import pint as Q
import os
import subprocess as SP
import subprocess
import tempfile
import doctest
from glob import glob
from GraphDBInit import *
from DataTestTemplate import _DataTest
class ExperimentTest(_DataTest):
def test_DataUser(self):
do = Experiment('', conf=self.config)
self.assertTrue(isinstance(do, DataUser))
|
<commit_before><commit_msg>Add one test for Experiment object
This was copied over from the tests for related dataobjects.
Maybe all of these types of test could be factored out into the
DataObjectTest.py testfile?<commit_after>import sys
sys.path.insert(0,".")
import unittest
import neuroml
import neuroml.writers as writers
import PyOpenWorm
from PyOpenWorm import *
import networkx
import rdflib
import rdflib as R
import pint as Q
import os
import subprocess as SP
import subprocess
import tempfile
import doctest
from glob import glob
from GraphDBInit import *
from DataTestTemplate import _DataTest
class ExperimentTest(_DataTest):
def test_DataUser(self):
do = Experiment('', conf=self.config)
self.assertTrue(isinstance(do, DataUser))
|
|
5bfe049c57198367ec244ff6ae7779c87d9f1e53
|
tests/cases/uris.py
|
tests/cases/uris.py
|
[
{
"message": b"GET / HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /foo/bar/ HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/foo/bar/",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /foo%20bar/ HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/foo%20bar/",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /foo/bar/?q=wat HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/foo/bar/?q=wat",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /foo/bar/?q=wat+wat HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/foo/bar/?q=wat+wat",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /#fragment HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/#fragment",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /invalid spaces but should parse/ HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/invalid spaces but should parse/",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"OPTIONS * HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"OPTIONS",
"request_uri": b"*",
"http_version": b"HTTP/1.1",
},
},
]
|
Test a variety of URIs
|
Test a variety of URIs
|
Python
|
apache-2.0
|
dstufft/http11,dstufft/http11
|
Test a variety of URIs
|
[
{
"message": b"GET / HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /foo/bar/ HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/foo/bar/",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /foo%20bar/ HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/foo%20bar/",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /foo/bar/?q=wat HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/foo/bar/?q=wat",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /foo/bar/?q=wat+wat HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/foo/bar/?q=wat+wat",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /#fragment HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/#fragment",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /invalid spaces but should parse/ HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/invalid spaces but should parse/",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"OPTIONS * HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"OPTIONS",
"request_uri": b"*",
"http_version": b"HTTP/1.1",
},
},
]
|
<commit_before><commit_msg>Test a variety of URIs<commit_after>
|
[
{
"message": b"GET / HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /foo/bar/ HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/foo/bar/",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /foo%20bar/ HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/foo%20bar/",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /foo/bar/?q=wat HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/foo/bar/?q=wat",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /foo/bar/?q=wat+wat HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/foo/bar/?q=wat+wat",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /#fragment HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/#fragment",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /invalid spaces but should parse/ HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/invalid spaces but should parse/",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"OPTIONS * HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"OPTIONS",
"request_uri": b"*",
"http_version": b"HTTP/1.1",
},
},
]
|
Test a variety of URIs[
{
"message": b"GET / HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /foo/bar/ HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/foo/bar/",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /foo%20bar/ HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/foo%20bar/",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /foo/bar/?q=wat HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/foo/bar/?q=wat",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /foo/bar/?q=wat+wat HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/foo/bar/?q=wat+wat",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /#fragment HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/#fragment",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /invalid spaces but should parse/ HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/invalid spaces but should parse/",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"OPTIONS * HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"OPTIONS",
"request_uri": b"*",
"http_version": b"HTTP/1.1",
},
},
]
|
<commit_before><commit_msg>Test a variety of URIs<commit_after>[
{
"message": b"GET / HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /foo/bar/ HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/foo/bar/",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /foo%20bar/ HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/foo%20bar/",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /foo/bar/?q=wat HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/foo/bar/?q=wat",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /foo/bar/?q=wat+wat HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/foo/bar/?q=wat+wat",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /#fragment HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/#fragment",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"GET /invalid spaces but should parse/ HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"GET",
"request_uri": b"/invalid spaces but should parse/",
"http_version": b"HTTP/1.1",
},
},
{
"message": b"OPTIONS * HTTP/1.1\r\n\r\n",
"expected": {
"request_method": b"OPTIONS",
"request_uri": b"*",
"http_version": b"HTTP/1.1",
},
},
]
|
|
862b18e3e0c93418124068ddd76f4ad37f71f015
|
toolbox/neph_cli_util.py
|
toolbox/neph_cli_util.py
|
import argparse
import os
from nephoria.testcontroller import TestController
import logging
parser = argparse.ArgumentParser(description='Euca Runtime Config Generator')
parser.add_argument('--get-credentials', dest='get_credentials', action='store_true',
default=False,
help='Operation to get credentials, boolean.(not used)')
parser.add_argument('--cred-user', dest='cred_user',
default='admin',
help='Username to use for building runtime config, default:"admin"')
parser.add_argument('--cred-account', dest='cred_account',
default='eucalyptus',
help='Account name to use for building runtime config, default:"eucalyptus"')
parser.add_argument('--clc-ip', dest='clc_ip',
default="127.0.0.1",
help='CLC host ip, default:"127.0.0.1"')
parser.add_argument('--clc-password', dest='clc_password',
default=None,
help='Optional CLC root password')
parser.add_argument('filepath',
default=None,
help='Optional zip filename/path. Default is ./<cred_user>.zip')
parser.add_argument('--log-level', dest='log_level',
default="DEBUG",
help='Log level')
parser.add_argument('--unpack', dest='unpack', action='store_true',
default=False,
help='Create files in addition to zip archive')
args = parser.parse_args()
zip_path = args.filepath
if zip_path:
zip_dest_dir = os.path.dirname(zip_path)
zip_file_name = os.path.basename(zip_path)
else:
zip_dest_dir = None
zip_file_name = "{0}_{1}.zip".format(args.cred_account, args.cred_user)
zip_only = True
if args.unpack:
zip_only = False
logger = logging.getLogger('eulogger')
log_level = getattr(logging, args.log_level.upper(), logging.DEBUG)
logger.setLevel(log_level)
tc = TestController(args.clc_ip, log_level=args.log_level)
user = tc.get_user_by_name(aws_account_name=args.cred_account, aws_user_name=args.cred_user)
user.create_local_creds(local_destdir=zip_dest_dir, zipfilename=zip_file_name, ziponly=zip_only)
|
Add first pass at simple cli tooling
|
Add first pass at simple cli tooling
|
Python
|
bsd-2-clause
|
nephomaniac/nephoria,nephomaniac/nephoria
|
Add first pass at simple cli tooling
|
import argparse
import os
from nephoria.testcontroller import TestController
import logging
parser = argparse.ArgumentParser(description='Euca Runtime Config Generator')
parser.add_argument('--get-credentials', dest='get_credentials', action='store_true',
default=False,
help='Operation to get credentials, boolean.(not used)')
parser.add_argument('--cred-user', dest='cred_user',
default='admin',
help='Username to use for building runtime config, default:"admin"')
parser.add_argument('--cred-account', dest='cred_account',
default='eucalyptus',
help='Account name to use for building runtime config, default:"eucalyptus"')
parser.add_argument('--clc-ip', dest='clc_ip',
default="127.0.0.1",
help='CLC host ip, default:"127.0.0.1"')
parser.add_argument('--clc-password', dest='clc_password',
default=None,
help='Optional CLC root password')
parser.add_argument('filepath',
default=None,
help='Optional zip filename/path. Default is ./<cred_user>.zip')
parser.add_argument('--log-level', dest='log_level',
default="DEBUG",
help='Log level')
parser.add_argument('--unpack', dest='unpack', action='store_true',
default=False,
help='Create files in addition to zip archive')
args = parser.parse_args()
zip_path = args.filepath
if zip_path:
zip_dest_dir = os.path.dirname(zip_path)
zip_file_name = os.path.basename(zip_path)
else:
zip_dest_dir = None
zip_file_name = "{0}_{1}.zip".format(args.cred_account, args.cred_user)
zip_only = True
if args.unpack:
zip_only = False
logger = logging.getLogger('eulogger')
log_level = getattr(logging, args.log_level.upper(), logging.DEBUG)
logger.setLevel(log_level)
tc = TestController(args.clc_ip, log_level=args.log_level)
user = tc.get_user_by_name(aws_account_name=args.cred_account, aws_user_name=args.cred_user)
user.create_local_creds(local_destdir=zip_dest_dir, zipfilename=zip_file_name, ziponly=zip_only)
|
<commit_before><commit_msg>Add first pass at simple cli tooling<commit_after>
|
import argparse
import os
from nephoria.testcontroller import TestController
import logging
parser = argparse.ArgumentParser(description='Euca Runtime Config Generator')
parser.add_argument('--get-credentials', dest='get_credentials', action='store_true',
default=False,
help='Operation to get credentials, boolean.(not used)')
parser.add_argument('--cred-user', dest='cred_user',
default='admin',
help='Username to use for building runtime config, default:"admin"')
parser.add_argument('--cred-account', dest='cred_account',
default='eucalyptus',
help='Account name to use for building runtime config, default:"eucalyptus"')
parser.add_argument('--clc-ip', dest='clc_ip',
default="127.0.0.1",
help='CLC host ip, default:"127.0.0.1"')
parser.add_argument('--clc-password', dest='clc_password',
default=None,
help='Optional CLC root password')
parser.add_argument('filepath',
default=None,
help='Optional zip filename/path. Default is ./<cred_user>.zip')
parser.add_argument('--log-level', dest='log_level',
default="DEBUG",
help='Log level')
parser.add_argument('--unpack', dest='unpack', action='store_true',
default=False,
help='Create files in addition to zip archive')
args = parser.parse_args()
zip_path = args.filepath
if zip_path:
zip_dest_dir = os.path.dirname(zip_path)
zip_file_name = os.path.basename(zip_path)
else:
zip_dest_dir = None
zip_file_name = "{0}_{1}.zip".format(args.cred_account, args.cred_user)
zip_only = True
if args.unpack:
zip_only = False
logger = logging.getLogger('eulogger')
log_level = getattr(logging, args.log_level.upper(), logging.DEBUG)
logger.setLevel(log_level)
tc = TestController(args.clc_ip, log_level=args.log_level)
user = tc.get_user_by_name(aws_account_name=args.cred_account, aws_user_name=args.cred_user)
user.create_local_creds(local_destdir=zip_dest_dir, zipfilename=zip_file_name, ziponly=zip_only)
|
Add first pass at simple cli toolingimport argparse
import os
from nephoria.testcontroller import TestController
import logging
parser = argparse.ArgumentParser(description='Euca Runtime Config Generator')
parser.add_argument('--get-credentials', dest='get_credentials', action='store_true',
default=False,
help='Operation to get credentials, boolean.(not used)')
parser.add_argument('--cred-user', dest='cred_user',
default='admin',
help='Username to use for building runtime config, default:"admin"')
parser.add_argument('--cred-account', dest='cred_account',
default='eucalyptus',
help='Account name to use for building runtime config, default:"eucalyptus"')
parser.add_argument('--clc-ip', dest='clc_ip',
default="127.0.0.1",
help='CLC host ip, default:"127.0.0.1"')
parser.add_argument('--clc-password', dest='clc_password',
default=None,
help='Optional CLC root password')
parser.add_argument('filepath',
default=None,
help='Optional zip filename/path. Default is ./<cred_user>.zip')
parser.add_argument('--log-level', dest='log_level',
default="DEBUG",
help='Log level')
parser.add_argument('--unpack', dest='unpack', action='store_true',
default=False,
help='Create files in addition to zip archive')
args = parser.parse_args()
zip_path = args.filepath
if zip_path:
zip_dest_dir = os.path.dirname(zip_path)
zip_file_name = os.path.basename(zip_path)
else:
zip_dest_dir = None
zip_file_name = "{0}_{1}.zip".format(args.cred_account, args.cred_user)
zip_only = True
if args.unpack:
zip_only = False
logger = logging.getLogger('eulogger')
log_level = getattr(logging, args.log_level.upper(), logging.DEBUG)
logger.setLevel(log_level)
tc = TestController(args.clc_ip, log_level=args.log_level)
user = tc.get_user_by_name(aws_account_name=args.cred_account, aws_user_name=args.cred_user)
user.create_local_creds(local_destdir=zip_dest_dir, zipfilename=zip_file_name, ziponly=zip_only)
|
<commit_before><commit_msg>Add first pass at simple cli tooling<commit_after>import argparse
import os
from nephoria.testcontroller import TestController
import logging
parser = argparse.ArgumentParser(description='Euca Runtime Config Generator')
parser.add_argument('--get-credentials', dest='get_credentials', action='store_true',
default=False,
help='Operation to get credentials, boolean.(not used)')
parser.add_argument('--cred-user', dest='cred_user',
default='admin',
help='Username to use for building runtime config, default:"admin"')
parser.add_argument('--cred-account', dest='cred_account',
default='eucalyptus',
help='Account name to use for building runtime config, default:"eucalyptus"')
parser.add_argument('--clc-ip', dest='clc_ip',
default="127.0.0.1",
help='CLC host ip, default:"127.0.0.1"')
parser.add_argument('--clc-password', dest='clc_password',
default=None,
help='Optional CLC root password')
parser.add_argument('filepath',
default=None,
help='Optional zip filename/path. Default is ./<cred_user>.zip')
parser.add_argument('--log-level', dest='log_level',
default="DEBUG",
help='Log level')
parser.add_argument('--unpack', dest='unpack', action='store_true',
default=False,
help='Create files in addition to zip archive')
args = parser.parse_args()
zip_path = args.filepath
if zip_path:
zip_dest_dir = os.path.dirname(zip_path)
zip_file_name = os.path.basename(zip_path)
else:
zip_dest_dir = None
zip_file_name = "{0}_{1}.zip".format(args.cred_account, args.cred_user)
zip_only = True
if args.unpack:
zip_only = False
logger = logging.getLogger('eulogger')
log_level = getattr(logging, args.log_level.upper(), logging.DEBUG)
logger.setLevel(log_level)
tc = TestController(args.clc_ip, log_level=args.log_level)
user = tc.get_user_by_name(aws_account_name=args.cred_account, aws_user_name=args.cred_user)
user.create_local_creds(local_destdir=zip_dest_dir, zipfilename=zip_file_name, ziponly=zip_only)
|
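One argparse detail worth noting for the CLI above: a bare positional argument is required even if its help text says it is optional; making it truly optional needs nargs='?'. A minimal sketch, not part of the recorded commit:

import argparse

parser = argparse.ArgumentParser()
# With nargs='?' the positional may be omitted and falls back to the default.
parser.add_argument('filepath', nargs='?', default=None,
                    help='Optional zip filename/path. Default is ./<cred_user>.zip')
print(parser.parse_args([]).filepath)  # None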
|
8f7e6623e34827f480eee971a8922b56922aa3e4
|
tests/test_queryable.py
|
tests/test_queryable.py
|
from busbus.queryable import Queryable
def test_queryable():
q = Queryable(xrange(10)).where(lambda x: x % 5 == 0)
assert next(q) == 0
assert next(q) == 5
|
Add basic test case for Queryable class
|
Add basic test case for Queryable class
|
Python
|
mit
|
spaceboats/busbus
|
Add basic test case for Queryable class
|
from busbus.queryable import Queryable
def test_queryable():
q = Queryable(xrange(10)).where(lambda x: x % 5 == 0)
assert next(q) == 0
assert next(q) == 5
|
<commit_before><commit_msg>Add basic test case for Queryable class<commit_after>
|
from busbus.queryable import Queryable
def test_queryable():
q = Queryable(xrange(10)).where(lambda x: x % 5 == 0)
assert next(q) == 0
assert next(q) == 5
|
Add basic test case for Queryable classfrom busbus.queryable import Queryable
def test_queryable():
q = Queryable(xrange(10)).where(lambda x: x % 5 == 0)
assert next(q) == 0
assert next(q) == 5
|
<commit_before><commit_msg>Add basic test case for Queryable class<commit_after>from busbus.queryable import Queryable
def test_queryable():
q = Queryable(xrange(10)).where(lambda x: x % 5 == 0)
assert next(q) == 0
assert next(q) == 5
|
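The test above relies on Python 2's xrange; assuming the same Queryable.where API shown there, the Python 3 equivalent would look like this (a sketch, not part of the recorded commit):

from busbus.queryable import Queryable

def test_queryable_py3():
    # Same assertions as above, using Python 3's range().
    q = Queryable(range(10)).where(lambda x: x % 5 == 0)
    assert next(q) == 0
    assert next(q) == 5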
|
ce948382108303020069f8805c92a930c5b6c99a
|
data_text/parse_tafseer_text.py
|
data_text/parse_tafseer_text.py
|
from quran_text.models import Ayah
from quran_tafseer.models import TafseerText, Tafseer
def parse_tafseer_file(file_name, tafseer_name):
with open(file_name, 'r') as tafseer_file:
tafseer = Tafseer.objects.create(name=tafseer_name)
for line in tafseer_file:
sura, ayah, text = line.strip().split('|')
ayah_obj = Ayah.objects.get(number=ayah, sura_id=sura)
TafseerText.objects.create(ayah=ayah_obj,
text=text,
tafseer=tafseer)
print('Done parsing file {} and create {} tafseer'.format(file_name,
tafseer_name)
)
def parse_tafseer_muyassar():
parse_tafseer_file('data_text/ar.muyassar.txt', 'التفسير الميسر')
def parse_tafseer_jalalayn():
parse_tafseer_file('data_text/ar.jalalayn.txt', 'تفسير الجلالين')
|
Create code to parse Tafseer files from Tanzil.net
|
Create code to parse Tafseer files from Tanzil.net
|
Python
|
mit
|
EmadMokhtar/tafseer_api
|
Create code to parse Tafseer files from Tanzil.net
|
from quran_text.models import Ayah
from quran_tafseer.models import TafseerText, Tafseer
def parse_tafseer_file(file_name, tafseer_name):
with open(file_name, 'r') as tafseer_file:
tafseer = Tafseer.objects.create(name=tafseer_name)
for line in tafseer_file:
sura, ayah, text = line.strip().split('|')
ayah_obj = Ayah.objects.get(number=ayah, sura_id=sura)
TafseerText.objects.create(ayah=ayah_obj,
text=text,
tafseer=tafseer)
print('Done parsing file {} and create {} tafseer'.format(file_name,
tafseer_name)
)
def parse_tafseer_muyassar():
parse_tafseer_file('data_text/ar.muyassar.txt', 'التفسير الميسر')
def parse_tafseer_jalalayn():
parse_tafseer_file('data_text/ar.jalalayn.txt', 'تفسير الجلالين')
|
<commit_before><commit_msg>Create code to parse Tafseer files from Tanzil.net<commit_after>
|
from quran_text.models import Ayah
from quran_tafseer.models import TafseerText, Tafseer
def parse_tafseer_file(file_name, tafseer_name):
with open(file_name, 'r') as tafseer_file:
tafseer = Tafseer.objects.create(name=tafseer_name)
for line in tafseer_file:
sura, ayah, text = line.strip().split('|')
ayah_obj = Ayah.objects.get(number=ayah, sura_id=sura)
TafseerText.objects.create(ayah=ayah_obj,
text=text,
tafseer=tafseer)
print('Done parsing file {} and create {} tafseer'.format(file_name,
tafseer_name)
)
def parse_tafseer_muyassar():
parse_tafseer_file('data_text/ar.muyassar.txt', 'التفسير الميسر')
def parse_tafseer_jalalayn():
parse_tafseer_file('data_text/ar.jalalayn.txt', 'تفسير الجلالين')
|
Create code to parse Tafseer files from Tanzil.netfrom quran_text.models import Ayah
from quran_tafseer.models import TafseerText, Tafseer
def parse_tafseer_file(file_name, tafseer_name):
with open(file_name, 'r') as tafseer_file:
tafseer = Tafseer.objects.create(name=tafseer_name)
for line in tafseer_file:
sura, ayah, text = line.strip().split('|')
ayah_obj = Ayah.objects.get(number=ayah, sura_id=sura)
TafseerText.objects.create(ayah=ayah_obj,
text=text,
tafseer=tafseer)
print('Done parsing file {} and create {} tafseer'.format(file_name,
tafseer_name)
)
def parse_tafseer_muyassar():
parse_tafseer_file('data_text/ar.muyassar.txt', 'التفسير الميسر')
def parse_tafseer_jalalayn():
parse_tafseer_file('data_text/ar.jalalayn.txt', 'تفسير الجلالين')
|
<commit_before><commit_msg>Create code to parse Tafseer files fomr Tanzil.net<commit_after>from quran_text.models import Ayah
from quran_tafseer.models import TafseerText, Tafseer
def parse_tafseer_file(file_name, tafseer_name):
with open(file_name, 'r') as tafseer_file:
tafseer = Tafseer.objects.create(name=tafseer_name)
for line in tafseer_file:
sura, ayah, text = line.strip().split('|')
ayah_obj = Ayah.objects.get(number=ayah, sura_id=sura)
TafseerText.objects.create(ayah=ayah_obj,
text=text,
tafseer=tafseer)
print('Done parsing file {} and create {} tafseer'.format(file_name,
tafseer_name)
)
def parse_tafseer_muyassar():
parse_tafseer_file('data_text/ar.muyassar.txt', 'التفسير الميسر')
def parse_tafseer_jalalayn():
parse_tafseer_file('data_text/ar.jalalayn.txt', 'تفسير الجلالين')
|
|
074730f14d3b04305b67baf2e6bc853c13c9bdd8
|
training/get_metrics.py
|
training/get_metrics.py
|
import pickle
import os
import argparse
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn.metrics import v_measure_score
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--exp-name', '-e', type=str, required=True)
parser.add_argument('--two-tower', '-t', action='store_true', default=False)
args = parser.parse_args()
home = os.path.expanduser('~')
experiments_dir = os.path.join(home, 'smart-news-query-embeddings', 'experiments')
exp_dir = os.path.join(experiments_dir, args.exp_name)
embeddings_path = os.path.join(exp_dir, 'embeddings', 'valid_embeddings.npy')
embeddings = np.load(embeddings_path)
valid_data_path = os.path.join(exp_dir, 'data', 'valid_data.pkl')
valid_labels_path = os.path.join(exp_dir, 'data', 'valid_labels.pkl')
with open(valid_data_path, 'rb') as f:
valid_data = pickle.load(f)
with open(valid_labels_path, 'rb') as f:
valid_labels = pickle.load(f)
# model_path = os.path.join(exp_dir, 'model')
# model = tf.keras.models.load_model(model_path)
# pred_labels = model.predict(valid_data).argmax(axis=1)
valid_labels = valid_labels.argmax(axis=1)
# print('Validation Accuracy:', np.mean(pred_labels == valid_labels))
N = valid_labels.shape[0]
indices = np.random.choice(N, size=10000, replace=False)
embeddings = embeddings[indices]
valid_labels = valid_labels[indices]
agg = AgglomerativeClustering(n_clusters=40).fit(embeddings)
print('V-measure score (agglomerative clustering):', v_measure_score(valid_labels, agg.labels_))
valid_df_path = os.path.join(exp_dir, 'data', 'valid_sentences.pkl')
valid_sentences = pd.read_pickle(valid_df_path).iloc[indices]
valid_df = pd.DataFrame({
'abstract': valid_sentences,
'label': agg.labels_
})
kmeans = KMeans(n_clusters=40).fit(embeddings)
print('V-measure score (K-means clustering):', v_measure_score(valid_labels, kmeans.labels_))
print(valid_df.shape)
labels_out_path = os.path.join(exp_dir, 'data', 'embedding_labels.tsv')
embeddings_out_path = os.path.join(exp_dir, 'data', 'embeddings.tsv')
np.savetxt(embeddings_out_path, embeddings, delimiter='\t')
valid_df.to_csv(labels_out_path, sep='\t')
|
Add script to get metrics from BERT models
|
Add script to get metrics from BERT models
|
Python
|
apache-2.0
|
googleinterns/smart-news-query-embeddings,googleinterns/smart-news-query-embeddings
|
Add script to get metrics from BERT models
|
import pickle
import os
import argparse
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn.metrics import v_measure_score
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--exp-name', '-e', type=str, required=True)
parser.add_argument('--two-tower', '-t', action='store_true', default=False)
args = parser.parse_args()
home = os.path.expanduser('~')
experiments_dir = os.path.join(home, 'smart-news-query-embeddings', 'experiments')
exp_dir = os.path.join(experiments_dir, args.exp_name)
embeddings_path = os.path.join(exp_dir, 'embeddings', 'valid_embeddings.npy')
embeddings = np.load(embeddings_path)
valid_data_path = os.path.join(exp_dir, 'data', 'valid_data.pkl')
valid_labels_path = os.path.join(exp_dir, 'data', 'valid_labels.pkl')
with open(valid_data_path, 'rb') as f:
valid_data = pickle.load(f)
with open(valid_labels_path, 'rb') as f:
valid_labels = pickle.load(f)
# model_path = os.path.join(exp_dir, 'model')
# model = tf.keras.models.load_model(model_path)
# pred_labels = model.predict(valid_data).argmax(axis=1)
valid_labels = valid_labels.argmax(axis=1)
# print('Validation Accuracy:', np.mean(pred_labels == valid_labels))
N = valid_labels.shape[0]
indices = np.random.choice(N, size=10000, replace=False)
embeddings = embeddings[indices]
valid_labels = valid_labels[indices]
agg = AgglomerativeClustering(n_clusters=40).fit(embeddings)
print('V-measure score (agglomerative clustering):', v_measure_score(valid_labels, agg.labels_))
valid_df_path = os.path.join(exp_dir, 'data', 'valid_sentences.pkl')
valid_sentences = pd.read_pickle(valid_df_path).iloc[indices]
valid_df = pd.DataFrame({
'abstract': valid_sentences,
'label': agg.labels_
})
kmeans = KMeans(n_clusters=40).fit(embeddings)
print('V-measure score (K-means clustering):', v_measure_score(valid_labels, kmeans.labels_))
print(valid_df.shape)
labels_out_path = os.path.join(exp_dir, 'data', 'embedding_labels.tsv')
embeddings_out_path = os.path.join(exp_dir, 'data', 'embeddings.tsv')
np.savetxt(embeddings_out_path, embeddings, delimiter='\t')
valid_df.to_csv(labels_out_path, sep='\t')
|
<commit_before><commit_msg>Add script to get metrics from BERT models<commit_after>
|
import pickle
import os
import argparse
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn.metrics import v_measure_score
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--exp-name', '-e', type=str, required=True)
parser.add_argument('--two-tower', '-t', action='store_true', default=False)
args = parser.parse_args()
home = os.path.expanduser('~')
experiments_dir = os.path.join(home, 'smart-news-query-embeddings', 'experiments')
exp_dir = os.path.join(experiments_dir, args.exp_name)
embeddings_path = os.path.join(exp_dir, 'embeddings', 'valid_embeddings.npy')
embeddings = np.load(embeddings_path)
valid_data_path = os.path.join(exp_dir, 'data', 'valid_data.pkl')
valid_labels_path = os.path.join(exp_dir, 'data', 'valid_labels.pkl')
with open(valid_data_path, 'rb') as f:
valid_data = pickle.load(f)
with open(valid_labels_path, 'rb') as f:
valid_labels = pickle.load(f)
# model_path = os.path.join(exp_dir, 'model')
# model = tf.keras.models.load_model(model_path)
# pred_labels = model.predict(valid_data).argmax(axis=1)
valid_labels = valid_labels.argmax(axis=1)
# print('Validation Accuracy:', np.mean(pred_labels == valid_labels))
N = valid_labels.shape[0]
indices = np.random.choice(N, size=10000, replace=False)
embeddings = embeddings[indices]
valid_labels = valid_labels[indices]
agg = AgglomerativeClustering(n_clusters=40).fit(embeddings)
print('V-measure score (agglomerative clustering):', v_measure_score(valid_labels, agg.labels_))
valid_df_path = os.path.join(exp_dir, 'data', 'valid_sentences.pkl')
valid_sentences = pd.read_pickle(valid_df_path).iloc[indices]
valid_df = pd.DataFrame({
'abstract': valid_sentences,
'label': agg.labels_
})
kmeans = KMeans(n_clusters=40).fit(embeddings)
print('V-measure score (K-means clustering):', v_measure_score(valid_labels, kmeans.labels_))
print(valid_df.shape)
labels_out_path = os.path.join(exp_dir, 'data', 'embedding_labels.tsv')
embeddings_out_path = os.path.join(exp_dir, 'data', 'embeddings.tsv')
np.savetxt(embeddings_out_path, embeddings, delimiter='\t')
valid_df.to_csv(labels_out_path, sep='\t')
|
Add script to get metrics from BERT modelsimport pickle
import os
import argparse
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn.metrics import v_measure_score
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--exp-name', '-e', type=str, required=True)
parser.add_argument('--two-tower', '-t', action='store_true', default=False)
args = parser.parse_args()
home = os.path.expanduser('~')
experiments_dir = os.path.join(home, 'smart-news-query-embeddings', 'experiments')
exp_dir = os.path.join(experiments_dir, args.exp_name)
embeddings_path = os.path.join(exp_dir, 'embeddings', 'valid_embeddings.npy')
embeddings = np.load(embeddings_path)
valid_data_path = os.path.join(exp_dir, 'data', 'valid_data.pkl')
valid_labels_path = os.path.join(exp_dir, 'data', 'valid_labels.pkl')
with open(valid_data_path, 'rb') as f:
valid_data = pickle.load(f)
with open(valid_labels_path, 'rb') as f:
valid_labels = pickle.load(f)
# model_path = os.path.join(exp_dir, 'model')
# model = tf.keras.models.load_model(model_path)
# pred_labels = model.predict(valid_data).argmax(axis=1)
valid_labels = valid_labels.argmax(axis=1)
# print('Validation Accuracy:', np.mean(pred_labels == valid_labels))
N = valid_labels.shape[0]
indices = np.random.choice(N, size=10000, replace=False)
embeddings = embeddings[indices]
valid_labels = valid_labels[indices]
agg = AgglomerativeClustering(n_clusters=40).fit(embeddings)
print('V-measure score (agglomerative clustering):', v_measure_score(valid_labels, agg.labels_))
valid_df_path = os.path.join(exp_dir, 'data', 'valid_sentences.pkl')
valid_sentences = pd.read_pickle(valid_df_path).iloc[indices]
valid_df = pd.DataFrame({
'abstract': valid_sentences,
'label': agg.labels_
})
kmeans = KMeans(n_clusters=40).fit(embeddings)
print('V-measure score (K-means clustering):', v_measure_score(valid_labels, kmeans.labels_))
print(valid_df.shape)
labels_out_path = os.path.join(exp_dir, 'data', 'embedding_labels.tsv')
embeddings_out_path = os.path.join(exp_dir, 'data', 'embeddings.tsv')
np.savetxt(embeddings_out_path, embeddings, delimiter='\t')
valid_df.to_csv(labels_out_path, sep='\t')
|
<commit_before><commit_msg>Add script to get metrics from BERT models<commit_after>import pickle
import os
import argparse
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn.metrics import v_measure_score
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--exp-name', '-e', type=str, required=True)
parser.add_argument('--two-tower', '-t', action='store_true', default=False)
args = parser.parse_args()
home = os.path.expanduser('~')
experiments_dir = os.path.join(home, 'smart-news-query-embeddings', 'experiments')
exp_dir = os.path.join(experiments_dir, args.exp_name)
embeddings_path = os.path.join(exp_dir, 'embeddings', 'valid_embeddings.npy')
embeddings = np.load(embeddings_path)
valid_data_path = os.path.join(exp_dir, 'data', 'valid_data.pkl')
valid_labels_path = os.path.join(exp_dir, 'data', 'valid_labels.pkl')
with open(valid_data_path, 'rb') as f:
valid_data = pickle.load(f)
with open(valid_labels_path, 'rb') as f:
valid_labels = pickle.load(f)
# model_path = os.path.join(exp_dir, 'model')
# model = tf.keras.models.load_model(model_path)
# pred_labels = model.predict(valid_data).argmax(axis=1)
valid_labels = valid_labels.argmax(axis=1)
# print('Validation Accuracy:', np.mean(pred_labels == valid_labels))
N = valid_labels.shape[0]
indices = np.random.choice(N, size=10000, replace=False)
embeddings = embeddings[indices]
valid_labels = valid_labels[indices]
agg = AgglomerativeClustering(n_clusters=40).fit(embeddings)
print('V-measure score (agglomerative clustering):', v_measure_score(valid_labels, agg.labels_))
valid_df_path = os.path.join(exp_dir, 'data', 'valid_sentences.pkl')
valid_sentences = pd.read_pickle(valid_df_path).iloc[indices]
valid_df = pd.DataFrame({
'abstract': valid_sentences,
'label': agg.labels_
})
kmeans = KMeans(n_clusters=40).fit(embeddings)
print('V-measure score (K-means clustering):', v_measure_score(valid_labels, kmeans.labels_))
print(valid_df.shape)
labels_out_path = os.path.join(exp_dir, 'data', 'embedding_labels.tsv')
embeddings_out_path = os.path.join(exp_dir, 'data', 'embeddings.tsv')
np.savetxt(embeddings_out_path, embeddings, delimiter='\t')
valid_df.to_csv(labels_out_path, sep='\t')
|
|
ae1a84df1bdbdbd6b92ba4210cc0d630981bc26c
|
kolibri/core/content/migrations/0026_contentnode_options.py
|
kolibri/core/content/migrations/0026_contentnode_options.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-05-20 14:56
from __future__ import unicode_literals
from django.db import migrations
import kolibri.core.fields
class Migration(migrations.Migration):
dependencies = [
('content', '0025_add_h5p_kind'),
]
operations = [
migrations.AddField(
model_name='contentnode',
name='options',
field=kolibri.core.fields.JSONField(blank=True, default={}, null=True),
),
]
|
Add migration that got removed during re-generation.
|
Add migration that got removed during re-generation.
|
Python
|
mit
|
learningequality/kolibri,mrpau/kolibri,indirectlylit/kolibri,learningequality/kolibri,learningequality/kolibri,indirectlylit/kolibri,indirectlylit/kolibri,mrpau/kolibri,mrpau/kolibri,mrpau/kolibri,learningequality/kolibri,indirectlylit/kolibri
|
Add migration that got removed during re-generation.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-05-20 14:56
from __future__ import unicode_literals
from django.db import migrations
import kolibri.core.fields
class Migration(migrations.Migration):
dependencies = [
('content', '0025_add_h5p_kind'),
]
operations = [
migrations.AddField(
model_name='contentnode',
name='options',
field=kolibri.core.fields.JSONField(blank=True, default={}, null=True),
),
]
|
<commit_before><commit_msg>Add migration that got removed during re-generation.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-05-20 14:56
from __future__ import unicode_literals
from django.db import migrations
import kolibri.core.fields
class Migration(migrations.Migration):
dependencies = [
('content', '0025_add_h5p_kind'),
]
operations = [
migrations.AddField(
model_name='contentnode',
name='options',
field=kolibri.core.fields.JSONField(blank=True, default={}, null=True),
),
]
|
Add migration that got removed during re-generation.# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-05-20 14:56
from __future__ import unicode_literals
from django.db import migrations
import kolibri.core.fields
class Migration(migrations.Migration):
dependencies = [
('content', '0025_add_h5p_kind'),
]
operations = [
migrations.AddField(
model_name='contentnode',
name='options',
field=kolibri.core.fields.JSONField(blank=True, default={}, null=True),
),
]
|
<commit_before><commit_msg>Add migration that got removed during re-generation.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-05-20 14:56
from __future__ import unicode_literals
from django.db import migrations
import kolibri.core.fields
class Migration(migrations.Migration):
dependencies = [
('content', '0025_add_h5p_kind'),
]
operations = [
migrations.AddField(
model_name='contentnode',
name='options',
field=kolibri.core.fields.JSONField(blank=True, default={}, null=True),
),
]
|
|
7047c48d0772fe61b2ba9ed9e7f2992712a63700
|
tests/test_linting.py
|
tests/test_linting.py
|
import sys
import subprocess
def test_pep8():
result = subprocess.call(['pep8', '--statistics', '--show-source'])
assert result == 0
def test_pypi():
result = subprocess.call(['python',
'setup.py',
'check',
'--restructuredtext',
'--strict',
'--metadata',
])
assert result == 0
|
Add linting tests (PEP8 and PyPi)
|
Add linting tests (PEP8 and PyPi)
|
Python
|
mit
|
Kromey/pynano
|
Add linting tests (PEP8 and PyPi)
|
import sys
import subprocess
def test_pep8():
result = subprocess.call(['pep8', '--statistics', '--show-source'])
assert result == 0
def test_pypi():
result = subprocess.call(['python',
'setup.py',
'check',
'--restructuredtext',
'--strict',
'--metadata',
])
assert result == 0
|
<commit_before><commit_msg>Add linting tests (PEP8 and PyPi)<commit_after>
|
import sys
import subprocess
def test_pep8():
result = subprocess.call(['pep8', '--statistics', '--show-source'])
assert result == 0
def test_pypi():
result = subprocess.call(['python',
'setup.py',
'check',
'--restructuredtext',
'--strict',
'--metadata',
])
assert result == 0
|
Add linting tests (PEP8 and PyPi)import sys
import subprocess
def test_pep8():
result = subprocess.call(['pep8', '--statistics', '--show-source'])
assert result == 0
def test_pypi():
result = subprocess.call(['python',
'setup.py',
'check',
'--restructuredtext',
'--strict',
'--metadata',
])
assert result == 0
|
<commit_before><commit_msg>Add linting tests (PEP8 and PyPi)<commit_after>import sys
import subprocess
def test_pep8():
result = subprocess.call(['pep8', '--statistics', '--show-source'])
assert result == 0
def test_pypi():
result = subprocess.call(['python',
'setup.py',
'check',
'--restructuredtext',
'--strict',
'--metadata',
])
assert result == 0
|
|
926d1cea1a0c52325cc66dc51dd8b941a0dfa783
|
scripts/angle_deqp_test_merge.py
|
scripts/angle_deqp_test_merge.py
|
#!/usr/bin/env python
#
# Copyright 2021 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Merges dEQP sharded test results in the ANGLE testing infrastucture."""
import os
import sys
d = os.path.dirname
THIS_DIR = d(os.path.abspath(__file__))
ANGLE_SRC_DIR = d(THIS_DIR)
sys.path.insert(0, os.path.join(ANGLE_SRC_DIR, 'testing', 'merge_scripts'))
CHROMIUM_SRC_DIR = d(d(ANGLE_SRC_DIR))
sys.path.insert(0, os.path.join(CHROMIUM_SRC_DIR, 'testing', 'merge_scripts'))
import merge_api
import standard_isolated_script_merge
def main(raw_args):
parser = merge_api.ArgumentParser()
args = parser.parse_args(raw_args)
# TODO(jmadill): Merge QPA files into one. http://anglebug.com/5236
return standard_isolated_script_merge.StandardIsolatedScriptMerge(
args.output_json, args.summary_json, args.jsons_to_merge)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
Add dEQP test merge script.
|
testing: Add dEQP test merge script.
This script will allow ANGLE to process custom logic when we complete
a suite of dEQP tests on the bots. The first customization step we
can do is merge the myriad batch QPA files into one master QPA file.
This script is currently a no-op and will let us set up the merge step
so we can test the logic pre-commit.
Bug: angleproject:5236
Change-Id: I38d0e359ff7fc6f6f91b5193387c13ff0fc77aa6
Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/2664247
Commit-Queue: Jamie Madill <7e492b4f1c8458024932de3ba475cbf015424c30@chromium.org>
Reviewed-by: Yuly Novikov <36f5580f63d0a3eacc17e89a61d11e2f646630d2@chromium.org>
|
Python
|
bsd-3-clause
|
ppy/angle,ppy/angle,ppy/angle,ppy/angle
|
testing: Add dEQP test merge script.
This script will allow ANGLE to process custom logic when we complete
a suite of dEQP tests on the bots. The first customization step we
can do is merge the myriad batch QPA files into one master QPA file.
This script is currently a no-op and will let us set up the merge step
so we can test the logic pre-commit.
Bug: angleproject:5236
Change-Id: I38d0e359ff7fc6f6f91b5193387c13ff0fc77aa6
Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/2664247
Commit-Queue: Jamie Madill <7e492b4f1c8458024932de3ba475cbf015424c30@chromium.org>
Reviewed-by: Yuly Novikov <36f5580f63d0a3eacc17e89a61d11e2f646630d2@chromium.org>
|
#!/usr/bin/env python
#
# Copyright 2021 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Merges dEQP sharded test results in the ANGLE testing infrastucture."""
import os
import sys
d = os.path.dirname
THIS_DIR = d(os.path.abspath(__file__))
ANGLE_SRC_DIR = d(THIS_DIR)
sys.path.insert(0, os.path.join(ANGLE_SRC_DIR, 'testing', 'merge_scripts'))
CHROMIUM_SRC_DIR = d(d(ANGLE_SRC_DIR))
sys.path.insert(0, os.path.join(CHROMIUM_SRC_DIR, 'testing', 'merge_scripts'))
import merge_api
import standard_isolated_script_merge
def main(raw_args):
parser = merge_api.ArgumentParser()
args = parser.parse_args(raw_args)
# TODO(jmadill): Merge QPA files into one. http://anglebug.com/5236
return standard_isolated_script_merge.StandardIsolatedScriptMerge(
args.output_json, args.summary_json, args.jsons_to_merge)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
<commit_before><commit_msg>testing: Add dEQP test merge script.
This script will allow ANGLE to process custom logic when we complete
a suite of dEQP tests on the bots. The first customization step we
can do is merge the myriad batch QPA files into one master QPA file.
This script is currently a no-op and will let us set up the merge step
so we can test the logic pre-commit.
Bug: angleproject:5236
Change-Id: I38d0e359ff7fc6f6f91b5193387c13ff0fc77aa6
Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/2664247
Commit-Queue: Jamie Madill <7e492b4f1c8458024932de3ba475cbf015424c30@chromium.org>
Reviewed-by: Yuly Novikov <36f5580f63d0a3eacc17e89a61d11e2f646630d2@chromium.org><commit_after>
|
#!/usr/bin/env python
#
# Copyright 2021 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Merges dEQP sharded test results in the ANGLE testing infrastucture."""
import os
import sys
d = os.path.dirname
THIS_DIR = d(os.path.abspath(__file__))
ANGLE_SRC_DIR = d(THIS_DIR)
sys.path.insert(0, os.path.join(ANGLE_SRC_DIR, 'testing', 'merge_scripts'))
CHROMIUM_SRC_DIR = d(d(ANGLE_SRC_DIR))
sys.path.insert(0, os.path.join(CHROMIUM_SRC_DIR, 'testing', 'merge_scripts'))
import merge_api
import standard_isolated_script_merge
def main(raw_args):
parser = merge_api.ArgumentParser()
args = parser.parse_args(raw_args)
# TODO(jmadill): Merge QPA files into one. http://anglebug.com/5236
return standard_isolated_script_merge.StandardIsolatedScriptMerge(
args.output_json, args.summary_json, args.jsons_to_merge)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
testing: Add dEQP test merge script.
This script will allow ANGLE to process custom logic when we complete
a suite of dEQP tests on the bots. The first customization step we
can do is merge the myriad batch QPA files into one master QPA file.
This script is currently a no-op and will let us set up the merge step
so we can test the logic pre-commit.
Bug: angleproject:5236
Change-Id: I38d0e359ff7fc6f6f91b5193387c13ff0fc77aa6
Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/2664247
Commit-Queue: Jamie Madill <7e492b4f1c8458024932de3ba475cbf015424c30@chromium.org>
Reviewed-by: Yuly Novikov <36f5580f63d0a3eacc17e89a61d11e2f646630d2@chromium.org>#!/usr/bin/env python
#
# Copyright 2021 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Merges dEQP sharded test results in the ANGLE testing infrastucture."""
import os
import sys
d = os.path.dirname
THIS_DIR = d(os.path.abspath(__file__))
ANGLE_SRC_DIR = d(THIS_DIR)
sys.path.insert(0, os.path.join(ANGLE_SRC_DIR, 'testing', 'merge_scripts'))
CHROMIUM_SRC_DIR = d(d(ANGLE_SRC_DIR))
sys.path.insert(0, os.path.join(CHROMIUM_SRC_DIR, 'testing', 'merge_scripts'))
import merge_api
import standard_isolated_script_merge
def main(raw_args):
parser = merge_api.ArgumentParser()
args = parser.parse_args(raw_args)
# TODO(jmadill): Merge QPA files into one. http://anglebug.com/5236
return standard_isolated_script_merge.StandardIsolatedScriptMerge(
args.output_json, args.summary_json, args.jsons_to_merge)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
<commit_before><commit_msg>testing: Add dEQP test merge script.
This script will allow ANGLE to process custom logic when we complete
a suite of dEQP tests on the bots. The first customization step we
can do is merge the myriad batch QPA files into one master QPA file.
This script is currently a no-op and will let us set up the merge step
so we can test the logic pre-commit.
Bug: angleproject:5236
Change-Id: I38d0e359ff7fc6f6f91b5193387c13ff0fc77aa6
Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/2664247
Commit-Queue: Jamie Madill <7e492b4f1c8458024932de3ba475cbf015424c30@chromium.org>
Reviewed-by: Yuly Novikov <36f5580f63d0a3eacc17e89a61d11e2f646630d2@chromium.org><commit_after>#!/usr/bin/env python
#
# Copyright 2021 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Merges dEQP sharded test results in the ANGLE testing infrastucture."""
import os
import sys
d = os.path.dirname
THIS_DIR = d(os.path.abspath(__file__))
ANGLE_SRC_DIR = d(THIS_DIR)
sys.path.insert(0, os.path.join(ANGLE_SRC_DIR, 'testing', 'merge_scripts'))
CHROMIUM_SRC_DIR = d(d(ANGLE_SRC_DIR))
sys.path.insert(0, os.path.join(CHROMIUM_SRC_DIR, 'testing', 'merge_scripts'))
import merge_api
import standard_isolated_script_merge
def main(raw_args):
parser = merge_api.ArgumentParser()
args = parser.parse_args(raw_args)
# TODO(jmadill): Merge QPA files into one. http://anglebug.com/5236
return standard_isolated_script_merge.StandardIsolatedScriptMerge(
args.output_json, args.summary_json, args.jsons_to_merge)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
|
046ca6c16b60c787e3dc865b3080a65d74ee437b
|
exploratory_analysis/author_scan.py
|
exploratory_analysis/author_scan.py
|
import os
from utils import Reader
import code
import sys
author_dict = dict()
def extract_authors(tweets):
# code.interact(local=dict(globals(), **locals()))
for t in tweets:
if t.is_post():
actor = t.actor()
create_key(actor['id'])
increment_author(actor, t.is_post())
elif t.is_share():
original_tweet = t.data['object']
actor = original_tweet['actor']
create_key(actor['id'])
increment_author(actor, t.is_post())
else:
print 'Neither post nor share:', t.id()
def increment_author(actor, is_post):
dict_value = author_dict[actor['id']]
dict_value[0] = actor['link']
dict_value[1] = actor['preferredUsername']
dict_value[2] = actor['displayName']
if is_post:
dict_value[3] += 1
else:
dict_value[4] += 1
def create_key(actor_id):
if actor_id not in author_dict.keys():
# link, username, display_name, post, post that gotten shared
default_value = ['', '', '', 0, 0]
author_dict[actor_id] = default_value
def print_all():
for k in author_dict.keys():
value = author_dict[k]
print '"{}","{}","{}","{}",{},{}'.format(k, value[0], value[1], value[2], value[3], value[4])
if __name__ == '__main__':
# coding=utf-8
reload(sys)
sys.setdefaultencoding('utf-8')
working_directory = os.getcwd()
files = Reader.read_directory(working_directory)
for f in files:
extract_authors(Reader.read_file(f))
print_all()
# code.interact(local=dict(globals(), **locals()))
|
Add code to see what authors there are in the dataset
|
Add code to see what authors there are in the dataset
|
Python
|
apache-2.0
|
chuajiesheng/twitter-sentiment-analysis
|
Add code to see what authors there are in the dataset
|
import os
from utils import Reader
import code
import sys
author_dict = dict()
def extract_authors(tweets):
# code.interact(local=dict(globals(), **locals()))
for t in tweets:
if t.is_post():
actor = t.actor()
create_key(actor['id'])
increment_author(actor, t.is_post())
elif t.is_share():
original_tweet = t.data['object']
actor = original_tweet['actor']
create_key(actor['id'])
increment_author(actor, t.is_post())
else:
print 'Neither post nor share:', t.id()
def increment_author(actor, is_post):
dict_value = author_dict[actor['id']]
dict_value[0] = actor['link']
dict_value[1] = actor['preferredUsername']
dict_value[2] = actor['displayName']
if is_post:
dict_value[3] += 1
else:
dict_value[4] += 1
def create_key(actor_id):
if actor_id not in author_dict.keys():
# link, username, display_name, post, post that gotten shared
default_value = ['', '', '', 0, 0]
author_dict[actor_id] = default_value
def print_all():
for k in author_dict.keys():
value = author_dict[k]
print '"{}","{}","{}","{}",{},{}'.format(k, value[0], value[1], value[2], value[3], value[4])
if __name__ == '__main__':
# coding=utf-8
reload(sys)
sys.setdefaultencoding('utf-8')
working_directory = os.getcwd()
files = Reader.read_directory(working_directory)
for f in files:
extract_authors(Reader.read_file(f))
print_all()
# code.interact(local=dict(globals(), **locals()))
|
<commit_before><commit_msg>Add code to see what authors there are in the dataset<commit_after>
|
import os
from utils import Reader
import code
import sys
author_dict = dict()
def extract_authors(tweets):
# code.interact(local=dict(globals(), **locals()))
for t in tweets:
if t.is_post():
actor = t.actor()
create_key(actor['id'])
increment_author(actor, t.is_post())
elif t.is_share():
original_tweet = t.data['object']
actor = original_tweet['actor']
create_key(actor['id'])
increment_author(actor, t.is_post())
else:
print 'Neither post nor share:', t.id()
def increment_author(actor, is_post):
dict_value = author_dict[actor['id']]
dict_value[0] = actor['link']
dict_value[1] = actor['preferredUsername']
dict_value[2] = actor['displayName']
if is_post:
dict_value[3] += 1
else:
dict_value[4] += 1
def create_key(actor_id):
if actor_id not in author_dict.keys():
# link, username, display_name, post, post that gotten shared
default_value = ['', '', '', 0, 0]
author_dict[actor_id] = default_value
def print_all():
for k in author_dict.keys():
value = author_dict[k]
print '"{}","{}","{}","{}",{},{}'.format(k, value[0], value[1], value[2], value[3], value[4])
if __name__ == '__main__':
# coding=utf-8
reload(sys)
sys.setdefaultencoding('utf-8')
working_directory = os.getcwd()
files = Reader.read_directory(working_directory)
for f in files:
extract_authors(Reader.read_file(f))
print_all()
# code.interact(local=dict(globals(), **locals()))
|
Add code to see what authors there are in the datasetimport os
from utils import Reader
import code
import sys
author_dict = dict()
def extract_authors(tweets):
# code.interact(local=dict(globals(), **locals()))
for t in tweets:
if t.is_post():
actor = t.actor()
create_key(actor['id'])
increment_author(actor, t.is_post())
elif t.is_share():
original_tweet = t.data['object']
actor = original_tweet['actor']
create_key(actor['id'])
increment_author(actor, t.is_post())
else:
print 'Neither post nor share:', t.id()
def increment_author(actor, is_post):
dict_value = author_dict[actor['id']]
dict_value[0] = actor['link']
dict_value[1] = actor['preferredUsername']
dict_value[2] = actor['displayName']
if is_post:
dict_value[3] += 1
else:
dict_value[4] += 1
def create_key(actor_id):
if actor_id not in author_dict.keys():
# link, username, display_name, post, post that gotten shared
default_value = ['', '', '', 0, 0]
author_dict[actor_id] = default_value
def print_all():
for k in author_dict.keys():
value = author_dict[k]
print '"{}","{}","{}","{}",{},{}'.format(k, value[0], value[1], value[2], value[3], value[4])
if __name__ == '__main__':
# coding=utf-8
reload(sys)
sys.setdefaultencoding('utf-8')
working_directory = os.getcwd()
files = Reader.read_directory(working_directory)
for f in files:
extract_authors(Reader.read_file(f))
print_all()
# code.interact(local=dict(globals(), **locals()))
|
<commit_before><commit_msg>Add code to see what authors there are in the dataset<commit_after>import os
from utils import Reader
import code
import sys
author_dict = dict()
def extract_authors(tweets):
# code.interact(local=dict(globals(), **locals()))
for t in tweets:
if t.is_post():
actor = t.actor()
create_key(actor['id'])
increment_author(actor, t.is_post())
elif t.is_share():
original_tweet = t.data['object']
actor = original_tweet['actor']
create_key(actor['id'])
increment_author(actor, t.is_post())
else:
print 'Neither post nor share:', t.id()
def increment_author(actor, is_post):
dict_value = author_dict[actor['id']]
dict_value[0] = actor['link']
dict_value[1] = actor['preferredUsername']
dict_value[2] = actor['displayName']
if is_post:
dict_value[3] += 1
else:
dict_value[4] += 1
def create_key(actor_id):
if actor_id not in author_dict.keys():
# link, username, display_name, post, post that gotten shared
default_value = ['', '', '', 0, 0]
author_dict[actor_id] = default_value
def print_all():
for k in author_dict.keys():
value = author_dict[k]
print '"{}","{}","{}","{}",{},{}'.format(k, value[0], value[1], value[2], value[3], value[4])
if __name__ == '__main__':
# coding=utf-8
reload(sys)
sys.setdefaultencoding('utf-8')
working_directory = os.getcwd()
files = Reader.read_directory(working_directory)
for f in files:
extract_authors(Reader.read_file(f))
print_all()
# code.interact(local=dict(globals(), **locals()))
|