| Column | Type | Range |
|---|---|---|
| commit | string | lengths 40-40 |
| old_file | string | lengths 4-118 |
| new_file | string | lengths 4-118 |
| old_contents | string | lengths 0-2.94k |
| new_contents | string | lengths 1-4.43k |
| subject | string | lengths 15-444 |
| message | string | lengths 16-3.45k |
| lang | string | 1 class |
| license | string | 13 classes |
| repos | string | lengths 5-43.2k |
| prompt | string | lengths 17-4.58k |
| response | string | lengths 1-4.43k |
| prompt_tagged | string | lengths 58-4.62k |
| response_tagged | string | lengths 1-4.43k |
| text | string | lengths 132-7.29k |
| text_tagged | string | lengths 173-7.33k |
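Each row below is one record with the columns listed above: a commit hash, the file path before and after the change, the old and new file contents, the commit subject and message, plus prompt/response renderings of the same data. From the rows shown, the prompt, response, and *_tagged columns appear to be concatenations of the message and the old/new file contents (with or without `<commit_before>`/`<commit_msg>`/`<commit_after>` tags). As a minimal sketch of how such records could be consumed, assuming the rows are published as a Hugging Face dataset (the repository id below is a hypothetical placeholder, not the real one):

```python
# Minimal sketch: load a commit-edit dataset with the schema above and inspect one record.
# "your-org/commit-edits" is a hypothetical placeholder repository id.
from datasets import load_dataset

ds = load_dataset("your-org/commit-edits", split="train")

row = ds[0]
print(row["commit"], row["new_file"])             # commit hash and changed file path
print(row["subject"])                             # one-line commit subject
print(row["old_contents"][:200] or "<new file>")  # pre-commit contents (may be empty)
print(row["new_contents"][:200])                  # post-commit contents
```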
a08e7d9949059b95b2eda16c94f0a3fd8853dc73
|
src/run_algorithm.py
|
src/run_algorithm.py
|
import argparse
from project import corpus, knn_corpus, lda_corpus, word2vec_corpus
algorithms = {"lda": lda_corpus.LDACorpus,
              "knn": knn_corpus.KNNCorpus,
              "w2v": word2vec_corpus.W2VCorpus}
def main():
    parser = argparse.ArgumentParser(description='Build bow corpus on the arxiv corpus.')
    parser.add_argument('integers', metavar='N', type=int, nargs='+', help='size values for the corpus')
    parser.add_argument('directory', help='directory for the arxiv txt files')
    parser.add_argument('algorithm', help='algoritm to apply to the corpus', choices=algorithms)
    args = parser.parse_args()
    # Build corpus from largest int and directory (Check valid directory)
    # For each int in the param list then apply the corpus algorithm using a sliced corpus
    # Save any mm, index files, etc to a directory so they can be used again.
    # Log temporal time
if __name__ == "__main__": main()
|
Add script to apply algorithms to corpus
|
Add script to apply algorithms to corpus
|
Python
|
mit
|
PinPinIre/Final-Year-Project,PinPinIre/Final-Year-Project,PinPinIre/Final-Year-Project
|
Add script to apply algorithms to corpus
|
import argparse
from project import corpus, knn_corpus, lda_corpus, word2vec_corpus
algorithms = {"lda": lda_corpus.LDACorpus,
"knn": knn_corpus.KNNCorpus,
"w2v": word2vec_corpus.W2VCorpus}
def main():
parser = argparse.ArgumentParser(description='Build bow corpus on the arxiv corpus.')
parser.add_argument('integers', metavar='N', type=int, nargs='+', help='size values for the corpus')
parser.add_argument('directory', help='directory for the arxiv txt files')
parser.add_argument('algorithm', help='algoritm to apply to the corpus', choices=algorithms)
args = parser.parse_args()
# Build corpus from largest int and directory (Check valid directory)
# For each int in the param list then apply the corpus algorithm using a sliced corpus
# Save any mm, index files, etc to a directory so they can be used again.
# Log temporal time
if __name__ == "__main__": main()
|
<commit_before><commit_msg>Add script to apply algorithms to corpus<commit_after>
|
import argparse
from project import corpus, knn_corpus, lda_corpus, word2vec_corpus
algorithms = {"lda": lda_corpus.LDACorpus,
"knn": knn_corpus.KNNCorpus,
"w2v": word2vec_corpus.W2VCorpus}
def main():
parser = argparse.ArgumentParser(description='Build bow corpus on the arxiv corpus.')
parser.add_argument('integers', metavar='N', type=int, nargs='+', help='size values for the corpus')
parser.add_argument('directory', help='directory for the arxiv txt files')
parser.add_argument('algorithm', help='algoritm to apply to the corpus', choices=algorithms)
args = parser.parse_args()
# Build corpus from largest int and directory (Check valid directory)
# For each int in the param list then apply the corpus algorithm using a sliced corpus
# Save any mm, index files, etc to a directory so they can be used again.
# Log temporal time
if __name__ == "__main__": main()
|
Add script to apply algorithms to corpusimport argparse
from project import corpus, knn_corpus, lda_corpus, word2vec_corpus
algorithms = {"lda": lda_corpus.LDACorpus,
"knn": knn_corpus.KNNCorpus,
"w2v": word2vec_corpus.W2VCorpus}
def main():
parser = argparse.ArgumentParser(description='Build bow corpus on the arxiv corpus.')
parser.add_argument('integers', metavar='N', type=int, nargs='+', help='size values for the corpus')
parser.add_argument('directory', help='directory for the arxiv txt files')
parser.add_argument('algorithm', help='algoritm to apply to the corpus', choices=algorithms)
args = parser.parse_args()
# Build corpus from largest int and directory (Check valid directory)
# For each int in the param list then apply the corpus algorithm using a sliced corpus
# Save any mm, index files, etc to a directory so they can be used again.
# Log temporal time
if __name__ == "__main__": main()
|
<commit_before><commit_msg>Add script to apply algorithms to corpus<commit_after>import argparse
from project import corpus, knn_corpus, lda_corpus, word2vec_corpus
algorithms = {"lda": lda_corpus.LDACorpus,
"knn": knn_corpus.KNNCorpus,
"w2v": word2vec_corpus.W2VCorpus}
def main():
parser = argparse.ArgumentParser(description='Build bow corpus on the arxiv corpus.')
parser.add_argument('integers', metavar='N', type=int, nargs='+', help='size values for the corpus')
parser.add_argument('directory', help='directory for the arxiv txt files')
parser.add_argument('algorithm', help='algoritm to apply to the corpus', choices=algorithms)
args = parser.parse_args()
# Build corpus from largest int and directory (Check valid directory)
# For each int in the param list then apply the corpus algorithm using a sliced corpus
# Save any mm, index files, etc to a directory so they can be used again.
# Log temporal time
if __name__ == "__main__": main()
|
|
d8fab96a12047969971b5d777408177b87eb578a
|
geotrek/tourism/tests/test_forms.py
|
geotrek/tourism/tests/test_forms.py
|
from django.test import TestCase
from geotrek.authent.tests.factories import UserFactory
from geotrek.tourism.forms import TouristicEventForm
class PathFormTest(TestCase):
    def test_begin_end_date(self):
        user = UserFactory()
        form1 = TouristicEventForm(
            user=user,
            data={
                'geom': '{"type": "Point", "coordinates":[0, 0]}',
                'name_en': 'test',
                'begin_date': '2022-01-20',
                'end_date': '2022-01-10',
            }
        )
        self.assertFalse(form1.is_valid(), str(form1.errors))
|
Add test form error when begin date is after end date
|
Add test form error when begin date is after end date
|
Python
|
bsd-2-clause
|
GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin
|
Add test form error when begin date is after end date
|
from django.test import TestCase
from geotrek.authent.tests.factories import UserFactory
from geotrek.tourism.forms import TouristicEventForm
class PathFormTest(TestCase):
def test_begin_end_date(self):
user = UserFactory()
form1 = TouristicEventForm(
user=user,
data={
'geom': '{"type": "Point", "coordinates":[0, 0]}',
'name_en': 'test',
'begin_date': '2022-01-20',
'end_date': '2022-01-10',
}
)
self.assertFalse(form1.is_valid(), str(form1.errors))
|
<commit_before><commit_msg>Add test form error when begin date is after end date<commit_after>
|
from django.test import TestCase
from geotrek.authent.tests.factories import UserFactory
from geotrek.tourism.forms import TouristicEventForm
class PathFormTest(TestCase):
def test_begin_end_date(self):
user = UserFactory()
form1 = TouristicEventForm(
user=user,
data={
'geom': '{"type": "Point", "coordinates":[0, 0]}',
'name_en': 'test',
'begin_date': '2022-01-20',
'end_date': '2022-01-10',
}
)
self.assertFalse(form1.is_valid(), str(form1.errors))
|
Add test form error when begin date is after end date
from django.test import TestCase
from geotrek.authent.tests.factories import UserFactory
from geotrek.tourism.forms import TouristicEventForm
class PathFormTest(TestCase):
def test_begin_end_date(self):
user = UserFactory()
form1 = TouristicEventForm(
user=user,
data={
'geom': '{"type": "Point", "coordinates":[0, 0]}',
'name_en': 'test',
'begin_date': '2022-01-20',
'end_date': '2022-01-10',
}
)
self.assertFalse(form1.is_valid(), str(form1.errors))
|
<commit_before><commit_msg>Add test form error when begin date is after end date<commit_after>
from django.test import TestCase
from geotrek.authent.tests.factories import UserFactory
from geotrek.tourism.forms import TouristicEventForm
class PathFormTest(TestCase):
def test_begin_end_date(self):
user = UserFactory()
form1 = TouristicEventForm(
user=user,
data={
'geom': '{"type": "Point", "coordinates":[0, 0]}',
'name_en': 'test',
'begin_date': '2022-01-20',
'end_date': '2022-01-10',
}
)
self.assertFalse(form1.is_valid(), str(form1.errors))
|
|
0f0c3bb7b54c69bf121c11ea0d8e5a0b259bfb15
|
test/trainer_test.py
|
test/trainer_test.py
|
import theanets
import util
class TestTrainer(util.MNIST):
    def setUp(self):
        super(TestTrainer, self).setUp()
        self.exp = theanets.Experiment(
            theanets.Autoencoder,
            layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
    def assert_progress(self, algo, **kwargs):
        trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
        costs0 = next(trainer)
        costs1 = next(trainer)
        costs2 = next(trainer)
        assert costs2['J'] < costs0['J']
    def test_sgd(self):
        self.assert_progress('sgd', learning_rate=1e-4)
    def test_nag(self):
        self.assert_progress('nag', learning_rate=1e-4)
    def test_rprop(self):
        self.assert_progress('rprop', learning_rate=1e-4)
    def test_rmsprop(self):
        self.assert_progress('rmsprop', learning_rate=1e-4)
    def test_adadelta(self):
        self.assert_progress('adadelta', learning_rate=1e-4)
    def test_hf(self):
        self.assert_progress('hf', num_updates=3)
    def test_cg(self):
        self.assert_progress('cg')
    def test_layerwise(self):
        self.assert_progress('layerwise')
|
import theanets
import util
class TestTrainer(util.MNIST):
    def setUp(self):
        super(TestTrainer, self).setUp()
        self.exp = theanets.Experiment(
            theanets.Autoencoder,
            layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
    def assert_progress(self, algo, **kwargs):
        trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
        costs0 = next(trainer)
        costs1 = next(trainer)
        costs2 = next(trainer)
        assert costs2['J'] < costs0['J']
    def test_sgd(self):
        self.assert_progress('sgd', learning_rate=1e-4)
    def test_nag(self):
        self.assert_progress('nag', learning_rate=1e-4)
    def test_rprop(self):
        self.assert_progress('rprop', learning_rate=1e-4)
    def test_rmsprop(self):
        self.assert_progress('rmsprop', learning_rate=1e-4)
    def test_adadelta(self):
        self.assert_progress('adadelta', learning_rate=1e-4)
    def test_cg(self):
        self.assert_progress('cg')
    def test_layerwise(self):
        self.assert_progress('layerwise')
|
Exclude hf from tested trainers. :-/
|
Exclude hf from tested trainers. :-/
|
Python
|
mit
|
lmjohns3/theanets,chrinide/theanets,devdoer/theanets
|
import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
costs0 = next(trainer)
costs1 = next(trainer)
costs2 = next(trainer)
assert costs2['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_hf(self):
self.assert_progress('hf', num_updates=3)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
Exclude hf from tested trainers. :-/
|
import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
costs0 = next(trainer)
costs1 = next(trainer)
costs2 = next(trainer)
assert costs2['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
|
<commit_before>import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
costs0 = next(trainer)
costs1 = next(trainer)
costs2 = next(trainer)
assert costs2['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_hf(self):
self.assert_progress('hf', num_updates=3)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
<commit_msg>Exclude hf from tested trainers. :-/<commit_after>
|
import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
costs0 = next(trainer)
costs1 = next(trainer)
costs2 = next(trainer)
assert costs2['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
|
import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
costs0 = next(trainer)
costs1 = next(trainer)
costs2 = next(trainer)
assert costs2['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_hf(self):
self.assert_progress('hf', num_updates=3)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
Exclude hf from tested trainers. :-/import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
costs0 = next(trainer)
costs1 = next(trainer)
costs2 = next(trainer)
assert costs2['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
|
<commit_before>import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
costs0 = next(trainer)
costs1 = next(trainer)
costs2 = next(trainer)
assert costs2['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_hf(self):
self.assert_progress('hf', num_updates=3)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
<commit_msg>Exclude hf from tested trainers. :-/<commit_after>import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
costs0 = next(trainer)
costs1 = next(trainer)
costs2 = next(trainer)
assert costs2['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
|
9c504147511c7f1fadcb1effb61f482cd4fe132e
|
tests/test_runner.py
|
tests/test_runner.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from multiprocessing import Lock
import git
import mock
import pytest
from badwolf.runner import TestContext, TestRunner
@pytest.fixture(scope='function')
def push_context():
    return TestContext(
        'deepanalyzer/badwolf',
        'git@bitbucket.org:deepanalyzer/badwolf.git',
        {},
        'commit',
        'Update',
        {
            'branch': {'name': 'master'},
            'commit': {'hash': '2cedc1af762'},
        }
    )
@pytest.fixture(scope='function')
def push_runner(push_context):
    return TestRunner(push_context, Lock())
def test_clone_repo_failed(app, push_runner):
    with mock.patch.object(push_runner, 'update_build_status') as status, \
            mock.patch.object(push_runner, 'clone_repository') as clone_repo, \
            mock.patch.object(push_runner, 'validate_settings') as validate_settings:
        status.return_value = None
        clone_repo.side_effect = git.GitCommandError('git clone', 1)
        push_runner.run()
        validate_settings.assert_not_called()
|
Add basic runner tests, will add more later
|
Add basic runner tests, will add more later
|
Python
|
mit
|
bosondata/badwolf,bosondata/badwolf,bosondata/badwolf
|
Add basic runner tests, will add more later
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from multiprocessing import Lock
import git
import mock
import pytest
from badwolf.runner import TestContext, TestRunner
@pytest.fixture(scope='function')
def push_context():
return TestContext(
'deepanalyzer/badwolf',
'git@bitbucket.org:deepanalyzer/badwolf.git',
{},
'commit',
'Update',
{
'branch': {'name': 'master'},
'commit': {'hash': '2cedc1af762'},
}
)
@pytest.fixture(scope='function')
def push_runner(push_context):
return TestRunner(push_context, Lock())
def test_clone_repo_failed(app, push_runner):
with mock.patch.object(push_runner, 'update_build_status') as status, \
mock.patch.object(push_runner, 'clone_repository') as clone_repo, \
mock.patch.object(push_runner, 'validate_settings') as validate_settings:
status.return_value = None
clone_repo.side_effect = git.GitCommandError('git clone', 1)
push_runner.run()
validate_settings.assert_not_called()
|
<commit_before><commit_msg>Add basic runner tests, will add more later<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from multiprocessing import Lock
import git
import mock
import pytest
from badwolf.runner import TestContext, TestRunner
@pytest.fixture(scope='function')
def push_context():
return TestContext(
'deepanalyzer/badwolf',
'git@bitbucket.org:deepanalyzer/badwolf.git',
{},
'commit',
'Update',
{
'branch': {'name': 'master'},
'commit': {'hash': '2cedc1af762'},
}
)
@pytest.fixture(scope='function')
def push_runner(push_context):
return TestRunner(push_context, Lock())
def test_clone_repo_failed(app, push_runner):
with mock.patch.object(push_runner, 'update_build_status') as status, \
mock.patch.object(push_runner, 'clone_repository') as clone_repo, \
mock.patch.object(push_runner, 'validate_settings') as validate_settings:
status.return_value = None
clone_repo.side_effect = git.GitCommandError('git clone', 1)
push_runner.run()
validate_settings.assert_not_called()
|
Add basic runner tests, will add more later# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from multiprocessing import Lock
import git
import mock
import pytest
from badwolf.runner import TestContext, TestRunner
@pytest.fixture(scope='function')
def push_context():
return TestContext(
'deepanalyzer/badwolf',
'git@bitbucket.org:deepanalyzer/badwolf.git',
{},
'commit',
'Update',
{
'branch': {'name': 'master'},
'commit': {'hash': '2cedc1af762'},
}
)
@pytest.fixture(scope='function')
def push_runner(push_context):
return TestRunner(push_context, Lock())
def test_clone_repo_failed(app, push_runner):
with mock.patch.object(push_runner, 'update_build_status') as status, \
mock.patch.object(push_runner, 'clone_repository') as clone_repo, \
mock.patch.object(push_runner, 'validate_settings') as validate_settings:
status.return_value = None
clone_repo.side_effect = git.GitCommandError('git clone', 1)
push_runner.run()
validate_settings.assert_not_called()
|
<commit_before><commit_msg>Add basic runner tests, will add more later<commit_after># -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from multiprocessing import Lock
import git
import mock
import pytest
from badwolf.runner import TestContext, TestRunner
@pytest.fixture(scope='function')
def push_context():
return TestContext(
'deepanalyzer/badwolf',
'git@bitbucket.org:deepanalyzer/badwolf.git',
{},
'commit',
'Update',
{
'branch': {'name': 'master'},
'commit': {'hash': '2cedc1af762'},
}
)
@pytest.fixture(scope='function')
def push_runner(push_context):
return TestRunner(push_context, Lock())
def test_clone_repo_failed(app, push_runner):
with mock.patch.object(push_runner, 'update_build_status') as status, \
mock.patch.object(push_runner, 'clone_repository') as clone_repo, \
mock.patch.object(push_runner, 'validate_settings') as validate_settings:
status.return_value = None
clone_repo.side_effect = git.GitCommandError('git clone', 1)
push_runner.run()
validate_settings.assert_not_called()
|
|
d57bf8b0995496d8cabc81410154ab64a0673e01
|
databroker/sources/dummy_sources/_metadataStore/api/analysis.py
|
databroker/sources/dummy_sources/_metadataStore/api/analysis.py
|
headers = []
beamline_configs = []
event_descriptors = []
events = []
def find2(header_id=None, scan_id=None, owner=None, start_time=None,
          beamline_id=None, end_time=None):
    return {'headers': headers, 'beamline_configs': beamline_configs,
            'event_descriptors': event_descriptors, 'events': events}
|
from datetime import datetime as dt
class DummyEventDescriptor(object):
    def __init__(self):
        self.keys = {'temp': {'source': 'PV:blah'},
                     'picture': {'source': 'CCD:blah',
                                 'external': 'FILESTORE!!!!'}}
class DummyEvent(object):
    def __init__(self):
        self.ev_desc = DummyEventDescriptor()
        self.data = {'temp': {'value': 273, 'timestamp': None},
                     'picture': {'value': 'np.ones((10, 10))',
                                 'timestamp': None}}
        self.time = dt(2014, 01, 01, 1, 2, 3)
def find(header_id=None, scan_id=None, owner=None, start_time=None,
         beamline_id=None, end_time=None):
    return 3 * [DummyEvent()]
|
Update dummy MDS to use find and return a list of events.
|
Update dummy MDS to use find and return a list of events.
|
Python
|
bsd-3-clause
|
ericdill/databroker,NSLS-II/datamuxer,ericdill/datamuxer,NSLS-II/dataportal,NSLS-II/dataportal,ericdill/databroker,danielballan/datamuxer,danielballan/dataportal,danielballan/dataportal,tacaswell/dataportal,ericdill/datamuxer,tacaswell/dataportal,danielballan/datamuxer
|
headers = []
beamline_configs = []
event_descriptors = []
events = []
def find2(header_id=None, scan_id=None, owner=None, start_time=None,
beamline_id=None, end_time=None):
return {'headers': headers, 'beamline_configs': beamline_configs,
'event_descriptors': event_descriptors, 'events': events}
Update dummy MDS to use find and return a list of events.
|
from datetime import datetime as dt
class DummyEventDescriptor(object):
def __init__(self):
self.keys = {'temp': {'source': 'PV:blah'},
'picture': {'source': 'CCD:blah',
'external': 'FILESTORE!!!!'}}
class DummyEvent(object):
def __init__(self):
self.ev_desc = DummyEventDescriptor()
self.data = {'temp': {'value': 273, 'timestamp': None},
'picture': {'value': 'np.ones((10, 10))',
'timestamp': None}}
self.time = dt(2014, 01, 01, 1, 2, 3)
def find(header_id=None, scan_id=None, owner=None, start_time=None,
beamline_id=None, end_time=None):
return 3 * [DummyEvent()]
|
<commit_before>headers = []
beamline_configs = []
event_descriptors = []
events = []
def find2(header_id=None, scan_id=None, owner=None, start_time=None,
beamline_id=None, end_time=None):
return {'headers': headers, 'beamline_configs': beamline_configs,
'event_descriptors': event_descriptors, 'events': events}
<commit_msg>Update dummy MDS to use find and return a list of events.<commit_after>
|
from datetime import datetime as dt
class DummyEventDescriptor(object):
def __init__(self):
self.keys = {'temp': {'source': 'PV:blah'},
'picture': {'source': 'CCD:blah',
'external': 'FILESTORE!!!!'}}
class DummyEvent(object):
def __init__(self):
self.ev_desc = DummyEventDescriptor()
self.data = {'temp': {'value': 273, 'timestamp': None},
'picture': {'value': 'np.ones((10, 10))',
'timestamp': None}}
self.time = dt(2014, 01, 01, 1, 2, 3)
def find(header_id=None, scan_id=None, owner=None, start_time=None,
beamline_id=None, end_time=None):
return 3 * [DummyEvent()]
|
headers = []
beamline_configs = []
event_descriptors = []
events = []
def find2(header_id=None, scan_id=None, owner=None, start_time=None,
beamline_id=None, end_time=None):
return {'headers': headers, 'beamline_configs': beamline_configs,
'event_descriptors': event_descriptors, 'events': events}
Update dummy MDS to use find and return a list of events.from datetime import datetime as dt
class DummyEventDescriptor(object):
def __init__(self):
self.keys = {'temp': {'source': 'PV:blah'},
'picture': {'source': 'CCD:blah',
'external': 'FILESTORE!!!!'}}
class DummyEvent(object):
def __init__(self):
self.ev_desc = DummyEventDescriptor()
self.data = {'temp': {'value': 273, 'timestamp': None},
'picture': {'value': 'np.ones((10, 10))',
'timestamp': None}}
self.time = dt(2014, 01, 01, 1, 2, 3)
def find(header_id=None, scan_id=None, owner=None, start_time=None,
beamline_id=None, end_time=None):
return 3 * [DummyEvent()]
|
<commit_before>headers = []
beamline_configs = []
event_descriptors = []
events = []
def find2(header_id=None, scan_id=None, owner=None, start_time=None,
beamline_id=None, end_time=None):
return {'headers': headers, 'beamline_configs': beamline_configs,
'event_descriptors': event_descriptors, 'events': events}
<commit_msg>Update dummy MDS to use find and return a list of events.<commit_after>from datetime import datetime as dt
class DummyEventDescriptor(object):
def __init__(self):
self.keys = {'temp': {'source': 'PV:blah'},
'picture': {'source': 'CCD:blah',
'external': 'FILESTORE!!!!'}}
class DummyEvent(object):
def __init__(self):
self.ev_desc = DummyEventDescriptor()
self.data = {'temp': {'value': 273, 'timestamp': None},
'picture': {'value': 'np.ones((10, 10))',
'timestamp': None}}
self.time = dt(2014, 01, 01, 1, 2, 3)
def find(header_id=None, scan_id=None, owner=None, start_time=None,
beamline_id=None, end_time=None):
return 3 * [DummyEvent()]
|
45b91fd2cf8c679ba791e6f96c1e51d2a7ef4082
|
python/31_bfs_dfs/graph_application.py
|
python/31_bfs_dfs/graph_application.py
|
# -*- coding:utf-8 -*-
from collections import deque
from graph import Undigraph
def find_vertex_by_degree(graph, s, degree):
    if len(graph) <= 1:
        return []
    if degree == 0:
        return [s]
    d_vertices = []
    queue = deque()
    prev = [-1] * len(graph)
    visited = [False] * len(graph)
    visited[s] = True
    queue.append(s)
    while len(queue) > 0:
        sz = len(queue)
        for i in range(sz):
            v = queue.popleft()
            for adj_v in graph[v]:
                if not visited[adj_v]:
                    prev[adj_v] = v
                    visited[adj_v] = True
                    queue.append(adj_v)
        degree -= 1
        if degree == 0 and len(queue) != 0:
            return queue
if __name__ == '__main__':
    g = Undigraph(8)
    g.add_edge(0, 1)
    g.add_edge(0, 3)
    g.add_edge(1, 2)
    g.add_edge(1, 4)
    g.add_edge(2, 5)
    g.add_edge(3, 4)
    g.add_edge(4, 5)
    g.add_edge(4, 6)
    g.add_edge(5, 7)
    g.add_edge(6, 7)
    print(find_vertex_by_degree(g, 0, 4))
|
Add function , which is to find all the vertices of one vertex given the specific degree by using broad first search policy
|
Add function , which is to find all the vertices of one vertex given the specific degree by using broad first search policy
|
Python
|
apache-2.0
|
wangzheng0822/algo,wangzheng0822/algo,wangzheng0822/algo,wangzheng0822/algo,wangzheng0822/algo,wangzheng0822/algo,wangzheng0822/algo,wangzheng0822/algo,wangzheng0822/algo,wangzheng0822/algo,wangzheng0822/algo,wangzheng0822/algo,wangzheng0822/algo,wangzheng0822/algo
|
Add function , which is to find all the vertices of one vertex given the specific degree by using broad first search policy
|
# -*- coding:utf-8 -*-
from collections import deque
from graph import Undigraph
def find_vertex_by_degree(graph, s, degree):
if len(graph) <= 1:
return []
if degree == 0:
return [s]
d_vertices = []
queue = deque()
prev = [-1] * len(graph)
visited = [False] * len(graph)
visited[s] = True
queue.append(s)
while len(queue) > 0:
sz = len(queue)
for i in range(sz):
v = queue.popleft()
for adj_v in graph[v]:
if not visited[adj_v]:
prev[adj_v] = v
visited[adj_v] = True
queue.append(adj_v)
degree -= 1
if degree == 0 and len(queue) != 0:
return queue
if __name__ == '__main__':
g = Undigraph(8)
g.add_edge(0, 1)
g.add_edge(0, 3)
g.add_edge(1, 2)
g.add_edge(1, 4)
g.add_edge(2, 5)
g.add_edge(3, 4)
g.add_edge(4, 5)
g.add_edge(4, 6)
g.add_edge(5, 7)
g.add_edge(6, 7)
print(find_vertex_by_degree(g, 0, 4))
|
<commit_before><commit_msg>Add function , which is to find all the vertices of one vertex given the specific degree by using broad first search policy<commit_after>
|
# -*- coding:utf-8 -*-
from collections import deque
from graph import Undigraph
def find_vertex_by_degree(graph, s, degree):
if len(graph) <= 1:
return []
if degree == 0:
return [s]
d_vertices = []
queue = deque()
prev = [-1] * len(graph)
visited = [False] * len(graph)
visited[s] = True
queue.append(s)
while len(queue) > 0:
sz = len(queue)
for i in range(sz):
v = queue.popleft()
for adj_v in graph[v]:
if not visited[adj_v]:
prev[adj_v] = v
visited[adj_v] = True
queue.append(adj_v)
degree -= 1
if degree == 0 and len(queue) != 0:
return queue
if __name__ == '__main__':
g = Undigraph(8)
g.add_edge(0, 1)
g.add_edge(0, 3)
g.add_edge(1, 2)
g.add_edge(1, 4)
g.add_edge(2, 5)
g.add_edge(3, 4)
g.add_edge(4, 5)
g.add_edge(4, 6)
g.add_edge(5, 7)
g.add_edge(6, 7)
print(find_vertex_by_degree(g, 0, 4))
|
Add function , which is to find all the vertices of one vertex given the specific degree by using broad first search policy# -*- coding:utf-8 -*-
from collections import deque
from graph import Undigraph
def find_vertex_by_degree(graph, s, degree):
if len(graph) <= 1:
return []
if degree == 0:
return [s]
d_vertices = []
queue = deque()
prev = [-1] * len(graph)
visited = [False] * len(graph)
visited[s] = True
queue.append(s)
while len(queue) > 0:
sz = len(queue)
for i in range(sz):
v = queue.popleft()
for adj_v in graph[v]:
if not visited[adj_v]:
prev[adj_v] = v
visited[adj_v] = True
queue.append(adj_v)
degree -= 1
if degree == 0 and len(queue) != 0:
return queue
if __name__ == '__main__':
g = Undigraph(8)
g.add_edge(0, 1)
g.add_edge(0, 3)
g.add_edge(1, 2)
g.add_edge(1, 4)
g.add_edge(2, 5)
g.add_edge(3, 4)
g.add_edge(4, 5)
g.add_edge(4, 6)
g.add_edge(5, 7)
g.add_edge(6, 7)
print(find_vertex_by_degree(g, 0, 4))
|
<commit_before><commit_msg>Add function , which is to find all the vertices of one vertex given the specific degree by using broad first search policy<commit_after># -*- coding:utf-8 -*-
from collections import deque
from graph import Undigraph
def find_vertex_by_degree(graph, s, degree):
if len(graph) <= 1:
return []
if degree == 0:
return [s]
d_vertices = []
queue = deque()
prev = [-1] * len(graph)
visited = [False] * len(graph)
visited[s] = True
queue.append(s)
while len(queue) > 0:
sz = len(queue)
for i in range(sz):
v = queue.popleft()
for adj_v in graph[v]:
if not visited[adj_v]:
prev[adj_v] = v
visited[adj_v] = True
queue.append(adj_v)
degree -= 1
if degree == 0 and len(queue) != 0:
return queue
if __name__ == '__main__':
g = Undigraph(8)
g.add_edge(0, 1)
g.add_edge(0, 3)
g.add_edge(1, 2)
g.add_edge(1, 4)
g.add_edge(2, 5)
g.add_edge(3, 4)
g.add_edge(4, 5)
g.add_edge(4, 6)
g.add_edge(5, 7)
g.add_edge(6, 7)
print(find_vertex_by_degree(g, 0, 4))
|
|
0ce164529f0b53dd25b4cc6b0c66fb8bbecc6ef5
|
test/create_image.py
|
test/create_image.py
|
import Image
from PIL import Image
import ImageFont
img = Image.new("RGB", (1250, 480), (255, 255, 255))
import ImageDraw
draw = ImageDraw.Draw(img)
font = ImageFont.truetype("/System/Library/Fonts/Monaco.dfont", 20, encoding="armn")
draw.text((20, 20), "<- 10 ->" * 10, font=font, fill="black")
draw.text((20, 40), "<- 10 ->" * 10, font=font, fill="black")
img.save("foo.jpg", "JPEG")
|
Test script for playing with PIL
|
Test script for playing with PIL
|
Python
|
mit
|
hugs/detour
|
Test script for playing with PIL
|
import Image
from PIL import Image
import ImageFont
img = Image.new("RGB", (1250, 480), (255, 255, 255))
import ImageDraw
draw = ImageDraw.Draw(img)
font = ImageFont.truetype("/System/Library/Fonts/Monaco.dfont", 20, encoding="armn")
draw.text((20, 20), "<- 10 ->" * 10, font=font, fill="black")
draw.text((20, 40), "<- 10 ->" * 10, font=font, fill="black")
img.save("foo.jpg", "JPEG")
|
<commit_before><commit_msg>Test script for playing with PIL<commit_after>
|
import Image
from PIL import Image
import ImageFont
img = Image.new("RGB", (1250, 480), (255, 255, 255))
import ImageDraw
draw = ImageDraw.Draw(img)
font = ImageFont.truetype("/System/Library/Fonts/Monaco.dfont", 20, encoding="armn")
draw.text((20, 20), "<- 10 ->" * 10, font=font, fill="black")
draw.text((20, 40), "<- 10 ->" * 10, font=font, fill="black")
img.save("foo.jpg", "JPEG")
|
Test script for playing with PILimport Image
from PIL import Image
import ImageFont
img = Image.new("RGB", (1250, 480), (255, 255, 255))
import ImageDraw
draw = ImageDraw.Draw(img)
font = ImageFont.truetype("/System/Library/Fonts/Monaco.dfont", 20, encoding="armn")
draw.text((20, 20), "<- 10 ->" * 10, font=font, fill="black")
draw.text((20, 40), "<- 10 ->" * 10, font=font, fill="black")
img.save("foo.jpg", "JPEG")
|
<commit_before><commit_msg>Test script for playing with PIL<commit_after>import Image
from PIL import Image
import ImageFont
img = Image.new("RGB", (1250, 480), (255, 255, 255))
import ImageDraw
draw = ImageDraw.Draw(img)
font = ImageFont.truetype("/System/Library/Fonts/Monaco.dfont", 20, encoding="armn")
draw.text((20, 20), "<- 10 ->" * 10, font=font, fill="black")
draw.text((20, 40), "<- 10 ->" * 10, font=font, fill="black")
img.save("foo.jpg", "JPEG")
|
|
5c201b159d844e317c60665180760e8ecad957a2
|
server/tests/test_api.py
|
server/tests/test_api.py
|
from server.tests.base import BaseTestCase
from server.models import db, Lecturer, Course, Lecture, Comment
import json
class GetCommentsApiTest(BaseTestCase):
    def setUp(self):
        super(GetCommentsApiTest, self).setUp()
        simon = Lecturer('Simon', 'McCallum')
        db.session.add(simon)
        imt3601 = Course('IMT3601 - Game Programming', simon)
        db.session.add(imt3601)
        imt3601_l1 = Lecture('Lecture 1', imt3601)
        db.session.add(imt3601_l1)
        imt3601_l1_c1 = Comment('This is boring', imt3601_l1)
        imt3601_l1_c2 = Comment('This is fun!', imt3601_l1)
        db.session.add(imt3601_l1_c1)
        db.session.add(imt3601_l1_c2)
        db.session.commit()
    def test_success(self):
        rv = self.app.get('/api/0/lectures/1/comments')
        assert rv.status_code == 200
    def test_lecture_not_found(self):
        rv = self.app.get('/api/0/lectures/2/comments')
        assert rv.status_code == 404
    def test_list(self):
        rv = self.app.get('/api/0/lectures/1/comments')
        assert rv.headers['Content-Type'] == 'application/json'
        response = json.loads(rv.data.decode('utf-8'))
        assert len(response['comments']) == 2
    def test_content(self):
        rv = self.app.get('/api/0/lectures/1/comments')
        assert rv.headers['Content-Type'] == 'application/json'
        response = json.loads(rv.data.decode('utf-8'))
        assert response['comments'][0]['content'] == 'This is boring'
|
Add tests for 'get comments' API
|
Add tests for 'get comments' API
Note that because of a bug in the testing setup the tests will fail when there are more than one.
Each individual test has been successfully tested.
|
Python
|
mit
|
MACSIFS/IFS,MACSIFS/IFS,MACSIFS/IFS,MACSIFS/IFS
|
Add tests for 'get comments' API
Note that because of a bug in the testing setup the tests will fail when there are more than one.
Each individual test has been successfully tested.
|
from server.tests.base import BaseTestCase
from server.models import db, Lecturer, Course, Lecture, Comment
import json
class GetCommentsApiTest(BaseTestCase):
def setUp(self):
super(GetCommentsApiTest, self).setUp()
simon = Lecturer('Simon', 'McCallum')
db.session.add(simon)
imt3601 = Course('IMT3601 - Game Programming', simon)
db.session.add(imt3601)
imt3601_l1 = Lecture('Lecture 1', imt3601)
db.session.add(imt3601_l1)
imt3601_l1_c1 = Comment('This is boring', imt3601_l1)
imt3601_l1_c2 = Comment('This is fun!', imt3601_l1)
db.session.add(imt3601_l1_c1)
db.session.add(imt3601_l1_c2)
db.session.commit()
def test_success(self):
rv = self.app.get('/api/0/lectures/1/comments')
assert rv.status_code == 200
def test_lecture_not_found(self):
rv = self.app.get('/api/0/lectures/2/comments')
assert rv.status_code == 404
def test_list(self):
rv = self.app.get('/api/0/lectures/1/comments')
assert rv.headers['Content-Type'] == 'application/json'
response = json.loads(rv.data.decode('utf-8'))
assert len(response['comments']) == 2
def test_content(self):
rv = self.app.get('/api/0/lectures/1/comments')
assert rv.headers['Content-Type'] == 'application/json'
response = json.loads(rv.data.decode('utf-8'))
assert response['comments'][0]['content'] == 'This is boring'
|
<commit_before><commit_msg>Add tests for 'get comments' API
Note that because of a bug in the testing setup the tests will fail when there are more than one.
Each individual test has been successfully tested.<commit_after>
|
from server.tests.base import BaseTestCase
from server.models import db, Lecturer, Course, Lecture, Comment
import json
class GetCommentsApiTest(BaseTestCase):
def setUp(self):
super(GetCommentsApiTest, self).setUp()
simon = Lecturer('Simon', 'McCallum')
db.session.add(simon)
imt3601 = Course('IMT3601 - Game Programming', simon)
db.session.add(imt3601)
imt3601_l1 = Lecture('Lecture 1', imt3601)
db.session.add(imt3601_l1)
imt3601_l1_c1 = Comment('This is boring', imt3601_l1)
imt3601_l1_c2 = Comment('This is fun!', imt3601_l1)
db.session.add(imt3601_l1_c1)
db.session.add(imt3601_l1_c2)
db.session.commit()
def test_success(self):
rv = self.app.get('/api/0/lectures/1/comments')
assert rv.status_code == 200
def test_lecture_not_found(self):
rv = self.app.get('/api/0/lectures/2/comments')
assert rv.status_code == 404
def test_list(self):
rv = self.app.get('/api/0/lectures/1/comments')
assert rv.headers['Content-Type'] == 'application/json'
response = json.loads(rv.data.decode('utf-8'))
assert len(response['comments']) == 2
def test_content(self):
rv = self.app.get('/api/0/lectures/1/comments')
assert rv.headers['Content-Type'] == 'application/json'
response = json.loads(rv.data.decode('utf-8'))
assert response['comments'][0]['content'] == 'This is boring'
|
Add tests for 'get comments' API
Note that because of a bug in the testing setup the tests will fail when there are more than one.
Each individual test has been successfully tested.from server.tests.base import BaseTestCase
from server.models import db, Lecturer, Course, Lecture, Comment
import json
class GetCommentsApiTest(BaseTestCase):
def setUp(self):
super(GetCommentsApiTest, self).setUp()
simon = Lecturer('Simon', 'McCallum')
db.session.add(simon)
imt3601 = Course('IMT3601 - Game Programming', simon)
db.session.add(imt3601)
imt3601_l1 = Lecture('Lecture 1', imt3601)
db.session.add(imt3601_l1)
imt3601_l1_c1 = Comment('This is boring', imt3601_l1)
imt3601_l1_c2 = Comment('This is fun!', imt3601_l1)
db.session.add(imt3601_l1_c1)
db.session.add(imt3601_l1_c2)
db.session.commit()
def test_success(self):
rv = self.app.get('/api/0/lectures/1/comments')
assert rv.status_code == 200
def test_lecture_not_found(self):
rv = self.app.get('/api/0/lectures/2/comments')
assert rv.status_code == 404
def test_list(self):
rv = self.app.get('/api/0/lectures/1/comments')
assert rv.headers['Content-Type'] == 'application/json'
response = json.loads(rv.data.decode('utf-8'))
assert len(response['comments']) == 2
def test_content(self):
rv = self.app.get('/api/0/lectures/1/comments')
assert rv.headers['Content-Type'] == 'application/json'
response = json.loads(rv.data.decode('utf-8'))
assert response['comments'][0]['content'] == 'This is boring'
|
<commit_before><commit_msg>Add tests for 'get comments' API
Note that because of a bug in the testing setup the tests will fail when there are more than one.
Each individual test has been successfully tested.<commit_after>from server.tests.base import BaseTestCase
from server.models import db, Lecturer, Course, Lecture, Comment
import json
class GetCommentsApiTest(BaseTestCase):
def setUp(self):
super(GetCommentsApiTest, self).setUp()
simon = Lecturer('Simon', 'McCallum')
db.session.add(simon)
imt3601 = Course('IMT3601 - Game Programming', simon)
db.session.add(imt3601)
imt3601_l1 = Lecture('Lecture 1', imt3601)
db.session.add(imt3601_l1)
imt3601_l1_c1 = Comment('This is boring', imt3601_l1)
imt3601_l1_c2 = Comment('This is fun!', imt3601_l1)
db.session.add(imt3601_l1_c1)
db.session.add(imt3601_l1_c2)
db.session.commit()
def test_success(self):
rv = self.app.get('/api/0/lectures/1/comments')
assert rv.status_code == 200
def test_lecture_not_found(self):
rv = self.app.get('/api/0/lectures/2/comments')
assert rv.status_code == 404
def test_list(self):
rv = self.app.get('/api/0/lectures/1/comments')
assert rv.headers['Content-Type'] == 'application/json'
response = json.loads(rv.data.decode('utf-8'))
assert len(response['comments']) == 2
def test_content(self):
rv = self.app.get('/api/0/lectures/1/comments')
assert rv.headers['Content-Type'] == 'application/json'
response = json.loads(rv.data.decode('utf-8'))
assert response['comments'][0]['content'] == 'This is boring'
|
|
c80bdbaffe40f0f6877b36498175f7e8b80085d3
|
migrations/versions/0143_remove_reply_to.py
|
migrations/versions/0143_remove_reply_to.py
|
"""
Revision ID: 0143_remove_reply_to
Revises: 0142_validate_constraint
Create Date: 2017-11-21 10:42:25.045444
"""
from alembic import op
import sqlalchemy as sa
revision = '0143_remove_reply_to'
down_revision = '0142_validate_constraint'
def upgrade():
op.drop_column('services', 'letter_contact_block')
op.drop_column('services', 'reply_to_email_address')
op.drop_column('services_history', 'letter_contact_block')
op.drop_column('services_history', 'reply_to_email_address')
def downgrade():
op.add_column('services_history', sa.Column('reply_to_email_address', sa.TEXT(),
autoincrement=False, nullable=True))
op.add_column('services_history', sa.Column('letter_contact_block', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('services', sa.Column('reply_to_email_address', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('services', sa.Column('letter_contact_block', sa.TEXT(), autoincrement=False, nullable=True))
|
Remove email_reply_to and letter_contact_block from Services
|
Remove email_reply_to and letter_contact_block from Services
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Remove email_reply_to and letter_contact_block from Services
|
"""
Revision ID: 0143_remove_reply_to
Revises: 0142_validate_constraint
Create Date: 2017-11-21 10:42:25.045444
"""
from alembic import op
import sqlalchemy as sa
revision = '0143_remove_reply_to'
down_revision = '0142_validate_constraint'
def upgrade():
op.drop_column('services', 'letter_contact_block')
op.drop_column('services', 'reply_to_email_address')
op.drop_column('services_history', 'letter_contact_block')
op.drop_column('services_history', 'reply_to_email_address')
def downgrade():
op.add_column('services_history', sa.Column('reply_to_email_address', sa.TEXT(),
autoincrement=False, nullable=True))
op.add_column('services_history', sa.Column('letter_contact_block', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('services', sa.Column('reply_to_email_address', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('services', sa.Column('letter_contact_block', sa.TEXT(), autoincrement=False, nullable=True))
|
<commit_before><commit_msg>Remove email_reply_to and letter_contact_block from Services<commit_after>
|
"""
Revision ID: 0143_remove_reply_to
Revises: 0142_validate_constraint
Create Date: 2017-11-21 10:42:25.045444
"""
from alembic import op
import sqlalchemy as sa
revision = '0143_remove_reply_to'
down_revision = '0142_validate_constraint'
def upgrade():
op.drop_column('services', 'letter_contact_block')
op.drop_column('services', 'reply_to_email_address')
op.drop_column('services_history', 'letter_contact_block')
op.drop_column('services_history', 'reply_to_email_address')
def downgrade():
op.add_column('services_history', sa.Column('reply_to_email_address', sa.TEXT(),
autoincrement=False, nullable=True))
op.add_column('services_history', sa.Column('letter_contact_block', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('services', sa.Column('reply_to_email_address', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('services', sa.Column('letter_contact_block', sa.TEXT(), autoincrement=False, nullable=True))
|
Remove email_reply_to and letter_contact_block from Services"""
Revision ID: 0143_remove_reply_to
Revises: 0142_validate_constraint
Create Date: 2017-11-21 10:42:25.045444
"""
from alembic import op
import sqlalchemy as sa
revision = '0143_remove_reply_to'
down_revision = '0142_validate_constraint'
def upgrade():
op.drop_column('services', 'letter_contact_block')
op.drop_column('services', 'reply_to_email_address')
op.drop_column('services_history', 'letter_contact_block')
op.drop_column('services_history', 'reply_to_email_address')
def downgrade():
op.add_column('services_history', sa.Column('reply_to_email_address', sa.TEXT(),
autoincrement=False, nullable=True))
op.add_column('services_history', sa.Column('letter_contact_block', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('services', sa.Column('reply_to_email_address', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('services', sa.Column('letter_contact_block', sa.TEXT(), autoincrement=False, nullable=True))
|
<commit_before><commit_msg>Remove email_reply_to and letter_contact_block from Services<commit_after>"""
Revision ID: 0143_remove_reply_to
Revises: 0142_validate_constraint
Create Date: 2017-11-21 10:42:25.045444
"""
from alembic import op
import sqlalchemy as sa
revision = '0143_remove_reply_to'
down_revision = '0142_validate_constraint'
def upgrade():
op.drop_column('services', 'letter_contact_block')
op.drop_column('services', 'reply_to_email_address')
op.drop_column('services_history', 'letter_contact_block')
op.drop_column('services_history', 'reply_to_email_address')
def downgrade():
op.add_column('services_history', sa.Column('reply_to_email_address', sa.TEXT(),
autoincrement=False, nullable=True))
op.add_column('services_history', sa.Column('letter_contact_block', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('services', sa.Column('reply_to_email_address', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('services', sa.Column('letter_contact_block', sa.TEXT(), autoincrement=False, nullable=True))
|
|
f5bcac9a49b8bb259955d6ee1f585b3db251f59b
|
second/blog/migrations/0002_comment.py
|
second/blog/migrations/0002_comment.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-18 03:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    dependencies = [
        ('blog', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('approved_comment', models.BooleanField(default=False)),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
            ],
        ),
    ]
|
Add migration to add model for Comments
|
Add migration to add model for Comments
|
Python
|
mit
|
ugaliguy/Django-Tutorial-Projects,ugaliguy/Django-Tutorial-Projects
|
Add migration to add model for Comments
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-18 03:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('approved_comment', models.BooleanField(default=False)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
],
),
]
|
<commit_before><commit_msg>Add migration to add model for Comments<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-18 03:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('approved_comment', models.BooleanField(default=False)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
],
),
]
|
Add migration to add model for Comments# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-18 03:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('approved_comment', models.BooleanField(default=False)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
],
),
]
|
<commit_before><commit_msg>Add migration to add model for Comments<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-18 03:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('approved_comment', models.BooleanField(default=False)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
],
),
]
|
|
6e513cfb73857b8a15efadcaa66492599f749853
|
oidc_provider/tests/test_logout_endpoint.py
|
oidc_provider/tests/test_logout_endpoint.py
|
from django.core.urlresolvers import reverse
from django.test import TestCase
from oidc_provider.views import *
from oidc_provider.tests.app.utils import *
class UserInfoTestCase(TestCase):
    def setUp(self):
        self.user = create_fake_user()
        self.url = reverse('oidc_provider:logout')
    def test_shows_logged_out_page(self):
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'registration/logged_out.html')
    def test_redirects(self):
        response = self.client.get(self.url, data={'post_logout_redirect_uri': 'http://example.com/logged_out.html'})
        self.assertRedirects(response, 'http://example.com/logged_out.html',
                             fetch_redirect_response=False)
    def test_user_is_logged_out(self):
        self.assertTrue(self.client.login(username=self.user.username, password='1234'))
        self.assertGreater(len(self.client.session.keys()), 0)
        self.client.get(self.url)
        self.assertEqual(len(self.client.session.keys()), 0)
|
Add tests for logout view
|
Add tests for logout view
|
Python
|
mit
|
juanifioren/django-oidc-provider,nmohoric/django-oidc-provider,ByteInternet/django-oidc-provider,torreco/django-oidc-provider,bunnyinc/django-oidc-provider,bunnyinc/django-oidc-provider,wayward710/django-oidc-provider,ByteInternet/django-oidc-provider,wayward710/django-oidc-provider,torreco/django-oidc-provider,nmohoric/django-oidc-provider,wojtek-fliposports/django-oidc-provider,juanifioren/django-oidc-provider,wojtek-fliposports/django-oidc-provider
|
Add tests for logout view
|
from django.core.urlresolvers import reverse
from django.test import TestCase
from oidc_provider.views import *
from oidc_provider.tests.app.utils import *
class UserInfoTestCase(TestCase):
def setUp(self):
self.user = create_fake_user()
self.url = reverse('oidc_provider:logout')
def test_shows_logged_out_page(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'registration/logged_out.html')
def test_redirects(self):
response = self.client.get(self.url, data={'post_logout_redirect_uri': 'http://example.com/logged_out.html'})
self.assertRedirects(response, 'http://example.com/logged_out.html',
fetch_redirect_response=False)
def test_user_is_logged_out(self):
self.assertTrue(self.client.login(username=self.user.username, password='1234'))
self.assertGreater(len(self.client.session.keys()), 0)
self.client.get(self.url)
self.assertEqual(len(self.client.session.keys()), 0)
|
<commit_before><commit_msg>Add tests for logout view<commit_after>
|
from django.core.urlresolvers import reverse
from django.test import TestCase
from oidc_provider.views import *
from oidc_provider.tests.app.utils import *
class UserInfoTestCase(TestCase):
def setUp(self):
self.user = create_fake_user()
self.url = reverse('oidc_provider:logout')
def test_shows_logged_out_page(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'registration/logged_out.html')
def test_redirects(self):
response = self.client.get(self.url, data={'post_logout_redirect_uri': 'http://example.com/logged_out.html'})
self.assertRedirects(response, 'http://example.com/logged_out.html',
fetch_redirect_response=False)
def test_user_is_logged_out(self):
self.assertTrue(self.client.login(username=self.user.username, password='1234'))
self.assertGreater(len(self.client.session.keys()), 0)
self.client.get(self.url)
self.assertEqual(len(self.client.session.keys()), 0)
|
Add tests for logout viewfrom django.core.urlresolvers import reverse
from django.test import TestCase
from oidc_provider.views import *
from oidc_provider.tests.app.utils import *
class UserInfoTestCase(TestCase):
def setUp(self):
self.user = create_fake_user()
self.url = reverse('oidc_provider:logout')
def test_shows_logged_out_page(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'registration/logged_out.html')
def test_redirects(self):
response = self.client.get(self.url, data={'post_logout_redirect_uri': 'http://example.com/logged_out.html'})
self.assertRedirects(response, 'http://example.com/logged_out.html',
fetch_redirect_response=False)
def test_user_is_logged_out(self):
self.assertTrue(self.client.login(username=self.user.username, password='1234'))
self.assertGreater(len(self.client.session.keys()), 0)
self.client.get(self.url)
self.assertEqual(len(self.client.session.keys()), 0)
|
<commit_before><commit_msg>Add tests for logout view<commit_after>from django.core.urlresolvers import reverse
from django.test import TestCase
from oidc_provider.views import *
from oidc_provider.tests.app.utils import *
class UserInfoTestCase(TestCase):
def setUp(self):
self.user = create_fake_user()
self.url = reverse('oidc_provider:logout')
def test_shows_logged_out_page(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'registration/logged_out.html')
def test_redirects(self):
response = self.client.get(self.url, data={'post_logout_redirect_uri': 'http://example.com/logged_out.html'})
self.assertRedirects(response, 'http://example.com/logged_out.html',
fetch_redirect_response=False)
def test_user_is_logged_out(self):
self.assertTrue(self.client.login(username=self.user.username, password='1234'))
self.assertGreater(len(self.client.session.keys()), 0)
self.client.get(self.url)
self.assertEqual(len(self.client.session.keys()), 0)
|
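The three tests above pin down the expected behaviour of the logout endpoint: render registration/logged_out.html by default, redirect when post_logout_redirect_uri is given, and flush the session. A minimal sketch of a view that would satisfy them, using only stock Django APIs; this is an illustration of the behaviour under test, not the actual oidc_provider code:

# Hypothetical sketch, not the real oidc_provider view.
from django.contrib.auth import logout as auth_logout
from django.http import HttpResponseRedirect
from django.shortcuts import render

def logout_view(request):
    auth_logout(request)  # empties the session, which test_user_is_logged_out checks
    redirect_uri = request.GET.get('post_logout_redirect_uri')
    if redirect_uri:
        return HttpResponseRedirect(redirect_uri)  # behaviour checked by test_redirects
    return render(request, 'registration/logged_out.html')  # test_shows_logged_out_page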
|
e53d101827491bcee3fffa99314cbe9561884cb2
|
librisxl-tools/scripts/crunch-lddb-lines.py
|
librisxl-tools/scripts/crunch-lddb-lines.py
|
from __future__ import print_function
import json
import sys
import re
def parse_select(sel_str):
steps = []
match_rule = None
for word in sel_str.strip().split(' '):
if match_rule:
if match_rule == '=':
steps.append(lambda data, word=word: data == word)
elif match_rule == '=~':
matcher = re.compile(word)
steps.append(lambda data: isinstance(data, unicode) and matcher.match(data))
match_rule = None
continue
match_rule = None
if word == '{':
parent_steps, test_steps = steps, []
steps = test_steps
elif word == '}':
parent_steps.append(lambda data, steps=test_steps: match_selector(steps, data))
steps = parent_steps
parent_steps, test_steps = None, None
elif word in {'=', '=~'}:
match_rule = word
elif word.isdigit():
steps.append(int(word))
else:
steps.append(word)
return steps
def match_selector(selector, data):
current = data
for i, step in enumerate(selector):
if callable(step):
if step(current):
continue
else:
return False
if isinstance(step, int):
current = current[step]
else:
if isinstance(current, list):
selector_trail = selector[i:]
for item in current:
if match_selector(selector_trail, item):
return item
return False
else:
current = current.get(step)
if current is None:
return False
return current
if __name__ == '__main__':
args = sys.argv[1:]
selector = parse_select(args.pop(0)) if args else None
match_count = 0
for i, l in enumerate(sys.stdin):
if not l.rstrip():
continue
l = l.replace(b'\\\\"', b'\\"')
if i % 100000 == 0:
print("At line", i, file=sys.stderr)
try:
data = json.loads(l)
try:
data_id = data.get('@id') or data['@graph'][0]['@id']
except KeyError:
data_id = None
if selector:
result = match_selector(selector, data)
if result:
match_count += 1
print("Line", i, "id", data_id, "matched on", json.dumps(result))
sys.stdout.flush()
except ValueError as e:
print("ERROR at", i, "in data:", file=sys.stderr)
print(l, file=sys.stderr)
print(e, file=sys.stderr)
for char_index in re.findall(r'.+\(char (\d+)\)$', e.message):
char_index = int(char_index)
print(l[char_index - 20 if char_index > 20 else char_index : char_index + 20], file=sys.stderr)
print(('-' * 20) + '^', file=sys.stderr)
break
if selector:
print("Total matches:", match_count)
|
Add simple crunch and query LDDB script
|
Add simple crunch and query LDDB script
|
Python
|
apache-2.0
|
libris/librisxl,libris/librisxl,libris/librisxl
|
Add simple crunch and query LDDB script
|
from __future__ import print_function
import json
import sys
import re
def parse_select(sel_str):
steps = []
match_rule = None
for word in sel_str.strip().split(' '):
if match_rule:
if match_rule == '=':
steps.append(lambda data, word=word: data == word)
elif match_rule == '=~':
matcher = re.compile(word)
steps.append(lambda data: isinstance(data, unicode) and matcher.match(data))
match_rule = None
continue
match_rule = None
if word == '{':
parent_steps, test_steps = steps, []
steps = test_steps
elif word == '}':
parent_steps.append(lambda data, steps=test_steps: match_selector(steps, data))
steps = parent_steps
parent_steps, test_steps = None, None
elif word in {'=', '=~'}:
match_rule = word
elif word.isdigit():
steps.append(int(word))
else:
steps.append(word)
return steps
def match_selector(selector, data):
current = data
for i, step in enumerate(selector):
if callable(step):
if step(current):
continue
else:
return False
if isinstance(step, int):
current = current[step]
else:
if isinstance(current, list):
selector_trail = selector[i:]
for item in current:
if match_selector(selector_trail, item):
return item
return False
else:
current = current.get(step)
if current is None:
return False
return current
if __name__ == '__main__':
args = sys.argv[1:]
selector = parse_select(args.pop(0)) if args else None
match_count = 0
for i, l in enumerate(sys.stdin):
if not l.rstrip():
continue
l = l.replace(b'\\\\"', b'\\"')
if i % 100000 == 0:
print("At line", i, file=sys.stderr)
try:
data = json.loads(l)
try:
data_id = data.get('@id') or data['@graph'][0]['@id']
except KeyError:
data_id = None
if selector:
result = match_selector(selector, data)
if result:
match_count += 1
print("Line", i, "id", data_id, "matched on", json.dumps(result))
sys.stdout.flush()
except ValueError as e:
print("ERROR at", i, "in data:", file=sys.stderr)
print(l, file=sys.stderr)
print(e, file=sys.stderr)
for char_index in re.findall(r'.+\(char (\d+)\)$', e.message):
char_index = int(char_index)
print(l[char_index - 20 if char_index > 20 else char_index : char_index + 20], file=sys.stderr)
print(('-' * 20) + '^', file=sys.stderr)
break
if selector:
print("Total matches:", match_count)
|
<commit_before><commit_msg>Add simple crunch and query LDDB script<commit_after>
|
from __future__ import print_function
import json
import sys
import re
def parse_select(sel_str):
steps = []
match_rule = None
for word in sel_str.strip().split(' '):
if match_rule:
if match_rule == '=':
steps.append(lambda data, word=word: data == word)
elif match_rule == '=~':
matcher = re.compile(word)
steps.append(lambda data: isinstance(data, unicode) and matcher.match(data))
match_rule = None
continue
match_rule = None
if word == '{':
parent_steps, test_steps = steps, []
steps = test_steps
elif word == '}':
parent_steps.append(lambda data, steps=test_steps: match_selector(steps, data))
steps = parent_steps
parent_steps, test_steps = None, None
elif word in {'=', '=~'}:
match_rule = word
elif word.isdigit():
steps.append(int(word))
else:
steps.append(word)
return steps
def match_selector(selector, data):
current = data
for i, step in enumerate(selector):
if callable(step):
if step(current):
continue
else:
return False
if isinstance(step, int):
current = current[step]
else:
if isinstance(current, list):
selector_trail = selector[i:]
for item in current:
if match_selector(selector_trail, item):
return item
return False
else:
current = current.get(step)
if current is None:
return False
return current
if __name__ == '__main__':
args = sys.argv[1:]
selector = parse_select(args.pop(0)) if args else None
match_count = 0
for i, l in enumerate(sys.stdin):
if not l.rstrip():
continue
l = l.replace(b'\\\\"', b'\\"')
if i % 100000 == 0:
print("At line", i, file=sys.stderr)
try:
data = json.loads(l)
try:
data_id = data.get('@id') or data['@graph'][0]['@id']
except KeyError:
data_id = None
if selector:
result = match_selector(selector, data)
if result:
match_count += 1
print("Line", i, "id", data_id, "matched on", json.dumps(result))
sys.stdout.flush()
except ValueError as e:
print("ERROR at", i, "in data:", file=sys.stderr)
print(l, file=sys.stderr)
print(e, file=sys.stderr)
for char_index in re.findall(r'.+\(char (\d+)\)$', e.message):
char_index = int(char_index)
print(l[char_index - 20 if char_index > 20 else char_index : char_index + 20], file=sys.stderr)
print(('-' * 20) + '^', file=sys.stderr)
break
if selector:
print("Total matches:", match_count)
|
Add simple crunch and query LDDB scriptfrom __future__ import print_function
import json
import sys
import re
def parse_select(sel_str):
steps = []
match_rule = None
for word in sel_str.strip().split(' '):
if match_rule:
if match_rule == '=':
steps.append(lambda data, word=word: data == word)
elif match_rule == '=~':
matcher = re.compile(word)
steps.append(lambda data: isinstance(data, unicode) and matcher.match(data))
match_rule = None
continue
match_rule = None
if word == '{':
parent_steps, test_steps = steps, []
steps = test_steps
elif word == '}':
parent_steps.append(lambda data, steps=test_steps: match_selector(steps, data))
steps = parent_steps
parent_steps, test_steps = None, None
elif word in {'=', '=~'}:
match_rule = word
elif word.isdigit():
steps.append(int(word))
else:
steps.append(word)
return steps
def match_selector(selector, data):
current = data
for i, step in enumerate(selector):
if callable(step):
if step(current):
continue
else:
return False
if isinstance(step, int):
current = current[step]
else:
if isinstance(current, list):
selector_trail = selector[i:]
for item in current:
if match_selector(selector_trail, item):
return item
return False
else:
current = current.get(step)
if current is None:
return False
return current
if __name__ == '__main__':
args = sys.argv[1:]
selector = parse_select(args.pop(0)) if args else None
match_count = 0
for i, l in enumerate(sys.stdin):
if not l.rstrip():
continue
l = l.replace(b'\\\\"', b'\\"')
if i % 100000 == 0:
print("At line", i, file=sys.stderr)
try:
data = json.loads(l)
try:
data_id = data.get('@id') or data['@graph'][0]['@id']
except KeyError:
data_id = None
if selector:
result = match_selector(selector, data)
if result:
match_count += 1
print("Line", i, "id", data_id, "matched on", json.dumps(result))
sys.stdout.flush()
except ValueError as e:
print("ERROR at", i, "in data:", file=sys.stderr)
print(l, file=sys.stderr)
print(e, file=sys.stderr)
for char_index in re.findall(r'.+\(char (\d+)\)$', e.message):
char_index = int(char_index)
print(l[char_index - 20 if char_index > 20 else char_index : char_index + 20], file=sys.stderr)
print(('-' * 20) + '^', file=sys.stderr)
break
if selector:
print("Total matches:", match_count)
|
<commit_before><commit_msg>Add simple crunch and query LDDB script<commit_after>from __future__ import print_function
import json
import sys
import re
def parse_select(sel_str):
steps = []
match_rule = None
for word in sel_str.strip().split(' '):
if match_rule:
if match_rule == '=':
steps.append(lambda data, word=word: data == word)
elif match_rule == '=~':
matcher = re.compile(word)
steps.append(lambda data: isinstance(data, unicode) and matcher.match(data))
match_rule = None
continue
match_rule = None
if word == '{':
parent_steps, test_steps = steps, []
steps = test_steps
elif word == '}':
parent_steps.append(lambda data, steps=test_steps: match_selector(steps, data))
steps = parent_steps
parent_steps, test_steps = None, None
elif word in {'=', '=~'}:
match_rule = word
elif word.isdigit():
steps.append(int(word))
else:
steps.append(word)
return steps
def match_selector(selector, data):
current = data
for i, step in enumerate(selector):
if callable(step):
if step(current):
continue
else:
return False
if isinstance(step, int):
current = current[step]
else:
if isinstance(current, list):
selector_trail = selector[i:]
for item in current:
if match_selector(selector_trail, item):
return item
return False
else:
current = current.get(step)
if current is None:
return False
return current
if __name__ == '__main__':
args = sys.argv[1:]
selector = parse_select(args.pop(0)) if args else None
match_count = 0
for i, l in enumerate(sys.stdin):
if not l.rstrip():
continue
l = l.replace(b'\\\\"', b'\\"')
if i % 100000 == 0:
print("At line", i, file=sys.stderr)
try:
data = json.loads(l)
try:
data_id = data.get('@id') or data['@graph'][0]['@id']
except KeyError:
data_id = None
if selector:
result = match_selector(selector, data)
if result:
match_count += 1
print("Line", i, "id", data_id, "matched on", json.dumps(result))
sys.stdout.flush()
except ValueError as e:
print("ERROR at", i, "in data:", file=sys.stderr)
print(l, file=sys.stderr)
print(e, file=sys.stderr)
for char_index in re.findall(r'.+\(char (\d+)\)$', e.message):
char_index = int(char_index)
print(l[char_index - 20 if char_index > 20 else char_index : char_index + 20], file=sys.stderr)
print(('-' * 20) + '^', file=sys.stderr)
break
if selector:
print("Total matches:", match_count)
|
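The selector mini-language parsed by parse_select above reads left to right: plain words descend into dict keys, digits index into lists, "{ ... }" wraps a nested test applied to the current node, and "=" / "=~" compare the current value against a literal or a regular expression. A small self-contained check (Python 2, matching the script; the record, title and file name below are made up, not real LDDB data):

selector = parse_select('@graph 0 title =~ ^Moby')
record = {u'@graph': [{u'title': u'Moby Dick', u'year': 1851}]}
print(match_selector(selector, record))    # u'Moby Dick' -- truthy, so this line would count as a match
print(match_selector(selector, {u'@graph': [{u'title': u'Dracula'}]}))    # False
# From the shell the same selector is passed as the single argument, e.g.:
#   zcat lddb-lines.jsonl.gz | python crunch-lddb-lines.py '@graph 0 title =~ ^Moby'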
|
b85a316edc3362c898f935f7bb4dea1a324b7ac6
|
CodeFights/functionsComposition.py
|
CodeFights/functionsComposition.py
|
#!/usr/local/bin/python
# Code Fights Functions Composition Problem
from functools import reduce
import math
def compose(functions):
return reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)
def functionsComposition(functions, x):
return compose(map(eval, functions))(x)
def main():
tests = [
[["abs", "math.sin", "lambda x: 3 * x / 2"], math.pi, 1],
[["math.sin", "math.cos", "lambda x: x * 2", "lambda x: x ** 2"],
1, math.sin(math.cos((1**2) * 2))],
[["lambda z: z", "lambda z: 1.0 * z / 13"], -1000, (-1000 / 13) * 1.0],
[["float"], 1000, 1000],
[["abs"], -20, 20]
]
for t in tests:
res = functionsComposition(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: functionsComposition({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: functionsComposition({}, {}) returned {},"
"answer: {}".format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights functions composition problem
|
Solve Code Fights functions composition problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights functions composition problem
|
#!/usr/local/bin/python
# Code Fights Functions Composition Problem
from functools import reduce
import math
def compose(functions):
return reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)
def functionsComposition(functions, x):
return compose(map(eval, functions))(x)
def main():
tests = [
[["abs", "math.sin", "lambda x: 3 * x / 2"], math.pi, 1],
[["math.sin", "math.cos", "lambda x: x * 2", "lambda x: x ** 2"],
1, math.sin(math.cos((1**2) * 2))],
[["lambda z: z", "lambda z: 1.0 * z / 13"], -1000, (-1000 / 13) * 1.0],
[["float"], 1000, 1000],
[["abs"], -20, 20]
]
for t in tests:
res = functionsComposition(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: functionsComposition({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: functionsComposition({}, {}) returned {},"
"answer: {}".format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights functions composition problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Functions Composition Problem
from functools import reduce
import math
def compose(functions):
return reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)
def functionsComposition(functions, x):
return compose(map(eval, functions))(x)
def main():
tests = [
[["abs", "math.sin", "lambda x: 3 * x / 2"], math.pi, 1],
[["math.sin", "math.cos", "lambda x: x * 2", "lambda x: x ** 2"],
1, math.sin(math.cos((1**2) * 2))],
[["lambda z: z", "lambda z: 1.0 * z / 13"], -1000, (-1000 / 13) * 1.0],
[["float"], 1000, 1000],
[["abs"], -20, 20]
]
for t in tests:
res = functionsComposition(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: functionsComposition({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: functionsComposition({}, {}) returned {},"
"answer: {}".format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights functions composition problem#!/usr/local/bin/python
# Code Fights Functions Composition Problem
from functools import reduce
import math
def compose(functions):
return reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)
def functionsComposition(functions, x):
return compose(map(eval, functions))(x)
def main():
tests = [
[["abs", "math.sin", "lambda x: 3 * x / 2"], math.pi, 1],
[["math.sin", "math.cos", "lambda x: x * 2", "lambda x: x ** 2"],
1, math.sin(math.cos((1**2) * 2))],
[["lambda z: z", "lambda z: 1.0 * z / 13"], -1000, (-1000 / 13) * 1.0],
[["float"], 1000, 1000],
[["abs"], -20, 20]
]
for t in tests:
res = functionsComposition(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: functionsComposition({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: functionsComposition({}, {}) returned {},"
"answer: {}".format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights functions composition problem<commit_after>#!/usr/local/bin/python
# Code Fights Functions Composition Problem
from functools import reduce
import math
def compose(functions):
return reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)
def functionsComposition(functions, x):
return compose(map(eval, functions))(x)
def main():
tests = [
[["abs", "math.sin", "lambda x: 3 * x / 2"], math.pi, 1],
[["math.sin", "math.cos", "lambda x: x * 2", "lambda x: x ** 2"],
1, math.sin(math.cos((1**2) * 2))],
[["lambda z: z", "lambda z: 1.0 * z / 13"], -1000, (-1000 / 13) * 1.0],
[["float"], 1000, 1000],
[["abs"], -20, 20]
]
for t in tests:
res = functionsComposition(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: functionsComposition({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: functionsComposition({}, {}) returned {},"
"answer: {}".format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
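compose above folds the function list right to left, so compose([f, g])(x) == f(g(x)), and functionsComposition simply eval()s each string before composing. A quick check of the ordering, reusing the functions from the record:

double = lambda x: x * 2
inc = lambda x: x + 1
print(compose([double, inc])(10))  # 22: inc runs first (10 -> 11), then double (11 -> 22)
print(functionsComposition(["abs", "math.sin", "lambda x: 3 * x / 2"], math.pi))  # 1.0, i.e. abs(sin(3*pi/2)), matching the record's first test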
|
57c2a013d3f3c671cfedd6e66aa176572d09d87e
|
setup.py
|
setup.py
|
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_docs():
result = []
in_docs = False
f = open(os.path.join(os.path.dirname(__file__), 'speaklater.py'))
try:
for line in f:
if in_docs:
if line.lstrip().startswith(':copyright:'):
break
result.append(line[4:].rstrip())
elif line.strip() == 'r"""':
in_docs = True
finally:
f.close()
return '\n'.join(result)
setup(
name='speaklater',
author='Armin Ronacher',
author_email='armin.ronacher@active-4.com',
version='1.2',
url='http://dev.pocoo.org/hg/speaklater-main',
py_modules=['speaklater'],
description='implements a lazy string for python useful for use with gettext',
long_description=get_docs(),
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Internationalization',
'Programming Language :: Python'
]
)
|
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_docs():
result = []
in_docs = False
f = open(os.path.join(os.path.dirname(__file__), 'speaklater.py'))
try:
for line in f:
if in_docs:
if line.lstrip().startswith(':copyright:'):
break
result.append(line[4:].rstrip())
elif line.strip() == 'r"""':
in_docs = True
finally:
f.close()
return '\n'.join(result)
setup(
name='speaklater',
author='Armin Ronacher',
author_email='armin.ronacher@active-4.com',
version='1.2',
url='http://github.com/mitsuhiko/speaklater',
py_modules=['speaklater'],
description='implements a lazy string for python useful for use with gettext',
long_description=get_docs(),
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Internationalization',
'Programming Language :: Python'
]
)
|
Switch to git part two, this time URL for website
|
Switch to git part two, this time URL for website
|
Python
|
bsd-3-clause
|
quokkaproject/speaklater,ThomasWaldmann/speaklater,jmagnusson/speaklater,mitsuhiko/speaklater
|
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_docs():
result = []
in_docs = False
f = open(os.path.join(os.path.dirname(__file__), 'speaklater.py'))
try:
for line in f:
if in_docs:
if line.lstrip().startswith(':copyright:'):
break
result.append(line[4:].rstrip())
elif line.strip() == 'r"""':
in_docs = True
finally:
f.close()
return '\n'.join(result)
setup(
name='speaklater',
author='Armin Ronacher',
author_email='armin.ronacher@active-4.com',
version='1.2',
url='http://dev.pocoo.org/hg/speaklater-main',
py_modules=['speaklater'],
description='implements a lazy string for python useful for use with gettext',
long_description=get_docs(),
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Internationalization',
'Programming Language :: Python'
]
)
Switch to git part two, this time URL for website
|
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_docs():
result = []
in_docs = False
f = open(os.path.join(os.path.dirname(__file__), 'speaklater.py'))
try:
for line in f:
if in_docs:
if line.lstrip().startswith(':copyright:'):
break
result.append(line[4:].rstrip())
elif line.strip() == 'r"""':
in_docs = True
finally:
f.close()
return '\n'.join(result)
setup(
name='speaklater',
author='Armin Ronacher',
author_email='armin.ronacher@active-4.com',
version='1.2',
url='http://github.com/mitsuhiko/speaklater',
py_modules=['speaklater'],
description='implements a lazy string for python useful for use with gettext',
long_description=get_docs(),
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Internationalization',
'Programming Language :: Python'
]
)
|
<commit_before>import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_docs():
result = []
in_docs = False
f = open(os.path.join(os.path.dirname(__file__), 'speaklater.py'))
try:
for line in f:
if in_docs:
if line.lstrip().startswith(':copyright:'):
break
result.append(line[4:].rstrip())
elif line.strip() == 'r"""':
in_docs = True
finally:
f.close()
return '\n'.join(result)
setup(
name='speaklater',
author='Armin Ronacher',
author_email='armin.ronacher@active-4.com',
version='1.2',
url='http://dev.pocoo.org/hg/speaklater-main',
py_modules=['speaklater'],
description='implements a lazy string for python useful for use with gettext',
long_description=get_docs(),
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Internationalization',
'Programming Language :: Python'
]
)
<commit_msg>Switch to git part two, this time URL for website<commit_after>
|
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_docs():
result = []
in_docs = False
f = open(os.path.join(os.path.dirname(__file__), 'speaklater.py'))
try:
for line in f:
if in_docs:
if line.lstrip().startswith(':copyright:'):
break
result.append(line[4:].rstrip())
elif line.strip() == 'r"""':
in_docs = True
finally:
f.close()
return '\n'.join(result)
setup(
name='speaklater',
author='Armin Ronacher',
author_email='armin.ronacher@active-4.com',
version='1.2',
url='http://github.com/mitsuhiko/speaklater',
py_modules=['speaklater'],
description='implements a lazy string for python useful for use with gettext',
long_description=get_docs(),
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Internationalization',
'Programming Language :: Python'
]
)
|
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_docs():
result = []
in_docs = False
f = open(os.path.join(os.path.dirname(__file__), 'speaklater.py'))
try:
for line in f:
if in_docs:
if line.lstrip().startswith(':copyright:'):
break
result.append(line[4:].rstrip())
elif line.strip() == 'r"""':
in_docs = True
finally:
f.close()
return '\n'.join(result)
setup(
name='speaklater',
author='Armin Ronacher',
author_email='armin.ronacher@active-4.com',
version='1.2',
url='http://dev.pocoo.org/hg/speaklater-main',
py_modules=['speaklater'],
description='implements a lazy string for python useful for use with gettext',
long_description=get_docs(),
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Internationalization',
'Programming Language :: Python'
]
)
Switch to git part two, this time URL for websiteimport os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_docs():
result = []
in_docs = False
f = open(os.path.join(os.path.dirname(__file__), 'speaklater.py'))
try:
for line in f:
if in_docs:
if line.lstrip().startswith(':copyright:'):
break
result.append(line[4:].rstrip())
elif line.strip() == 'r"""':
in_docs = True
finally:
f.close()
return '\n'.join(result)
setup(
name='speaklater',
author='Armin Ronacher',
author_email='armin.ronacher@active-4.com',
version='1.2',
url='http://github.com/mitsuhiko/speaklater',
py_modules=['speaklater'],
description='implements a lazy string for python useful for use with gettext',
long_description=get_docs(),
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Internationalization',
'Programming Language :: Python'
]
)
|
<commit_before>import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_docs():
result = []
in_docs = False
f = open(os.path.join(os.path.dirname(__file__), 'speaklater.py'))
try:
for line in f:
if in_docs:
if line.lstrip().startswith(':copyright:'):
break
result.append(line[4:].rstrip())
elif line.strip() == 'r"""':
in_docs = True
finally:
f.close()
return '\n'.join(result)
setup(
name='speaklater',
author='Armin Ronacher',
author_email='armin.ronacher@active-4.com',
version='1.2',
url='http://dev.pocoo.org/hg/speaklater-main',
py_modules=['speaklater'],
description='implements a lazy string for python useful for use with gettext',
long_description=get_docs(),
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Internationalization',
'Programming Language :: Python'
]
)
<commit_msg>Switch to git part two, this time URL for website<commit_after>import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_docs():
result = []
in_docs = False
f = open(os.path.join(os.path.dirname(__file__), 'speaklater.py'))
try:
for line in f:
if in_docs:
if line.lstrip().startswith(':copyright:'):
break
result.append(line[4:].rstrip())
elif line.strip() == 'r"""':
in_docs = True
finally:
f.close()
return '\n'.join(result)
setup(
name='speaklater',
author='Armin Ronacher',
author_email='armin.ronacher@active-4.com',
version='1.2',
url='http://github.com/mitsuhiko/speaklater',
py_modules=['speaklater'],
description='implements a lazy string for python useful for use with gettext',
long_description=get_docs(),
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Internationalization',
'Programming Language :: Python'
]
)
|
23696a4ba6b721248d10957fb70c2b9bd6433b84
|
tools/gyp-explain.py
|
tools/gyp-explain.py
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints paths between gyp targets.
"""
import json
import os
import sys
import time
from collections import deque
def usage():
print """\
Usage:
tools/gyp-explain.py chrome_dll gtest#
"""
def GetPath(graph, fro, to):
"""Given a graph in (node -> list of successor nodes) dictionary format,
yields all paths from |fro| to |to|, starting with the shortest."""
# Storing full paths in the queue is a bit wasteful, but good enough for this.
q = deque([(fro, [])])
while q:
t, path = q.popleft()
if t == to:
yield path + [t]
for d in graph[t]:
q.append((d, path + [t]))
def MatchNode(graph, substring):
"""Given a dictionary, returns the key that matches |substring| best. Exits
if there's not one single best match."""
candidates = []
for target in graph:
if substring in target:
candidates.append(target)
if not candidates:
print 'No targets match "%s"' % substring
sys.exit(1)
if len(candidates) > 1:
print 'More than one target matches "%s": %s' % (
substring, ' '.join(candidates))
sys.exit(1)
return candidates[0]
def Main(argv):
# Check that dump.json exists and that it's not too old.
dump_json_dirty = False
try:
st = os.stat('dump.json')
file_age_s = time.time() - st.st_mtime
if file_age_s > 2 * 60 * 60:
print 'dump.json is more than 2 hours old.'
dump_json_dirty = True
except IOError:
print 'dump.json not found.'
dump_json_dirty = True
if dump_json_dirty:
print 'Run'
print ' GYP_GENERATORS=dump_dependency_json build/gyp_chromium'
print 'first, then try again.'
sys.exit(1)
g = json.load(open('dump.json'))
if len(argv) != 3:
usage()
sys.exit(1)
fro = MatchNode(g, argv[1])
to = MatchNode(g, argv[2])
paths = list(GetPath(g, fro, to))
if len(paths) > 0:
print 'These paths lead from %s to %s:' % (fro, to)
for path in paths:
print ' -> '.join(path)
else:
print 'No paths found from %s to %s.' % (fro, to)
if __name__ == '__main__':
Main(sys.argv)
|
Add a small tool to answer questions like "Why does target A depend on target B".
|
Add a small tool to answer questions like "Why does target A depend on target B".
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/8672006
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@111430 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
M4sse/chromium.src,axinging/chromium-crosswalk,dednal/chromium.src,dushu1203/chromium.src,ltilve/chromium,jaruba/chromium.src,mogoweb/chromium-crosswalk,junmin-zhu/chromium-rivertrail,mogoweb/chromium-crosswalk,nacl-webkit/chrome_deps,Jonekee/chromium.src,TheTypoMaster/chromium-crosswalk,nacl-webkit/chrome_deps,ondra-novak/chromium.src,Chilledheart/chromium,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,ondra-novak/chromium.src,Jonekee/chromium.src,dushu1203/chromium.src,keishi/chromium,TheTypoMaster/chromium-crosswalk,rogerwang/chromium,dushu1203/chromium.src,Just-D/chromium-1,anirudhSK/chromium,bright-sparks/chromium-spacewalk,timopulkkinen/BubbleFish,littlstar/chromium.src,markYoungH/chromium.src,dednal/chromium.src,mohamed--abdel-maksoud/chromium.src,pozdnyakov/chromium-crosswalk,keishi/chromium,keishi/chromium,Jonekee/chromium.src,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,rogerwang/chromium,Just-D/chromium-1,Chilledheart/chromium,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,M4sse/chromium.src,dushu1203/chromium.src,hujiajie/pa-chromium,Pluto-tv/chromium-crosswalk,pozdnyakov/chromium-crosswalk,jaruba/chromium.src,hujiajie/pa-chromium,patrickm/chromium.src,hujiajie/pa-chromium,PeterWangIntel/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,nacl-webkit/chrome_deps,patrickm/chromium.src,ltilve/chromium,crosswalk-project/chromium-crosswalk-efl,ChromiumWebApps/chromium,M4sse/chromium.src,hgl888/chromium-crosswalk,anirudhSK/chromium,Chilledheart/chromium,keishi/chromium,jaruba/chromium.src,Pluto-tv/chromium-crosswalk,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,zcbenz/cefode-chromium,Fireblend/chromium-crosswalk,Just-D/chromium-1,nacl-webkit/chrome_deps,dednal/chromium.src,PeterWangIntel/chromium-crosswalk,markYoungH/chromium.src,PeterWangIntel/chromium-crosswalk,Jonekee/chromium.src,markYoungH/chromium.src,Fireblend/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,markYoungH/chromium.src,dushu1203/chromium.src,krieger-od/nwjs_chromium.src,littlstar/chromium.src,crosswalk-project/chromium-crosswalk-efl,junmin-zhu/chromium-rivertrail,dushu1203/chromium.src,robclark/chromium,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,bright-sparks/chromium-spacewalk,crosswalk-project/chromium-crosswalk-efl,anirudhSK/chromium,robclark/chromium,robclark/chromium,pozdnyakov/chromium-crosswalk,timopulkkinen/BubbleFish,zcbenz/cefode-chromium,krieger-od/nwjs_chromium.src,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,pozdnyakov/chromium-crosswalk,fujunwei/chromium-crosswalk,robclark/chromium,rogerwang/chromium,hgl888/chromium-crosswalk,ChromiumWebApps/chromium,Chilledheart/chromium,mohamed--abdel-maksoud/chromium.src,krieger-od/nwjs_chromium.src,anirudhSK/chromium,jaruba/chromium.src,Pluto-tv/chromium-crosswalk,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,rogerwang/chromium,mogoweb/chromium-crosswalk,jaruba/chromium.src,Chilledheart/chromium,M4sse/chromium.src,keishi/chromium,mohamed--abdel-maksoud/chromium.src,pozdnyakov/chromium-crosswalk,timopulkkinen/BubbleFish,nacl-webkit/chrome_deps,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,chuan9/chromium-crosswalk,M4sse/chromium.src,ChromiumWebApps/chromium,dednal/chromium.src,zcbenz/cefode-chromium,pozdnyakov/chromium-crosswalk,dednal/chromium.src,bright-sparks/chromium-spacewalk,ltilve/
chromium,Jonekee/chromium.src,robclark/chromium,fujunwei/chromium-crosswalk,jaruba/chromium.src,dednal/chromium.src,ondra-novak/chromium.src,patrickm/chromium.src,ondra-novak/chromium.src,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,junmin-zhu/chromium-rivertrail,PeterWangIntel/chromium-crosswalk,hujiajie/pa-chromium,robclark/chromium,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,jaruba/chromium.src,rogerwang/chromium,nacl-webkit/chrome_deps,hgl888/chromium-crosswalk,Jonekee/chromium.src,Jonekee/chromium.src,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,junmin-zhu/chromium-rivertrail,ondra-novak/chromium.src,junmin-zhu/chromium-rivertrail,robclark/chromium,nacl-webkit/chrome_deps,chuan9/chromium-crosswalk,ondra-novak/chromium.src,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk,zcbenz/cefode-chromium,TheTypoMaster/chromium-crosswalk,timopulkkinen/BubbleFish,axinging/chromium-crosswalk,Jonekee/chromium.src,dednal/chromium.src,zcbenz/cefode-chromium,patrickm/chromium.src,littlstar/chromium.src,zcbenz/cefode-chromium,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk,dednal/chromium.src,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,dednal/chromium.src,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,Chilledheart/chromium,M4sse/chromium.src,Just-D/chromium-1,littlstar/chromium.src,hujiajie/pa-chromium,ChromiumWebApps/chromium,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,markYoungH/chromium.src,Just-D/chromium-1,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,pozdnyakov/chromium-crosswalk,M4sse/chromium.src,pozdnyakov/chromium-crosswalk,nacl-webkit/chrome_deps,ondra-novak/chromium.src,anirudhSK/chromium,timopulkkinen/BubbleFish,junmin-zhu/chromium-rivertrail,Pluto-tv/chromium-crosswalk,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,timopulkkinen/BubbleFish,bright-sparks/chromium-spacewalk,Fireblend/chromium-crosswalk,timopulkkinen/BubbleFish,ltilve/chromium,jaruba/chromium.src,bright-sparks/chromium-spacewalk,hujiajie/pa-chromium,mohamed--abdel-maksoud/chromium.src,anirudhSK/chromium,Just-D/chromium-1,fujunwei/chromium-crosswalk,ChromiumWebApps/chromium,anirudhSK/chromium,junmin-zhu/chromium-rivertrail,rogerwang/chromium,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk-efl,junmin-zhu/chromium-rivertrail,mogoweb/chromium-crosswalk,pozdnyakov/chromium-crosswalk,hujiajie/pa-chromium,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,bright-sparks/chromium-spacewalk,mogoweb/chromium-crosswalk,ltilve/chromium,pozdnyakov/chromium-crosswalk,hujiajie/pa-chromium,Just-D/chromium-1,junmin-zhu/chromium-rivertrail,Chilledheart/chromium,keishi/chromium,ChromiumWebApps/chromium,hujiajie/pa-chromium,keishi/chromium,junmin-zhu/chromium-rivertrail,ChromiumWebApps/chromium,chuan9/chromium-crosswalk,markYoungH/chromium.src,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,TheTypoMaster/chromium-crosswalk,mogoweb/chromium-crosswalk,nacl-webkit/chrome_deps,chuan9/chromium-crosswalk,rogerwang/chromium,robclark/chromium,junmin-zhu/chromium-rivertrail,timopulkkinen/BubbleFish,anirudhSK/chromium,anirudhSK/chromium,krieger-od/nwjs_chromium.src,keishi/chromium,patrickm/chromium.src,hujiajie/
pa-chromium,krieger-od/nwjs_chromium.src,timopulkkinen/BubbleFish,jaruba/chromium.src,rogerwang/chromium,Just-D/chromium-1,mogoweb/chromium-crosswalk,bright-sparks/chromium-spacewalk,crosswalk-project/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,ltilve/chromium,patrickm/chromium.src,pozdnyakov/chromium-crosswalk,littlstar/chromium.src,markYoungH/chromium.src,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,M4sse/chromium.src,Jonekee/chromium.src,Just-D/chromium-1,fujunwei/chromium-crosswalk,dushu1203/chromium.src,axinging/chromium-crosswalk,keishi/chromium,bright-sparks/chromium-spacewalk,zcbenz/cefode-chromium,zcbenz/cefode-chromium,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,littlstar/chromium.src,zcbenz/cefode-chromium,ChromiumWebApps/chromium,robclark/chromium,axinging/chromium-crosswalk,Chilledheart/chromium,krieger-od/nwjs_chromium.src,zcbenz/cefode-chromium,PeterWangIntel/chromium-crosswalk,Jonekee/chromium.src,axinging/chromium-crosswalk,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,jaruba/chromium.src,timopulkkinen/BubbleFish,rogerwang/chromium,M4sse/chromium.src,robclark/chromium,rogerwang/chromium,hujiajie/pa-chromium,hgl888/chromium-crosswalk,ondra-novak/chromium.src,markYoungH/chromium.src,mogoweb/chromium-crosswalk,anirudhSK/chromium,M4sse/chromium.src,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,timopulkkinen/BubbleFish,fujunwei/chromium-crosswalk,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,nacl-webkit/chrome_deps,patrickm/chromium.src,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,anirudhSK/chromium,zcbenz/cefode-chromium,keishi/chromium,mohamed--abdel-maksoud/chromium.src,nacl-webkit/chrome_deps,axinging/chromium-crosswalk,patrickm/chromium.src,krieger-od/nwjs_chromium.src,patrickm/chromium.src,ltilve/chromium,keishi/chromium,crosswalk-project/chromium-crosswalk-efl,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,mogoweb/chromium-crosswalk,littlstar/chromium.src,mogoweb/chromium-crosswalk,Fireblend/chromium-crosswalk,ltilve/chromium,chuan9/chromium-crosswalk,Pluto-tv/chromium-crosswalk,fujunwei/chromium-crosswalk,dushu1203/chromium.src,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk
|
Add a small tool to answer questions like "Why does target A depend on target B".
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/8672006
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@111430 0039d316-1c4b-4281-b951-d872f2087c98
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints paths between gyp targets.
"""
import json
import os
import sys
import time
from collections import deque
def usage():
print """\
Usage:
tools/gyp-explain.py chrome_dll gtest#
"""
def GetPath(graph, fro, to):
"""Given a graph in (node -> list of successor nodes) dictionary format,
yields all paths from |fro| to |to|, starting with the shortest."""
# Storing full paths in the queue is a bit wasteful, but good enough for this.
q = deque([(fro, [])])
while q:
t, path = q.popleft()
if t == to:
yield path + [t]
for d in graph[t]:
q.append((d, path + [t]))
def MatchNode(graph, substring):
"""Given a dictionary, returns the key that matches |substring| best. Exits
if there's not one single best match."""
candidates = []
for target in graph:
if substring in target:
candidates.append(target)
if not candidates:
print 'No targets match "%s"' % substring
sys.exit(1)
if len(candidates) > 1:
print 'More than one target matches "%s": %s' % (
substring, ' '.join(candidates))
sys.exit(1)
return candidates[0]
def Main(argv):
# Check that dump.json exists and that it's not too old.
dump_json_dirty = False
try:
st = os.stat('dump.json')
file_age_s = time.time() - st.st_mtime
if file_age_s > 2 * 60 * 60:
print 'dump.json is more than 2 hours old.'
dump_json_dirty = True
except IOError:
print 'dump.json not found.'
dump_json_dirty = True
if dump_json_dirty:
print 'Run'
print ' GYP_GENERATORS=dump_dependency_json build/gyp_chromium'
print 'first, then try again.'
sys.exit(1)
g = json.load(open('dump.json'))
if len(argv) != 3:
usage()
sys.exit(1)
fro = MatchNode(g, argv[1])
to = MatchNode(g, argv[2])
paths = list(GetPath(g, fro, to))
if len(paths) > 0:
print 'These paths lead from %s to %s:' % (fro, to)
for path in paths:
print ' -> '.join(path)
else:
print 'No paths found from %s to %s.' % (fro, to)
if __name__ == '__main__':
Main(sys.argv)
|
<commit_before><commit_msg>Add a small tool to answer questions like "Why does target A depend on target B".
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/8672006
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@111430 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints paths between gyp targets.
"""
import json
import os
import sys
import time
from collections import deque
def usage():
print """\
Usage:
tools/gyp-explain.py chrome_dll gtest#
"""
def GetPath(graph, fro, to):
"""Given a graph in (node -> list of successor nodes) dictionary format,
yields all paths from |fro| to |to|, starting with the shortest."""
# Storing full paths in the queue is a bit wasteful, but good enough for this.
q = deque([(fro, [])])
while q:
t, path = q.popleft()
if t == to:
yield path + [t]
for d in graph[t]:
q.append((d, path + [t]))
def MatchNode(graph, substring):
"""Given a dictionary, returns the key that matches |substring| best. Exits
if there's not one single best match."""
candidates = []
for target in graph:
if substring in target:
candidates.append(target)
if not candidates:
print 'No targets match "%s"' % substring
sys.exit(1)
if len(candidates) > 1:
print 'More than one target matches "%s": %s' % (
substring, ' '.join(candidates))
sys.exit(1)
return candidates[0]
def Main(argv):
# Check that dump.json exists and that it's not too old.
dump_json_dirty = False
try:
st = os.stat('dump.json')
file_age_s = time.time() - st.st_mtime
if file_age_s > 2 * 60 * 60:
print 'dump.json is more than 2 hours old.'
dump_json_dirty = True
except IOError:
print 'dump.json not found.'
dump_json_dirty = True
if dump_json_dirty:
print 'Run'
print ' GYP_GENERATORS=dump_dependency_json build/gyp_chromium'
print 'first, then try again.'
sys.exit(1)
g = json.load(open('dump.json'))
if len(argv) != 3:
usage()
sys.exit(1)
fro = MatchNode(g, argv[1])
to = MatchNode(g, argv[2])
paths = list(GetPath(g, fro, to))
if len(paths) > 0:
print 'These paths lead from %s to %s:' % (fro, to)
for path in paths:
print ' -> '.join(path)
else:
print 'No paths found from %s to %s.' % (fro, to)
if __name__ == '__main__':
Main(sys.argv)
|
Add a small tool to answer questions like "Why does target A depend on target B".
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/8672006
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@111430 0039d316-1c4b-4281-b951-d872f2087c98#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints paths between gyp targets.
"""
import json
import os
import sys
import time
from collections import deque
def usage():
print """\
Usage:
tools/gyp-explain.py chrome_dll gtest#
"""
def GetPath(graph, fro, to):
"""Given a graph in (node -> list of successor nodes) dictionary format,
yields all paths from |fro| to |to|, starting with the shortest."""
# Storing full paths in the queue is a bit wasteful, but good enough for this.
q = deque([(fro, [])])
while q:
t, path = q.popleft()
if t == to:
yield path + [t]
for d in graph[t]:
q.append((d, path + [t]))
def MatchNode(graph, substring):
"""Given a dictionary, returns the key that matches |substring| best. Exits
if there's not one single best match."""
candidates = []
for target in graph:
if substring in target:
candidates.append(target)
if not candidates:
print 'No targets match "%s"' % substring
sys.exit(1)
if len(candidates) > 1:
print 'More than one target matches "%s": %s' % (
substring, ' '.join(candidates))
sys.exit(1)
return candidates[0]
def Main(argv):
# Check that dump.json exists and that it's not too old.
dump_json_dirty = False
try:
st = os.stat('dump.json')
file_age_s = time.time() - st.st_mtime
if file_age_s > 2 * 60 * 60:
print 'dump.json is more than 2 hours old.'
dump_json_dirty = True
except IOError:
print 'dump.json not found.'
dump_json_dirty = True
if dump_json_dirty:
print 'Run'
print ' GYP_GENERATORS=dump_dependency_json build/gyp_chromium'
print 'first, then try again.'
sys.exit(1)
g = json.load(open('dump.json'))
if len(argv) != 3:
usage()
sys.exit(1)
fro = MatchNode(g, argv[1])
to = MatchNode(g, argv[2])
paths = list(GetPath(g, fro, to))
if len(paths) > 0:
print 'These paths lead from %s to %s:' % (fro, to)
for path in paths:
print ' -> '.join(path)
else:
print 'No paths found from %s to %s.' % (fro, to)
if __name__ == '__main__':
Main(sys.argv)
|
<commit_before><commit_msg>Add a small tool to answer questions like "Why does target A depend on target B".
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/8672006
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@111430 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints paths between gyp targets.
"""
import json
import os
import sys
import time
from collections import deque
def usage():
print """\
Usage:
tools/gyp-explain.py chrome_dll gtest#
"""
def GetPath(graph, fro, to):
"""Given a graph in (node -> list of successor nodes) dictionary format,
yields all paths from |fro| to |to|, starting with the shortest."""
# Storing full paths in the queue is a bit wasteful, but good enough for this.
q = deque([(fro, [])])
while q:
t, path = q.popleft()
if t == to:
yield path + [t]
for d in graph[t]:
q.append((d, path + [t]))
def MatchNode(graph, substring):
"""Given a dictionary, returns the key that matches |substring| best. Exits
if there's not one single best match."""
candidates = []
for target in graph:
if substring in target:
candidates.append(target)
if not candidates:
print 'No targets match "%s"' % substring
sys.exit(1)
if len(candidates) > 1:
print 'More than one target matches "%s": %s' % (
substring, ' '.join(candidates))
sys.exit(1)
return candidates[0]
def Main(argv):
# Check that dump.json exists and that it's not too old.
dump_json_dirty = False
try:
st = os.stat('dump.json')
file_age_s = time.time() - st.st_mtime
if file_age_s > 2 * 60 * 60:
print 'dump.json is more than 2 hours old.'
dump_json_dirty = True
except IOError:
print 'dump.json not found.'
dump_json_dirty = True
if dump_json_dirty:
print 'Run'
print ' GYP_GENERATORS=dump_dependency_json build/gyp_chromium'
print 'first, then try again.'
sys.exit(1)
g = json.load(open('dump.json'))
if len(argv) != 3:
usage()
sys.exit(1)
fro = MatchNode(g, argv[1])
to = MatchNode(g, argv[2])
paths = list(GetPath(g, fro, to))
if len(paths) > 0:
print 'These paths lead from %s to %s:' % (fro, to)
for path in paths:
print ' -> '.join(path)
else:
print 'No paths found from %s to %s.' % (fro, to)
if __name__ == '__main__':
Main(sys.argv)
|
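GetPath above is a plain breadth-first search that yields every path between the two matched targets, shortest first; note that it keeps whole paths in the queue and has no visited set, so it relies on the dependency graph being acyclic. A tiny made-up graph (not real Chromium targets) shows the output shape:

g = {'chrome_dll': ['base', 'gtest'], 'base': ['gtest'], 'gtest': []}
for path in GetPath(g, 'chrome_dll', 'gtest'):
    print(' -> '.join(path))
# chrome_dll -> gtest
# chrome_dll -> base -> gtest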
|
32a592c82ab0b727c56084063d49039bb693a2b0
|
corehq/apps/reports/commtrack/util.py
|
corehq/apps/reports/commtrack/util.py
|
from corehq.apps.locations.models import all_locations
from corehq.apps.commtrack.models import SupplyPointCase
from corehq.apps.products.models import Product
def supply_point_ids(locations):
keys = [[loc.domain, loc._id] for loc in locations]
rows = SupplyPointCase.get_db().view(
'commtrack/supply_point_by_loc',
keys=keys,
include_docs=False,
)
return [row['id'] for row in rows]
def get_relevant_supply_point_ids(domain, active_location=None):
if active_location:
return supply_point_ids([active_location] + active_location.descendants)
else:
return supply_point_ids(all_locations(domain))
def product_ids_filtered_by_program(domain, program):
products = Product.by_program_id(domain, program, False)
return [p['_id'] for p in products]
|
from corehq.apps.locations.models import SQLLocation
from corehq.apps.commtrack.models import SupplyPointCase
from corehq.apps.products.models import Product
def supply_point_ids(locations):
keys = [[loc.domain, loc._id] for loc in locations]
rows = SupplyPointCase.get_db().view(
'commtrack/supply_point_by_loc',
keys=keys,
include_docs=False,
)
return [row['id'] for row in rows]
def get_relevant_supply_point_ids(domain, active_location=None):
"""
Return a list of supply point ids for the selected location
and all of its descendants OR all supply point ids in the domain.
"""
def filter_relevant(queryset):
return queryset.filter(
supply_point_id__isnull=False
).values_list(
'supply_point_id',
flat=True
)
if active_location:
sql_location = active_location.sql_location
supply_point_ids = []
if sql_location.supply_point_id:
supply_point_ids.append(sql_location.supply_point_id)
supply_point_ids += list(
filter_relevant(sql_location.get_descendants())
)
return supply_point_ids
else:
return filter_relevant(SQLLocation.objects.filter(domain=domain))
def product_ids_filtered_by_program(domain, program):
products = Product.by_program_id(domain, program, False)
return [p['_id'] for p in products]
|
Switch supply point id list lookup to SQL
|
Switch supply point id list lookup to SQL
Locally with 1000 users this took the product_data method from 1.7
seconds to .2 seconds.
|
Python
|
bsd-3-clause
|
puttarajubr/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq
|
from corehq.apps.locations.models import all_locations
from corehq.apps.commtrack.models import SupplyPointCase
from corehq.apps.products.models import Product
def supply_point_ids(locations):
keys = [[loc.domain, loc._id] for loc in locations]
rows = SupplyPointCase.get_db().view(
'commtrack/supply_point_by_loc',
keys=keys,
include_docs=False,
)
return [row['id'] for row in rows]
def get_relevant_supply_point_ids(domain, active_location=None):
if active_location:
return supply_point_ids([active_location] + active_location.descendants)
else:
return supply_point_ids(all_locations(domain))
def product_ids_filtered_by_program(domain, program):
products = Product.by_program_id(domain, program, False)
return [p['_id'] for p in products]
Switch supply point id list lookup to SQL
Locally with 1000 users this took the product_data method from 1.7
seconds to .2 seconds.
|
from corehq.apps.locations.models import SQLLocation
from corehq.apps.commtrack.models import SupplyPointCase
from corehq.apps.products.models import Product
def supply_point_ids(locations):
keys = [[loc.domain, loc._id] for loc in locations]
rows = SupplyPointCase.get_db().view(
'commtrack/supply_point_by_loc',
keys=keys,
include_docs=False,
)
return [row['id'] for row in rows]
def get_relevant_supply_point_ids(domain, active_location=None):
"""
Return a list of supply point ids for the selected location
and all of its descendants OR all supply point ids in the domain.
"""
def filter_relevant(queryset):
return queryset.filter(
supply_point_id__isnull=False
).values_list(
'supply_point_id',
flat=True
)
if active_location:
sql_location = active_location.sql_location
supply_point_ids = []
if sql_location.supply_point_id:
supply_point_ids.append(sql_location.supply_point_id)
supply_point_ids += list(
filter_relevant(sql_location.get_descendants())
)
return supply_point_ids
else:
return filter_relevant(SQLLocation.objects.filter(domain=domain))
def product_ids_filtered_by_program(domain, program):
products = Product.by_program_id(domain, program, False)
return [p['_id'] for p in products]
|
<commit_before>from corehq.apps.locations.models import all_locations
from corehq.apps.commtrack.models import SupplyPointCase
from corehq.apps.products.models import Product
def supply_point_ids(locations):
keys = [[loc.domain, loc._id] for loc in locations]
rows = SupplyPointCase.get_db().view(
'commtrack/supply_point_by_loc',
keys=keys,
include_docs=False,
)
return [row['id'] for row in rows]
def get_relevant_supply_point_ids(domain, active_location=None):
if active_location:
return supply_point_ids([active_location] + active_location.descendants)
else:
return supply_point_ids(all_locations(domain))
def product_ids_filtered_by_program(domain, program):
products = Product.by_program_id(domain, program, False)
return [p['_id'] for p in products]
<commit_msg>Switch supply point id list lookup to SQL
Locally with 1000 users this took the product_data method from 1.7
seconds to .2 seconds.<commit_after>
|
from corehq.apps.locations.models import SQLLocation
from corehq.apps.commtrack.models import SupplyPointCase
from corehq.apps.products.models import Product
def supply_point_ids(locations):
keys = [[loc.domain, loc._id] for loc in locations]
rows = SupplyPointCase.get_db().view(
'commtrack/supply_point_by_loc',
keys=keys,
include_docs=False,
)
return [row['id'] for row in rows]
def get_relevant_supply_point_ids(domain, active_location=None):
"""
Return a list of supply point ids for the selected location
and all of its descendants OR all supply point ids in the domain.
"""
def filter_relevant(queryset):
return queryset.filter(
supply_point_id__isnull=False
).values_list(
'supply_point_id',
flat=True
)
if active_location:
sql_location = active_location.sql_location
supply_point_ids = []
if sql_location.supply_point_id:
supply_point_ids.append(sql_location.supply_point_id)
supply_point_ids += list(
filter_relevant(sql_location.get_descendants())
)
return supply_point_ids
else:
return filter_relevant(SQLLocation.objects.filter(domain=domain))
def product_ids_filtered_by_program(domain, program):
products = Product.by_program_id(domain, program, False)
return [p['_id'] for p in products]
|
from corehq.apps.locations.models import all_locations
from corehq.apps.commtrack.models import SupplyPointCase
from corehq.apps.products.models import Product
def supply_point_ids(locations):
keys = [[loc.domain, loc._id] for loc in locations]
rows = SupplyPointCase.get_db().view(
'commtrack/supply_point_by_loc',
keys=keys,
include_docs=False,
)
return [row['id'] for row in rows]
def get_relevant_supply_point_ids(domain, active_location=None):
if active_location:
return supply_point_ids([active_location] + active_location.descendants)
else:
return supply_point_ids(all_locations(domain))
def product_ids_filtered_by_program(domain, program):
products = Product.by_program_id(domain, program, False)
return [p['_id'] for p in products]
Switch supply point id list lookup to SQL
Locally with 1000 users this took the product_data method from 1.7
seconds to .2 seconds.from corehq.apps.locations.models import SQLLocation
from corehq.apps.commtrack.models import SupplyPointCase
from corehq.apps.products.models import Product
def supply_point_ids(locations):
keys = [[loc.domain, loc._id] for loc in locations]
rows = SupplyPointCase.get_db().view(
'commtrack/supply_point_by_loc',
keys=keys,
include_docs=False,
)
return [row['id'] for row in rows]
def get_relevant_supply_point_ids(domain, active_location=None):
"""
Return a list of supply point ids for the selected location
and all of its descendants OR all supply point ids in the domain.
"""
def filter_relevant(queryset):
return queryset.filter(
supply_point_id__isnull=False
).values_list(
'supply_point_id',
flat=True
)
if active_location:
sql_location = active_location.sql_location
supply_point_ids = []
if sql_location.supply_point_id:
supply_point_ids.append(sql_location.supply_point_id)
supply_point_ids += list(
filter_relevant(sql_location.get_descendants())
)
return supply_point_ids
else:
return filter_relevant(SQLLocation.objects.filter(domain=domain))
def product_ids_filtered_by_program(domain, program):
products = Product.by_program_id(domain, program, False)
return [p['_id'] for p in products]
|
<commit_before>from corehq.apps.locations.models import all_locations
from corehq.apps.commtrack.models import SupplyPointCase
from corehq.apps.products.models import Product
def supply_point_ids(locations):
keys = [[loc.domain, loc._id] for loc in locations]
rows = SupplyPointCase.get_db().view(
'commtrack/supply_point_by_loc',
keys=keys,
include_docs=False,
)
return [row['id'] for row in rows]
def get_relevant_supply_point_ids(domain, active_location=None):
if active_location:
return supply_point_ids([active_location] + active_location.descendants)
else:
return supply_point_ids(all_locations(domain))
def product_ids_filtered_by_program(domain, program):
products = Product.by_program_id(domain, program, False)
return [p['_id'] for p in products]
<commit_msg>Switch supply point id list lookup to SQL
Locally with 1000 users this took the product_data method from 1.7
seconds to .2 seconds.<commit_after>from corehq.apps.locations.models import SQLLocation
from corehq.apps.commtrack.models import SupplyPointCase
from corehq.apps.products.models import Product
def supply_point_ids(locations):
keys = [[loc.domain, loc._id] for loc in locations]
rows = SupplyPointCase.get_db().view(
'commtrack/supply_point_by_loc',
keys=keys,
include_docs=False,
)
return [row['id'] for row in rows]
def get_relevant_supply_point_ids(domain, active_location=None):
"""
Return a list of supply point ids for the selected location
and all of its descendants OR all supply point ids in the domain.
"""
def filter_relevant(queryset):
return queryset.filter(
supply_point_id__isnull=False
).values_list(
'supply_point_id',
flat=True
)
if active_location:
sql_location = active_location.sql_location
supply_point_ids = []
if sql_location.supply_point_id:
supply_point_ids.append(sql_location.supply_point_id)
supply_point_ids += list(
filter_relevant(sql_location.get_descendants())
)
return supply_point_ids
else:
return filter_relevant(SQLLocation.objects.filter(domain=domain))
def product_ids_filtered_by_program(domain, program):
products = Product.by_program_id(domain, program, False)
return [p['_id'] for p in products]
|
ee662e6119ef31978eec62491b6e7ce35f292518
|
python2.7-src/fibonacci_sequence.py
|
python2.7-src/fibonacci_sequence.py
|
#!/usr/bin/python
import sys
def fib(n):
a,b = 0,1
yield a
if n>0 :
yield b
for i in range(2,n+1):
a,b = b, a+b
yield b
num = int(sys.argv[1])
gen = fib(num)
for i in gen:
print i
|
Add function for Fibonacci sequence
|
Add function for Fibonacci sequence
|
Python
|
mit
|
diptin/dipti-coding-samples,diptin/dipti-coding-samples
|
Add function for Fibonacci sequence
|
#!/usr/bin/python
import sys
def fib(n):
a,b = 0,1
yield a
if n>0 :
yield b
for i in range(2,n+1):
a,b = b, a+b
yield b
num = int(sys.argv[1])
gen = fib(num)
for i in gen:
print i
|
<commit_before><commit_msg>Add function for Fibonacci sequence<commit_after>
|
#!/usr/bin/python
import sys
def fib(n):
a,b = 0,1
yield a
if n>0 :
yield b
for i in range(2,n+1):
a,b = b, a+b
yield b
num = int(sys.argv[1])
gen = fib(num)
for i in gen:
print i
|
Add function for Fibonacci sequence#!/usr/bin/python
import sys
def fib(n):
a,b = 0,1
yield a
if n>0 :
yield b
for i in range(2,n+1):
a,b = b, a+b
yield b
num = int(sys.argv[1])
gen = fib(num)
for i in gen:
print i
|
<commit_before><commit_msg>Add function for Fibonacci sequence<commit_after>#!/usr/bin/python
import sys
def fib(n):
a,b = 0,1
yield a
if n>0 :
yield b
for i in range(2,n+1):
a,b = b, a+b
yield b
num = int(sys.argv[1])
gen = fib(num)
for i in gen:
print i
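A quick sanity check of the fib generator above (interactive session; the expected values are worked out by hand rather than taken from the commit):
>>> list(fib(7))
[0, 1, 1, 2, 3, 5, 8, 13]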
|
|
348b7a2df539779e95cb72d00e7577db6740424f
|
corker/tests/test_main.py
|
corker/tests/test_main.py
|
""" Inspired by: http://blog.ianbicking.org/2010/03/12/a-webob-app-example/
"""
from __future__ import absolute_import
from webob import Request, Response, exc
from routes import Mapper
from corker.controller import BaseController, route
from corker.app import Application
class Index(BaseController):
@route('')
def index(self):
return Response('Hi index!\n')
@route('view/{item}')
def view(self, item):
return Response('Hi view %r!\n' % item)
from webtest import TestApp
from nose.tools import eq_
def test_main():
mapper = Mapper()
Index.setup_routes(mapper)
def bob(request, link, **config):
def inner():
return Response("Bob!")
return inner
mapper.connect('bob', '/bob/', controller=bob)
test_app = Application(mapper) #('./')
app = TestApp(test_app)
ret = app.get('/view/4')
eq_(ret.body, "Hi view u'4'!\n")
ret = app.get('/')
eq_(ret.body, "Hi index!\n")
ret = app.get('/bob/')
eq_(ret.body, "Bob!")
|
""" Inspired by: http://blog.ianbicking.org/2010/03/12/a-webob-app-example/
"""
from __future__ import absolute_import
from webob import Request, Response, exc
from routes import Mapper
from corker.controller import BaseController, route
from corker.app import Application
class Index(BaseController):
@route('')
def index(self):
return Response('Hi index!\n')
@route('view/{item}')
def view(self, item):
return Response('Hi view %r!\n' % item)
from webtest import TestApp
from nose.tools import eq_
def test_main():
mapper = Mapper()
Index.setup_routes(mapper)
def bob(request, link, **config):
def inner():
return Response("Bob!")
return inner
mapper.connect('bob', '/bob/', controller=bob)
test_app = Application(mapper) #('./')
app = TestApp(test_app)
ret = app.get('/view/4')
eq_(ret.body, "Hi view u'4'!\n")
ret = app.get('/')
eq_(ret.body, "Hi index!\n")
ret = app.get('/bob/')
eq_(ret.body, "Bob!")
def test_config():
mapper = Mapper()
def bob(request, link, **config):
def inner():
print "C:", config
return Response("Bob! %r" % config)
return inner
mapper.connect('bob', '/bob/', controller=bob)
test_app = Application(mapper, config={'DB_URL': 'sqlite://'}) #('./')
app = TestApp(test_app)
ret = app.get('/bob/')
eq_(ret.body, "Bob! {'config': {'DB_URL': 'sqlite://'}}")
|
Add a test to demo config passing.
|
Add a test to demo config passing.
|
Python
|
bsd-2-clause
|
jd-boyd/corker,vs-networks/corker
|
""" Inspired by: http://blog.ianbicking.org/2010/03/12/a-webob-app-example/
"""
from __future__ import absolute_import
from webob import Request, Response, exc
from routes import Mapper
from corker.controller import BaseController, route
from corker.app import Application
class Index(BaseController):
@route('')
def index(self):
return Response('Hi index!\n')
@route('view/{item}')
def view(self, item):
return Response('Hi view %r!\n' % item)
from webtest import TestApp
from nose.tools import eq_
def test_main():
mapper = Mapper()
Index.setup_routes(mapper)
def bob(request, link, **config):
def inner():
return Response("Bob!")
return inner
mapper.connect('bob', '/bob/', controller=bob)
test_app = Application(mapper) #('./')
app = TestApp(test_app)
ret = app.get('/view/4')
eq_(ret.body, "Hi view u'4'!\n")
ret = app.get('/')
eq_(ret.body, "Hi index!\n")
ret = app.get('/bob/')
eq_(ret.body, "Bob!")
Add a test to demo config passing.
|
""" Inspired by: http://blog.ianbicking.org/2010/03/12/a-webob-app-example/
"""
from __future__ import absolute_import
from webob import Request, Response, exc
from routes import Mapper
from corker.controller import BaseController, route
from corker.app import Application
class Index(BaseController):
@route('')
def index(self):
return Response('Hi index!\n')
@route('view/{item}')
def view(self, item):
return Response('Hi view %r!\n' % item)
from webtest import TestApp
from nose.tools import eq_
def test_main():
mapper = Mapper()
Index.setup_routes(mapper)
def bob(request, link, **config):
def inner():
return Response("Bob!")
return inner
mapper.connect('bob', '/bob/', controller=bob)
test_app = Application(mapper) #('./')
app = TestApp(test_app)
ret = app.get('/view/4')
eq_(ret.body, "Hi view u'4'!\n")
ret = app.get('/')
eq_(ret.body, "Hi index!\n")
ret = app.get('/bob/')
eq_(ret.body, "Bob!")
def test_config():
mapper = Mapper()
def bob(request, link, **config):
def inner():
print "C:", config
return Response("Bob! %r" % config)
return inner
mapper.connect('bob', '/bob/', controller=bob)
test_app = Application(mapper, config={'DB_URL': 'sqlite://'}) #('./')
app = TestApp(test_app)
ret = app.get('/bob/')
eq_(ret.body, "Bob! {'config': {'DB_URL': 'sqlite://'}}")
|
<commit_before>""" Inspired by: http://blog.ianbicking.org/2010/03/12/a-webob-app-example/
"""
from __future__ import absolute_import
from webob import Request, Response, exc
from routes import Mapper
from corker.controller import BaseController, route
from corker.app import Application
class Index(BaseController):
@route('')
def index(self):
return Response('Hi index!\n')
@route('view/{item}')
def view(self, item):
return Response('Hi view %r!\n' % item)
from webtest import TestApp
from nose.tools import eq_
def test_main():
mapper = Mapper()
Index.setup_routes(mapper)
def bob(request, link, **config):
def inner():
return Response("Bob!")
return inner
mapper.connect('bob', '/bob/', controller=bob)
test_app = Application(mapper) #('./')
app = TestApp(test_app)
ret = app.get('/view/4')
eq_(ret.body, "Hi view u'4'!\n")
ret = app.get('/')
eq_(ret.body, "Hi index!\n")
ret = app.get('/bob/')
eq_(ret.body, "Bob!")
<commit_msg>Add a test to demo config passing.<commit_after>
|
""" Inspired by: http://blog.ianbicking.org/2010/03/12/a-webob-app-example/
"""
from __future__ import absolute_import
from webob import Request, Response, exc
from routes import Mapper
from corker.controller import BaseController, route
from corker.app import Application
class Index(BaseController):
@route('')
def index(self):
return Response('Hi index!\n')
@route('view/{item}')
def view(self, item):
return Response('Hi view %r!\n' % item)
from webtest import TestApp
from nose.tools import eq_
def test_main():
mapper = Mapper()
Index.setup_routes(mapper)
def bob(request, link, **config):
def inner():
return Response("Bob!")
return inner
mapper.connect('bob', '/bob/', controller=bob)
test_app = Application(mapper) #('./')
app = TestApp(test_app)
ret = app.get('/view/4')
eq_(ret.body, "Hi view u'4'!\n")
ret = app.get('/')
eq_(ret.body, "Hi index!\n")
ret = app.get('/bob/')
eq_(ret.body, "Bob!")
def test_config():
mapper = Mapper()
def bob(request, link, **config):
def inner():
print "C:", config
return Response("Bob! %r" % config)
return inner
mapper.connect('bob', '/bob/', controller=bob)
test_app = Application(mapper, config={'DB_URL': 'sqlite://'}) #('./')
app = TestApp(test_app)
ret = app.get('/bob/')
eq_(ret.body, "Bob! {'config': {'DB_URL': 'sqlite://'}}")
|
""" Inspired by: http://blog.ianbicking.org/2010/03/12/a-webob-app-example/
"""
from __future__ import absolute_import
from webob import Request, Response, exc
from routes import Mapper
from corker.controller import BaseController, route
from corker.app import Application
class Index(BaseController):
@route('')
def index(self):
return Response('Hi index!\n')
@route('view/{item}')
def view(self, item):
return Response('Hi view %r!\n' % item)
from webtest import TestApp
from nose.tools import eq_
def test_main():
mapper = Mapper()
Index.setup_routes(mapper)
def bob(request, link, **config):
def inner():
return Response("Bob!")
return inner
mapper.connect('bob', '/bob/', controller=bob)
test_app = Application(mapper) #('./')
app = TestApp(test_app)
ret = app.get('/view/4')
eq_(ret.body, "Hi view u'4'!\n")
ret = app.get('/')
eq_(ret.body, "Hi index!\n")
ret = app.get('/bob/')
eq_(ret.body, "Bob!")
Add a test to demo config passing.""" Inspired by: http://blog.ianbicking.org/2010/03/12/a-webob-app-example/
"""
from __future__ import absolute_import
from webob import Request, Response, exc
from routes import Mapper
from corker.controller import BaseController, route
from corker.app import Application
class Index(BaseController):
@route('')
def index(self):
return Response('Hi index!\n')
@route('view/{item}')
def view(self, item):
return Response('Hi view %r!\n' % item)
from webtest import TestApp
from nose.tools import eq_
def test_main():
mapper = Mapper()
Index.setup_routes(mapper)
def bob(request, link, **config):
def inner():
return Response("Bob!")
return inner
mapper.connect('bob', '/bob/', controller=bob)
test_app = Application(mapper) #('./')
app = TestApp(test_app)
ret = app.get('/view/4')
eq_(ret.body, "Hi view u'4'!\n")
ret = app.get('/')
eq_(ret.body, "Hi index!\n")
ret = app.get('/bob/')
eq_(ret.body, "Bob!")
def test_config():
mapper = Mapper()
def bob(request, link, **config):
def inner():
print "C:", config
return Response("Bob! %r" % config)
return inner
mapper.connect('bob', '/bob/', controller=bob)
test_app = Application(mapper, config={'DB_URL': 'sqlite://'}) #('./')
app = TestApp(test_app)
ret = app.get('/bob/')
eq_(ret.body, "Bob! {'config': {'DB_URL': 'sqlite://'}}")
|
<commit_before>""" Inspired by: http://blog.ianbicking.org/2010/03/12/a-webob-app-example/
"""
from __future__ import absolute_import
from webob import Request, Response, exc
from routes import Mapper
from corker.controller import BaseController, route
from corker.app import Application
class Index(BaseController):
@route('')
def index(self):
return Response('Hi index!\n')
@route('view/{item}')
def view(self, item):
return Response('Hi view %r!\n' % item)
from webtest import TestApp
from nose.tools import eq_
def test_main():
mapper = Mapper()
Index.setup_routes(mapper)
def bob(request, link, **config):
def inner():
return Response("Bob!")
return inner
mapper.connect('bob', '/bob/', controller=bob)
test_app = Application(mapper) #('./')
app = TestApp(test_app)
ret = app.get('/view/4')
eq_(ret.body, "Hi view u'4'!\n")
ret = app.get('/')
eq_(ret.body, "Hi index!\n")
ret = app.get('/bob/')
eq_(ret.body, "Bob!")
<commit_msg>Add a test to demo config passing.<commit_after>""" Inspired by: http://blog.ianbicking.org/2010/03/12/a-webob-app-example/
"""
from __future__ import absolute_import
from webob import Request, Response, exc
from routes import Mapper
from corker.controller import BaseController, route
from corker.app import Application
class Index(BaseController):
@route('')
def index(self):
return Response('Hi index!\n')
@route('view/{item}')
def view(self, item):
return Response('Hi view %r!\n' % item)
from webtest import TestApp
from nose.tools import eq_
def test_main():
mapper = Mapper()
Index.setup_routes(mapper)
def bob(request, link, **config):
def inner():
return Response("Bob!")
return inner
mapper.connect('bob', '/bob/', controller=bob)
test_app = Application(mapper) #('./')
app = TestApp(test_app)
ret = app.get('/view/4')
eq_(ret.body, "Hi view u'4'!\n")
ret = app.get('/')
eq_(ret.body, "Hi index!\n")
ret = app.get('/bob/')
eq_(ret.body, "Bob!")
def test_config():
mapper = Mapper()
def bob(request, link, **config):
def inner():
print "C:", config
return Response("Bob! %r" % config)
return inner
mapper.connect('bob', '/bob/', controller=bob)
test_app = Application(mapper, config={'DB_URL': 'sqlite://'}) #('./')
app = TestApp(test_app)
ret = app.get('/bob/')
eq_(ret.body, "Bob! {'config': {'DB_URL': 'sqlite://'}}")
|
97aa284612af51f3550052788f8a2e505d2f8cb9
|
tests/test_0div.py
|
tests/test_0div.py
|
import angr
import nose
import os
test_location = os.path.join(os.path.dirname(__file__), '../../binaries/tests')
def run_0div(arch):
# check that we run in unicorn up to the zero-div site, fall back, try again in angr, and error correctly.
p = angr.Project(os.path.join(test_location, arch, 'test_0div'))
s = p.factory.entry_state(add_options=angr.options.unicorn)
simgr = p.factory.simulation_manager(s)
simgr.step(n=5)
nose.tools.assert_equal(len(simgr.active), 1)
simgr.step()
nose.tools.assert_equal(len(simgr.errored), 1)
nose.tools.assert_true(isinstance(simgr.errored[0].error, angr.errors.SimZeroDivisionException))
def test_0div():
yield run_0div, 'i386'
yield run_0div, 'x86_64'
if __name__ == '__main__':
for func, arg in test_0div():
func(arg)
|
Add tests for correct behavior on divide-by-zero
|
Add tests for correct behavior on divide-by-zero
|
Python
|
bsd-2-clause
|
iamahuman/angr,chubbymaggie/angr,chubbymaggie/angr,chubbymaggie/angr,iamahuman/angr,axt/angr,tyb0807/angr,angr/angr,schieb/angr,f-prettyland/angr,angr/angr,axt/angr,axt/angr,f-prettyland/angr,angr/angr,f-prettyland/angr,schieb/angr,tyb0807/angr,schieb/angr,iamahuman/angr,tyb0807/angr
|
Add tests for correct behavior on divide-by-zero
|
import angr
import nose
import os
test_location = os.path.join(os.path.dirname(__file__), '../../binaries/tests')
def run_0div(arch):
# check that we run in unicorn up to the zero-div site, fall back, try again in angr, and error correctly.
p = angr.Project(os.path.join(test_location, arch, 'test_0div'))
s = p.factory.entry_state(add_options=angr.options.unicorn)
simgr = p.factory.simulation_manager(s)
simgr.step(n=5)
nose.tools.assert_equal(len(simgr.active), 1)
simgr.step()
nose.tools.assert_equal(len(simgr.errored), 1)
nose.tools.assert_true(isinstance(simgr.errored[0].error, angr.errors.SimZeroDivisionException))
def test_0div():
yield run_0div, 'i386'
yield run_0div, 'x86_64'
if __name__ == '__main__':
for func, arg in test_0div():
func(arg)
|
<commit_before><commit_msg>Add tests for correct behavior on divide-by-zero<commit_after>
|
import angr
import nose
import os
test_location = os.path.join(os.path.dirname(__file__), '../../binaries/tests')
def run_0div(arch):
# check that we run in unicorn up to the zero-div site, fall back, try again in angr, and error correctly.
p = angr.Project(os.path.join(test_location, arch, 'test_0div'))
s = p.factory.entry_state(add_options=angr.options.unicorn)
simgr = p.factory.simulation_manager(s)
simgr.step(n=5)
nose.tools.assert_equal(len(simgr.active), 1)
simgr.step()
nose.tools.assert_equal(len(simgr.errored), 1)
nose.tools.assert_true(isinstance(simgr.errored[0].error, angr.errors.SimZeroDivisionException))
def test_0div():
yield run_0div, 'i386'
yield run_0div, 'x86_64'
if __name__ == '__main__':
for func, arg in test_0div():
func(arg)
|
Add tests for correct behavior on divide-by-zeroimport angr
import nose
import os
test_location = os.path.join(os.path.dirname(__file__), '../../binaries/tests')
def run_0div(arch):
# check that we run in unicorn up to the zero-div site, fall back, try again in angr, and error correctly.
p = angr.Project(os.path.join(test_location, arch, 'test_0div'))
s = p.factory.entry_state(add_options=angr.options.unicorn)
simgr = p.factory.simulation_manager(s)
simgr.step(n=5)
nose.tools.assert_equal(len(simgr.active), 1)
simgr.step()
nose.tools.assert_equal(len(simgr.errored), 1)
nose.tools.assert_true(isinstance(simgr.errored[0].error, angr.errors.SimZeroDivisionException))
def test_0div():
yield run_0div, 'i386'
yield run_0div, 'x86_64'
if __name__ == '__main__':
for func, arg in test_0div():
func(arg)
|
<commit_before><commit_msg>Add tests for correct behavior on divide-by-zero<commit_after>import angr
import nose
import os
test_location = os.path.join(os.path.dirname(__file__), '../../binaries/tests')
def run_0div(arch):
# check that we run in unicorn up to the zero-div site, fall back, try again in angr, and error correctly.
p = angr.Project(os.path.join(test_location, arch, 'test_0div'))
s = p.factory.entry_state(add_options=angr.options.unicorn)
simgr = p.factory.simulation_manager(s)
simgr.step(n=5)
nose.tools.assert_equal(len(simgr.active), 1)
simgr.step()
nose.tools.assert_equal(len(simgr.errored), 1)
nose.tools.assert_true(isinstance(simgr.errored[0].error, angr.errors.SimZeroDivisionException))
def test_0div():
yield run_0div, 'i386'
yield run_0div, 'x86_64'
if __name__ == '__main__':
for func, arg in test_0div():
func(arg)
|
|
ab4f9340adf6d20cdd7585fd3ee97ce8425b1458
|
tests/data_layer/conftest.py
|
tests/data_layer/conftest.py
|
import os
from pytest import fixture
from smif.data_layer import DatafileInterface
from smif.data_layer.load import dump
@fixture(scope='function')
def get_handler_csv(setup_folder_structure, project_config):
basefolder = setup_folder_structure
project_config_path = os.path.join(
str(basefolder), 'config', 'project.yml')
dump(project_config, project_config_path)
return DatafileInterface(str(basefolder), 'local_csv')
@fixture(scope='function')
def get_handler_binary(setup_folder_structure, project_config):
basefolder = setup_folder_structure
project_config_path = os.path.join(
str(basefolder), 'config', 'project.yml')
dump(project_config, project_config_path)
return DatafileInterface(str(basefolder), 'local_binary')
|
Move fixtures into data layer
|
Move fixtures into data layer
|
Python
|
mit
|
willu47/smif,willu47/smif,nismod/smif,nismod/smif,tomalrussell/smif,nismod/smif,tomalrussell/smif,willu47/smif,tomalrussell/smif,nismod/smif,tomalrussell/smif,willu47/smif
|
Move fixtures into data layer
|
import os
from pytest import fixture
from smif.data_layer import DatafileInterface
from smif.data_layer.load import dump
@fixture(scope='function')
def get_handler_csv(setup_folder_structure, project_config):
basefolder = setup_folder_structure
project_config_path = os.path.join(
str(basefolder), 'config', 'project.yml')
dump(project_config, project_config_path)
return DatafileInterface(str(basefolder), 'local_csv')
@fixture(scope='function')
def get_handler_binary(setup_folder_structure, project_config):
basefolder = setup_folder_structure
project_config_path = os.path.join(
str(basefolder), 'config', 'project.yml')
dump(project_config, project_config_path)
return DatafileInterface(str(basefolder), 'local_binary')
|
<commit_before><commit_msg>Move fixtures into data layer<commit_after>
|
import os
from pytest import fixture
from smif.data_layer import DatafileInterface
from smif.data_layer.load import dump
@fixture(scope='function')
def get_handler_csv(setup_folder_structure, project_config):
basefolder = setup_folder_structure
project_config_path = os.path.join(
str(basefolder), 'config', 'project.yml')
dump(project_config, project_config_path)
return DatafileInterface(str(basefolder), 'local_csv')
@fixture(scope='function')
def get_handler_binary(setup_folder_structure, project_config):
basefolder = setup_folder_structure
project_config_path = os.path.join(
str(basefolder), 'config', 'project.yml')
dump(project_config, project_config_path)
return DatafileInterface(str(basefolder), 'local_binary')
|
Move fixtures into data layerimport os
from pytest import fixture
from smif.data_layer import DatafileInterface
from smif.data_layer.load import dump
@fixture(scope='function')
def get_handler_csv(setup_folder_structure, project_config):
basefolder = setup_folder_structure
project_config_path = os.path.join(
str(basefolder), 'config', 'project.yml')
dump(project_config, project_config_path)
return DatafileInterface(str(basefolder), 'local_csv')
@fixture(scope='function')
def get_handler_binary(setup_folder_structure, project_config):
basefolder = setup_folder_structure
project_config_path = os.path.join(
str(basefolder), 'config', 'project.yml')
dump(project_config, project_config_path)
return DatafileInterface(str(basefolder), 'local_binary')
|
<commit_before><commit_msg>Move fixtures into data layer<commit_after>import os
from pytest import fixture
from smif.data_layer import DatafileInterface
from smif.data_layer.load import dump
@fixture(scope='function')
def get_handler_csv(setup_folder_structure, project_config):
basefolder = setup_folder_structure
project_config_path = os.path.join(
str(basefolder), 'config', 'project.yml')
dump(project_config, project_config_path)
return DatafileInterface(str(basefolder), 'local_csv')
@fixture(scope='function')
def get_handler_binary(setup_folder_structure, project_config):
basefolder = setup_folder_structure
project_config_path = os.path.join(
str(basefolder), 'config', 'project.yml')
dump(project_config, project_config_path)
return DatafileInterface(str(basefolder), 'local_binary')
|
|
06aa3468bf67760ddc15c50e3ff98218264fdfce
|
migrations/versions/194_extend_eas_folder_id.py
|
migrations/versions/194_extend_eas_folder_id.py
|
"""extend size of eas_folder_id and eas_parent_id
Revision ID: 211e93aff1e1
Revises: 2493281d621
Create Date: 2015-03-20 18:50:29.961734
"""
# revision identifiers, used by Alembic.
revision = '69e93aef3e9'
down_revision = '691fa97024d'
from alembic import op
from sqlalchemy.sql import text
from sqlalchemy.ext.declarative import declarative_base
def upgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersyncstatus' in Base.metadata.tables:
conn.execute(text("ALTER TABLE easfoldersyncstatus MODIFY eas_folder_id VARCHAR(191),"
" MODIFY eas_parent_id VARCHAR(191)"))
def downgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersyncstatus' in Base.metadata.tables:
conn.execute(text("ALTER TABLE easfoldersyncstatus MODIFY eas_folder_id VARCHAR(64),"
" MODIFY eas_parent_id VARCHAR(64)"))
|
Fix EAS folder id length
|
Fix EAS folder id length
Summary:
Fixes T2458. Note that because we're using MySQL 5.6, the migration happens offline and locks the table. However, since we only have about 20k columns, it shouldn't take too long.
Test Plan: Upgraded a db dump.
Reviewers: kav-ya, emfree
Reviewed By: emfree
Subscribers: spang
Maniphest Tasks: T2458
Differential Revision: https://phab.nylas.com/D1827
|
Python
|
agpl-3.0
|
ErinCall/sync-engine,jobscore/sync-engine,Eagles2F/sync-engine,closeio/nylas,gale320/sync-engine,ErinCall/sync-engine,gale320/sync-engine,Eagles2F/sync-engine,nylas/sync-engine,nylas/sync-engine,gale320/sync-engine,jobscore/sync-engine,closeio/nylas,closeio/nylas,Eagles2F/sync-engine,jobscore/sync-engine,nylas/sync-engine,closeio/nylas,ErinCall/sync-engine,nylas/sync-engine,gale320/sync-engine,Eagles2F/sync-engine,Eagles2F/sync-engine,ErinCall/sync-engine,ErinCall/sync-engine,gale320/sync-engine,jobscore/sync-engine
|
Fix EAS folder id length
Summary:
Fixes T2458. Note that because we're using MySQL 5.6, the migration happens offline and locks the table. However, since we only have about 20k columns, it shouldn't take too long.
Test Plan: Upgraded a db dump.
Reviewers: kav-ya, emfree
Reviewed By: emfree
Subscribers: spang
Maniphest Tasks: T2458
Differential Revision: https://phab.nylas.com/D1827
|
"""extend size of eas_folder_id and eas_parent_id
Revision ID: 211e93aff1e1
Revises: 2493281d621
Create Date: 2015-03-20 18:50:29.961734
"""
# revision identifiers, used by Alembic.
revision = '69e93aef3e9'
down_revision = '691fa97024d'
from alembic import op
from sqlalchemy.sql import text
from sqlalchemy.ext.declarative import declarative_base
def upgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersyncstatus' in Base.metadata.tables:
conn.execute(text("ALTER TABLE easfoldersyncstatus MODIFY eas_folder_id VARCHAR(191),"
" MODIFY eas_parent_id VARCHAR(191)"))
def downgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersyncstatus' in Base.metadata.tables:
conn.execute(text("ALTER TABLE easfoldersyncstatus MODIFY eas_folder_id VARCHAR(64),"
" MODIFY eas_parent_id VARCHAR(64)"))
|
<commit_before><commit_msg>Fix EAS folder id length
Summary:
Fixes T2458. Note that because we're using MySQL 5.6, the migration happens offline and locks the table. However, since we only have about 20k columns, it shouldn't take too long.
Test Plan: Upgraded a db dump.
Reviewers: kav-ya, emfree
Reviewed By: emfree
Subscribers: spang
Maniphest Tasks: T2458
Differential Revision: https://phab.nylas.com/D1827<commit_after>
|
"""extend size of eas_folder_id and eas_parent_id
Revision ID: 211e93aff1e1
Revises: 2493281d621
Create Date: 2015-03-20 18:50:29.961734
"""
# revision identifiers, used by Alembic.
revision = '69e93aef3e9'
down_revision = '691fa97024d'
from alembic import op
from sqlalchemy.sql import text
from sqlalchemy.ext.declarative import declarative_base
def upgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersyncstatus' in Base.metadata.tables:
conn.execute(text("ALTER TABLE easfoldersyncstatus MODIFY eas_folder_id VARCHAR(191),"
" MODIFY eas_parent_id VARCHAR(191)"))
def downgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersyncstatus' in Base.metadata.tables:
conn.execute(text("ALTER TABLE easfoldersyncstatus MODIFY eas_folder_id VARCHAR(64),"
" MODIFY eas_parent_id VARCHAR(64)"))
|
Fix EAS folder id length
Summary:
Fixes T2458. Note that because we're using MySQL 5.6, the migration happens offline and locks the table. However, since we only have about 20k columns, it shouldn't take too long.
Test Plan: Upgraded a db dump.
Reviewers: kav-ya, emfree
Reviewed By: emfree
Subscribers: spang
Maniphest Tasks: T2458
Differential Revision: https://phab.nylas.com/D1827"""extend size of eas_folder_id and eas_parent_id
Revision ID: 211e93aff1e1
Revises: 2493281d621
Create Date: 2015-03-20 18:50:29.961734
"""
# revision identifiers, used by Alembic.
revision = '69e93aef3e9'
down_revision = '691fa97024d'
from alembic import op
from sqlalchemy.sql import text
from sqlalchemy.ext.declarative import declarative_base
def upgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersyncstatus' in Base.metadata.tables:
conn.execute(text("ALTER TABLE easfoldersyncstatus MODIFY eas_folder_id VARCHAR(191),"
" MODIFY eas_parent_id VARCHAR(191)"))
def downgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersyncstatus' in Base.metadata.tables:
conn.execute(text("ALTER TABLE easfoldersyncstatus MODIFY eas_folder_id VARCHAR(64),"
" MODIFY eas_parent_id VARCHAR(64)"))
|
<commit_before><commit_msg>Fix EAS folder id length
Summary:
Fixes T2458. Note that because we're using MySQL 5.6, the migration happens offline and locks the table. However, since we only have about 20k columns, it shouldn't take too long.
Test Plan: Upgraded a db dump.
Reviewers: kav-ya, emfree
Reviewed By: emfree
Subscribers: spang
Maniphest Tasks: T2458
Differential Revision: https://phab.nylas.com/D1827<commit_after>"""extend size of eas_folder_id and eas_parent_id
Revision ID: 211e93aff1e1
Revises: 2493281d621
Create Date: 2015-03-20 18:50:29.961734
"""
# revision identifiers, used by Alembic.
revision = '69e93aef3e9'
down_revision = '691fa97024d'
from alembic import op
from sqlalchemy.sql import text
from sqlalchemy.ext.declarative import declarative_base
def upgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersyncstatus' in Base.metadata.tables:
conn.execute(text("ALTER TABLE easfoldersyncstatus MODIFY eas_folder_id VARCHAR(191),"
" MODIFY eas_parent_id VARCHAR(191)"))
def downgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersyncstatus' in Base.metadata.tables:
conn.execute(text("ALTER TABLE easfoldersyncstatus MODIFY eas_folder_id VARCHAR(64),"
" MODIFY eas_parent_id VARCHAR(64)"))
|
|
1b673070e73b44d2c65039367d3d7cbe780cadc7
|
tests/test_trips_ontology.py
|
tests/test_trips_ontology.py
|
from bioagents.resources.trips_ont_manager import trips_isa
def test_isa_disease():
assert trips_isa('diabetes', 'disease')
assert not trips_isa('disease', 'diabetes')
assert trips_isa('cancer', 'medical-disorders-and-conditions')
assert trips_isa('ONT::CANCER', 'disease')
assert trips_isa('ont::cancer', 'disease')
assert trips_isa('CANCER', 'ONT::DISEASE')
|
Test TRIPS ontology with disease names
|
Test TRIPS ontology with disease names
|
Python
|
bsd-2-clause
|
bgyori/bioagents,sorgerlab/bioagents
|
Test TRIPS ontology with disease names
|
from bioagents.resources.trips_ont_manager import trips_isa
def test_isa_disease():
assert trips_isa('diabetes', 'disease')
assert not trips_isa('disease', 'diabetes')
assert trips_isa('cancer', 'medical-disorders-and-conditions')
assert trips_isa('ONT::CANCER', 'disease')
assert trips_isa('ont::cancer', 'disease')
assert trips_isa('CANCER', 'ONT::DISEASE')
|
<commit_before><commit_msg>Test TRIPS ontology with disease names<commit_after>
|
from bioagents.resources.trips_ont_manager import trips_isa
def test_isa_disease():
assert trips_isa('diabetes', 'disease')
assert not trips_isa('disease', 'diabetes')
assert trips_isa('cancer', 'medical-disorders-and-conditions')
assert trips_isa('ONT::CANCER', 'disease')
assert trips_isa('ont::cancer', 'disease')
assert trips_isa('CANCER', 'ONT::DISEASE')
|
Test TRIPS ontology with disease namesfrom bioagents.resources.trips_ont_manager import trips_isa
def test_isa_disease():
assert trips_isa('diabetes', 'disease')
assert not trips_isa('disease', 'diabetes')
assert trips_isa('cancer', 'medical-disorders-and-conditions')
assert trips_isa('ONT::CANCER', 'disease')
assert trips_isa('ont::cancer', 'disease')
assert trips_isa('CANCER', 'ONT::DISEASE')
|
<commit_before><commit_msg>Test TRIPS ontology with disease names<commit_after>from bioagents.resources.trips_ont_manager import trips_isa
def test_isa_disease():
assert trips_isa('diabetes', 'disease')
assert not trips_isa('disease', 'diabetes')
assert trips_isa('cancer', 'medical-disorders-and-conditions')
assert trips_isa('ONT::CANCER', 'disease')
assert trips_isa('ont::cancer', 'disease')
assert trips_isa('CANCER', 'ONT::DISEASE')
|
|
2340167e3be058973415a1d53a358e08a396f76e
|
bin/split_allseq.py
|
bin/split_allseq.py
|
#! /usr/bin/env python3
import os, argparse
def split_fs(output_dir_fn):
i = 0
while True:
i += 1
fn = output_dir_fn + '/' + str(i) + '.fna'
yield open(fn, 'w'), i
parser = argparse.ArgumentParser(
description='Split a file containing all the sequences in multiple files')
parser.add_argument('input_file', help='Fasta file containing all the sequences')
parser.add_argument('output_dir', help='Output directory')
args = parser.parse_args()
input_f = args.input_file
output_dir_fn = args.output_dir
start_seq = '>'
split_f = split_fs(output_dir_fn)
outfile = None
with open(input_f, 'r') as fasta:
for line in fasta:
if start_seq not in line:
outfile.write(line)
else:
if outfile:
outfile.close()
outfile, i = next(split_f)
outfile.write(line)
outfile.close()
|
Add script to split HMP fasta into multiple files
|
Add script to split HMP fasta into multiple files
Former-commit-id: c7ec294cec9435208ab552ff76df0245a6825082
|
Python
|
mit
|
karel-brinda/prophyle,karel-brinda/prophyle,karel-brinda/prophyle,karel-brinda/prophyle
|
Add script to split HMP fasta into multiple files
Former-commit-id: c7ec294cec9435208ab552ff76df0245a6825082
|
#! /usr/bin/env python3
import os, argparse
def split_fs(output_dir_fn):
i = 0
while True:
i += 1
fn = output_dir_fn + '/' + str(i) + '.fna'
yield open(fn, 'w'), i
parser = argparse.ArgumentParser(
description='Split a file containing all the sequences in multiple files')
parser.add_argument('input_file', help='Fasta file containing all the sequences')
parser.add_argument('output_dir', help='Output directory')
args = parser.parse_args()
input_f = args.input_file
output_dir_fn = args.output_dir
start_seq = '>'
split_f = split_fs(output_dir_fn)
outfile = None
with open(input_f, 'r') as fasta:
for line in fasta:
if start_seq not in line:
outfile.write(line)
else:
if outfile:
outfile.close()
outfile, i = next(split_f)
outfile.write(line)
outfile.close()
|
<commit_before><commit_msg>Add script to split HMP fasta into multiple files
Former-commit-id: c7ec294cec9435208ab552ff76df0245a6825082<commit_after>
|
#! /usr/bin/env python3
import os, argparse
def split_fs(output_dir_fn):
i = 0
while True:
i += 1
fn = output_dir_fn + '/' + str(i) + '.fna'
yield open(fn, 'w'), i
parser = argparse.ArgumentParser(
description='Split a file containing all the sequences in multiple files')
parser.add_argument('input_file', help='Fasta file containing all the sequences')
parser.add_argument('output_dir', help='Output directory')
args = parser.parse_args()
input_f = args.input_file
output_dir_fn = args.output_dir
start_seq = '>'
split_f = split_fs(output_dir_fn)
outfile = None
with open(input_f, 'r') as fasta:
for line in fasta:
if start_seq not in line:
outfile.write(line)
else:
if outfile:
outfile.close()
outfile, i = next(split_f)
outfile.write(line)
outfile.close()
|
Add script to split HMP fasta into multiple files
Former-commit-id: c7ec294cec9435208ab552ff76df0245a6825082#! /usr/bin/env python3
import os, argparse
def split_fs(output_dir_fn):
i = 0
while True:
i += 1
fn = output_dir_fn + '/' + str(i) + '.fna'
yield open(fn, 'w'), i
parser = argparse.ArgumentParser(
description='Split a file containing all the sequences in multiple files')
parser.add_argument('input_file', help='Fasta file containing all the sequences')
parser.add_argument('output_dir', help='Output directory')
args = parser.parse_args()
input_f = args.input_file
output_dir_fn = args.output_dir
start_seq = '>'
split_f = split_fs(output_dir_fn)
outfile = None
with open(input_f, 'r') as fasta:
for line in fasta:
if start_seq not in line:
outfile.write(line)
else:
if outfile:
outfile.close()
outfile, i = next(split_f)
outfile.write(line)
outfile.close()
|
<commit_before><commit_msg>Add script to split HMP fasta into multiple files
Former-commit-id: c7ec294cec9435208ab552ff76df0245a6825082<commit_after>#! /usr/bin/env python3
import os, argparse
def split_fs(output_dir_fn):
i = 0
while True:
i += 1
fn = output_dir_fn + '/' + str(i) + '.fna'
yield open(fn, 'w'), i
parser = argparse.ArgumentParser(
description='Split a file containing all the sequences in multiple files')
parser.add_argument('input_file', help='Fasta file containing all the sequences')
parser.add_argument('output_dir', help='Output directory')
args = parser.parse_args()
input_f = args.input_file
output_dir_fn = args.output_dir
start_seq = '>'
split_f = split_fs(output_dir_fn)
outfile = None
with open(input_f, 'r') as fasta:
for line in fasta:
if start_seq not in line:
outfile.write(line)
else:
if outfile:
outfile.close()
outfile, i = next(split_f)
outfile.write(line)
outfile.close()
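The split_fs helper above simply hands out numbered output files on demand; a minimal illustration (the directory name is hypothetical and must already exist):
gen = split_fs('/tmp/split_out')
f1, i1 = next(gen)   # opens /tmp/split_out/1.fna, i1 == 1
f2, i2 = next(gen)   # opens /tmp/split_out/2.fna, i2 == 2
f1.close(); f2.close()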
|
|
21f7d9d34b4e5483319372b2ca07b49a7ded8b23
|
bin/gensim2sense.py
|
bin/gensim2sense.py
|
from sense2vec.vectors import VectorMap
from gensim.models import Word2Vec
import plac
@plac.annotations(
gensim_model_path=("Location of gensim's .bin file"),
out_dir=("Location of output directory"),
min_count=("Min count", "option", "m", int),
)
def main(gensim_model_path, out_dir, min_count=None):
"""Convert a gensim.models.Word2Vec file to VectorMap format"""
gensim_model = Word2Vec.load(gensim_model_path)
vector_map = VectorMap(128)
if min_count is None:
min_count = gensim_model.min_count
for string in gensim_model.vocab:
vocab = gensim_model.vocab[string]
freq, idx = vocab.count, vocab.index
if freq < min_count:
continue
vector = gensim_model.syn0[idx]
vector_map.borrow(string, freq, vector)
vector_map.save(out_dir)
if __name__ == '__main__':
plac.call(main)
|
Add gensim to sense2vec format converter script
|
Add gensim to sense2vec format converter script
|
Python
|
mit
|
spacy-io/sense2vec,spacy-io/sense2vec,spacy-io/sense2vec
|
Add gensim to sense2vec format converter script
|
from sense2vec.vectors import VectorMap
from gensim.models import Word2Vec
import plac
@plac.annotations(
gensim_model_path=("Location of gensim's .bin file"),
out_dir=("Location of output directory"),
min_count=("Min count", "option", "m", int),
)
def main(gensim_model_path, out_dir, min_count=None):
"""Convert a gensim.models.Word2Vec file to VectorMap format"""
gensim_model = Word2Vec.load(gensim_model_path)
vector_map = VectorMap(128)
if min_count is None:
min_count = gensim_model.min_count
for string in gensim_model.vocab:
vocab = gensim_model.vocab[string]
freq, idx = vocab.count, vocab.index
if freq < min_count:
continue
vector = gensim_model.syn0[idx]
vector_map.borrow(string, freq, vector)
vector_map.save(out_dir)
if __name__ == '__main__':
plac.call(main)
|
<commit_before><commit_msg>Add gensim to sense2vec format converter script<commit_after>
|
from sense2vec.vectors import VectorMap
from gensim.models import Word2Vec
import plac
@plac.annotations(
gensim_model_path=("Location of gensim's .bin file"),
out_dir=("Location of output directory"),
min_count=("Min count", "option", "m", int),
)
def main(gensim_model_path, out_dir, min_count=None):
"""Convert a gensim.models.Word2Vec file to VectorMap format"""
gensim_model = Word2Vec.load(gensim_model_path)
vector_map = VectorMap(128)
if min_count is None:
min_count = gensim_model.min_count
for string in gensim_model.vocab:
vocab = gensim_model.vocab[string]
freq, idx = vocab.count, vocab.index
if freq < min_count:
continue
vector = gensim_model.syn0[idx]
vector_map.borrow(string, freq, vector)
vector_map.save(out_dir)
if __name__ == '__main__':
plac.call(main)
|
Add gensim to sense2vec format converter scriptfrom sense2vec.vectors import VectorMap
from gensim.models import Word2Vec
import plac
@plac.annotations(
gensim_model_path=("Location of gensim's .bin file"),
out_dir=("Location of output directory"),
min_count=("Min count", "option", "m", int),
)
def main(gensim_model_path, out_dir, min_count=None):
"""Convert a gensim.models.Word2Vec file to VectorMap format"""
gensim_model = Word2Vec.load(gensim_model_path)
vector_map = VectorMap(128)
if min_count is None:
min_count = gensim_model.min_count
for string in gensim_model.vocab:
vocab = gensim_model.vocab[string]
freq, idx = vocab.count, vocab.index
if freq < min_count:
continue
vector = gensim_model.syn0[idx]
vector_map.borrow(string, freq, vector)
vector_map.save(out_dir)
if __name__ == '__main__':
plac.call(main)
|
<commit_before><commit_msg>Add gensim to sense2vec format converter script<commit_after>from sense2vec.vectors import VectorMap
from gensim.models import Word2Vec
import plac
@plac.annotations(
gensim_model_path=("Location of gensim's .bin file"),
out_dir=("Location of output directory"),
min_count=("Min count", "option", "m", int),
)
def main(gensim_model_path, out_dir, min_count=None):
"""Convert a gensim.models.Word2Vec file to VectorMap format"""
gensim_model = Word2Vec.load(gensim_model_path)
vector_map = VectorMap(128)
if min_count is None:
min_count = gensim_model.min_count
for string in gensim_model.vocab:
vocab = gensim_model.vocab[string]
freq, idx = vocab.count, vocab.index
if freq < min_count:
continue
vector = gensim_model.syn0[idx]
vector_map.borrow(string, freq, vector)
vector_map.save(out_dir)
if __name__ == '__main__':
plac.call(main)
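For reference, the converter can be driven either through the plac command line or by calling main() directly; the model path, output directory and min_count below are illustrative only:
# roughly equivalent to: python gensim2sense.py my_vectors.bin out_dir -m 50
main('my_vectors.bin', 'out_dir', min_count=50)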
|
|
59ecd26da00145c7122dd371e2f116acbafee387
|
confusion_matrix.py
|
confusion_matrix.py
|
import matplotlib.pylab as plt
import numpy as np
import seaborn as sns
from sklearn.metrics import confusion_matrix
np.random.seed(314151692)
y_pred = np.random.random_integers(0, 1, 1000)
y_true = np.random.random_integers(0, 1, 1000)
data = confusion_matrix(y_true, y_pred)
fig, ax = plt.subplots(1)
sns.heatmap(data, ax=ax)
ax.set_ylabel('True label')
ax.set_xlabel('Predicted label')
fig.show()
|
Add a confusion matrix plot
|
Add a confusion matrix plot
|
Python
|
mit
|
yassineAlouini/ml-experiments,yassineAlouini/ml-experiments
|
Add a confusion matrix plot
|
import matplotlib.pylab as plt
import numpy as np
import seaborn as sns
from sklearn.metrics import confusion_matrix
np.random.seed(314151692)
y_pred = np.random.random_integers(0, 1, 1000)
y_true = np.random.random_integers(0, 1, 1000)
data = confusion_matrix(y_true, y_pred)
fig, ax = plt.subplots(1)
sns.heatmap(data, ax=ax)
ax.set_ylabel('True label')
ax.set_xlabel('Predicted label')
fig.show()
|
<commit_before><commit_msg>Add a confusion matrix plot<commit_after>
|
import matplotlib.pylab as plt
import numpy as np
import seaborn as sns
from sklearn.metrics import confusion_matrix
np.random.seed(314151692)
y_pred = np.random.random_integers(0, 1, 1000)
y_true = np.random.random_integers(0, 1, 1000)
data = confusion_matrix(y_true, y_pred)
fig, ax = plt.subplots(1)
sns.heatmap(data, ax=ax)
ax.set_ylabel('True label')
ax.set_xlabel('Predicted label')
fig.show()
|
Add a confusion matrix plotimport matplotlib.pylab as plt
import numpy as np
import seaborn as sns
from sklearn.metrics import confusion_matrix
np.random.seed(314151692)
y_pred = np.random.random_integers(0, 1, 1000)
y_true = np.random.random_integers(0, 1, 1000)
data = confusion_matrix(y_true, y_pred)
fig, ax = plt.subplots(1)
sns.heatmap(data, ax=ax)
ax.set_ylabel('True label')
ax.set_xlabel('Predicted label')
fig.show()
|
<commit_before><commit_msg>Add a confusion matrix plot<commit_after>import matplotlib.pylab as plt
import numpy as np
import seaborn as sns
from sklearn.metrics import confusion_matrix
np.random.seed(314151692)
y_pred = np.random.random_integers(0, 1, 1000)
y_true = np.random.random_integers(0, 1, 1000)
data = confusion_matrix(y_true, y_pred)
fig, ax = plt.subplots(1)
sns.heatmap(data, ax=ax)
ax.set_ylabel('True label')
ax.set_xlabel('Predicted label')
fig.show()
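Not part of the commit, but a common refinement of the same plot is to print the raw counts inside each cell, which seaborn supports directly:
sns.heatmap(data, annot=True, fmt='d', ax=ax)  # annotate each cell with its integer count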
|
|
b36380aa5a4b10846a2888184135349048415d7c
|
ph_unfolder/analysis/time_measurer.py
|
ph_unfolder/analysis/time_measurer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
__author__ = "Yuji Ikeda"
import time
class TimeMeasurer(object):
"""Measure method execution time.
This class is made based on the suggestion in the following reference.
http://preshing.com/20110924/timing-your-code-using-pythons-with-statement/
"""
def __init__(self, time_string):
self._time_string = time_string
def __enter__(self):
self._start = time.time()
return self
def __exit__(self, *args):
time_string = self._time_string
self._finish = time.time()
interval = self._finish - self._start
print('{:20s} (sec.): {:12.4f}'.format(time_string, interval))
|
Add the module to measure computational times
|
Add the module to measure computational times
|
Python
|
mit
|
yuzie007/ph_unfolder,yuzie007/upho
|
Add the module to measure computational times
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
__author__ = "Yuji Ikeda"
import time
class TimeMeasurer(object):
"""Measure method execution time.
This class is made based on the suggestion in the following reference.
http://preshing.com/20110924/timing-your-code-using-pythons-with-statement/
"""
def __init__(self, time_string):
self._time_string = time_string
def __enter__(self):
self._start = time.time()
return self
def __exit__(self, *args):
time_string = self._time_string
self._finish = time.time()
interval = self._finish - self._start
print('{:20s} (sec.): {:12.4f}'.format(time_string, interval))
|
<commit_before><commit_msg>Add the module to measure computational times<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
__author__ = "Yuji Ikeda"
import time
class TimeMeasurer(object):
"""Measure method execution time.
This class is made based on the suggestion in the following reference.
http://preshing.com/20110924/timing-your-code-using-pythons-with-statement/
"""
def __init__(self, time_string):
self._time_string = time_string
def __enter__(self):
self._start = time.time()
return self
def __exit__(self, *args):
time_string = self._time_string
self._finish = time.time()
interval = self._finish - self._start
print('{:20s} (sec.): {:12.4f}'.format(time_string, interval))
|
Add the module to measure computational times#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
__author__ = "Yuji Ikeda"
import time
class TimeMeasurer(object):
"""Measure method execution time.
This class is made based on the suggestion in the following reference.
http://preshing.com/20110924/timing-your-code-using-pythons-with-statement/
"""
def __init__(self, time_string):
self._time_string = time_string
def __enter__(self):
self._start = time.time()
return self
def __exit__(self, *args):
time_string = self._time_string
self._finish = time.time()
interval = self._finish - self._start
print('{:20s} (sec.): {:12.4f}'.format(time_string, interval))
|
<commit_before><commit_msg>Add the module to measure computational times<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
__author__ = "Yuji Ikeda"
import time
class TimeMeasurer(object):
"""Measure method execution time.
This class is made based on the suggestion in the following reference.
http://preshing.com/20110924/timing-your-code-using-pythons-with-statement/
"""
def __init__(self, time_string):
self._time_string = time_string
def __enter__(self):
self._start = time.time()
return self
def __exit__(self, *args):
time_string = self._time_string
self._finish = time.time()
interval = self._finish - self._start
print('{:20s} (sec.): {:12.4f}'.format(time_string, interval))
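A minimal usage sketch of the context manager defined above; the label and the sleep are stand-ins for a real unfolding step:
import time
with TimeMeasurer('spectral weights'):
    time.sleep(0.25)
# prints something like: spectral weights     (sec.):       0.2501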
|
|
34f618a17fea9c8172d91a825f3992baf7255832
|
2009/qualification_round/welcome_to_code_jam.py
|
2009/qualification_round/welcome_to_code_jam.py
|
#!/usr/bin/env python
# Google Code Jam
# Google Code Jam 2009
# Qualification Round 2009
# Problem C. Welcome to Code Jam
from __future__ import print_function
SUBSTRING = 'welcome to code jam'
def count_welcome(s):
    # NOTE: this greedy scan only checks whether SUBSTRING occurs as a
    # subsequence of s; it does not yet count the number of distinct ways.
    if len(s) < len(SUBSTRING):
        return 0
    count = 1
    indexes = []
    last_index = 0
    for c in SUBSTRING:
        found = s.find(c, last_index)
        if found == -1:
            # some character of SUBSTRING cannot be matched, so no occurrence
            return 0
        indexes.append(found)
        last_index = found + 1
    return count
def generate_index(s):
    # yield the first position at which each character of SUBSTRING occurs in s
    for c in SUBSTRING:
        yield s.find(c)
if __name__ == '__main__':
import os
samples = [
'elcomew elcome to code jam',
'wweellccoommee to code qps jam',
'welcome to codejam'
]
for sample in samples:
print(count_welcome(sample))
|
Add welcome to code jam
|
Add welcome to code jam
|
Python
|
apache-2.0
|
laichunpongben/CodeJam
|
Add welcome to code jam
|
#!/usr/bin/env python
# Google Code Jam
# Google Code Jam 2009
# Qualification Round 2009
# Problem C. Welcome to Code Jam
from __future__ import print_function
SUBSTRING = 'welcome to code jam'
def count_welcome(s):
    # NOTE: this greedy scan only checks whether SUBSTRING occurs as a
    # subsequence of s; it does not yet count the number of distinct ways.
    if len(s) < len(SUBSTRING):
        return 0
    count = 1
    indexes = []
    last_index = 0
    for c in SUBSTRING:
        found = s.find(c, last_index)
        if found == -1:
            # some character of SUBSTRING cannot be matched, so no occurrence
            return 0
        indexes.append(found)
        last_index = found + 1
    return count
def generate_index(s):
    # yield the first position at which each character of SUBSTRING occurs in s
    for c in SUBSTRING:
        yield s.find(c)
if __name__ == '__main__':
import os
samples = [
'elcomew elcome to code jam',
'wweellccoommee to code qps jam',
'welcome to codejam'
]
for sample in samples:
print(count_welcome(sample))
|
<commit_before><commit_msg>Add welcome to code jam<commit_after>
|
#!/usr/bin/env python
# Google Code Jam
# Google Code Jam 2009
# Qualification Round 2009
# Problem C. Welcome to Code Jam
from __future__ import print_function
SUBSTRING = 'welcome to code jam'
def count_welcome(s):
    # NOTE: this greedy scan only checks whether SUBSTRING occurs as a
    # subsequence of s; it does not yet count the number of distinct ways.
    if len(s) < len(SUBSTRING):
        return 0
    count = 1
    indexes = []
    last_index = 0
    for c in SUBSTRING:
        found = s.find(c, last_index)
        if found == -1:
            # some character of SUBSTRING cannot be matched, so no occurrence
            return 0
        indexes.append(found)
        last_index = found + 1
    return count
def generate_index(s):
    # yield the first position at which each character of SUBSTRING occurs in s
    for c in SUBSTRING:
        yield s.find(c)
if __name__ == '__main__':
import os
samples = [
'elcomew elcome to code jam',
'wweellccoommee to code qps jam',
'welcome to codejam'
]
for sample in samples:
print(count_welcome(sample))
|
Add welcome to code jam#!/usr/bin/env python
# Google Code Jam
# Google Code Jam 2009
# Qualification Round 2009
# Problem C. Welcome to Code Jam
from __future__ import print_function
SUBSTRING = 'welcome to code jam'
def count_welcome(s):
    # NOTE: this greedy scan only checks whether SUBSTRING occurs as a
    # subsequence of s; it does not yet count the number of distinct ways.
    if len(s) < len(SUBSTRING):
        return 0
    count = 1
    indexes = []
    last_index = 0
    for c in SUBSTRING:
        found = s.find(c, last_index)
        if found == -1:
            # some character of SUBSTRING cannot be matched, so no occurrence
            return 0
        indexes.append(found)
        last_index = found + 1
    return count
def generate_index(s):
    # yield the first position at which each character of SUBSTRING occurs in s
    for c in SUBSTRING:
        yield s.find(c)
if __name__ == '__main__':
import os
samples = [
'elcomew elcome to code jam',
'wweellccoommee to code qps jam',
'welcome to codejam'
]
for sample in samples:
print(count_welcome(sample))
|
<commit_before><commit_msg>Add welcome to code jam<commit_after>#!/usr/bin/env python
# Google Code Jam
# Google Code Jam 2009
# Qualification Round 2009
# Problem C. Welcome to Code Jam
from __future__ import print_function
SUBSTRING = 'welcome to code jam'
def count_welcome(s):
    # NOTE: this greedy scan only checks whether SUBSTRING occurs as a
    # subsequence of s; it does not yet count the number of distinct ways.
    if len(s) < len(SUBSTRING):
        return 0
    count = 1
    indexes = []
    last_index = 0
    for c in SUBSTRING:
        found = s.find(c, last_index)
        if found == -1:
            # some character of SUBSTRING cannot be matched, so no occurrence
            return 0
        indexes.append(found)
        last_index = found + 1
    return count
def generate_index(s):
    # yield the first position at which each character of SUBSTRING occurs in s
    for c in SUBSTRING:
        yield s.find(c)
if __name__ == '__main__':
import os
samples = [
'elcomew elcome to code jam',
'wweellccoommee to code qps jam',
'welcome to codejam'
]
for sample in samples:
print(count_welcome(sample))
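The committed code above only tests whether the phrase occurs as a subsequence; the actual Code Jam problem asks for the number of distinct ways it occurs, modulo 10000. A standard dynamic-programming sketch (not part of the commit) would be:
def count_welcome_dp(s, pattern=SUBSTRING, mod=10000):
    # ways[i] = number of ways to build pattern[:i] from the characters of s seen so far
    ways = [0] * (len(pattern) + 1)
    ways[0] = 1
    for ch in s:
        for i in range(len(pattern) - 1, -1, -1):
            if pattern[i] == ch:
                ways[i + 1] = (ways[i + 1] + ways[i]) % mod
    return ways[len(pattern)]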
|
|
60ed9500ce5e252c088bf5c90c4db4e35ef6a978
|
setup.py
|
setup.py
|
import pip
import os
projectName = ''
def install(package):
pip.main(['install', package])
def installAll():
allPackages = [
'deap',
'numpy',
]
for p in allPackages:
install(p)
def getProjectName():
initialDir = os.getcwd()
os.chdir('../')
dirList1 = os.listdir('.')
dirList2 = os.listdir('..')
tmpSet = set(dirList1).intersection(set(dirList2))
tmpList = list(tmpSet)
if len(tmpSet) == 0:
print 'Cannot find project name'
else:
projectName = tmpList[0]
print 'Project name = ' + projectName
os.chdir(initialDir)
#Main
if __name__ == '__main__':
installAll()
|
Add set up scripts to install dependencies
|
Add set up scripts to install dependencies
|
Python
|
mit
|
cameronlai/EXT_GEN,cameronlai/EXT_GEN
|
Add set up scripts to install dependencies
|
import pip
import os
projectName = ''
def install(package):
pip.main(['install', package])
def installAll():
allPackages = [
'deap',
'numpy',
]
for p in allPackages:
install(p)
def getProjectName():
initialDir = os.getcwd()
os.chdir('../')
dirList1 = os.listdir('.')
dirList2 = os.listdir('..')
tmpSet = set(dirList1).intersection(set(dirList2))
tmpList = list(tmpSet)
if len(tmpSet) == 0:
print 'Cannot find project name'
else:
projectName = tmpList[0]
print 'Project name = ' + projectName
os.chdir(initialDir)
#Main
if __name__ == '__main__':
installAll()
|
<commit_before><commit_msg>Add set up scripts to install dependencies<commit_after>
|
import pip
import os
projectName = ''
def install(package):
pip.main(['install', package])
def installAll():
allPackages = [
'deap',
'numpy',
]
for p in allPackages:
install(p)
def getProjectName():
initialDir = os.getcwd()
os.chdir('../')
dirList1 = os.listdir('.')
dirList2 = os.listdir('..')
tmpSet = set(dirList1).intersection(set(dirList2))
tmpList = list(tmpSet)
if len(tmpSet) == 0:
print 'Cannot find project name'
else:
projectName = tmpList[0]
print 'Project name = ' + projectName
os.chdir(initialDir)
#Main
if __name__ == '__main__':
installAll()
|
Add set up scripts to install dependencies
import pip
import os
projectName = ''
def install(package):
pip.main(['install', package])
def installAll():
allPackages = [
'deap',
'numpy',
]
for p in allPackages:
install(p)
def getProjectName():
initialDir = os.getcwd()
os.chdir('../')
dirList1 = os.listdir('.')
dirList2 = os.listdir('..')
tmpSet = set(dirList1).intersection(set(dirList2))
tmpList = list(tmpSet)
if len(tmpSet) == 0:
print 'Cannot find project name'
else:
projectName = tmpList[0]
print 'Project name = ' + projectName
os.chdir(initialDir)
#Main
if __name__ == '__main__':
installAll()
|
<commit_before><commit_msg>Add set up scripts to install dependencies<commit_after>import pip
import os
projectName = ''
def install(package):
pip.main(['install', package])
def installAll():
allPackages = [
'deap',
'numpy',
]
for p in allPackages:
install(p)
def getProjectName():
initialDir = os.getcwd()
os.chdir('../')
dirList1 = os.listdir('.')
dirList2 = os.listdir('..')
tmpSet = set(dirList1).intersection(set(dirList2))
tmpList = list(tmpSet)
if len(tmpSet) == 0:
print 'Cannot find project name'
else:
projectName = tmpList[0]
print 'Project name = ' + projectName
os.chdir(initialDir)
#Main
if __name__ == '__main__':
installAll()
|
|
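Calling pip.main() as the record above does was later dropped from pip's supported programmatic surface; newer pip releases recommend shelling out to the interpreter's own pip instead. A hedged sketch of the same installer via subprocess, assuming only that pip is available as a module for the running interpreter:

import subprocess
import sys

def install(package):
    # Use the interpreter running this script so the matching pip is invoked.
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])

def install_all(packages=('deap', 'numpy')):
    for name in packages:
        install(name)

if __name__ == '__main__':
    install_all()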
4c4564c7e00adeb56f18a192f9544145d8512232
|
test/cbio_client_test.py
|
test/cbio_client_test.py
|
from bioagents import cbio_client
def test_get_cancer_studies():
study_ids = cbio_client.get_cancer_studies('pancreatic')
assert(len(study_ids) > 0)
assert('paad_tcga' in study_ids)
def test_get_cancer_types():
type_ids = cbio_client.get_cancer_types('lung')
assert(len(type_ids) > 0)
def test_get_genetic_profiles():
genetic_profiles = cbio_client.get_genetic_profiles('paad_icgc', 'mutation')
assert(len(genetic_profiles) > 0)
def test_get_num_sequenced():
num_case = cbio_client.get_num_sequenced('paad_tcga')
assert(num_case > 0)
|
Add tests for cbio client
|
Add tests for cbio client
|
Python
|
bsd-2-clause
|
sorgerlab/bioagents,bgyori/bioagents
|
Add tests for cbio client
|
from bioagents import cbio_client
def test_get_cancer_studies():
study_ids = cbio_client.get_cancer_studies('pancreatic')
assert(len(study_ids) > 0)
assert('paad_tcga' in study_ids)
def test_get_cancer_types():
type_ids = cbio_client.get_cancer_types('lung')
assert(len(type_ids) > 0)
def test_get_genetic_profiles():
genetic_profiles = cbio_client.get_genetic_profiles('paad_icgc', 'mutation')
assert(len(genetic_profiles) > 0)
def test_get_num_sequenced():
num_case = cbio_client.get_num_sequenced('paad_tcga')
assert(num_case > 0)
|
<commit_before><commit_msg>Add tests for cbio client<commit_after>
|
from bioagents import cbio_client
def test_get_cancer_studies():
study_ids = cbio_client.get_cancer_studies('pancreatic')
assert(len(study_ids) > 0)
assert('paad_tcga' in study_ids)
def test_get_cancer_types():
type_ids = cbio_client.get_cancer_types('lung')
assert(len(type_ids) > 0)
def test_get_genetic_profiles():
genetic_profiles = cbio_client.get_genetic_profiles('paad_icgc', 'mutation')
assert(len(genetic_profiles) > 0)
def test_get_num_sequenced():
num_case = cbio_client.get_num_sequenced('paad_tcga')
assert(num_case > 0)
|
Add tests for cbio client
from bioagents import cbio_client
def test_get_cancer_studies():
study_ids = cbio_client.get_cancer_studies('pancreatic')
assert(len(study_ids) > 0)
assert('paad_tcga' in study_ids)
def test_get_cancer_types():
type_ids = cbio_client.get_cancer_types('lung')
assert(len(type_ids) > 0)
def test_get_genetic_profiles():
genetic_profiles = cbio_client.get_genetic_profiles('paad_icgc', 'mutation')
assert(len(genetic_profiles) > 0)
def test_get_num_sequenced():
num_case = cbio_client.get_num_sequenced('paad_tcga')
assert(num_case > 0)
|
<commit_before><commit_msg>Add tests for cbio client<commit_after>from bioagents import cbio_client
def test_get_cancer_studies():
study_ids = cbio_client.get_cancer_studies('pancreatic')
assert(len(study_ids) > 0)
assert('paad_tcga' in study_ids)
def test_get_cancer_types():
type_ids = cbio_client.get_cancer_types('lung')
assert(len(type_ids) > 0)
def test_get_genetic_profiles():
genetic_profiles = cbio_client.get_genetic_profiles('paad_icgc', 'mutation')
assert(len(genetic_profiles) > 0)
def test_get_num_sequenced():
num_case = cbio_client.get_num_sequenced('paad_tcga')
assert(num_case > 0)
|
|
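For reference, the look-alike "returns a non-empty result" assertions above could also be written as one parametrised test. This sketch assumes pytest is available and that the cbio_client functions keep the signatures used in the record; the test name is hypothetical.

import pytest
from bioagents import cbio_client

@pytest.mark.parametrize('func, arg', [
    (cbio_client.get_cancer_studies, 'pancreatic'),
    (cbio_client.get_cancer_types, 'lung'),
])
def test_queries_return_results(func, arg):
    # Each query should return a non-empty collection of identifiers.
    assert len(func(arg)) > 0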
890a15bc40d51a83345887eb320bc5b74216effd
|
neurokernel/pm_gpu.py
|
neurokernel/pm_gpu.py
|
#!/usr/bin/env python
"""
Port mapper for GPU memory.
"""
import numpy as np
import pycuda.gpuarray as gpuarray
from plsel import BasePortMapper
class GPUPortMapper(PortMapper):
def __init__(self, selector, data=None, portmap=None):
super(PortMapper, self).__init__(selector, portmap)
N = len(self)
if data is None or len(data) == 0:
self.data = gpuarray.empty(0, np.double)
else:
assert np.ndim(data) == 1
assert type(data) == gpuarray.GPUArray
# The integers in the port map must be valid indices into the
# data array:
assert max(self.portmap) < len(data)
# The port mapper may map identifiers to some portion of the data array:
assert N <= len(data)
self.data = data.copy()
def get_inds_nonzero(self):
raise NotImplementedError
def get_ports_nonzero(self):
raise NotImplementedError
def get_by_inds(self, inds):
raise NotImplementedError
def set_by_ind(self, inds, data):
# Can be implemented using scikits.cuda.misc.set_by_index
raise NotImplementedError
|
Add initial version of GPU-based port mapper class.
|
Add initial version of GPU-based port mapper class.
|
Python
|
bsd-3-clause
|
cerrno/neurokernel
|
Add initial version of GPU-based port mapper class.
|
#!/usr/bin/env python
"""
Port mapper for GPU memory.
"""
import numpy as np
import pycuda.gpuarray as gpuarray
from plsel import BasePortMapper
class GPUPortMapper(PortMapper):
def __init__(self, selector, data=None, portmap=None):
super(PortMapper, self).__init__(selector, portmap)
N = len(self)
if data is None or len(data) == 0:
self.data = gpuarray.empty(0, np.double)
else:
assert np.ndim(data) == 1
assert type(data) == gpuarray.GPUArray
# The integers in the port map must be valid indices into the
# data array:
assert max(self.portmap) < len(data)
# The port mapper may map identifiers to some portion of the data array:
assert N <= len(data)
self.data = data.copy()
def get_inds_nonzero(self):
raise NotImplementedError
def get_ports_nonzero(self):
raise NotImplementedError
def get_by_inds(self, inds):
raise NotImplementedError
def set_by_ind(self, inds, data):
# Can be implemented using scikits.cuda.misc.set_by_index
raise NotImplementedError
|
<commit_before><commit_msg>Add initial version of GPU-based port mapper class.<commit_after>
|
#!/usr/bin/env python
"""
Port mapper for GPU memory.
"""
import numpy as np
import pycuda.gpuarray as gpuarray
from plsel import BasePortMapper
class GPUPortMapper(PortMapper):
def __init__(self, selector, data=None, portmap=None):
super(PortMapper, self).__init__(selector, portmap)
N = len(self)
if data is None or len(data) == 0:
self.data = gpuarray.empty(0, np.double)
else:
assert np.ndim(data) == 1
assert type(data) == gpuarray.GPUArray
# The integers in the port map must be valid indices into the
# data array:
assert max(self.portmap) < len(data)
# The port mapper may map identifiers to some portion of the data array:
assert N <= len(data)
self.data = data.copy()
def get_inds_nonzero(self):
raise NotImplementedError
def get_ports_nonzero(self):
raise NotImplementedError
def get_by_inds(self, inds):
raise NotImplementedError
def set_by_ind(self, inds, data):
# Can be implemented using scikits.cuda.misc.set_by_index
raise NotImplementedError
|
Add initial version of GPU-based port mapper class.
#!/usr/bin/env python
"""
Port mapper for GPU memory.
"""
import numpy as np
import pycuda.gpuarray as gpuarray
from plsel import BasePortMapper
class GPUPortMapper(PortMapper):
def __init__(self, selector, data=None, portmap=None):
super(PortMapper, self).__init__(selector, portmap)
N = len(self)
if data is None or len(data) == 0:
self.data = gpuarray.empty(0, np.double)
else:
assert np.ndim(data) == 1
assert type(data) == gpuarray.GPUArray
# The integers in the port map must be valid indices into the
# data array:
assert max(self.portmap) < len(data)
# The port mapper may map identifiers to some portion of the data array:
assert N <= len(data)
self.data = data.copy()
def get_inds_nonzero(self):
raise NotImplementedError
def get_ports_nonzero(self):
raise NotImplementedError
def get_by_inds(self, inds):
raise NotImplementedError
def set_by_ind(self, inds, data):
# Can be implemented using scikits.cuda.misc.set_by_index
raise NotImplementedError
|
<commit_before><commit_msg>Add initial version of GPU-based port mapper class.<commit_after>#!/usr/bin/env python
"""
Port mapper for GPU memory.
"""
import numpy as np
import pycuda.gpuarray as gpuarray
from plsel import BasePortMapper
class GPUPortMapper(PortMapper):
def __init__(self, selector, data=None, portmap=None):
super(PortMapper, self).__init__(selector, portmap)
N = len(self)
if data is None or len(data) == 0:
self.data = gpuarray.empty(0, np.double)
else:
assert np.ndim(data) == 1
assert type(data) == gpuarray.GPUArray
# The integers in the port map must be valid indices into the
# data array:
assert max(self.portmap) < len(data)
# The port mapper may map identifiers to some portion of the data array:
assert N <= len(data)
self.data = data.copy()
def get_inds_nonzero(self):
raise NotImplementedError
def get_ports_nonzero(self):
raise NotImplementedError
def get_by_inds(self, inds):
raise NotImplementedError
def set_by_ind(self, inds, data):
# Can be implemented using scikits.cuda.misc.set_by_index
raise NotImplementedError
|
|
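The record above leaves get_by_inds and set_by_ind as stubs. One plausible sketch for the gather side, assuming the installed pycuda exposes gpuarray.take for device-side gathers and that the port-map indices fit a 32-bit integer type (both assumptions, not guarantees from the original code):

import numpy as np
import pycuda.gpuarray as gpuarray

def get_by_inds(self, inds):
    # Copy host-side indices to the device, then gather the selected elements
    # of self.data without a round trip through host memory.
    inds_gpu = gpuarray.to_gpu(np.asarray(inds, dtype=np.uint32))
    return gpuarray.take(self.data, inds_gpu)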
c93f4ded0b3eb33a9a06c784963845dd80144989
|
setup.py
|
setup.py
|
import multiprocessing # noqa # stop tests breaking tox
from setuptools import setup
import tvrenamr
requires = ['pyyaml', 'requests']
setup_requires = ('minimock', 'mock', 'nose', 'pyyaml')
setup(
name=tvrenamr.__title__,
version=tvrenamr.__version__,
description='Rename tv show files using online databases',
long_description=open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
author=tvrenamr.__author__,
author_email='george@ghickman.co.uk',
url='http://tvrenamr.info',
license='MIT',
packages=['tvrenamr'],
entry_points={'console_scripts': ['tvr=tvrenamr.frontend:run']},
classifiers=[
'Development Status :: 6 - Mature',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Topic :: Utilities',
],
install_requires=requires,
setup_requires=setup_requires,
test_suite='nose.collector',
)
|
import multiprocessing # noqa # stop tests breaking tox
from setuptools import setup
import tvrenamr
requires = ['pyyaml', 'requests']
setup_requires = ('minimock', 'mock', 'nose', 'pyyaml')
setup(
name=tvrenamr.__title__,
version=tvrenamr.__version__,
description='Rename tv show files using online databases',
long_description=open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
author=tvrenamr.__author__,
author_email='george@ghickman.co.uk',
url='http://tvrenamr.info',
license='MIT',
packages=['tvrenamr'],
entry_points={'console_scripts': ['tvr=tvrenamr.frontend:run']},
classifiers=[
'Development Status :: 6 - Mature',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3',
'Topic :: Utilities',
],
install_requires=requires,
setup_requires=setup_requires,
test_suite='nose.collector',
)
|
Update trove classifiers with generic language versions
|
Update trove classifiers with generic language versions
|
Python
|
mit
|
ghickman/tvrenamr,wintersandroid/tvrenamr
|
import multiprocessing # noqa # stop tests breaking tox
from setuptools import setup
import tvrenamr
requires = ['pyyaml', 'requests']
setup_requires = ('minimock', 'mock', 'nose', 'pyyaml')
setup(
name=tvrenamr.__title__,
version=tvrenamr.__version__,
description='Rename tv show files using online databases',
long_description=open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
author=tvrenamr.__author__,
author_email='george@ghickman.co.uk',
url='http://tvrenamr.info',
license='MIT',
packages=['tvrenamr'],
entry_points={'console_scripts': ['tvr=tvrenamr.frontend:run']},
classifiers=[
'Development Status :: 6 - Mature',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Topic :: Utilities',
],
install_requires=requires,
setup_requires=setup_requires,
test_suite='nose.collector',
)
Update trove classifiers with generic language versions
|
import multiprocessing # noqa # stop tests breaking tox
from setuptools import setup
import tvrenamr
requires = ['pyyaml', 'requests']
setup_requires = ('minimock', 'mock', 'nose', 'pyyaml')
setup(
name=tvrenamr.__title__,
version=tvrenamr.__version__,
description='Rename tv show files using online databases',
long_description=open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
author=tvrenamr.__author__,
author_email='george@ghickman.co.uk',
url='http://tvrenamr.info',
license='MIT',
packages=['tvrenamr'],
entry_points={'console_scripts': ['tvr=tvrenamr.frontend:run']},
classifiers=[
'Development Status :: 6 - Mature',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3',
'Topic :: Utilities',
],
install_requires=requires,
setup_requires=setup_requires,
test_suite='nose.collector',
)
|
<commit_before>import multiprocessing # noqa # stop tests breaking tox
from setuptools import setup
import tvrenamr
requires = ['pyyaml', 'requests']
setup_requires = ('minimock', 'mock', 'nose', 'pyyaml')
setup(
name=tvrenamr.__title__,
version=tvrenamr.__version__,
description='Rename tv show files using online databases',
long_description=open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
author=tvrenamr.__author__,
author_email='george@ghickman.co.uk',
url='http://tvrenamr.info',
license='MIT',
packages=['tvrenamr'],
entry_points={'console_scripts': ['tvr=tvrenamr.frontend:run']},
classifiers=[
'Development Status :: 6 - Mature',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Topic :: Utilities',
],
install_requires=requires,
setup_requires=setup_requires,
test_suite='nose.collector',
)
<commit_msg>Update trove classifiers with generic language versions<commit_after>
|
import multiprocessing # noqa # stop tests breaking tox
from setuptools import setup
import tvrenamr
requires = ['pyyaml', 'requests']
setup_requires = ('minimock', 'mock', 'nose', 'pyyaml')
setup(
name=tvrenamr.__title__,
version=tvrenamr.__version__,
description='Rename tv show files using online databases',
long_description=open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
author=tvrenamr.__author__,
author_email='george@ghickman.co.uk',
url='http://tvrenamr.info',
license='MIT',
packages=['tvrenamr'],
entry_points={'console_scripts': ['tvr=tvrenamr.frontend:run']},
classifiers=[
'Development Status :: 6 - Mature',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3',
'Topic :: Utilities',
],
install_requires=requires,
setup_requires=setup_requires,
test_suite='nose.collector',
)
|
import multiprocessing # noqa # stop tests breaking tox
from setuptools import setup
import tvrenamr
requires = ['pyyaml', 'requests']
setup_requires = ('minimock', 'mock', 'nose', 'pyyaml')
setup(
name=tvrenamr.__title__,
version=tvrenamr.__version__,
description='Rename tv show files using online databases',
long_description=open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
author=tvrenamr.__author__,
author_email='george@ghickman.co.uk',
url='http://tvrenamr.info',
license='MIT',
packages=['tvrenamr'],
entry_points={'console_scripts': ['tvr=tvrenamr.frontend:run']},
classifiers=[
'Development Status :: 6 - Mature',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Topic :: Utilities',
],
install_requires=requires,
setup_requires=setup_requires,
test_suite='nose.collector',
)
Update trove classifiers with generic language versions
import multiprocessing  # noqa # stop tests breaking tox
from setuptools import setup
import tvrenamr
requires = ['pyyaml', 'requests']
setup_requires = ('minimock', 'mock', 'nose', 'pyyaml')
setup(
name=tvrenamr.__title__,
version=tvrenamr.__version__,
description='Rename tv show files using online databases',
long_description=open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
author=tvrenamr.__author__,
author_email='george@ghickman.co.uk',
url='http://tvrenamr.info',
license='MIT',
packages=['tvrenamr'],
entry_points={'console_scripts': ['tvr=tvrenamr.frontend:run']},
classifiers=[
'Development Status :: 6 - Mature',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3',
'Topic :: Utilities',
],
install_requires=requires,
setup_requires=setup_requires,
test_suite='nose.collector',
)
|
<commit_before>import multiprocessing # noqa # stop tests breaking tox
from setuptools import setup
import tvrenamr
requires = ['pyyaml', 'requests']
setup_requires = ('minimock', 'mock', 'nose', 'pyyaml')
setup(
name=tvrenamr.__title__,
version=tvrenamr.__version__,
description='Rename tv show files using online databases',
long_description=open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
author=tvrenamr.__author__,
author_email='george@ghickman.co.uk',
url='http://tvrenamr.info',
license='MIT',
packages=['tvrenamr'],
entry_points={'console_scripts': ['tvr=tvrenamr.frontend:run']},
classifiers=[
'Development Status :: 6 - Mature',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Topic :: Utilities',
],
install_requires=requires,
setup_requires=setup_requires,
test_suite='nose.collector',
)
<commit_msg>Update trove classifiers with generic language versions<commit_after>import multiprocessing # noqa # stop tests breaking tox
from setuptools import setup
import tvrenamr
requires = ['pyyaml', 'requests']
setup_requires = ('minimock', 'mock', 'nose', 'pyyaml')
setup(
name=tvrenamr.__title__,
version=tvrenamr.__version__,
description='Rename tv show files using online databases',
long_description=open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
author=tvrenamr.__author__,
author_email='george@ghickman.co.uk',
url='http://tvrenamr.info',
license='MIT',
packages=['tvrenamr'],
entry_points={'console_scripts': ['tvr=tvrenamr.frontend:run']},
classifiers=[
'Development Status :: 6 - Mature',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3',
'Topic :: Utilities',
],
install_requires=requires,
setup_requires=setup_requires,
test_suite='nose.collector',
)
|
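The classifier list committed above repeats several entries (the generic "3" and "3.3" lines appear more than once). If order-preserving deduplication were wanted, a small helper along these lines would do it; this is a generic sketch, not part of the package:

def dedupe(seq):
    # Keep only the first occurrence of each classifier, preserving order.
    seen = set()
    out = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            out.append(item)
    return out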
7e55b58960a2b2e320d115818c2ba052a8b17270
|
scrape.py
|
scrape.py
|
import requests, json, sys
from bs4 import BeautifulSoup
def print_stats(data, hero, stats):
print(hero + " " + stats)
header = ["Rarity: ", "HP: ", "ATK: ", "SPD: ", "DEF: ", "RES: "]
stat_counter = 0
rarity_counter = 0
for row in data:
table = data[rarity_counter].find_all("td")
for head in header:
print(head + table[stat_counter].text)
stat_counter+=1
stat_counter=0
rarity_counter+=1
print("\n")
hero = input("Hero Name: ")
url = ("https://feheroes.wiki/" + hero)
r = requests.get(url)
soup = BeautifulSoup(r.content, "lxml")
# This will always have 2 elements
# 0 is for base stats
# 1 is for max stats
stats_tables = soup.find_all("table", class_="wikitable default")
# This is for the base stats TABLE
# Base stats is separated in TABLE
# 0 = Rarity
# 1 = HP
# 2 = ATK
# 3 = SPD
# 4 = DEF
# 5 = RES
base_rows = stats_tables[0].find_all("tr")
# Only get the actual data, not the header
base_stats = (base_rows[1:len(base_rows)])
print_stats(base_stats, hero, "base stats")
# This is for the max stats TABLE
max_rows = stats_tables[1].find_all("tr")
# Only get the actual data, not the header
max_stats = (max_rows[1:len(max_rows)])
print_stats(max_stats, hero, "max stats")
|
Update for Fire Emblem Heroes
|
Update for Fire Emblem Heroes
|
Python
|
mit
|
tyrenyabe/simple-scrape
|
Update for Fire Emblem Heroes
|
import requests, json, sys
from bs4 import BeautifulSoup
def print_stats(data, hero, stats):
print(hero + " " + stats)
header = ["Rarity: ", "HP: ", "ATK: ", "SPD: ", "DEF: ", "RES: "]
stat_counter = 0
rarity_counter = 0
for row in data:
table = data[rarity_counter].find_all("td")
for head in header:
print(head + table[stat_counter].text)
stat_counter+=1
stat_counter=0
rarity_counter+=1
print("\n")
hero = input("Hero Name: ")
url = ("https://feheroes.wiki/" + hero)
r = requests.get(url)
soup = BeautifulSoup(r.content, "lxml")
# This will always have 2 elements
# 0 is for base stats
# 1 is for max stats
stats_tables = soup.find_all("table", class_="wikitable default")
# This is for the base stats TABLE
# Base stats is separated in TABLE
# 0 = Rarity
# 1 = HP
# 2 = ATK
# 3 = SPD
# 4 = DEF
# 5 = RES
base_rows = stats_tables[0].find_all("tr")
# Only get the actual data, not the header
base_stats = (base_rows[1:len(base_rows)])
print_stats(base_stats, hero, "base stats")
# This is for the max stats TABLE
max_rows = stats_tables[1].find_all("tr")
# Only get the actual data, not the header
max_stats = (max_rows[1:len(max_rows)])
print_stats(max_stats, hero, "max stats")
|
<commit_before><commit_msg>Update for Fire Emblem Heroes<commit_after>
|
import requests, json, sys
from bs4 import BeautifulSoup
def print_stats(data, hero, stats):
print(hero + " " + stats)
header = ["Rarity: ", "HP: ", "ATK: ", "SPD: ", "DEF: ", "RES: "]
stat_counter = 0
rarity_counter = 0
for row in data:
table = data[rarity_counter].find_all("td")
for head in header:
print(head + table[stat_counter].text)
stat_counter+=1
stat_counter=0
rarity_counter+=1
print("\n")
hero = input("Hero Name: ")
url = ("https://feheroes.wiki/" + hero)
r = requests.get(url)
soup = BeautifulSoup(r.content, "lxml")
# This will always have 2 elements
# 0 is for base stats
# 1 is for max stats
stats_tables = soup.find_all("table", class_="wikitable default")
# This is for the base stats TABLE
# Base stats is separated in TABLE
# 0 = Rarity
# 1 = HP
# 2 = ATK
# 3 = SPD
# 4 = DEF
# 5 = RES
base_rows = stats_tables[0].find_all("tr")
# Only get the actual data, not the header
base_stats = (base_rows[1:len(base_rows)])
print_stats(base_stats, hero, "base stats")
# This is for the max stats TABLE
max_rows = stats_tables[1].find_all("tr")
# Only get the actual data, not the header
max_stats = (max_rows[1:len(max_rows)])
print_stats(max_stats, hero, "max stats")
|
Update for Fire Emblem Heroes
import requests, json, sys
from bs4 import BeautifulSoup
def print_stats(data, hero, stats):
print(hero + " " + stats)
header = ["Rarity: ", "HP: ", "ATK: ", "SPD: ", "DEF: ", "RES: "]
stat_counter = 0
rarity_counter = 0
for row in data:
table = data[rarity_counter].find_all("td")
for head in header:
print(head + table[stat_counter].text)
stat_counter+=1
stat_counter=0
rarity_counter+=1
print("\n")
hero = input("Hero Name: ")
url = ("https://feheroes.wiki/" + hero)
r = requests.get(url)
soup = BeautifulSoup(r.content, "lxml")
# This will always have 2 elements
# 0 is for base stats
# 1 is for max stats
stats_tables = soup.find_all("table", class_="wikitable default")
# This is for the base stats TABLE
# Base stats is separated in TABLE
# 0 = Rarity
# 1 = HP
# 2 = ATK
# 3 = SPD
# 4 = DEF
# 5 = RES
base_rows = stats_tables[0].find_all("tr")
# Only get the actual data, not the header
base_stats = (base_rows[1:len(base_rows)])
print_stats(base_stats, hero, "base stats")
# This is for the max stats TABLE
max_rows = stats_tables[1].find_all("tr")
# Only get the actual data, not the header
max_stats = (max_rows[1:len(max_rows)])
print_stats(max_stats, hero, "max stats")
|
<commit_before><commit_msg>Update for Fire Emblem Heroes<commit_after>import requests, json, sys
from bs4 import BeautifulSoup
def print_stats(data, hero, stats):
print(hero + " " + stats)
header = ["Rarity: ", "HP: ", "ATK: ", "SPD: ", "DEF: ", "RES: "]
stat_counter = 0
rarity_counter = 0
for row in data:
table = data[rarity_counter].find_all("td")
for head in header:
print(head + table[stat_counter].text)
stat_counter+=1
stat_counter=0
rarity_counter+=1
print("\n")
hero = input("Hero Name: ")
url = ("https://feheroes.wiki/" + hero)
r = requests.get(url)
soup = BeautifulSoup(r.content, "lxml")
# This will always have 2 elements
# 0 is for base stats
# 1 is for max stats
stats_tables = soup.find_all("table", class_="wikitable default")
# This is for the base stats TABLE
# Base stats is separated in TABLE
# 0 = Rarity
# 1 = HP
# 2 = ATK
# 3 = SPD
# 4 = DEF
# 5 = RES
base_rows = stats_tables[0].find_all("tr")
# Only get the actual data, not the header
base_stats = (base_rows[1:len(base_rows)])
print_stats(base_stats, hero, "base stats")
# This is for the max stats TABLE
max_rows = stats_tables[1].find_all("tr")
# Only get the actual data, not the header
max_stats = (max_rows[1:len(max_rows)])
print_stats(max_stats, hero, "max stats")
|
|
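The scraper above prints each stats table and leaves the imported json module unused. A hedged sketch of a variant that returns the rows as dictionaries so they could be dumped as JSON instead; the helper name and header keys are illustrative:

import json

HEADERS = ['rarity', 'hp', 'atk', 'spd', 'def', 'res']

def rows_to_dicts(rows):
    # Each <tr> holds one rarity level; map its <td> cells onto the header names.
    parsed = []
    for row in rows:
        cells = [td.text.strip() for td in row.find_all('td')]
        parsed.append(dict(zip(HEADERS, cells)))
    return parsed

# Example use with the rows collected above (hypothetical):
# print(json.dumps(rows_to_dicts(base_stats), indent=2))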
cc4d601b2b3e12ad3087f8cb959940192e945df3
|
gem/tests/test_celery.py
|
gem/tests/test_celery.py
|
from django.test import TestCase
from mock import patch
class TestCelery(TestCase):
@patch('django.core.management.call_command')
def test_ensure_search_index_doesnt_call_command(self, call_command_patch):
from gem.celery import ensure_search_index_updated
ensure_search_index_updated('sender', 'instance')
call_command_patch.assert_not_called()
|
Add basic test for celery.py task
|
Add basic test for celery.py task
This file isn't being run by any of the tests so we have no guarantee
that it's even valid Python. Let's at least run it a little bit.
|
Python
|
bsd-2-clause
|
praekelt/molo-gem,praekelt/molo-gem,praekelt/molo-gem
|
Add basic test for celery.py task
This file isn't being run by any of the tests so we have no guarantee
that it's even valid Python. Let's at least run it a little bit.
|
from django.test import TestCase
from mock import patch
class TestCelery(TestCase):
@patch('django.core.management.call_command')
def test_ensure_search_index_doesnt_call_command(self, call_command_patch):
from gem.celery import ensure_search_index_updated
ensure_search_index_updated('sender', 'instance')
call_command_patch.assert_not_called()
|
<commit_before><commit_msg>Add basic test for celery.py task
This file isn't being run by any of the tests so we have no guarantee
that it's even valid Python. Let's at least run it a little bit.<commit_after>
|
from django.test import TestCase
from mock import patch
class TestCelery(TestCase):
@patch('django.core.management.call_command')
def test_ensure_search_index_doesnt_call_command(self, call_command_patch):
from gem.celery import ensure_search_index_updated
ensure_search_index_updated('sender', 'instance')
call_command_patch.assert_not_called()
|
Add basic test for celery.py task
This file isn't being run by any of the tests so we have no guarantee
that it's even valid Python. Let's at least run it a little bit.
from django.test import TestCase
from mock import patch
class TestCelery(TestCase):
@patch('django.core.management.call_command')
def test_ensure_search_index_doesnt_call_command(self, call_command_patch):
from gem.celery import ensure_search_index_updated
ensure_search_index_updated('sender', 'instance')
call_command_patch.assert_not_called()
|
<commit_before><commit_msg>Add basic test for celery.py task
This file isn't being run by any of the tests so we have no guarantee
that it's even valid Python. Let's at least run it a little bit.<commit_after>from django.test import TestCase
from mock import patch
class TestCelery(TestCase):
@patch('django.core.management.call_command')
def test_ensure_search_index_doesnt_call_command(self, call_command_patch):
from gem.celery import ensure_search_index_updated
ensure_search_index_updated('sender', 'instance')
call_command_patch.assert_not_called()
|
|
88a59ea0d0ca72d6ca8351169c78e84f9afdb454
|
mozillians/users/migrations/0037_auto_20180720_0305.py
|
mozillians/users/migrations/0037_auto_20180720_0305.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-20 10:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0036_auto_20180704_0634'),
]
operations = [
migrations.AlterField(
model_name='externalaccount',
name='type',
field=models.CharField(choices=[(b'AIM', b'AIM'), (b'BITBUCKET', b'Bitbucket'), (b'BMO', b'Bugzilla (BMO)'), (b'DISCORD', b'Discord'), (b'FACEBOOK', b'Facebook'), (b'LANYRD', b'Lanyrd'), (b'LINKEDIN', b'LinkedIn'), (b'MDN', b'MDN'), (b'MASTODON', b'Mastodon'), (b'AMO', b'Mozilla Add-ons'), (b'DISCOURSE', b'Mozilla Discourse'), (b'MOZILLALOCATION', b'Mozilla Location Service'), (b'MOZPHAB', b'Mozilla Phabricator'), (b'MOZILLAPONTOON', b'Mozilla Pontoon'), (b'REMO', b'Mozilla Reps'), (b'SUMO', b'Mozilla Support'), (b'WEBMAKER', b'Mozilla Webmaker'), (b'MOZILLAWIKI', b'Mozilla Wiki'), (b'Phone (Landline)', b'Phone (Landline)'), (b'Phone (Mobile)', b'Phone (Mobile)'), (b'SKYPE', b'Skype'), (b'SLIDESHARE', b'SlideShare'), (b'TELEGRAM', b'Telegram'), (b'TRANSIFEX', b'Transifex'), (b'TWITTER', b'Twitter'), (b'WEBSITE', b'Website URL'), (b'JABBER', b'XMPP/Jabber'), (b'YAHOO', b'Yahoo! Messenger')], max_length=30, verbose_name='Account Type'),
),
]
|
Add a schema migration for Mozilla Phabricator External Account.
|
Add a schema migration for Mozilla Phabricator External Account.
|
Python
|
bsd-3-clause
|
johngian/mozillians,johngian/mozillians,mozilla/mozillians,mozilla/mozillians,mozilla/mozillians,akatsoulas/mozillians,akatsoulas/mozillians,akatsoulas/mozillians,johngian/mozillians,johngian/mozillians,akatsoulas/mozillians,mozilla/mozillians
|
Add a schema migration for Mozilla Phabricator External Account.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-20 10:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0036_auto_20180704_0634'),
]
operations = [
migrations.AlterField(
model_name='externalaccount',
name='type',
field=models.CharField(choices=[(b'AIM', b'AIM'), (b'BITBUCKET', b'Bitbucket'), (b'BMO', b'Bugzilla (BMO)'), (b'DISCORD', b'Discord'), (b'FACEBOOK', b'Facebook'), (b'LANYRD', b'Lanyrd'), (b'LINKEDIN', b'LinkedIn'), (b'MDN', b'MDN'), (b'MASTODON', b'Mastodon'), (b'AMO', b'Mozilla Add-ons'), (b'DISCOURSE', b'Mozilla Discourse'), (b'MOZILLALOCATION', b'Mozilla Location Service'), (b'MOZPHAB', b'Mozilla Phabricator'), (b'MOZILLAPONTOON', b'Mozilla Pontoon'), (b'REMO', b'Mozilla Reps'), (b'SUMO', b'Mozilla Support'), (b'WEBMAKER', b'Mozilla Webmaker'), (b'MOZILLAWIKI', b'Mozilla Wiki'), (b'Phone (Landline)', b'Phone (Landline)'), (b'Phone (Mobile)', b'Phone (Mobile)'), (b'SKYPE', b'Skype'), (b'SLIDESHARE', b'SlideShare'), (b'TELEGRAM', b'Telegram'), (b'TRANSIFEX', b'Transifex'), (b'TWITTER', b'Twitter'), (b'WEBSITE', b'Website URL'), (b'JABBER', b'XMPP/Jabber'), (b'YAHOO', b'Yahoo! Messenger')], max_length=30, verbose_name='Account Type'),
),
]
|
<commit_before><commit_msg>Add a schema migration for Mozilla Phabricator External Account.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-20 10:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0036_auto_20180704_0634'),
]
operations = [
migrations.AlterField(
model_name='externalaccount',
name='type',
field=models.CharField(choices=[(b'AIM', b'AIM'), (b'BITBUCKET', b'Bitbucket'), (b'BMO', b'Bugzilla (BMO)'), (b'DISCORD', b'Discord'), (b'FACEBOOK', b'Facebook'), (b'LANYRD', b'Lanyrd'), (b'LINKEDIN', b'LinkedIn'), (b'MDN', b'MDN'), (b'MASTODON', b'Mastodon'), (b'AMO', b'Mozilla Add-ons'), (b'DISCOURSE', b'Mozilla Discourse'), (b'MOZILLALOCATION', b'Mozilla Location Service'), (b'MOZPHAB', b'Mozilla Phabricator'), (b'MOZILLAPONTOON', b'Mozilla Pontoon'), (b'REMO', b'Mozilla Reps'), (b'SUMO', b'Mozilla Support'), (b'WEBMAKER', b'Mozilla Webmaker'), (b'MOZILLAWIKI', b'Mozilla Wiki'), (b'Phone (Landline)', b'Phone (Landline)'), (b'Phone (Mobile)', b'Phone (Mobile)'), (b'SKYPE', b'Skype'), (b'SLIDESHARE', b'SlideShare'), (b'TELEGRAM', b'Telegram'), (b'TRANSIFEX', b'Transifex'), (b'TWITTER', b'Twitter'), (b'WEBSITE', b'Website URL'), (b'JABBER', b'XMPP/Jabber'), (b'YAHOO', b'Yahoo! Messenger')], max_length=30, verbose_name='Account Type'),
),
]
|
Add a schema migration for Mozilla Phabricator External Account.
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-20 10:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0036_auto_20180704_0634'),
]
operations = [
migrations.AlterField(
model_name='externalaccount',
name='type',
field=models.CharField(choices=[(b'AIM', b'AIM'), (b'BITBUCKET', b'Bitbucket'), (b'BMO', b'Bugzilla (BMO)'), (b'DISCORD', b'Discord'), (b'FACEBOOK', b'Facebook'), (b'LANYRD', b'Lanyrd'), (b'LINKEDIN', b'LinkedIn'), (b'MDN', b'MDN'), (b'MASTODON', b'Mastodon'), (b'AMO', b'Mozilla Add-ons'), (b'DISCOURSE', b'Mozilla Discourse'), (b'MOZILLALOCATION', b'Mozilla Location Service'), (b'MOZPHAB', b'Mozilla Phabricator'), (b'MOZILLAPONTOON', b'Mozilla Pontoon'), (b'REMO', b'Mozilla Reps'), (b'SUMO', b'Mozilla Support'), (b'WEBMAKER', b'Mozilla Webmaker'), (b'MOZILLAWIKI', b'Mozilla Wiki'), (b'Phone (Landline)', b'Phone (Landline)'), (b'Phone (Mobile)', b'Phone (Mobile)'), (b'SKYPE', b'Skype'), (b'SLIDESHARE', b'SlideShare'), (b'TELEGRAM', b'Telegram'), (b'TRANSIFEX', b'Transifex'), (b'TWITTER', b'Twitter'), (b'WEBSITE', b'Website URL'), (b'JABBER', b'XMPP/Jabber'), (b'YAHOO', b'Yahoo! Messenger')], max_length=30, verbose_name='Account Type'),
),
]
|
<commit_before><commit_msg>Add a schema migration for Mozilla Phabricator External Account.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-20 10:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0036_auto_20180704_0634'),
]
operations = [
migrations.AlterField(
model_name='externalaccount',
name='type',
field=models.CharField(choices=[(b'AIM', b'AIM'), (b'BITBUCKET', b'Bitbucket'), (b'BMO', b'Bugzilla (BMO)'), (b'DISCORD', b'Discord'), (b'FACEBOOK', b'Facebook'), (b'LANYRD', b'Lanyrd'), (b'LINKEDIN', b'LinkedIn'), (b'MDN', b'MDN'), (b'MASTODON', b'Mastodon'), (b'AMO', b'Mozilla Add-ons'), (b'DISCOURSE', b'Mozilla Discourse'), (b'MOZILLALOCATION', b'Mozilla Location Service'), (b'MOZPHAB', b'Mozilla Phabricator'), (b'MOZILLAPONTOON', b'Mozilla Pontoon'), (b'REMO', b'Mozilla Reps'), (b'SUMO', b'Mozilla Support'), (b'WEBMAKER', b'Mozilla Webmaker'), (b'MOZILLAWIKI', b'Mozilla Wiki'), (b'Phone (Landline)', b'Phone (Landline)'), (b'Phone (Mobile)', b'Phone (Mobile)'), (b'SKYPE', b'Skype'), (b'SLIDESHARE', b'SlideShare'), (b'TELEGRAM', b'Telegram'), (b'TRANSIFEX', b'Transifex'), (b'TWITTER', b'Twitter'), (b'WEBSITE', b'Website URL'), (b'JABBER', b'XMPP/Jabber'), (b'YAHOO', b'Yahoo! Messenger')], max_length=30, verbose_name='Account Type'),
),
]
|
|
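The migration above only records the widened choices on the type field. On the model side the change presumably amounts to one extra entry in a choices tuple; the constant and tuple names below are hypothetical, shown only to illustrate the shape of such a change:

TYPE_MOZPHAB = 'MOZPHAB'

ACCOUNT_TYPES = (
    ('BMO', 'Bugzilla (BMO)'),
    (TYPE_MOZPHAB, 'Mozilla Phabricator'),  # the entry introduced by this change
    ('REMO', 'Mozilla Reps'),
    # ... remaining services as listed in the migration ...
)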
dec30baf66f852c272642e8b7857001767875a5f
|
cmfieldguide/cmsdetector/signatures/hippo.py
|
cmfieldguide/cmsdetector/signatures/hippo.py
|
"""
This signature contains tests to see if the site is running on Hippo CMS.
"""
__author__ = "Jeroen Reijn"
__copyright__ = "CM Fieldguide"
__credits__ = ["Jeroen Reijn",]
__license__ = "Unlicense"
__version__ = "0.1"
__maintainer__ = "Jeroen Reijn"
__email__ = "j.reijn@onehippo.com"
__status__ = "Experimental"
from cmfieldguide.cmsdetector.signatures import BaseSignature
class Signature(BaseSignature):
NAME = 'Hippo CMS'
WEBSITE = 'http://www.onehippo.com/en/products/cms'
KNOWN_POSITIVE = 'www.onehippo.com'
TECHNOLOGY = 'JAVA'
def test_binaries_file_paths(self, site):
"""
Hippo CMS exposes image data generally from the binaries path.
"""
if site.home_page.contains_any_pattern(
['/binaries/content/gallery/']
):
return 1
else:
return 0
|
Add first working attempt at Hippo CMS signature
|
Add first working attempt at Hippo CMS signature
|
Python
|
unlicense
|
sggottlieb/cmfieldguide
|
Add first working attempt at Hippo CMS signature
|
"""
This signature contains tests to see if the site is running on Hippo CMS.
"""
__author__ = "Jeroen Reijn"
__copyright__ = "CM Fieldguide"
__credits__ = ["Jeroen Reijn",]
__license__ = "Unlicense"
__version__ = "0.1"
__maintainer__ = "Jeroen Reijn"
__email__ = "j.reijn@onehippo.com"
__status__ = "Experimental"
from cmfieldguide.cmsdetector.signatures import BaseSignature
class Signature(BaseSignature):
NAME = 'Hippo CMS'
WEBSITE = 'http://www.onehippo.com/en/products/cms'
KNOWN_POSITIVE = 'www.onehippo.com'
TECHNOLOGY = 'JAVA'
def test_binaries_file_paths(self, site):
"""
Hippo CMS exposes image data generally from the binaries path.
"""
if site.home_page.contains_any_pattern(
['/binaries/content/gallery/']
):
return 1
else:
return 0
|
<commit_before><commit_msg>Add first working attempt at Hippo CMS signature<commit_after>
|
"""
This signature contains tests to see if the site is running on Hippo CMS.
"""
__author__ = "Jeroen Reijn"
__copyright__ = "CM Fieldguide"
__credits__ = ["Jeroen Reijn",]
__license__ = "Unlicense"
__version__ = "0.1"
__maintainer__ = "Jeroen Reijn"
__email__ = "j.reijn@onehippo.com"
__status__ = "Experimental"
from cmfieldguide.cmsdetector.signatures import BaseSignature
class Signature(BaseSignature):
NAME = 'Hippo CMS'
WEBSITE = 'http://www.onehippo.com/en/products/cms'
KNOWN_POSITIVE = 'www.onehippo.com'
TECHNOLOGY = 'JAVA'
def test_binaries_file_paths(self, site):
"""
Hippo CMS exposes image data generally from the binaries path.
"""
if site.home_page.contains_any_pattern(
['/binaries/content/gallery/']
):
return 1
else:
return 0
|
Add first working attempt at Hippo CMS signature
"""
This signature contains tests to see if the site is running on Hippo CMS.
"""
__author__ = "Jeroen Reijn"
__copyright__ = "CM Fieldguide"
__credits__ = ["Jeroen Reijn",]
__license__ = "Unlicense"
__version__ = "0.1"
__maintainer__ = "Jeroen Reijn"
__email__ = "j.reijn@onehippo.com"
__status__ = "Experimental"
from cmfieldguide.cmsdetector.signatures import BaseSignature
class Signature(BaseSignature):
NAME = 'Hippo CMS'
WEBSITE = 'http://www.onehippo.com/en/products/cms'
KNOWN_POSITIVE = 'www.onehippo.com'
TECHNOLOGY = 'JAVA'
def test_binaries_file_paths(self, site):
"""
Hippo CMS exposes image data generally from the binaries path.
"""
if site.home_page.contains_any_pattern(
['/binaries/content/gallery/']
):
return 1
else:
return 0
|
<commit_before><commit_msg>Add first working attempt at Hippo CMS signature<commit_after>"""
This signature contains tests to see if the site is running on Hippo CMS.
"""
__author__ = "Jeroen Reijn"
__copyright__ = "CM Fieldguide"
__credits__ = ["Jeroen Reijn",]
__license__ = "Unlicense"
__version__ = "0.1"
__maintainer__ = "Jeroen Reijn"
__email__ = "j.reijn@onehippo.com"
__status__ = "Experimental"
from cmfieldguide.cmsdetector.signatures import BaseSignature
class Signature(BaseSignature):
NAME = 'Hippo CMS'
WEBSITE = 'http://www.onehippo.com/en/products/cms'
KNOWN_POSITIVE = 'www.onehippo.com'
TECHNOLOGY = 'JAVA'
def test_binaries_file_paths(self, site):
"""
Hippo CMS exposes image data generally from the binaries path.
"""
if site.home_page.contains_any_pattern(
['/binaries/content/gallery/']
):
return 1
else:
return 0
|
|
7564e35bbf9a4d09fb24326dae55d14fe987fbcf
|
workflows/logging/test_logging.py
|
workflows/logging/test_logging.py
|
from __future__ import absolute_import, division
import logging
import mock
import workflows.logging
def test_callback_handler_works_within_logging_framework():
cbmock = mock.Mock()
logmsg = 'Test message for callback'
log = logging.getLogger('workflows.tests.logging.callback')
log.setLevel(logging.INFO)
cbh = workflows.logging.CallbackHandler(cbmock)
log.addHandler(workflows.logging.CallbackHandler(cbmock))
log.info(logmsg)
cbmock.assert_called_once()
assert cbmock.call_args == ((mock.ANY,), {})
logrec = cbmock.call_args[0][0]
assert isinstance(logrec, logging.LogRecord)
assert logrec.name == 'workflows.tests.logging.callback'
assert logrec.levelname == 'INFO'
assert logrec.message == logmsg
assert logrec.funcName.startswith('test_')
|
Add test for logging callback handler
|
Add test for logging callback handler
|
Python
|
bsd-3-clause
|
DiamondLightSource/python-workflows,xia2/workflows
|
Add test for logging callback handler
|
from __future__ import absolute_import, division
import logging
import mock
import workflows.logging
def test_callback_handler_works_within_logging_framework():
cbmock = mock.Mock()
logmsg = 'Test message for callback'
log = logging.getLogger('workflows.tests.logging.callback')
log.setLevel(logging.INFO)
cbh = workflows.logging.CallbackHandler(cbmock)
log.addHandler(workflows.logging.CallbackHandler(cbmock))
log.info(logmsg)
cbmock.assert_called_once()
assert cbmock.call_args == ((mock.ANY,), {})
logrec = cbmock.call_args[0][0]
assert isinstance(logrec, logging.LogRecord)
assert logrec.name == 'workflows.tests.logging.callback'
assert logrec.levelname == 'INFO'
assert logrec.message == logmsg
assert logrec.funcName.startswith('test_')
|
<commit_before><commit_msg>Add test for logging callback handler<commit_after>
|
from __future__ import absolute_import, division
import logging
import mock
import workflows.logging
def test_callback_handler_works_within_logging_framework():
cbmock = mock.Mock()
logmsg = 'Test message for callback'
log = logging.getLogger('workflows.tests.logging.callback')
log.setLevel(logging.INFO)
cbh = workflows.logging.CallbackHandler(cbmock)
log.addHandler(workflows.logging.CallbackHandler(cbmock))
log.info(logmsg)
cbmock.assert_called_once()
assert cbmock.call_args == ((mock.ANY,), {})
logrec = cbmock.call_args[0][0]
assert isinstance(logrec, logging.LogRecord)
assert logrec.name == 'workflows.tests.logging.callback'
assert logrec.levelname == 'INFO'
assert logrec.message == logmsg
assert logrec.funcName.startswith('test_')
|
Add test for logging callback handler
from __future__ import absolute_import, division
import logging
import mock
import workflows.logging
def test_callback_handler_works_within_logging_framework():
cbmock = mock.Mock()
logmsg = 'Test message for callback'
log = logging.getLogger('workflows.tests.logging.callback')
log.setLevel(logging.INFO)
cbh = workflows.logging.CallbackHandler(cbmock)
log.addHandler(workflows.logging.CallbackHandler(cbmock))
log.info(logmsg)
cbmock.assert_called_once()
assert cbmock.call_args == ((mock.ANY,), {})
logrec = cbmock.call_args[0][0]
assert isinstance(logrec, logging.LogRecord)
assert logrec.name == 'workflows.tests.logging.callback'
assert logrec.levelname == 'INFO'
assert logrec.message == logmsg
assert logrec.funcName.startswith('test_')
|
<commit_before><commit_msg>Add test for logging callback handler<commit_after>from __future__ import absolute_import, division
import logging
import mock
import workflows.logging
def test_callback_handler_works_within_logging_framework():
cbmock = mock.Mock()
logmsg = 'Test message for callback'
log = logging.getLogger('workflows.tests.logging.callback')
log.setLevel(logging.INFO)
cbh = workflows.logging.CallbackHandler(cbmock)
log.addHandler(workflows.logging.CallbackHandler(cbmock))
log.info(logmsg)
cbmock.assert_called_once()
assert cbmock.call_args == ((mock.ANY,), {})
logrec = cbmock.call_args[0][0]
assert isinstance(logrec, logging.LogRecord)
assert logrec.name == 'workflows.tests.logging.callback'
assert logrec.levelname == 'INFO'
assert logrec.message == logmsg
assert logrec.funcName.startswith('test_')
|
|
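The test above pins down the observable behaviour of CallbackHandler: it is constructed with a callable and hands that callable a LogRecord whose message attribute is populated. A minimal sketch of a handler with that behaviour (not the actual workflows.logging implementation):

import logging

class CallbackHandler(logging.Handler):
    def __init__(self, callback):
        logging.Handler.__init__(self)
        self._callback = callback

    def emit(self, record):
        # Formatting populates record.message, which the assertions above read.
        self.format(record)
        self._callback(record)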
18fa807ad107fe4c6301087137920f4a563341b3
|
Instanssi/arkistoija/forms.py
|
Instanssi/arkistoija/forms.py
|
# -*- coding: utf-8 -*-
from django import forms
from uni_form.helper import FormHelper
from uni_form.layout import Submit, Layout, Fieldset, ButtonHolder
from django.core.exceptions import ValidationError
import os
class EventForm(forms.Form):
name = forms.CharField(label=u'Nimi', max_length=32)
date = forms.DateField(label=u'Päivämäärä')
def __init__(self, *args, **kwargs):
super(EventForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
'name',
'date',
)
|
Add form for getting event data on Arkistoija app
|
Add form for getting event data on Arkistoija app
|
Python
|
mit
|
Instanssi/Instanssi.org,Instanssi/Instanssi.org,Instanssi/Instanssi.org,Instanssi/Instanssi.org
|
Add form for getting event data on Arkistoija app
|
# -*- coding: utf-8 -*-
from django import forms
from uni_form.helper import FormHelper
from uni_form.layout import Submit, Layout, Fieldset, ButtonHolder
from django.core.exceptions import ValidationError
import os
class EventForm(forms.Form):
name = forms.CharField(label=u'Nimi', max_length=32)
date = forms.DateField(label=u'Päivämäärä')
def __init__(self, *args, **kwargs):
super(EventForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
'name',
'date',
)
|
<commit_before><commit_msg>Add form for getting event data on Arkistoija app<commit_after>
|
# -*- coding: utf-8 -*-
from django import forms
from uni_form.helper import FormHelper
from uni_form.layout import Submit, Layout, Fieldset, ButtonHolder
from django.core.exceptions import ValidationError
import os
class EventForm(forms.Form):
name = forms.CharField(label=u'Nimi', max_length=32)
date = forms.DateField(label=u'Päivämäärä')
def __init__(self, *args, **kwargs):
super(EventForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
'name',
'date',
)
|
Add form for getting event data on Arkistoija app
# -*- coding: utf-8 -*-
from django import forms
from uni_form.helper import FormHelper
from uni_form.layout import Submit, Layout, Fieldset, ButtonHolder
from django.core.exceptions import ValidationError
import os
class EventForm(forms.Form):
name = forms.CharField(label=u'Nimi', max_length=32)
date = forms.DateField(label=u'Päivämäärä')
def __init__(self, *args, **kwargs):
super(EventForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
'name',
'date',
)
|
<commit_before><commit_msg>Add form for getting event data on Arkistoija app<commit_after># -*- coding: utf-8 -*-
from django import forms
from uni_form.helper import FormHelper
from uni_form.layout import Submit, Layout, Fieldset, ButtonHolder
from django.core.exceptions import ValidationError
import os
class EventForm(forms.Form):
name = forms.CharField(label=u'Nimi', max_length=32)
date = forms.DateField(label=u'Päivämäärä')
def __init__(self, *args, **kwargs):
super(EventForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
'name',
'date',
)
|
|
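A hedged sketch of how a form like EventForm is typically driven from a Django view; the view function, template path, and URL name below are hypothetical and not part of the app above:

from django.shortcuts import redirect, render

from .forms import EventForm

def archive_event(request):
    form = EventForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        # Validated name/date are available via cleaned_data.
        name = form.cleaned_data['name']
        date = form.cleaned_data['date']
        # ... hand the validated values to the archiving code ...
        return redirect('archiver-done')
    return render(request, 'arkistoija/index.html', {'form': form})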
e66f9b6e6cb85392e1be9cf0733a3b358c43f340
|
hacks/compress_folder.py
|
hacks/compress_folder.py
|
#!/bin/env python
"""
Compress and delete a folder if 50% of the files inside it are less than 2MB.
It only considers files in the first level of the folder.
"""
import os
import shutil
import sys
import tarfile
from contextlib import contextmanager
@contextmanager
def cd(newdir):
prevdir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
yield
finally:
os.chdir(prevdir)
def tar_dir(dest, source):
dirname = os.path.dirname(source)
with cd(dirname):
basename = os.path.basename(source)
with tarfile.open(dest, mode='w:gz') as archive:
for i in os.listdir(source):
archive.add(os.path.join(basename, i))
def main(dirname, mb_threshold=2):
if not os.path.exists(dirname) or not os.path.isdir(dirname):
print 'Unexistent folder: {}. Bye!'.format(dirname)
return None
# Grab file sizes in MB
files_size = [os.path.getsize(os.path.join(dirname, f)) / 1024 / 1024
for f in os.listdir(dirname)
if os.path.isfile(os.path.join(dirname, f))]
if len(files_size) < 1:
print 'Skip folder compression. No files inside folder.'
return None
# Compute median of file sizes
n_files = len(files_size)
files_size.sort()
mb_median = files_size[n_files/2]
if mb_median > mb_threshold:
print 'Skip folder compression. Good amount of big files.'
return None
source, basename = os.path.abspath(dirname), os.path.basename(dirname)
destination = os.path.join(os.path.dirname(source), basename + '.tar.gz')
print 'Compressing file onto {}'.format(destination)
tar_dir(destination, source)
try:
with tarfile.open(destination, 'r') as fid:
successfull_compresion = True
fid.close()
except:
successfull_compresion = False
print 'Unknown error. Deleting compressed file:'
os.remove(destination)
if successfull_compresion:
shutil.rmtree(dirname)
if __name__ == '__main__':
if len(sys.argv) != 2:
print 'compress_folder.py [folder]'
main(sys.argv[1])
|
Compress folder with small files
|
[hacks] Compress folder with small files
|
Python
|
mit
|
escorciav/linux-utils,escorciav/linux-utils
|
[hacks] Compress folder with small files
|
#!/bin/env python
"""
Compress and delete a folder if 50% of the files inside it are less than 2MB.
It only considers files in the first level of the folder.
"""
import os
import shutil
import sys
import tarfile
from contextlib import contextmanager
@contextmanager
def cd(newdir):
prevdir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
yield
finally:
os.chdir(prevdir)
def tar_dir(dest, source):
dirname = os.path.dirname(source)
with cd(dirname):
basename = os.path.basename(source)
with tarfile.open(dest, mode='w:gz') as archive:
for i in os.listdir(source):
archive.add(os.path.join(basename, i))
def main(dirname, mb_threshold=2):
if not os.path.exists(dirname) or not os.path.isdir(dirname):
print 'Unexistent folder: {}. Bye!'.format(dirname)
return None
# Grab file sizes in MB
files_size = [os.path.getsize(os.path.join(dirname, f)) / 1024 / 1024
for f in os.listdir(dirname)
if os.path.isfile(os.path.join(dirname, f))]
if len(files_size) < 1:
print 'Skip folder compression. No files inside folder.'
return None
# Compute median of file sizes
n_files = len(files_size)
files_size.sort()
mb_median = files_size[n_files/2]
if mb_median > mb_threshold:
print 'Skip folder compression. Good amount of big files.'
return None
source, basename = os.path.abspath(dirname), os.path.basename(dirname)
destination = os.path.join(os.path.dirname(source), basename + '.tar.gz')
print 'Compressing file onto {}'.format(destination)
tar_dir(destination, source)
try:
with tarfile.open(destination, 'r') as fid:
successfull_compresion = True
fid.close()
except:
successfull_compresion = False
print 'Unknown error. Deleting compressed file:'
os.remove(destination)
if successfull_compresion:
shutil.rmtree(dirname)
if __name__ == '__main__':
if len(sys.argv) != 2:
print 'compress_folder.py [folder]'
main(sys.argv[1])
|
<commit_before><commit_msg>[hacks] Compress folder with small files<commit_after>
|
#!/bin/env python
"""
Compress and delete a folder if 50% of the files inside it are less than 2MB.
It only considers files in the first level of the folder.
"""
import os
import shutil
import sys
import tarfile
from contextlib import contextmanager
@contextmanager
def cd(newdir):
prevdir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
yield
finally:
os.chdir(prevdir)
def tar_dir(dest, source):
dirname = os.path.dirname(source)
with cd(dirname):
basename = os.path.basename(source)
with tarfile.open(dest, mode='w:gz') as archive:
for i in os.listdir(source):
archive.add(os.path.join(basename, i))
def main(dirname, mb_threshold=2):
if not os.path.exists(dirname) or not os.path.isdir(dirname):
print 'Unexistent folder: {}. Bye!'.format(dirname)
return None
# Grab file sizes in MB
files_size = [os.path.getsize(os.path.join(dirname, f)) / 1024 / 1024
for f in os.listdir(dirname)
if os.path.isfile(os.path.join(dirname, f))]
if len(files_size) < 1:
print 'Skip folder compression. No files inside folder.'
return None
# Compute median of file sizes
n_files = len(files_size)
files_size.sort()
mb_median = files_size[n_files/2]
if mb_median > mb_threshold:
print 'Skip folder compression. Good amount of big files.'
return None
source, basename = os.path.abspath(dirname), os.path.basename(dirname)
destination = os.path.join(os.path.dirname(source), basename + '.tar.gz')
print 'Compressing file onto {}'.format(destination)
tar_dir(destination, source)
try:
with tarfile.open(destination, 'r') as fid:
successfull_compresion = True
fid.close()
except:
successfull_compresion = False
print 'Unknown error. Deleting compressed file:'
os.remove(destination)
if successfull_compresion:
shutil.rmtree(dirname)
if __name__ == '__main__':
if len(sys.argv) != 2:
print 'compress_folder.py [folder]'
main(sys.argv[1])
|
[hacks] Compress folder with small files
#!/bin/env python
"""
Compress and delete a folder if 50% of the files inside it are less than 2MB.
It only considers files in the first level of the folder.
"""
import os
import shutil
import sys
import tarfile
from contextlib import contextmanager
@contextmanager
def cd(newdir):
prevdir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
yield
finally:
os.chdir(prevdir)
def tar_dir(dest, source):
dirname = os.path.dirname(source)
with cd(dirname):
basename = os.path.basename(source)
with tarfile.open(dest, mode='w:gz') as archive:
for i in os.listdir(source):
archive.add(os.path.join(basename, i))
def main(dirname, mb_threshold=2):
if not os.path.exists(dirname) or not os.path.isdir(dirname):
        print 'Nonexistent folder: {}. Bye!'.format(dirname)
return None
# Grab file sizes in MB
files_size = [os.path.getsize(os.path.join(dirname, f)) / 1024 / 1024
for f in os.listdir(dirname)
if os.path.isfile(os.path.join(dirname, f))]
if len(files_size) < 1:
print 'Skip folder compression. No files inside folder.'
return None
# Compute median of file sizes
n_files = len(files_size)
files_size.sort()
mb_median = files_size[n_files/2]
if mb_median > mb_threshold:
print 'Skip folder compression. Good amount of big files.'
return None
source, basename = os.path.abspath(dirname), os.path.basename(dirname)
destination = os.path.join(os.path.dirname(source), basename + '.tar.gz')
print 'Compressing file onto {}'.format(destination)
tar_dir(destination, source)
try:
with tarfile.open(destination, 'r') as fid:
successfull_compresion = True
fid.close()
except:
successfull_compresion = False
print 'Unknown error. Deleting compressed file:'
os.remove(destination)
if successfull_compresion:
shutil.rmtree(dirname)
if __name__ == '__main__':
if len(sys.argv) != 2:
print 'compress_folder.py [folder]'
main(sys.argv[1])
|
<commit_before><commit_msg>[hacks] Compress folder with small files<commit_after>#!/bin/env python
"""
Compress and delete a folder if 50% of the files inside it are less than 2MB.
It only considers files in the first level of the folder.
"""
import os
import shutil
import sys
import tarfile
from contextlib import contextmanager
@contextmanager
def cd(newdir):
prevdir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
yield
finally:
os.chdir(prevdir)
def tar_dir(dest, source):
dirname = os.path.dirname(source)
with cd(dirname):
basename = os.path.basename(source)
with tarfile.open(dest, mode='w:gz') as archive:
for i in os.listdir(source):
archive.add(os.path.join(basename, i))
def main(dirname, mb_threshold=2):
if not os.path.exists(dirname) or not os.path.isdir(dirname):
        print 'Nonexistent folder: {}. Bye!'.format(dirname)
return None
# Grab file sizes in MB
files_size = [os.path.getsize(os.path.join(dirname, f)) / 1024 / 1024
for f in os.listdir(dirname)
if os.path.isfile(os.path.join(dirname, f))]
if len(files_size) < 1:
print 'Skip folder compression. No files inside folder.'
return None
# Compute median of file sizes
n_files = len(files_size)
files_size.sort()
mb_median = files_size[n_files/2]
if mb_median > mb_threshold:
print 'Skip folder compression. Good amount of big files.'
return None
source, basename = os.path.abspath(dirname), os.path.basename(dirname)
destination = os.path.join(os.path.dirname(source), basename + '.tar.gz')
print 'Compressing file onto {}'.format(destination)
tar_dir(destination, source)
try:
with tarfile.open(destination, 'r') as fid:
successfull_compresion = True
fid.close()
except:
successfull_compresion = False
print 'Unknown error. Deleting compressed file:'
os.remove(destination)
if successfull_compresion:
shutil.rmtree(dirname)
if __name__ == '__main__':
if len(sys.argv) != 2:
print 'compress_folder.py [folder]'
main(sys.argv[1])
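A small worked illustration of the median rule that main() applies (the sizes below are invented, not taken from the commit):
# Hypothetical illustration of the median check in main(); the sizes are made up.
sizes = [1, 1, 3, 5]                  # file sizes in MB, already sorted
mb_median = sizes[len(sizes) / 2]     # -> 3 (integer division, as in the script)
# 3 > 2 (the default mb_threshold), so this folder would be skipped;
# [1, 1, 1, 5] gives a median of 1 MB and the folder would be compressed and removed.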
|
|
c31bde7660d8fbaa6d2e1404defb3f9d1ca13fea
|
chandra_aca/tests/test_dark_scale.py
|
chandra_aca/tests/test_dark_scale.py
|
import numpy as np
from .. import dark_scale
def test_dark_temp_scale():
scale = dark_scale.dark_temp_scale(-10., -14)
assert np.allclose(scale, 0.70)
scale = dark_scale.dark_temp_scale(-10., -14, scale_4c=2.0)
assert scale == 0.5 # Should be an exact match
|
Add dark cal scale test
|
Add dark cal scale test
|
Python
|
bsd-2-clause
|
sot/chandra_aca,sot/chandra_aca
|
Add dark cal scale test
|
import numpy as np
from .. import dark_scale
def test_dark_temp_scale():
scale = dark_scale.dark_temp_scale(-10., -14)
assert np.allclose(scale, 0.70)
scale = dark_scale.dark_temp_scale(-10., -14, scale_4c=2.0)
assert scale == 0.5 # Should be an exact match
|
<commit_before><commit_msg>Add dark cal scale test<commit_after>
|
import numpy as np
from .. import dark_scale
def test_dark_temp_scale():
scale = dark_scale.dark_temp_scale(-10., -14)
assert np.allclose(scale, 0.70)
scale = dark_scale.dark_temp_scale(-10., -14, scale_4c=2.0)
assert scale == 0.5 # Should be an exact match
|
Add dark cal scale testimport numpy as np
from .. import dark_scale
def test_dark_temp_scale():
scale = dark_scale.dark_temp_scale(-10., -14)
assert np.allclose(scale, 0.70)
scale = dark_scale.dark_temp_scale(-10., -14, scale_4c=2.0)
assert scale == 0.5 # Should be an exact match
|
<commit_before><commit_msg>Add dark cal scale test<commit_after>import numpy as np
from .. import dark_scale
def test_dark_temp_scale():
scale = dark_scale.dark_temp_scale(-10., -14)
assert np.allclose(scale, 0.70)
scale = dark_scale.dark_temp_scale(-10., -14, scale_4c=2.0)
assert scale == 0.5 # Should be an exact match
|
|
4c8990d2e5a872da6083f2027e9c0d91e021bafd
|
samples/python/queue_create_alias.py
|
samples/python/queue_create_alias.py
|
'''
This sample will create a new alias queue.
MQWeb runs on localhost and is listening on port 8081.
'''
import sys
import json
import httplib
import socket
import argparse
parser = argparse.ArgumentParser(
description='MQWeb - Python sample - Create alias queue',
epilog="For more information: http://www.mqweb.org"
)
parser.add_argument('-m', '--queuemanager', help='Name of the queuemanager', required=True)
parser.add_argument('-a', '--alias', help='Name of the alias queue', required=True)
parser.add_argument('-t', '--target', help='Name of the target queue', required=True)
args = parser.parse_args()
url = "/api/queue/create/" + args.queuemanager
input = {
'QName' : args.alias,
'QType' : 'Alias',
'BaseObjectName' : args.target,
'BaseType' : 'Queue'
}
try:
conn = httplib.HTTPConnection('localhost', 8081)
headers = {
'Content-Type': 'application/json'
}
conn.request('POST', url, json.dumps(input), headers)
res = conn.getresponse()
result = json.loads(res.read())
if 'error' in result:
print ('Received a WebSphere MQ error: ' +
str(result['error']['reason']['code']) + ' - ' +
result['error']['reason']['desc']
)
else:
print('Alias ' + args.alias + ' for ' + args.target + ' created on ' + args.queuemanager)
except httplib.HTTPException as e:
    print ('An HTTP error occurred while creating the alias queue: ' + str(e))
except socket.error as e:
print e.strerror
print 'Is the MQWeb daemon running?'
|
Add create alias queue sample
|
Add create alias queue sample
|
Python
|
mit
|
fbraem/mqweb,fbraem/mqweb,fbraem/mqweb
|
Add create alias queue sample
|
'''
This sample will create a new alias queue.
MQWeb runs on localhost and is listening on port 8081.
'''
import sys
import json
import httplib
import socket
import argparse
parser = argparse.ArgumentParser(
description='MQWeb - Python sample - Create alias queue',
epilog="For more information: http://www.mqweb.org"
)
parser.add_argument('-m', '--queuemanager', help='Name of the queuemanager', required=True)
parser.add_argument('-a', '--alias', help='Name of the alias queue', required=True)
parser.add_argument('-t', '--target', help='Name of the target queue', required=True)
args = parser.parse_args()
url = "/api/queue/create/" + args.queuemanager
input = {
'QName' : args.alias,
'QType' : 'Alias',
'BaseObjectName' : args.target,
'BaseType' : 'Queue'
}
try:
conn = httplib.HTTPConnection('localhost', 8081)
headers = {
'Content-Type': 'application/json'
}
conn.request('POST', url, json.dumps(input), headers)
res = conn.getresponse()
result = json.loads(res.read())
if 'error' in result:
print ('Received a WebSphere MQ error: ' +
str(result['error']['reason']['code']) + ' - ' +
result['error']['reason']['desc']
)
else:
print('Alias ' + args.alias + ' for ' + args.target + ' created on ' + args.queuemanager)
except httplib.HTTPException as e:
    print ('An HTTP error occurred while creating the alias queue: ' + str(e))
except socket.error as e:
print e.strerror
print 'Is the MQWeb daemon running?'
|
<commit_before><commit_msg>Add create alias queue sample<commit_after>
|
'''
This sample will create a new alias queue.
MQWeb runs on localhost and is listening on port 8081.
'''
import sys
import json
import httplib
import socket
import argparse
parser = argparse.ArgumentParser(
description='MQWeb - Python sample - Create alias queue',
epilog="For more information: http://www.mqweb.org"
)
parser.add_argument('-m', '--queuemanager', help='Name of the queuemanager', required=True)
parser.add_argument('-a', '--alias', help='Name of the alias queue', required=True)
parser.add_argument('-t', '--target', help='Name of the target queue', required=True)
args = parser.parse_args()
url = "/api/queue/create/" + args.queuemanager
input = {
'QName' : args.alias,
'QType' : 'Alias',
'BaseObjectName' : args.target,
'BaseType' : 'Queue'
}
try:
conn = httplib.HTTPConnection('localhost', 8081)
headers = {
'Content-Type': 'application/json'
}
conn.request('POST', url, json.dumps(input), headers)
res = conn.getresponse()
result = json.loads(res.read())
if 'error' in result:
print ('Received a WebSphere MQ error: ' +
str(result['error']['reason']['code']) + ' - ' +
result['error']['reason']['desc']
)
else:
print('Alias ' + args.alias + ' for ' + args.target + ' created on ' + args.queuemanager)
except httplib.HTTPException as e:
    print ('An HTTP error occurred while creating the alias queue: ' + str(e))
except socket.error as e:
print e.strerror
print 'Is the MQWeb daemon running?'
|
Add create alias queue sample'''
This sample will create a new alias queue.
MQWeb runs on localhost and is listening on port 8081.
'''
import sys
import json
import httplib
import socket
import argparse
parser = argparse.ArgumentParser(
description='MQWeb - Python sample - Create alias queue',
epilog="For more information: http://www.mqweb.org"
)
parser.add_argument('-m', '--queuemanager', help='Name of the queuemanager', required=True)
parser.add_argument('-a', '--alias', help='Name of the alias queue', required=True)
parser.add_argument('-t', '--target', help='Name of the target queue', required=True)
args = parser.parse_args()
url = "/api/queue/create/" + args.queuemanager
input = {
'QName' : args.alias,
'QType' : 'Alias',
'BaseObjectName' : args.target,
'BaseType' : 'Queue'
}
try:
conn = httplib.HTTPConnection('localhost', 8081)
headers = {
'Content-Type': 'application/json'
}
conn.request('POST', url, json.dumps(input), headers)
res = conn.getresponse()
result = json.loads(res.read())
if 'error' in result:
print ('Received a WebSphere MQ error: ' +
str(result['error']['reason']['code']) + ' - ' +
result['error']['reason']['desc']
)
else:
print('Alias ' + args.alias + ' for ' + args.target + ' created on ' + args.queuemanager)
except httplib.HTTPException as e:
    print ('An HTTP error occurred while creating the alias queue: ' + str(e))
except socket.error as e:
print e.strerror
print 'Is the MQWeb daemon running?'
|
<commit_before><commit_msg>Add create alias queue sample<commit_after>'''
This sample will create a new alias queue.
MQWeb runs on localhost and is listening on port 8081.
'''
import sys
import json
import httplib
import socket
import argparse
parser = argparse.ArgumentParser(
description='MQWeb - Python sample - Create alias queue',
epilog="For more information: http://www.mqweb.org"
)
parser.add_argument('-m', '--queuemanager', help='Name of the queuemanager', required=True)
parser.add_argument('-a', '--alias', help='Name of the alias queue', required=True)
parser.add_argument('-t', '--target', help='Name of the target queue', required=True)
args = parser.parse_args()
url = "/api/queue/create/" + args.queuemanager
input = {
'QName' : args.alias,
'QType' : 'Alias',
'BaseObjectName' : args.target,
'BaseType' : 'Queue'
}
try:
conn = httplib.HTTPConnection('localhost', 8081)
headers = {
'Content-Type': 'application/json'
}
conn.request('POST', url, json.dumps(input), headers)
res = conn.getresponse()
result = json.loads(res.read())
if 'error' in result:
print ('Received a WebSphere MQ error: ' +
str(result['error']['reason']['code']) + ' - ' +
result['error']['reason']['desc']
)
else:
print('Alias ' + args.alias + ' for ' + args.target + ' created on ' + args.queuemanager)
except httplib.HTTPException as e:
    print ('An HTTP error occurred while creating the alias queue: ' + str(e))
except socket.error as e:
print e.strerror
print 'Is the MQWeb daemon running?'
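A usage sketch for the sample (the queue manager and queue names are placeholders; MQWeb is assumed to listen on localhost:8081 as the docstring states):
# Hypothetical invocation; QM1, APP.ALIAS and APP.TARGET are invented names.
#   python queue_create_alias.py -m QM1 -a APP.ALIAS -t APP.TARGET
# On success the script prints: Alias APP.ALIAS for APP.TARGET created on QM1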
|
|
cbaf9d3c8cf8f11e49f0edfa87b15bc925dc7d92
|
core/management/commands/delete_old_sessions.py
|
core/management/commands/delete_old_sessions.py
|
from datetime import datetime
from django.core.management.base import BaseCommand
from django.contrib.sessions.models import Session
"""
>>> def clean(count):
... for idx, s in enumerate(Session.objects.filter(expire_date__lt=now)[:count+1]):
... s.delete()
... if str(idx).endswith('000'): print idx
... print "{0} records left".format(Session.objects.filter(expire_date__lt=now).count())
...
"""
class Command(BaseCommand):
    args = '<count count ...>'
    help = "Delete old sessions"
    def handle(self, *args, **options):
        old_sessions = Session.objects.filter(expire_date__lt=datetime.now())
        self.stdout.write("Deleting {0} expired sessions".format(
            old_sessions.count()
        ))
        for index, session in enumerate(old_sessions):
            session.delete()
            if str(index).endswith('000'):
                self.stdout.write("{0} records deleted".format(index))
        self.stdout.write("{0} expired sessions remaining".format(
            Session.objects.filter(expire_date__lt=datetime.now()).count()
        ))
|
Add delete old sessions command
|
Add delete old sessions command
|
Python
|
mit
|
QLGu/djangopackages,pydanny/djangopackages,pydanny/djangopackages,nanuxbe/djangopackages,QLGu/djangopackages,nanuxbe/djangopackages,nanuxbe/djangopackages,QLGu/djangopackages,pydanny/djangopackages
|
Add delete old sessions command
|
from datetime import datetime
from django.core.management.base import BaseCommand
from django.contrib.sessions.models import Session
"""
>>> def clean(count):
... for idx, s in enumerate(Session.objects.filter(expire_date__lt=now)[:count+1]):
... s.delete()
... if str(idx).endswith('000'): print idx
... print "{0} records left".format(Session.objects.filter(expire_date__lt=now).count())
...
"""
class Command(BaseCommand):
    args = '<count count ...>'
    help = "Delete old sessions"
    def handle(self, *args, **options):
        old_sessions = Session.objects.filter(expire_date__lt=datetime.now())
        self.stdout.write("Deleting {0} expired sessions".format(
            old_sessions.count()
        ))
        for index, session in enumerate(old_sessions):
            session.delete()
            if str(index).endswith('000'):
                self.stdout.write("{0} records deleted".format(index))
        self.stdout.write("{0} expired sessions remaining".format(
            Session.objects.filter(expire_date__lt=datetime.now()).count()
        ))
|
<commit_before><commit_msg>Add delete old sessions command<commit_after>
|
from datetime import datetime
from django.core.management.base import BaseCommand
from django.contrib.sessions.models import Session
"""
>>> def clean(count):
... for idx, s in enumerate(Session.objects.filter(expire_date__lt=now)[:count+1]):
... s.delete()
... if str(idx).endswith('000'): print idx
... print "{0} records left".format(Session.objects.filter(expire_date__lt=now).count())
...
"""
class Command(BaseCommand):
    args = '<count count ...>'
    help = "Delete old sessions"
    def handle(self, *args, **options):
        old_sessions = Session.objects.filter(expire_date__lt=datetime.now())
        self.stdout.write("Deleting {0} expired sessions".format(
            old_sessions.count()
        ))
        for index, session in enumerate(old_sessions):
            session.delete()
            if str(index).endswith('000'):
                self.stdout.write("{0} records deleted".format(index))
        self.stdout.write("{0} expired sessions remaining".format(
            Session.objects.filter(expire_date__lt=datetime.now()).count()
        ))
|
Add delete old sessions commandfrom datetime import datetime
from django.core.management.base import BaseCommand
from django.contrib.sessions.models import Session
"""
>>> def clean(count):
... for idx, s in enumerate(Session.objects.filter(expire_date__lt=now)[:count+1]):
... s.delete()
... if str(idx).endswith('000'): print idx
... print "{0} records left".format(Session.objects.filter(expire_date__lt=now).count())
...
"""
class Command(BaseCommand):
    args = '<count count ...>'
    help = "Delete old sessions"
    def handle(self, *args, **options):
        old_sessions = Session.objects.filter(expire_date__lt=datetime.now())
        self.stdout.write("Deleting {0} expired sessions".format(
            old_sessions.count()
        ))
        for index, session in enumerate(old_sessions):
            session.delete()
            if str(index).endswith('000'):
                self.stdout.write("{0} records deleted".format(index))
        self.stdout.write("{0} expired sessions remaining".format(
            Session.objects.filter(expire_date__lt=datetime.now()).count()
        ))
|
<commit_before><commit_msg>Add delete old sessions command<commit_after>from datetime import datetime
from django.core.management.base import BaseCommand
from django.contrib.sessions.models import Session
"""
>>> def clean(count):
... for idx, s in enumerate(Session.objects.filter(expire_date__lt=now)[:count+1]):
... s.delete()
... if str(idx).endswith('000'): print idx
... print "{0} records left".format(Session.objects.filter(expire_date__lt=now).count())
...
"""
class Command(BaseCommand):
    args = '<count count ...>'
    help = "Delete old sessions"
    def handle(self, *args, **options):
        old_sessions = Session.objects.filter(expire_date__lt=datetime.now())
        self.stdout.write("Deleting {0} expired sessions".format(
            old_sessions.count()
        ))
        for index, session in enumerate(old_sessions):
            session.delete()
            if str(index).endswith('000'):
                self.stdout.write("{0} records deleted".format(index))
        self.stdout.write("{0} expired sessions remaining".format(
            Session.objects.filter(expire_date__lt=datetime.now()).count()
        ))
|
|
f8d5db373333145f8751ce8a4630eaf2ab02fcc7
|
Lib/test/crashers/file_threads.py
|
Lib/test/crashers/file_threads.py
|
# An example for http://bugs.python.org/issue815646
import thread
while 1:
f = open("/tmp/dupa", "w")
thread.start_new_thread(f.close, ())
f.close()
|
Add a crasher for the thread-unsafety of file objects.
|
Add a crasher for the thread-unsafety of file objects.
|
Python
|
mit
|
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
|
Add a crasher for the thread-unsafety of file objects.
|
# An example for http://bugs.python.org/issue815646
import thread
while 1:
f = open("/tmp/dupa", "w")
thread.start_new_thread(f.close, ())
f.close()
|
<commit_before><commit_msg>Add a crasher for the thread-unsafety of file objects.<commit_after>
|
# An example for http://bugs.python.org/issue815646
import thread
while 1:
f = open("/tmp/dupa", "w")
thread.start_new_thread(f.close, ())
f.close()
|
Add a crasher for the thread-unsafety of file objects.# An example for http://bugs.python.org/issue815646
import thread
while 1:
f = open("/tmp/dupa", "w")
thread.start_new_thread(f.close, ())
f.close()
|
<commit_before><commit_msg>Add a crasher for the thread-unsafety of file objects.<commit_after># An example for http://bugs.python.org/issue815646
import thread
while 1:
f = open("/tmp/dupa", "w")
thread.start_new_thread(f.close, ())
f.close()
|
|
f182dc362c40b7dc1a0afe7cad5721a271bccbe0
|
DBNsite/testing_DBNlogic/sets_test.py
|
DBNsite/testing_DBNlogic/sets_test.py
|
import pytest
import os
import numpy as np
import sys
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(myPath, '..'))
from DBNlogic.sets import exists, DataSet, MNIST, SmallerMNIST
def test_exists_true():
"""The function `exists` returns True if
given the path of an existing file."""
file = open('test.test', 'w+')
file.close()
assert exists('test.test')
os.remove('test.test')
def test_exists_false():
"""The function `exists` returns False if
given the path of an unexisting file."""
assert exists('test.test') == False
|
Add tests for back end
|
Add tests for back end
|
Python
|
mit
|
ggiuffre/DBNsim,ggiuffre/DBNsim,ggiuffre/DBNsim
|
Add tests for back end
|
import pytest
import os
import numpy as np
import sys
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(myPath, '..'))
from DBNlogic.sets import exists, DataSet, MNIST, SmallerMNIST
def test_exists_true():
"""The function `exists` returns True if
given the path of an existing file."""
file = open('test.test', 'w+')
file.close()
assert exists('test.test')
os.remove('test.test')
def test_exists_false():
"""The function `exists` returns False if
given the path of an unexisting file."""
assert exists('test.test') == False
|
<commit_before><commit_msg>Add tests for back end<commit_after>
|
import pytest
import os
import numpy as np
import sys
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(myPath, '..'))
from DBNlogic.sets import exists, DataSet, MNIST, SmallerMNIST
def test_exists_true():
"""The function `exists` returns True if
given the path of an existing file."""
file = open('test.test', 'w+')
file.close()
assert exists('test.test')
os.remove('test.test')
def test_exists_false():
"""The function `exists` returns False if
given the path of an unexisting file."""
assert exists('test.test') == False
|
Add tests for back endimport pytest
import os
import numpy as np
import sys
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(myPath, '..'))
from DBNlogic.sets import exists, DataSet, MNIST, SmallerMNIST
def test_exists_true():
"""The function `exists` returns True if
given the path of an existing file."""
file = open('test.test', 'w+')
file.close()
assert exists('test.test')
os.remove('test.test')
def test_exists_false():
"""The function `exists` returns False if
given the path of an unexisting file."""
assert exists('test.test') == False
|
<commit_before><commit_msg>Add tests for back end<commit_after>import pytest
import os
import numpy as np
import sys
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(myPath, '..'))
from DBNlogic.sets import exists, DataSet, MNIST, SmallerMNIST
def test_exists_true():
"""The function `exists` returns True if
given the path of an existing file."""
file = open('test.test', 'w+')
file.close()
assert exists('test.test')
os.remove('test.test')
def test_exists_false():
"""The function `exists` returns False if
given the path of an unexisting file."""
assert exists('test.test') == False
|
|
ba0e03cfdf3495b96d68f264e9e6406cd7ab0a59
|
tests/integration/test_end_to_end.py
|
tests/integration/test_end_to_end.py
|
"""Tests updating config of a real org and repo under the dothub-sandbox organization"""
import pytest
from click.testing import CliRunner
import tempfile
from dothub._main import dothub
from dothub import utils
import os
DOTHUB_TOKEN = os.environ.get("DOTHUB_USER_TOKEN")
CLI_BASE_ARGS = ["--user=dothub-user", "--token=" + str(DOTHUB_TOKEN)]
ORG_CONFIG = {
'teams': {
},
'hooks': {
},
'members': {
'dothub-bot': {
'role': 'admin'
},
'Mariocj89': {
'role': 'admin'
}
},
'options': {
'billing_email': 'mariocj89+dothub@gmail.com',
'description': 'Test updating the description',
'location': None,
'company': None,
'name': 'New name',
'email': None
}
}
skip_on_no_token = pytest.mark.skipif(
not DOTHUB_TOKEN,
reason="Missing DOTHUB user token for integration tests"
)
@pytest.yield_fixture()
def preserve_org():
"""Saves the org config and restores it at the end of each test"""
with tempfile.NamedTemporaryFile() as original_config:
original_config_file = original_config.name
args = CLI_BASE_ARGS + ["org", "--name=dothub-sandbox", "pull",
"--output_file=" + original_config_file]
result = CliRunner().invoke(dothub, args, obj={})
assert 0 == result.exit_code
yield
args = CLI_BASE_ARGS + ["org", "--name=dothub-sandbox", "push",
"--input_file=" + original_config_file]
result = CliRunner().invoke(dothub, args, obj={})
assert 0 == result.exit_code
@skip_on_no_token
def test_configure_org(preserve_org):
runner = CliRunner()
with tempfile.NamedTemporaryFile() as test_config:
test_config_file = test_config.name
# ########################
# Test updating the config
# ########################
utils.serialize_yaml(ORG_CONFIG, test_config_file)
args = CLI_BASE_ARGS + ["org", "--name=dothub-sandbox", "push",
"--input_file=" + test_config_file]
result = runner.invoke(dothub, args, obj={})
assert 0 == result.exit_code
# ##########################
# Test retrieving the config
# ##########################
args = CLI_BASE_ARGS + ["org", "--name=dothub-sandbox", "pull",
"--output_file=" + test_config_file]
result = runner.invoke(dothub, args, obj={})
assert 0 == result.exit_code
assert ORG_CONFIG == utils.load_yaml(test_config_file)
|
Add org end to end testing
|
Add org end to end testing
|
Python
|
mit
|
Mariocj89/dothub
|
Add org end to end testing
|
"""Tests updating config of a real org and repo under the dothub-sandbox organization"""
import pytest
from click.testing import CliRunner
import tempfile
from dothub._main import dothub
from dothub import utils
import os
DOTHUB_TOKEN = os.environ.get("DOTHUB_USER_TOKEN")
CLI_BASE_ARGS = ["--user=dothub-user", "--token=" + str(DOTHUB_TOKEN)]
ORG_CONFIG = {
'teams': {
},
'hooks': {
},
'members': {
'dothub-bot': {
'role': 'admin'
},
'Mariocj89': {
'role': 'admin'
}
},
'options': {
'billing_email': 'mariocj89+dothub@gmail.com',
'description': 'Test updating the description',
'location': None,
'company': None,
'name': 'New name',
'email': None
}
}
skip_on_no_token = pytest.mark.skipif(
not DOTHUB_TOKEN,
reason="Missing DOTHUB user token for integration tests"
)
@pytest.yield_fixture()
def preserve_org():
"""Saves the org config and restores it at the end of each test"""
with tempfile.NamedTemporaryFile() as original_config:
original_config_file = original_config.name
args = CLI_BASE_ARGS + ["org", "--name=dothub-sandbox", "pull",
"--output_file=" + original_config_file]
result = CliRunner().invoke(dothub, args, obj={})
assert 0 == result.exit_code
yield
args = CLI_BASE_ARGS + ["org", "--name=dothub-sandbox", "push",
"--input_file=" + original_config_file]
result = CliRunner().invoke(dothub, args, obj={})
assert 0 == result.exit_code
@skip_on_no_token
def test_configure_org(preserve_org):
runner = CliRunner()
with tempfile.NamedTemporaryFile() as test_config:
test_config_file = test_config.name
# ########################
# Test updating the config
# ########################
utils.serialize_yaml(ORG_CONFIG, test_config_file)
args = CLI_BASE_ARGS + ["org", "--name=dothub-sandbox", "push",
"--input_file=" + test_config_file]
result = runner.invoke(dothub, args, obj={})
assert 0 == result.exit_code
# ##########################
# Test retrieving the config
# ##########################
args = CLI_BASE_ARGS + ["org", "--name=dothub-sandbox", "pull",
"--output_file=" + test_config_file]
result = runner.invoke(dothub, args, obj={})
assert 0 == result.exit_code
assert ORG_CONFIG == utils.load_yaml(test_config_file)
|
<commit_before><commit_msg>Add org end to end testing<commit_after>
|
"""Tests updating config of a real org and repo under the dothub-sandbox organization"""
import pytest
from click.testing import CliRunner
import tempfile
from dothub._main import dothub
from dothub import utils
import os
DOTHUB_TOKEN = os.environ.get("DOTHUB_USER_TOKEN")
CLI_BASE_ARGS = ["--user=dothub-user", "--token=" + str(DOTHUB_TOKEN)]
ORG_CONFIG = {
'teams': {
},
'hooks': {
},
'members': {
'dothub-bot': {
'role': 'admin'
},
'Mariocj89': {
'role': 'admin'
}
},
'options': {
'billing_email': 'mariocj89+dothub@gmail.com',
'description': 'Test updating the description',
'location': None,
'company': None,
'name': 'New name',
'email': None
}
}
skip_on_no_token = pytest.mark.skipif(
not DOTHUB_TOKEN,
reason="Missing DOTHUB user token for integration tests"
)
@pytest.yield_fixture()
def preserve_org():
"""Saves the org config and restores it at the end of each test"""
with tempfile.NamedTemporaryFile() as original_config:
original_config_file = original_config.name
args = CLI_BASE_ARGS + ["org", "--name=dothub-sandbox", "pull",
"--output_file=" + original_config_file]
result = CliRunner().invoke(dothub, args, obj={})
assert 0 == result.exit_code
yield
args = CLI_BASE_ARGS + ["org", "--name=dothub-sandbox", "push",
"--input_file=" + original_config_file]
result = CliRunner().invoke(dothub, args, obj={})
assert 0 == result.exit_code
@skip_on_no_token
def test_configure_org(preserve_org):
runner = CliRunner()
with tempfile.NamedTemporaryFile() as test_config:
test_config_file = test_config.name
# ########################
# Test updating the config
# ########################
utils.serialize_yaml(ORG_CONFIG, test_config_file)
args = CLI_BASE_ARGS + ["org", "--name=dothub-sandbox", "push",
"--input_file=" + test_config_file]
result = runner.invoke(dothub, args, obj={})
assert 0 == result.exit_code
# ##########################
# Test retrieving the config
# ##########################
args = CLI_BASE_ARGS + ["org", "--name=dothub-sandbox", "pull",
"--output_file=" + test_config_file]
result = runner.invoke(dothub, args, obj={})
assert 0 == result.exit_code
assert ORG_CONFIG == utils.load_yaml(test_config_file)
|
Add org end to end testing"""Tests updating config of a real org and repo under the dothub-sandbox organization"""
import pytest
from click.testing import CliRunner
import tempfile
from dothub._main import dothub
from dothub import utils
import os
DOTHUB_TOKEN = os.environ.get("DOTHUB_USER_TOKEN")
CLI_BASE_ARGS = ["--user=dothub-user", "--token=" + str(DOTHUB_TOKEN)]
ORG_CONFIG = {
'teams': {
},
'hooks': {
},
'members': {
'dothub-bot': {
'role': 'admin'
},
'Mariocj89': {
'role': 'admin'
}
},
'options': {
'billing_email': 'mariocj89+dothub@gmail.com',
'description': 'Test updating the description',
'location': None,
'company': None,
'name': 'New name',
'email': None
}
}
skip_on_no_token = pytest.mark.skipif(
not DOTHUB_TOKEN,
reason="Missing DOTHUB user token for integration tests"
)
@pytest.yield_fixture()
def preserve_org():
"""Saves the org config and restores it at the end of each test"""
with tempfile.NamedTemporaryFile() as original_config:
original_config_file = original_config.name
args = CLI_BASE_ARGS + ["org", "--name=dothub-sandbox", "pull",
"--output_file=" + original_config_file]
result = CliRunner().invoke(dothub, args, obj={})
assert 0 == result.exit_code
yield
args = CLI_BASE_ARGS + ["org", "--name=dothub-sandbox", "push",
"--input_file=" + original_config_file]
result = CliRunner().invoke(dothub, args, obj={})
assert 0 == result.exit_code
@skip_on_no_token
def test_configure_org(preserve_org):
runner = CliRunner()
with tempfile.NamedTemporaryFile() as test_config:
test_config_file = test_config.name
# ########################
# Test updating the config
# ########################
utils.serialize_yaml(ORG_CONFIG, test_config_file)
args = CLI_BASE_ARGS + ["org", "--name=dothub-sandbox", "push",
"--input_file=" + test_config_file]
result = runner.invoke(dothub, args, obj={})
assert 0 == result.exit_code
# ##########################
# Test retrieving the config
# ##########################
args = CLI_BASE_ARGS + ["org", "--name=dothub-sandbox", "pull",
"--output_file=" + test_config_file]
result = runner.invoke(dothub, args, obj={})
assert 0 == result.exit_code
assert ORG_CONFIG == utils.load_yaml(test_config_file)
|
<commit_before><commit_msg>Add org end to end testing<commit_after>"""Tests updating config of a real org and repo under the dothub-sandbox organization"""
import pytest
from click.testing import CliRunner
import tempfile
from dothub._main import dothub
from dothub import utils
import os
DOTHUB_TOKEN = os.environ.get("DOTHUB_USER_TOKEN")
CLI_BASE_ARGS = ["--user=dothub-user", "--token=" + str(DOTHUB_TOKEN)]
ORG_CONFIG = {
'teams': {
},
'hooks': {
},
'members': {
'dothub-bot': {
'role': 'admin'
},
'Mariocj89': {
'role': 'admin'
}
},
'options': {
'billing_email': 'mariocj89+dothub@gmail.com',
'description': 'Test updating the description',
'location': None,
'company': None,
'name': 'New name',
'email': None
}
}
skip_on_no_token = pytest.mark.skipif(
not DOTHUB_TOKEN,
reason="Missing DOTHUB user token for integration tests"
)
@pytest.yield_fixture()
def preserve_org():
"""Saves the org config and restores it at the end of each test"""
with tempfile.NamedTemporaryFile() as original_config:
original_config_file = original_config.name
args = CLI_BASE_ARGS + ["org", "--name=dothub-sandbox", "pull",
"--output_file=" + original_config_file]
result = CliRunner().invoke(dothub, args, obj={})
assert 0 == result.exit_code
yield
args = CLI_BASE_ARGS + ["org", "--name=dothub-sandbox", "push",
"--input_file=" + original_config_file]
result = CliRunner().invoke(dothub, args, obj={})
assert 0 == result.exit_code
@skip_on_no_token
def test_configure_org(preserve_org):
runner = CliRunner()
with tempfile.NamedTemporaryFile() as test_config:
test_config_file = test_config.name
# ########################
# Test updating the config
# ########################
utils.serialize_yaml(ORG_CONFIG, test_config_file)
args = CLI_BASE_ARGS + ["org", "--name=dothub-sandbox", "push",
"--input_file=" + test_config_file]
result = runner.invoke(dothub, args, obj={})
assert 0 == result.exit_code
# ##########################
# Test retrieving the config
# ##########################
args = CLI_BASE_ARGS + ["org", "--name=dothub-sandbox", "pull",
"--output_file=" + test_config_file]
result = runner.invoke(dothub, args, obj={})
assert 0 == result.exit_code
assert ORG_CONFIG == utils.load_yaml(test_config_file)
|
|
cdf33a078e8d50ee58ae86000e9e21a326b014c6
|
capple.py
|
capple.py
|
# http://www.codechef.com/DEC14/problems/CAPPLE
def pluck(trees):
# find the biggest and the second biggest number in the list
maximum = max(trees)
second_max = max( map(lambda x: -1 if x == maximum else x, trees) )
if maximum == second_max:
return [0]*len(trees)
else:
diff = maximum - second_max
return map(lambda x: x - diff if x == maximum else x, trees)
def time(trees):
counter = 0
while not all(i == 0 for i in trees):
trees = pluck(trees)
counter += 1
return counter - 1
'''print time(range(1,1000))'''
inputs = int(raw_input())
while inputs:
length = int(raw_input())
trees = map(int, raw_input().split())
print time(trees)
inputs -= 1
|
Write solution to Chef and Apples problem.
|
Write solution to Chef and Apples problem.
|
Python
|
mit
|
paramsingh/cp,paramsingh/cp,paramsingh/codechef-solutions,paramsingh/codechef-solutions,paramsingh/codechef-solutions,paramsingh/cp,paramsingh/cp,paramsingh/cp,paramsingh/codechef-solutions
|
Write solution to Chef and Apples problem.
|
# http://www.codechef.com/DEC14/problems/CAPPLE
def pluck(trees):
# find the biggest and the second biggest number in the list
maximum = max(trees)
second_max = max( map(lambda x: -1 if x == maximum else x, trees) )
if maximum == second_max:
return [0]*len(trees)
else:
diff = maximum - second_max
return map(lambda x: x - diff if x == maximum else x, trees)
def time(trees):
counter = 0
while not all(i == 0 for i in trees):
trees = pluck(trees)
counter += 1
return counter - 1
'''print time(range(1,1000))'''
inputs = int(raw_input())
while inputs:
length = int(raw_input())
trees = map(int, raw_input().split())
print time(trees)
inputs -= 1
|
<commit_before><commit_msg>Write solution to Chef and Apples problem.<commit_after>
|
# http://www.codechef.com/DEC14/problems/CAPPLE
def pluck(trees):
# find the biggest and the second biggest number in the list
maximum = max(trees)
second_max = max( map(lambda x: -1 if x == maximum else x, trees) )
if maximum == second_max:
return [0]*len(trees)
else:
diff = maximum - second_max
return map(lambda x: x - diff if x == maximum else x, trees)
def time(trees):
counter = 0
while not all(i == 0 for i in trees):
trees = pluck(trees)
counter += 1
return counter - 1
'''print time(range(1,1000))'''
inputs = int(raw_input())
while inputs:
length = int(raw_input())
trees = map(int, raw_input().split())
print time(trees)
inputs -= 1
|
Write solution to Chef and Apples problem.# http://www.codechef.com/DEC14/problems/CAPPLE
def pluck(trees):
# find the biggest and the second biggest number in the list
maximum = max(trees)
second_max = max( map(lambda x: -1 if x == maximum else x, trees) )
if maximum == second_max:
return [0]*len(trees)
else:
diff = maximum - second_max
return map(lambda x: x - diff if x == maximum else x, trees)
def time(trees):
counter = 0
while not all(i == 0 for i in trees):
trees = pluck(trees)
counter += 1
return counter - 1
'''print time(range(1,1000))'''
inputs = int(raw_input())
while inputs:
length = int(raw_input())
trees = map(int, raw_input().split())
print time(trees)
inputs -= 1
|
<commit_before><commit_msg>Write solution to Chef and Apples problem.<commit_after># http://www.codechef.com/DEC14/problems/CAPPLE
def pluck(trees):
# find the biggest and the second biggest number in the list
maximum = max(trees)
second_max = max( map(lambda x: -1 if x == maximum else x, trees) )
if maximum == second_max:
return [0]*len(trees)
else:
diff = maximum - second_max
return map(lambda x: x - diff if x == maximum else x, trees)
def time(trees):
counter = 0
while not all(i == 0 for i in trees):
trees = pluck(trees)
counter += 1
return counter - 1
'''print time(range(1,1000))'''
inputs = int(raw_input())
while inputs:
length = int(raw_input())
trees = map(int, raw_input().split())
print time(trees)
inputs -= 1
|
|
09a592e12fb6b6d37bc9e1b760aebdba999992e9
|
debugger/chrome_tab_finder.py
|
debugger/chrome_tab_finder.py
|
from util import *
import json
class ChromeTabFinder(object):
def __init__(self, host, port):
self._next_seq = 0
self._timer = Timer(250)
self._timer.tick.add_listener(self._tick)
self._host = host
self._port = port
self._session = None
self._get_tab_list_pending = False
self._tick()
def _tick(self):
if not self._session:
self._try_connect()
elif not self._get_tab_list_pending:
self._begin_get_tab_list()
def _try_connect(self):
try:
s = socket.socket()
s.connect((self._host, self._port))
self._do_handshake(s)
self._session = AsyncHTTPSession(s)
except:
self._session = None
log2("Could not connect to chrome on %s:%s", self._host, self._port)
@property
def chrome_found(self):
return self._session != None
def _do_handshake(self,s):
i = "ChromeDevToolsHandshake"
print len(i)
handshake = "ChromeDevToolsHandshake\r\n"
remaining = handshake
while len(remaining):
sent = s.send(handshake)
remaining = remaining[sent:]
handshake_ack = s.recv(len(handshake))
if handshake_ack != handshake:
raise Exception('handshake failed')
def _begin_get_tab_list(self):
self._get_tab_list_pending = True
self._session.request({"Tool":"DevToolsService"}, json.dumps({"command" : "list_tabs"}), self._finish_get_tab_list)
def _finish_get_tab_list(self, headers, content):
self._get_tab_list_pending = False
resp = json.loads(content)
print resp
def _on_close(self):
log1("chrome connection was closed. chrome processes won't be available.")
self._session = None
if __name__ == "__main__":
set_loglevel(2)
def init(*args):
try:
be = ChromeTabFinder(*args)
except:
import traceback; traceback.print_exc();
MessageLoop.quit()
# for chrome, launch with chrome --remote-shell-port
import sys
MessageLoop.add_message(init, "localhost", int(sys.argv[1]))
# MessageLoop.add_message(init, "localhost", 5858)
MessageLoop.run_no_gtk(lambda: False)
print "main done"
|
Make code that simply finds chrome tabs to attach to.
|
Make code that simply finds chrome tabs to attach to.
|
Python
|
apache-2.0
|
natduca/ndbg,natduca/ndbg,natduca/ndbg,natduca/ndbg,natduca/ndbg
|
Make code that simply finds chrome tabs to attach to.
|
from util import *
import json
class ChromeTabFinder(object):
def __init__(self, host, port):
self._next_seq = 0
self._timer = Timer(250)
self._timer.tick.add_listener(self._tick)
self._host = host
self._port = port
self._session = None
self._get_tab_list_pending = False
self._tick()
def _tick(self):
if not self._session:
self._try_connect()
elif not self._get_tab_list_pending:
self._begin_get_tab_list()
def _try_connect(self):
try:
s = socket.socket()
s.connect((self._host, self._port))
self._do_handshake(s)
self._session = AsyncHTTPSession(s)
except:
self._session = None
log2("Could not connect to chrome on %s:%s", self._host, self._port)
@property
def chrome_found(self):
return self._session != None
def _do_handshake(self,s):
i = "ChromeDevToolsHandshake"
print len(i)
handshake = "ChromeDevToolsHandshake\r\n"
remaining = handshake
while len(remaining):
sent = s.send(handshake)
remaining = remaining[sent:]
handshake_ack = s.recv(len(handshake))
if handshake_ack != handshake:
raise Exception('handshake failed')
def _begin_get_tab_list(self):
self._get_tab_list_pending = True
self._session.request({"Tool":"DevToolsService"}, json.dumps({"command" : "list_tabs"}), self._finish_get_tab_list)
def _finish_get_tab_list(self, headers, content):
self._get_tab_list_pending = False
resp = json.loads(content)
print resp
def _on_close(self):
log1("chrome connection was closed. chrome processes won't be available.")
self._session = None
if __name__ == "__main__":
set_loglevel(2)
def init(*args):
try:
be = ChromeTabFinder(*args)
except:
import traceback; traceback.print_exc();
MessageLoop.quit()
# for chrome, launch with chrome --remote-shell-port
import sys
MessageLoop.add_message(init, "localhost", int(sys.argv[1]))
# MessageLoop.add_message(init, "localhost", 5858)
MessageLoop.run_no_gtk(lambda: False)
print "main done"
|
<commit_before><commit_msg>Make code that simply finds chrome tabs to attach to.<commit_after>
|
from util import *
import json
class ChromeTabFinder(object):
def __init__(self, host, port):
self._next_seq = 0
self._timer = Timer(250)
self._timer.tick.add_listener(self._tick)
self._host = host
self._port = port
self._session = None
self._get_tab_list_pending = False
self._tick()
def _tick(self):
if not self._session:
self._try_connect()
elif not self._get_tab_list_pending:
self._begin_get_tab_list()
def _try_connect(self):
try:
s = socket.socket()
s.connect((self._host, self._port))
self._do_handshake(s)
self._session = AsyncHTTPSession(s)
except:
self._session = None
log2("Could not connect to chrome on %s:%s", self._host, self._port)
@property
def chrome_found(self):
return self._session != None
def _do_handshake(self,s):
i = "ChromeDevToolsHandshake"
print len(i)
handshake = "ChromeDevToolsHandshake\r\n"
remaining = handshake
while len(remaining):
sent = s.send(handshake)
remaining = remaining[sent:]
handshake_ack = s.recv(len(handshake))
if handshake_ack != handshake:
raise Exception('handshake failed')
def _begin_get_tab_list(self):
self._get_tab_list_pending = True
self._session.request({"Tool":"DevToolsService"}, json.dumps({"command" : "list_tabs"}), self._finish_get_tab_list)
def _finish_get_tab_list(self, headers, content):
self._get_tab_list_pending = False
resp = json.loads(content)
print resp
def _on_close(self):
log1("chrome connection was closed. chrome processes won't be available.")
self._session = None
if __name__ == "__main__":
set_loglevel(2)
def init(*args):
try:
be = ChromeTabFinder(*args)
except:
import traceback; traceback.print_exc();
MessageLoop.quit()
# for chrome, launch with chrome --remote-shell-port
import sys
MessageLoop.add_message(init, "localhost", int(sys.argv[1]))
# MessageLoop.add_message(init, "localhost", 5858)
MessageLoop.run_no_gtk(lambda: False)
print "main done"
|
Make code that simply finds chrome tabs to attach to.from util import *
import json
class ChromeTabFinder(object):
def __init__(self, host, port):
self._next_seq = 0
self._timer = Timer(250)
self._timer.tick.add_listener(self._tick)
self._host = host
self._port = port
self._session = None
self._get_tab_list_pending = False
self._tick()
def _tick(self):
if not self._session:
self._try_connect()
elif not self._get_tab_list_pending:
self._begin_get_tab_list()
def _try_connect(self):
try:
s = socket.socket()
s.connect((self._host, self._port))
self._do_handshake(s)
self._session = AsyncHTTPSession(s)
except:
self._session = None
log2("Could not connect to chrome on %s:%s", self._host, self._port)
@property
def chrome_found(self):
return self._session != None
def _do_handshake(self,s):
i = "ChromeDevToolsHandshake"
print len(i)
handshake = "ChromeDevToolsHandshake\r\n"
remaining = handshake
while len(remaining):
sent = s.send(handshake)
remaining = remaining[sent:]
handshake_ack = s.recv(len(handshake))
if handshake_ack != handshake:
raise Exception('handshake failed')
def _begin_get_tab_list(self):
self._get_tab_list_pending = True
self._session.request({"Tool":"DevToolsService"}, json.dumps({"command" : "list_tabs"}), self._finish_get_tab_list)
def _finish_get_tab_list(self, headers, content):
self._get_tab_list_pending = False
resp = json.loads(content)
print resp
def _on_close(self):
log1("chrome connection was closed. chrome processes won't be available.")
self._session = None
if __name__ == "__main__":
set_loglevel(2)
def init(*args):
try:
be = ChromeTabFinder(*args)
except:
import traceback; traceback.print_exc();
MessageLoop.quit()
# for chrome, launch with chrome --remote-shell-port
import sys
MessageLoop.add_message(init, "localhost", int(sys.argv[1]))
# MessageLoop.add_message(init, "localhost", 5858)
MessageLoop.run_no_gtk(lambda: False)
print "main done"
|
<commit_before><commit_msg>Make code that simply finds chrome tabs to attach to.<commit_after>from util import *
import json
class ChromeTabFinder(object):
def __init__(self, host, port):
self._next_seq = 0
self._timer = Timer(250)
self._timer.tick.add_listener(self._tick)
self._host = host
self._port = port
self._session = None
self._get_tab_list_pending = False
self._tick()
def _tick(self):
if not self._session:
self._try_connect()
elif not self._get_tab_list_pending:
self._begin_get_tab_list()
def _try_connect(self):
try:
s = socket.socket()
s.connect((self._host, self._port))
self._do_handshake(s)
self._session = AsyncHTTPSession(s)
except:
self._session = None
log2("Could not connect to chrome on %s:%s", self._host, self._port)
@property
def chrome_found(self):
return self._session != None
def _do_handshake(self,s):
i = "ChromeDevToolsHandshake"
print len(i)
handshake = "ChromeDevToolsHandshake\r\n"
remaining = handshake
while len(remaining):
sent = s.send(handshake)
remaining = remaining[sent:]
handshake_ack = s.recv(len(handshake))
if handshake_ack != handshake:
raise Exception('handshake failed')
def _begin_get_tab_list(self):
self._get_tab_list_pending = True
self._session.request({"Tool":"DevToolsService"}, json.dumps({"command" : "list_tabs"}), self._finish_get_tab_list)
def _finish_get_tab_list(self, headers, content):
self._get_tab_list_pending = False
resp = json.loads(content)
print resp
def _on_close(self):
log1("chrome connection was closed. chrome processes won't be available.")
self._session = None
if __name__ == "__main__":
set_loglevel(2)
def init(*args):
try:
be = ChromeTabFinder(*args)
except:
import traceback; traceback.print_exc();
MessageLoop.quit()
# for chrome, launch with chrome --remote-shell-port
import sys
MessageLoop.add_message(init, "localhost", int(sys.argv[1]))
# MessageLoop.add_message(init, "localhost", 5858)
MessageLoop.run_no_gtk(lambda: False)
print "main done"
|
|
d2041e5b47d5eb239d55b71ef93c3ec3d468e508
|
etc/crates-graph.py
|
etc/crates-graph.py
|
#!/usr/bin/env python3
# Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import json
import os
import subprocess
import sys
def main(crate=None):
os.chdir(os.path.join(os.path.dirname(__file__), ".."))
meta = json.loads(subprocess.check_output(["cargo", "metadata", "--format-version", "1"]))
graph = {}
for package in meta["packages"]:
if package["source"] is None: # Lives in this repo
for dependency in package["dependencies"]:
if dependency["source"] is None: # Also lives in this repo
graph.setdefault(package["name"], []).append(dependency["name"])
if crate:
filtered = {}
seen = set()
def traverse(name):
if name not in seen:
seen.add(name)
for dependency in graph.get(name, []):
filtered.setdefault(name, []).append(dependency)
traverse(dependency)
traverse(crate)
else:
filtered = graph
print("// This is in Graphviz DOT format.")
print("// Use the 'dot' or 'xdot' tool to visualize.")
print('digraph "local crates" {')
for package, dependencies in filtered.items():
for dependency in dependencies:
print(' "%s" -> "%s";' % (package, dependency))
print("}")
if __name__ == "__main__":
sys.exit(main(*sys.argv[1:]))
|
Add a minimal alternative to `cargo graph`
|
Add a minimal alternative to `cargo graph`
I tried `cargo graph` and some of its successors,
but didn’t manage to make them produce what I wanted
(or in some cases make them work at all.)
This Python script reimplements similar functionality
based on parsing the (JSON) output of `cargo metadata`.
Graphviz graphs can become hard to read very quickly as the number of nodes grows.
Servo’s dependency graph is very large, so pruning as much as possible is important.
This only shows `path` dependencies (that have their source in this repo),
and can take a parameter to only show recursive dependencies of a given crate.
See https://github.com/servo/servo/issues/19422#issuecomment-617038366 for an example.
I find that `xdot` is best for visualization since it is interactive.
This script is not used by anything.
I am making this PR only so that we have it somewhere
in case it becomes useful again at some point.
|
Python
|
mpl-2.0
|
splav/servo,splav/servo,KiChjang/servo,splav/servo,KiChjang/servo,splav/servo,KiChjang/servo,KiChjang/servo,KiChjang/servo,KiChjang/servo,KiChjang/servo,splav/servo,splav/servo,splav/servo,KiChjang/servo,splav/servo,splav/servo,splav/servo,KiChjang/servo,KiChjang/servo
|
Add a minimal alternative to `cargo graph`
I tried `cargo graph` and some of its successors,
but didn’t manage to make them produce what I wanted
(or in some cases make them work at all.)
This Python script reimplements similar functionality
based on parsing the (JSON) output of `cargo metadata`.
Graphviz graphs can become hard to read very quickly as the number of nodes grows.
Servo’s dependency graph is very large, so pruning as much as possible is important.
This only shows `path` dependencies (that have their source in this repo),
and can take a parameter to only show recursive dependencies of a given crate.
See https://github.com/servo/servo/issues/19422#issuecomment-617038366 for an example.
I find that `xdot` is best for visualization since it is interactive.
This script is not used by anything.
I am making this PR only so that we have it somewhere
in case it becomes useful again at some point.
|
#!/usr/bin/env python3
# Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import json
import os
import subprocess
import sys
def main(crate=None):
os.chdir(os.path.join(os.path.dirname(__file__), ".."))
meta = json.loads(subprocess.check_output(["cargo", "metadata", "--format-version", "1"]))
graph = {}
for package in meta["packages"]:
if package["source"] is None: # Lives in this repo
for dependency in package["dependencies"]:
if dependency["source"] is None: # Also lives in this repo
graph.setdefault(package["name"], []).append(dependency["name"])
if crate:
filtered = {}
seen = set()
def traverse(name):
if name not in seen:
seen.add(name)
for dependency in graph.get(name, []):
filtered.setdefault(name, []).append(dependency)
traverse(dependency)
traverse(crate)
else:
filtered = graph
print("// This is in Graphviz DOT format.")
print("// Use the 'dot' or 'xdot' tool to visualize.")
print('digraph "local crates" {')
for package, dependencies in filtered.items():
for dependency in dependencies:
print(' "%s" -> "%s";' % (package, dependency))
print("}")
if __name__ == "__main__":
sys.exit(main(*sys.argv[1:]))
|
<commit_before><commit_msg>Add a minimal alternative to `cargo graph`
I tried `cargo graph` and some of its successors,
but didn’t manage to make them produce what I wanted
(or in some cases make them work at all.)
This Python script reimplements similar functionality
based on parsing the (JSON) output of `cargo metadata`.
Graphviz graphs can become hard to read very quickly as the number of nodes grows.
Servo’s dependency graph is very large, so pruning as much as possible is important.
This only shows `path` dependencies (that have their source in this repo),
and can take a parameter to only show recursive dependencies of a given crate.
See https://github.com/servo/servo/issues/19422#issuecomment-617038366 for an example.
I find that `xdot` is best for visualization since it is interactive.
This script is not used by anything.
I am making this PR only so that we have it somewhere
in case it becomes useful again at some point.<commit_after>
|
#!/usr/bin/env python3
# Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import json
import os
import subprocess
import sys
def main(crate=None):
os.chdir(os.path.join(os.path.dirname(__file__), ".."))
meta = json.loads(subprocess.check_output(["cargo", "metadata", "--format-version", "1"]))
graph = {}
for package in meta["packages"]:
if package["source"] is None: # Lives in this repo
for dependency in package["dependencies"]:
if dependency["source"] is None: # Also lives in this repo
graph.setdefault(package["name"], []).append(dependency["name"])
if crate:
filtered = {}
seen = set()
def traverse(name):
if name not in seen:
seen.add(name)
for dependency in graph.get(name, []):
filtered.setdefault(name, []).append(dependency)
traverse(dependency)
traverse(crate)
else:
filtered = graph
print("// This is in Graphviz DOT format.")
print("// Use the 'dot' or 'xdot' tool to visualize.")
print('digraph "local crates" {')
for package, dependencies in filtered.items():
for dependency in dependencies:
print(' "%s" -> "%s";' % (package, dependency))
print("}")
if __name__ == "__main__":
sys.exit(main(*sys.argv[1:]))
|
Add a minimal alternative to `cargo graph`
I tried `cargo graph` and some of its successors,
but didn’t manage to make them produce what I wanted
(or in some cases make them work at all.)
This Python script reimplements similar functionality
based on parsing the (JSON) output of `cargo metadata`.
Graphviz graphs can become hard to read very quickly as the number of nodes grows.
Servo’s dependency graph is very large, so pruning as much as possible is important.
This only shows `path` dependencies (that have their source in this repo),
and can take a parameter to only show recursive dependencies of a given crate.
See https://github.com/servo/servo/issues/19422#issuecomment-617038366 for an example.
I find that `xdot` is best for visualization since it is interactive.
This script is not used by anything.
I am making this PR only so that we have it somewhere
in case it becomes useful again at some point.#!/usr/bin/env python3
# Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import json
import os
import subprocess
import sys
def main(crate=None):
os.chdir(os.path.join(os.path.dirname(__file__), ".."))
meta = json.loads(subprocess.check_output(["cargo", "metadata", "--format-version", "1"]))
graph = {}
for package in meta["packages"]:
if package["source"] is None: # Lives in this repo
for dependency in package["dependencies"]:
if dependency["source"] is None: # Also lives in this repo
graph.setdefault(package["name"], []).append(dependency["name"])
if crate:
filtered = {}
seen = set()
def traverse(name):
if name not in seen:
seen.add(name)
for dependency in graph.get(name, []):
filtered.setdefault(name, []).append(dependency)
traverse(dependency)
traverse(crate)
else:
filtered = graph
print("// This is in Graphviz DOT format.")
print("// Use the 'dot' or 'xdot' tool to visualize.")
print('digraph "local crates" {')
for package, dependencies in filtered.items():
for dependency in dependencies:
print(' "%s" -> "%s";' % (package, dependency))
print("}")
if __name__ == "__main__":
sys.exit(main(*sys.argv[1:]))
|
<commit_before><commit_msg>Add a minimal alternative to `cargo graph`
I tried `cargo graph` and some of its successors,
but didn’t manage to make them produce what I wanted
(or in some cases make them work at all.)
This Python script reimplements similar functionality
based on parsing the (JSON) output of `cargo metadata`.
Graphviz graphs can become hard to read very quickly as the number of nodes grows.
Servo’s dependency graph is very large, so pruning as much as possible is important.
This only shows `path` dependencies (that have their source in this repo),
and can take a parameter to only show recursive dependencies of a given crate.
See https://github.com/servo/servo/issues/19422#issuecomment-617038366 for an example.
I find that `xdot` is best for visualization since it is interactive.
This script is not used by anything.
I am making this PR only so that we have it somewhere
in case it becomes useful again at some point.<commit_after>#!/usr/bin/env python3
# Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import json
import os
import subprocess
import sys
def main(crate=None):
os.chdir(os.path.join(os.path.dirname(__file__), ".."))
meta = json.loads(subprocess.check_output(["cargo", "metadata", "--format-version", "1"]))
graph = {}
for package in meta["packages"]:
if package["source"] is None: # Lives in this repo
for dependency in package["dependencies"]:
if dependency["source"] is None: # Also lives in this repo
graph.setdefault(package["name"], []).append(dependency["name"])
if crate:
filtered = {}
seen = set()
def traverse(name):
if name not in seen:
seen.add(name)
for dependency in graph.get(name, []):
filtered.setdefault(name, []).append(dependency)
traverse(dependency)
traverse(crate)
else:
filtered = graph
print("// This is in Graphviz DOT format.")
print("// Use the 'dot' or 'xdot' tool to visualize.")
print('digraph "local crates" {')
for package, dependencies in filtered.items():
for dependency in dependencies:
print(' "%s" -> "%s";' % (package, dependency))
print("}")
if __name__ == "__main__":
sys.exit(main(*sys.argv[1:]))
|
|
f2193d7b9b88e85d8e6385d9e6778f1e70a03c3f
|
peas-demo/plugins/pythonhello/pythonhello.py
|
peas-demo/plugins/pythonhello/pythonhello.py
|
# -*- coding: utf-8 -*-
# ex:set ts=4 et sw=4 ai:
import gobject
from gi.repository import Peas
from gi.repository import Gtk
LABEL_STRING="Python Says Hello!"
class PythonHelloPlugin(gobject.GObject, Peas.Activatable):
__gtype_name__ = 'PythonHelloPlugin'
def do_activate(self, window):
print "PythonHelloPlugin.do_activate", repr(window)
window._pythonhello_label = Gtk.Label()
window._pythonhello_label.set_text(LABEL_STRING)
window._pythonhello_label.show()
window.get_child().pack_start(window._pythonhello_label)
def do_deactivate(self, window):
print "PythonHelloPlugin.do_deactivate", repr(window)
window.get_child().remove(window._pythonhello_label)
window._pythonhello_label.destroy()
def do_update_state(self, window):
print "PythonHelloPlugin.do_update_state", repr(window)
|
# -*- coding: utf-8 -*-
# ex:set ts=4 et sw=4 ai:
import gobject
from gi.repository import Peas
from gi.repository import Gtk
LABEL_STRING="Python Says Hello!"
class PythonHelloPlugin(gobject.GObject, Peas.Activatable):
__gtype_name__ = 'PythonHelloPlugin'
def do_activate(self, window):
print "PythonHelloPlugin.do_activate", repr(window)
window._pythonhello_label = Gtk.Label()
window._pythonhello_label.set_text(LABEL_STRING)
window._pythonhello_label.show()
window.get_child().pack_start(window._pythonhello_label, True, True, 0)
def do_deactivate(self, window):
print "PythonHelloPlugin.do_deactivate", repr(window)
window.get_child().remove(window._pythonhello_label)
window._pythonhello_label.destroy()
def do_update_state(self, window):
print "PythonHelloPlugin.do_update_state", repr(window)
|
Fix the sample python plugin.
|
[Python] Fix the sample python plugin.
PyGI doesn't handle default values for introspected methods,
so we need to specify all the arguments for pack_start()
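A minimal standalone sketch of the API difference (separate from the plugin code, assuming GTK+ 3 through PyGI): the expand, fill and padding arguments of Gtk.Box.pack_start() have to be passed explicitly.
# Standalone illustration, assuming GTK+ 3 via GObject introspection.
from gi.repository import Gtk

box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
label = Gtk.Label()
label.set_text("hello")
# box.pack_start(label)               # PyGTK-style call relying on defaults; rejected here
box.pack_start(label, True, True, 0)  # expand=True, fill=True, padding=0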
|
Python
|
lgpl-2.1
|
gregier/libpeas,gregier/libpeas,Distrotech/libpeas,Distrotech/libpeas,gregier/libpeas,GNOME/libpeas,chergert/libpeas,chergert/libpeas,GNOME/libpeas,Distrotech/libpeas,gregier/libpeas,chergert/libpeas
|
# -*- coding: utf-8 -*-
# ex:set ts=4 et sw=4 ai:
import gobject
from gi.repository import Peas
from gi.repository import Gtk
LABEL_STRING="Python Says Hello!"
class PythonHelloPlugin(gobject.GObject, Peas.Activatable):
__gtype_name__ = 'PythonHelloPlugin'
def do_activate(self, window):
print "PythonHelloPlugin.do_activate", repr(window)
window._pythonhello_label = Gtk.Label()
window._pythonhello_label.set_text(LABEL_STRING)
window._pythonhello_label.show()
window.get_child().pack_start(window._pythonhello_label)
def do_deactivate(self, window):
print "PythonHelloPlugin.do_deactivate", repr(window)
window.get_child().remove(window._pythonhello_label)
window._pythonhello_label.destroy()
def do_update_state(self, window):
print "PythonHelloPlugin.do_update_state", repr(window)
[Python] Fix the sample python plugin.
PyGI doesn't handle default values for introspected methods,
so we need to specify all the arguments for pack_start()
|
# -*- coding: utf-8 -*-
# ex:set ts=4 et sw=4 ai:
import gobject
from gi.repository import Peas
from gi.repository import Gtk
LABEL_STRING="Python Says Hello!"
class PythonHelloPlugin(gobject.GObject, Peas.Activatable):
__gtype_name__ = 'PythonHelloPlugin'
def do_activate(self, window):
print "PythonHelloPlugin.do_activate", repr(window)
window._pythonhello_label = Gtk.Label()
window._pythonhello_label.set_text(LABEL_STRING)
window._pythonhello_label.show()
window.get_child().pack_start(window._pythonhello_label, True, True, 0)
def do_deactivate(self, window):
print "PythonHelloPlugin.do_deactivate", repr(window)
window.get_child().remove(window._pythonhello_label)
window._pythonhello_label.destroy()
def do_update_state(self, window):
print "PythonHelloPlugin.do_update_state", repr(window)
|
<commit_before># -*- coding: utf-8 -*-
# ex:set ts=4 et sw=4 ai:
import gobject
from gi.repository import Peas
from gi.repository import Gtk
LABEL_STRING="Python Says Hello!"
class PythonHelloPlugin(gobject.GObject, Peas.Activatable):
__gtype_name__ = 'PythonHelloPlugin'
def do_activate(self, window):
print "PythonHelloPlugin.do_activate", repr(window)
window._pythonhello_label = Gtk.Label()
window._pythonhello_label.set_text(LABEL_STRING)
window._pythonhello_label.show()
window.get_child().pack_start(window._pythonhello_label)
def do_deactivate(self, window):
print "PythonHelloPlugin.do_deactivate", repr(window)
window.get_child().remove(window._pythonhello_label)
window._pythonhello_label.destroy()
def do_update_state(self, window):
print "PythonHelloPlugin.do_update_state", repr(window)
<commit_msg>[Python] Fix the sample python plugin.
PyGI doesn't handle default values for introspected methods,
so we need to specify all the arguments for pack_start()<commit_after>
|
# -*- coding: utf-8 -*-
# ex:set ts=4 et sw=4 ai:
import gobject
from gi.repository import Peas
from gi.repository import Gtk
LABEL_STRING="Python Says Hello!"
class PythonHelloPlugin(gobject.GObject, Peas.Activatable):
__gtype_name__ = 'PythonHelloPlugin'
def do_activate(self, window):
print "PythonHelloPlugin.do_activate", repr(window)
window._pythonhello_label = Gtk.Label()
window._pythonhello_label.set_text(LABEL_STRING)
window._pythonhello_label.show()
window.get_child().pack_start(window._pythonhello_label, True, True, 0)
def do_deactivate(self, window):
print "PythonHelloPlugin.do_deactivate", repr(window)
window.get_child().remove(window._pythonhello_label)
window._pythonhello_label.destroy()
def do_update_state(self, window):
print "PythonHelloPlugin.do_update_state", repr(window)
|
# -*- coding: utf-8 -*-
# ex:set ts=4 et sw=4 ai:
import gobject
from gi.repository import Peas
from gi.repository import Gtk
LABEL_STRING="Python Says Hello!"
class PythonHelloPlugin(gobject.GObject, Peas.Activatable):
__gtype_name__ = 'PythonHelloPlugin'
def do_activate(self, window):
print "PythonHelloPlugin.do_activate", repr(window)
window._pythonhello_label = Gtk.Label()
window._pythonhello_label.set_text(LABEL_STRING)
window._pythonhello_label.show()
window.get_child().pack_start(window._pythonhello_label)
def do_deactivate(self, window):
print "PythonHelloPlugin.do_deactivate", repr(window)
window.get_child().remove(window._pythonhello_label)
window._pythonhello_label.destroy()
def do_update_state(self, window):
print "PythonHelloPlugin.do_update_state", repr(window)
[Python] Fix the sample python plugin.
PyGI doesn't handle default values for introspected methods,
so we need to specify all the arguments for pack_start()# -*- coding: utf-8 -*-
# ex:set ts=4 et sw=4 ai:
import gobject
from gi.repository import Peas
from gi.repository import Gtk
LABEL_STRING="Python Says Hello!"
class PythonHelloPlugin(gobject.GObject, Peas.Activatable):
__gtype_name__ = 'PythonHelloPlugin'
def do_activate(self, window):
print "PythonHelloPlugin.do_activate", repr(window)
window._pythonhello_label = Gtk.Label()
window._pythonhello_label.set_text(LABEL_STRING)
window._pythonhello_label.show()
window.get_child().pack_start(window._pythonhello_label, True, True, 0)
def do_deactivate(self, window):
print "PythonHelloPlugin.do_deactivate", repr(window)
window.get_child().remove(window._pythonhello_label)
window._pythonhello_label.destroy()
def do_update_state(self, window):
print "PythonHelloPlugin.do_update_state", repr(window)
|
<commit_before># -*- coding: utf-8 -*-
# ex:set ts=4 et sw=4 ai:
import gobject
from gi.repository import Peas
from gi.repository import Gtk
LABEL_STRING="Python Says Hello!"
class PythonHelloPlugin(gobject.GObject, Peas.Activatable):
__gtype_name__ = 'PythonHelloPlugin'
def do_activate(self, window):
print "PythonHelloPlugin.do_activate", repr(window)
window._pythonhello_label = Gtk.Label()
window._pythonhello_label.set_text(LABEL_STRING)
window._pythonhello_label.show()
window.get_child().pack_start(window._pythonhello_label)
def do_deactivate(self, window):
print "PythonHelloPlugin.do_deactivate", repr(window)
window.get_child().remove(window._pythonhello_label)
window._pythonhello_label.destroy()
def do_update_state(self, window):
print "PythonHelloPlugin.do_update_state", repr(window)
<commit_msg>[Python] Fix the sample python plugin.
PyGI doesn't handle default values for introspected methods,
so we need to specify all the arguments for pack_start()<commit_after># -*- coding: utf-8 -*-
# ex:set ts=4 et sw=4 ai:
import gobject
from gi.repository import Peas
from gi.repository import Gtk
LABEL_STRING="Python Says Hello!"
class PythonHelloPlugin(gobject.GObject, Peas.Activatable):
__gtype_name__ = 'PythonHelloPlugin'
def do_activate(self, window):
print "PythonHelloPlugin.do_activate", repr(window)
window._pythonhello_label = Gtk.Label()
window._pythonhello_label.set_text(LABEL_STRING)
window._pythonhello_label.show()
window.get_child().pack_start(window._pythonhello_label, True, True, 0)
def do_deactivate(self, window):
print "PythonHelloPlugin.do_deactivate", repr(window)
window.get_child().remove(window._pythonhello_label)
window._pythonhello_label.destroy()
def do_update_state(self, window):
print "PythonHelloPlugin.do_update_state", repr(window)
|
5068efc63026cb55d521e265b4d263d584594dbc
|
elpiwear/proximity_warning.py
|
elpiwear/proximity_warning.py
|
import Edison.i2c as I2C
import sharp2y0a21
import ads1015
import time
import thread
class proximity_warning:
def __init__(self, sensorid, calibration, sensing_freq):
self.sensing_freq = sensing_freq
self.warning = []
adc = ads1015.ads1015(I2C.i2c(1,0x48))
adc.setchannel(sensorid, True)
adc.setdatarate(ads1015.ADS1015_DR_128SPS)
self.sensor = sharp2y0a21.sharp2y0a21(adc)
self.sensor.loadcalibration(calibration)
def add_warning(self, distance, warning):
self.warning.append({'distance':distance, 'warning':warning})
self.warning = sorted(self.warning, key=lambda k: k['distance'])
def start(self):
self.stop_flag = False
self.thread = thread.start_new_thread( self.sensing_thread, () )
def stop(self):
self.stop_flag = True
def detect_warning(self, distance):
for warn in self.warnings:
if distance < warn['distance']:
return warn['warning']
return None
def sensing_thread(self):
while not self.stop_flag:
dist = self.sensor.distance()
warn = self.detect_warning(dist)
if warn is not None:
warn(dist)
time.sleep(self.sensing_freq)
|
Add the proximity warning detector class
|
Add the proximity warning detector class
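A hypothetical usage sketch of the new class may help; the ADC channel, calibration file and timing values below are invented, and it assumes the warning list attribute read in detect_warning matches the one set in the constructor (the committed code stores to self.warning but iterates self.warnings).
# Illustrative only -- channel, calibration path and threshold are made up.
import time
from proximity_warning import proximity_warning

def warn_close(distance):
    print("Obstacle at %.1f cm" % distance)

detector = proximity_warning(sensorid=0, calibration='sharp.cal', sensing_freq=0.5)
detector.add_warning(30, warn_close)  # call warn_close when closer than 30
detector.start()                      # sensing_thread polls in the background
time.sleep(10)
detector.stop()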
|
Python
|
mit
|
fjacob21/pycon2015
|
Add the proximity warning detector class
|
import Edison.i2c as I2C
import sharp2y0a21
import ads1015
import time
import thread
class proximity_warning:
def __init__(self, sensorid, calibration, sensing_freq):
self.sensing_freq = sensing_freq
self.warning = []
adc = ads1015.ads1015(I2C.i2c(1,0x48))
adc.setchannel(sensorid, True)
adc.setdatarate(ads1015.ADS1015_DR_128SPS)
self.sensor = sharp2y0a21.sharp2y0a21(adc)
self.sensor.loadcalibration(calibration)
def add_warning(self, distance, warning):
self.warning.append({'distance':distance, 'warning':warning})
self.warning = sorted(self.warning, key=lambda k: k['distance'])
def start(self):
self.stop_flag = False
self.thread = thread.start_new_thread( self.sensing_thread, () )
def stop(self):
self.stop_flag = True
def detect_warning(self, distance):
for warn in self.warnings:
if distance < warn['distance']:
return warn['warning']
return None
def sensing_thread(self):
while not self.stop_flag:
dist = self.sensor.distance()
warn = self.detect_warning(dist)
if warn is not None:
warn(dist)
time.sleep(self.sensing_freq)
|
<commit_before><commit_msg>Add the proximity warning detector class<commit_after>
|
import Edison.i2c as I2C
import sharp2y0a21
import ads1015
import time
import thread
class proximity_warning:
def __init__(self, sensorid, calibration, sensing_freq):
self.sensing_freq = sensing_freq
self.warning = []
adc = ads1015.ads1015(I2C.i2c(1,0x48))
adc.setchannel(sensorid, True)
adc.setdatarate(ads1015.ADS1015_DR_128SPS)
self.sensor = sharp2y0a21.sharp2y0a21(adc)
self.sensor.loadcalibration(calibration)
def add_warning(self, distance, warning):
self.warning.append({'distance':distance, 'warning':warning})
self.warning = sorted(self.warning, key=lambda k: k['distance'])
def start(self):
self.stop_flag = False
self.thread = thread.start_new_thread( self.sensing_thread, () )
def stop(self):
self.stop_flag = True
def detect_warning(self, distance):
for warn in self.warnings:
if distance < warn['distance']:
return warn['warning']
return None
def sensing_thread(self):
while not self.stop_flag:
dist = self.sensor.distance()
warn = self.detect_warning(dist)
if warn is not None:
warn(dist)
time.sleep(self.sensing_freq)
|
Add the proximity warning detector classimport Edison.i2c as I2C
import sharp2y0a21
import ads1015
import time
import thread
class proximity_warning:
def __init__(self, sensorid, calibration, sensing_freq):
self.sensing_freq = sensing_freq
self.warning = []
adc = ads1015.ads1015(I2C.i2c(1,0x48))
adc.setchannel(sensorid, True)
adc.setdatarate(ads1015.ADS1015_DR_128SPS)
self.sensor = sharp2y0a21.sharp2y0a21(adc)
self.sensor.loadcalibration(calibration)
def add_warning(self, distance, warning):
self.warning.append({'distance':distance, 'warning':warning})
self.warning = sorted(self.warning, key=lambda k: k['distance'])
def start(self):
self.stop_flag = False
self.thread = thread.start_new_thread( self.sensing_thread, () )
def stop(self):
self.stop_flag = True
def detect_warning(self, distance):
for warn in self.warnings:
if distance < warn['distance']:
return warn['warning']
return None
def sensing_thread(self):
while not self.stop_flag:
dist = self.sensor.distance()
warn = self.detect_warning(dist)
if warn is not None:
warn(dist)
time.sleep(self.sensing_freq)
|
<commit_before><commit_msg>Add the proximity warning detector class<commit_after>import Edison.i2c as I2C
import sharp2y0a21
import ads1015
import time
import thread
class proximity_warning:
def __init__(self, sensorid, calibration, sensing_freq):
self.sensing_freq = sensing_freq
self.warning = []
adc = ads1015.ads1015(I2C.i2c(1,0x48))
adc.setchannel(sensorid, True)
adc.setdatarate(ads1015.ADS1015_DR_128SPS)
self.sensor = sharp2y0a21.sharp2y0a21(adc)
self.sensor.loadcalibration(calibration)
def add_warning(self, distance, warning):
self.warning.append({'distance':distance, 'warning':warning})
self.warning = sorted(self.warning, key=lambda k: k['distance'])
def start(self):
self.stop_flag = False
self.thread = thread.start_new_thread( self.sensing_thread, () )
def stop(self):
self.stop_flag = True
def detect_warning(self, distance):
for warn in self.warnings:
if distance < warn['distance']:
return warn['warning']
return None
def sensing_thread(self):
while not self.stop_flag:
dist = self.sensor.distance()
warn = self.detect_warning(dist)
if warn is not None:
warn(dist)
time.sleep(self.sensing_freq)
|
|
1fe81e1c086b0f8476dbc3b58f249de6b12b13af
|
tests/v6/test_spawn_derived_generators.py
|
tests/v6/test_spawn_derived_generators.py
|
import pytest
#from .exemplar_generators import EXEMPLAR_PRIMITIVE_GENERATORS
from .context import tohu
from tohu.v6.primitive_generators import Integer
from tohu.v6.derived_generators import Apply
def test_spawn_apply_generator():
"""
Test that an Apply generator can be spawned and the spawned version produces the same elements.
"""
num_items = 50
def add(x, y):
return x + y
a = Integer(10, 99)
b = Integer(10, 99)
g = Apply(add, a, b)
a.reset(seed=11111)
b.reset(seed=22222)
g.reset(seed=12345)
# Let g generate a few items
items_g_pre_spawn = list(g.generate(num_items))
# Spawn g and let both generate a few more items
h = g.spawn()
items_g_post_spawn = list(g.generate(num_items))
items_h_post_spawn = list(h.generate(num_items))
# Reset h (together with a and b) and re-generate the full list of items
a.reset(seed=11111)
b.reset(seed=22222)
h.reset(seed=12345)
items_h_all = list(h.generate(2*num_items))
# Verify that the items generated by h after spawning
# as well as the full sets of items are identical.
assert items_h_post_spawn == items_g_post_spawn
assert items_h_all == items_g_pre_spawn + items_g_post_spawn
|
Add test that an Apply generator can be spawned and the spawned version produces the same elements
|
Add test that an Apply generator can be spawned and the spawned version produces the same elements
|
Python
|
mit
|
maxalbert/tohu
|
Add test that an Apply generator can be spawned and the spawned version produces the same elements
|
import pytest
#from .exemplar_generators import EXEMPLAR_PRIMITIVE_GENERATORS
from .context import tohu
from tohu.v6.primitive_generators import Integer
from tohu.v6.derived_generators import Apply
def test_spawn_apply_generator():
"""
Test that an Apply generator can be spawned and the spawned version produces the same elements.
"""
num_items = 50
def add(x, y):
return x + y
a = Integer(10, 99)
b = Integer(10, 99)
g = Apply(add, a, b)
a.reset(seed=11111)
b.reset(seed=22222)
g.reset(seed=12345)
# Let g generate a few items
items_g_pre_spawn = list(g.generate(num_items))
# Spawn g and let both generate a few more items
h = g.spawn()
items_g_post_spawn = list(g.generate(num_items))
items_h_post_spawn = list(h.generate(num_items))
# Reset h (together with a and b) and re-generate the full list of items
a.reset(seed=11111)
b.reset(seed=22222)
h.reset(seed=12345)
items_h_all = list(h.generate(2*num_items))
# Verify that the items generated by h after spawning
# as well as the full sets of items are identical.
assert items_h_post_spawn == items_g_post_spawn
assert items_h_all == items_g_pre_spawn + items_g_post_spawn
|
<commit_before><commit_msg>Add test that an Apply generator can be spawned and the spawned version produces the same elements<commit_after>
|
import pytest
#from .exemplar_generators import EXEMPLAR_PRIMITIVE_GENERATORS
from .context import tohu
from tohu.v6.primitive_generators import Integer
from tohu.v6.derived_generators import Apply
def test_spawn_apply_generator():
"""
Test that an Apply generator can be spawned and the spawned version produces the same elements.
"""
num_items = 50
def add(x, y):
return x + y
a = Integer(10, 99)
b = Integer(10, 99)
g = Apply(add, a, b)
a.reset(seed=11111)
b.reset(seed=22222)
g.reset(seed=12345)
# Let g generate a few items
items_g_pre_spawn = list(g.generate(num_items))
# Spawn g and let both generate a few more items
h = g.spawn()
items_g_post_spawn = list(g.generate(num_items))
items_h_post_spawn = list(h.generate(num_items))
# Reset h (together with a and b) and re-generate the full list of items
a.reset(seed=11111)
b.reset(seed=22222)
h.reset(seed=12345)
items_h_all = list(h.generate(2*num_items))
# Verify that the items generated by h after spawning
# as well as the full sets of items are identical.
assert items_h_post_spawn == items_g_post_spawn
assert items_h_all == items_g_pre_spawn + items_g_post_spawn
|
Add test that an Apply generator can be spawned and the spawned version produces the same elementsimport pytest
#from .exemplar_generators import EXEMPLAR_PRIMITIVE_GENERATORS
from .context import tohu
from tohu.v6.primitive_generators import Integer
from tohu.v6.derived_generators import Apply
def test_spawn_apply_generator():
"""
Test that an Apply generator can be spawned and the spawned version produces the same elements.
"""
num_items = 50
def add(x, y):
return x + y
a = Integer(10, 99)
b = Integer(10, 99)
g = Apply(add, a, b)
a.reset(seed=11111)
b.reset(seed=22222)
g.reset(seed=12345)
# Let g generate a few items
items_g_pre_spawn = list(g.generate(num_items))
# Spawn g and let both generate a few more items
h = g.spawn()
items_g_post_spawn = list(g.generate(num_items))
items_h_post_spawn = list(h.generate(num_items))
# Reset h (together with a and b) and re-generate the full list of items
a.reset(seed=11111)
b.reset(seed=22222)
h.reset(seed=12345)
items_h_all = list(h.generate(2*num_items))
# Verify that the items generated by h after spawning
# as well as the full sets of items are identical.
assert items_h_post_spawn == items_g_post_spawn
assert items_h_all == items_g_pre_spawn + items_g_post_spawn
|
<commit_before><commit_msg>Add test that an Apply generator can be spawned and the spawned version produces the same elements<commit_after>import pytest
#from .exemplar_generators import EXEMPLAR_PRIMITIVE_GENERATORS
from .context import tohu
from tohu.v6.primitive_generators import Integer
from tohu.v6.derived_generators import Apply
def test_spawn_apply_generator():
"""
Test that an Apply generator can be spawned and the spawned version produces the same elements.
"""
num_items = 50
def add(x, y):
return x + y
a = Integer(10, 99)
b = Integer(10, 99)
g = Apply(add, a, b)
a.reset(seed=11111)
b.reset(seed=22222)
g.reset(seed=12345)
# Let g generate a few items
items_g_pre_spawn = list(g.generate(num_items))
# Spawn g and let both generate a few more items
h = g.spawn()
items_g_post_spawn = list(g.generate(num_items))
items_h_post_spawn = list(h.generate(num_items))
# Reset h (together with a and b) and re-generate the full list of items
a.reset(seed=11111)
b.reset(seed=22222)
h.reset(seed=12345)
items_h_all = list(h.generate(2*num_items))
# Verify that the items generated by h after spawning
# as well as the full sets of items are identical.
assert items_h_post_spawn == items_g_post_spawn
assert items_h_all == items_g_pre_spawn + items_g_post_spawn
|
|
4c3f4b5a19832d965714997652ad245ac8abf310
|
animal.py
|
animal.py
|
#!/usr/bin/python3
import sys
import json
TEXT_FILE_PATH = './text.json'
messages = None
def traverse(parent, path):
node = parent[path]
if (isinstance(node, str)):
if not ask_yes_no_question('{}{}'.format(messages.get('isItA'), node)):
animal = input(messages.get('itWas'))
question = input('{}{}{}{}: '.format(
messages.get('differ'),
animal,
messages.get('fromA'),
node
))
parent[path] = [question, animal, node]
print(messages.get('again'))
return
else:
if ask_yes_no_question(node[0]):
traverse(node, 1)
else:
traverse(node, 2)
def show_known_animals():
nodes_to_visit = [messages.get('data')]
result = set()
while nodes_to_visit:
node = nodes_to_visit.pop(0)
if isinstance(node[1], str):
result.add(node[1])
else:
nodes_to_visit.append(node[1])
if isinstance(node[2], str):
result.add(node[2])
else:
nodes_to_visit.append(node[2])
print(messages.get('known'), ', '.join(result))
def exit_game():
print(messages.get('exit'))
sys.exit()
def ask_yes_no_question(question):
while True:
answer = input('{} ? '.format(question)).upper()
if answer in ['Y', 'YES', 'TRUE']:
return True
elif answer in ['N', 'NO', 'FALSE']:
return False
def handle_top_command(answer):
if answer in ['L', 'LIST']:
show_known_animals()
elif answer in ['Y', 'YES']:
traverse(messages, 'data')
elif answer in ['N', 'NO']:
exit_game()
def start():
print(messages.get('start'))
while True:
answer = input(messages.get('mood'))
handle_top_command(answer.upper())
def load_text():
global messages
with open(TEXT_FILE_PATH) as f:
messages = json.load(f);
def main():
load_text()
start()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit()
|
Implement 'Guess The Animal' game in Python 3
|
Implement 'Guess The Animal' game in Python 3
Resolves #6
|
Python
|
mit
|
drom/animal,drom/animal,drom/animal,drom/animal,drom/animal,drom/animal
|
Implement 'Guess The Animal' game in Python 3
Resolves #6
|
#!/usr/bin/python3
import sys
import json
TEXT_FILE_PATH = './text.json'
messages = None
def traverse(parent, path):
node = parent[path]
if (isinstance(node, str)):
if not ask_yes_no_question('{}{}'.format(messages.get('isItA'), node)):
animal = input(messages.get('itWas'))
question = input('{}{}{}{}: '.format(
messages.get('differ'),
animal,
messages.get('fromA'),
node
))
parent[path] = [question, animal, node]
print(messages.get('again'))
return
else:
if ask_yes_no_question(node[0]):
traverse(node, 1)
else:
traverse(node, 2)
def show_known_animals():
nodes_to_visit = [messages.get('data')]
result = set()
while nodes_to_visit:
node = nodes_to_visit.pop(0)
if isinstance(node[1], str):
result.add(node[1])
else:
nodes_to_visit.append(node[1])
if isinstance(node[2], str):
result.add(node[2])
else:
nodes_to_visit.append(node[2])
print(messages.get('known'), ', '.join(result))
def exit_game():
print(messages.get('exit'))
sys.exit()
def ask_yes_no_question(question):
while True:
answer = input('{} ? '.format(question)).upper()
if answer in ['Y', 'YES', 'TRUE']:
return True
elif answer in ['N', 'NO', 'FALSE']:
return False
def handle_top_command(answer):
if answer in ['L', 'LIST']:
show_known_animals()
elif answer in ['Y', 'YES']:
traverse(messages, 'data')
elif answer in ['N', 'NO']:
exit_game()
def start():
print(messages.get('start'))
while True:
answer = input(messages.get('mood'))
handle_top_command(answer.upper())
def load_text():
global messages
with open(TEXT_FILE_PATH) as f:
messages = json.load(f);
def main():
load_text()
start()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit()
|
<commit_before><commit_msg>Implement 'Guess The Animal' game in Python 3
Resolves #6<commit_after>
|
#!/usr/bin/python3
import sys
import json
TEXT_FILE_PATH = './text.json'
messages = None
def traverse(parent, path):
node = parent[path]
if (isinstance(node, str)):
if not ask_yes_no_question('{}{}'.format(messages.get('isItA'), node)):
animal = input(messages.get('itWas'))
question = input('{}{}{}{}: '.format(
messages.get('differ'),
animal,
messages.get('fromA'),
node
))
parent[path] = [question, animal, node]
print(messages.get('again'))
return
else:
if ask_yes_no_question(node[0]):
traverse(node, 1)
else:
traverse(node, 2)
def show_known_animals():
nodes_to_visit = [messages.get('data')]
result = set()
while nodes_to_visit:
node = nodes_to_visit.pop(0)
if isinstance(node[1], str):
result.add(node[1])
else:
nodes_to_visit.append(node[1])
if isinstance(node[2], str):
result.add(node[2])
else:
nodes_to_visit.append(node[2])
print(messages.get('known'), ', '.join(result))
def exit_game():
print(messages.get('exit'))
sys.exit()
def ask_yes_no_question(question):
while True:
answer = input('{} ? '.format(question)).upper()
if answer in ['Y', 'YES', 'TRUE']:
return True
elif answer in ['N', 'NO', 'FALSE']:
return False
def handle_top_command(answer):
if answer in ['L', 'LIST']:
show_known_animals()
elif answer in ['Y', 'YES']:
traverse(messages, 'data')
elif answer in ['N', 'NO']:
exit_game()
def start():
print(messages.get('start'))
while True:
answer = input(messages.get('mood'))
handle_top_command(answer.upper())
def load_text():
global messages
with open(TEXT_FILE_PATH) as f:
messages = json.load(f);
def main():
load_text()
start()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit()
|
Implement 'Guess The Animal' game in Python 3
Resolves #6#!/usr/bin/python3
import sys
import json
TEXT_FILE_PATH = './text.json'
messages = None
def traverse(parent, path):
node = parent[path]
if (isinstance(node, str)):
if not ask_yes_no_question('{}{}'.format(messages.get('isItA'), node)):
animal = input(messages.get('itWas'))
question = input('{}{}{}{}: '.format(
messages.get('differ'),
animal,
messages.get('fromA'),
node
))
parent[path] = [question, animal, node]
print(messages.get('again'))
return
else:
if ask_yes_no_question(node[0]):
traverse(node, 1)
else:
traverse(node, 2)
def show_known_animals():
nodes_to_visit = [messages.get('data')]
result = set()
while nodes_to_visit:
node = nodes_to_visit.pop(0)
if isinstance(node[1], str):
result.add(node[1])
else:
nodes_to_visit.append(node[1])
if isinstance(node[2], str):
result.add(node[2])
else:
nodes_to_visit.append(node[2])
print(messages.get('known'), ', '.join(result))
def exit_game():
print(messages.get('exit'))
sys.exit()
def ask_yes_no_question(question):
while True:
answer = input('{} ? '.format(question)).upper()
if answer in ['Y', 'YES', 'TRUE']:
return True
elif answer in ['N', 'NO', 'FALSE']:
return False
def handle_top_command(answer):
if answer in ['L', 'LIST']:
show_known_animals()
elif answer in ['Y', 'YES']:
traverse(messages, 'data')
elif answer in ['N', 'NO']:
exit_game()
def start():
print(messages.get('start'))
while True:
answer = input(messages.get('mood'))
handle_top_command(answer.upper())
def load_text():
global messages
with open(TEXT_FILE_PATH) as f:
messages = json.load(f);
def main():
load_text()
start()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit()
|
<commit_before><commit_msg>Implement 'Guess The Animal' game in Python 3
Resolves #6<commit_after>#!/usr/bin/python3
import sys
import json
TEXT_FILE_PATH = './text.json'
messages = None
def traverse(parent, path):
node = parent[path]
if (isinstance(node, str)):
if not ask_yes_no_question('{}{}'.format(messages.get('isItA'), node)):
animal = input(messages.get('itWas'))
question = input('{}{}{}{}: '.format(
messages.get('differ'),
animal,
messages.get('fromA'),
node
))
parent[path] = [question, animal, node]
print(messages.get('again'))
return
else:
if ask_yes_no_question(node[0]):
traverse(node, 1)
else:
traverse(node, 2)
def show_known_animals():
nodes_to_visit = [messages.get('data')]
result = set()
while nodes_to_visit:
node = nodes_to_visit.pop(0)
if isinstance(node[1], str):
result.add(node[1])
else:
nodes_to_visit.append(node[1])
if isinstance(node[2], str):
result.add(node[2])
else:
nodes_to_visit.append(node[2])
print(messages.get('known'), ', '.join(result))
def exit_game():
print(messages.get('exit'))
sys.exit()
def ask_yes_no_question(question):
while True:
answer = input('{} ? '.format(question)).upper()
if answer in ['Y', 'YES', 'TRUE']:
return True
elif answer in ['N', 'NO', 'FALSE']:
return False
def handle_top_command(answer):
if answer in ['L', 'LIST']:
show_known_animals()
elif answer in ['Y', 'YES']:
traverse(messages, 'data')
elif answer in ['N', 'NO']:
exit_game()
def start():
print(messages.get('start'))
while True:
answer = input(messages.get('mood'))
handle_top_command(answer.upper())
def load_text():
global messages
with open(TEXT_FILE_PATH) as f:
messages = json.load(f);
def main():
load_text()
start()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit()
|
|
54fe88a3b8151c1a41ecd597ccf6a17db32d9af7
|
ooni/nettests/blocking/meek_fronted_requests.py
|
ooni/nettests/blocking/meek_fronted_requests.py
|
# -*- encoding: utf-8 -*-
#
# :licence: see LICENSE
from twisted.python import usage
from ooni.templates import httpt
from ooni.utils import log
class UsageOptions(usage.Options):
optParameters = [ ['ExpectedBody', 'B',
'I’m just a happy little web server.\n',
'Expected body content from GET response'],
['DomainName', 'D', None,
'Specify a single fronted DomainName to test.'],
['HostHeader', 'H', None,
'Specify "inside" Host Header to test.']
]
class meekTest(httpt.HTTPTest):
"""
Performs a HTTP GET request to a list of fronted domains with the Host
Header of the "inside" meek-server. The meek-server handles a GET request
and response with: "I’m just a happy little web server.\n".
The input file should be formatted as (one per line):
"DomainName:HostHeader"
Some default meek DomainName and HostHeader combinations:
www.google.com:meek-reflect.appspot.com
ajax.aspnetcdn.com:az668014.vo.msecnd.net
a0.awsstatic.com:d2zfqthxsdq309.cloudfront.net
"""
name = "meek fronted requests test"
version = "0.0.1"
usageOptions = UsageOptions
inputFile = ['file', 'f', None,
"File containing the DomainName:HostHeader combinations to\
be tested, one per line."]
requiresRoot = False
requiresTor = False
def setUp(self):
"""
Check for inputs.
"""
if self.input:
self.DomainName, self.header = self.input.split(':')
elif (self.localOptions['DomainName'] and
self.localOptions['HostHeader']):
self.DomainName = self.localOptions['DomainName']
self.header = self.localOptions['HostHeader']
else:
raise Exception("No input specified")
self.ExpectedBody = self.localOptions['ExpectedBody']
self.DomainName = 'https://' + self.DomainName
def test_meek_response(self):
"""
Detects if the fronted request is blocked.
"""
log.msg("Testing fronted domain:%s with Host Header:%s"
% (self.DomainName, self.header))
def process_body(body):
if self.ExpectedBody != body:
self.report['censored'] = True
else:
self.report['censored'] = False
headers = {}
headers['Host'] = [self.header]
return self.doRequest(self.DomainName, method="GET", headers=headers,
body_processor=process_body)
|
Add meek fronted requests test
|
Add meek fronted requests test
|
Python
|
bsd-2-clause
|
lordappsec/ooni-probe,juga0/ooni-probe,0xPoly/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,kdmurray91/ooni-probe,0xPoly/ooni-probe,Karthikeyan-kkk/ooni-probe,juga0/ooni-probe,kdmurray91/ooni-probe,kdmurray91/ooni-probe,lordappsec/ooni-probe,juga0/ooni-probe,0xPoly/ooni-probe,Karthikeyan-kkk/ooni-probe,juga0/ooni-probe,lordappsec/ooni-probe,Karthikeyan-kkk/ooni-probe,Karthikeyan-kkk/ooni-probe,lordappsec/ooni-probe
|
Add meek fronted requests test
|
# -*- encoding: utf-8 -*-
#
# :licence: see LICENSE
from twisted.python import usage
from ooni.templates import httpt
from ooni.utils import log
class UsageOptions(usage.Options):
optParameters = [ ['ExpectedBody', 'B',
'I’m just a happy little web server.\n',
'Expected body content from GET response'],
['DomainName', 'D', None,
'Specify a single fronted DomainName to test.'],
['HostHeader', 'H', None,
'Specify "inside" Host Header to test.']
]
class meekTest(httpt.HTTPTest):
"""
Performs a HTTP GET request to a list of fronted domains with the Host
Header of the "inside" meek-server. The meek-server handles a GET request
and response with: "I’m just a happy little web server.\n".
The input file should be formatted as (one per line):
"DomainName:HostHeader"
Some default meek DomainName and HostHeader combinations:
www.google.com:meek-reflect.appspot.com
ajax.aspnetcdn.com:az668014.vo.msecnd.net
a0.awsstatic.com:d2zfqthxsdq309.cloudfront.net
"""
name = "meek fronted requests test"
version = "0.0.1"
usageOptions = UsageOptions
inputFile = ['file', 'f', None,
"File containing the DomainName:HostHeader combinations to\
be tested, one per line."]
requiresRoot = False
requiresTor = False
def setUp(self):
"""
Check for inputs.
"""
if self.input:
self.DomainName, self.header = self.input.split(':')
elif (self.localOptions['DomainName'] and
self.localOptions['HostHeader']):
self.DomainName = self.localOptions['DomainName']
self.header = self.localOptions['HostHeader']
else:
raise Exception("No input specified")
self.ExpectedBody = self.localOptions['ExpectedBody']
self.DomainName = 'https://' + self.DomainName
def test_meek_response(self):
"""
Detects if the fronted request is blocked.
"""
log.msg("Testing fronted domain:%s with Host Header:%s"
% (self.DomainName, self.header))
def process_body(body):
if self.ExpectedBody != body:
self.report['censored'] = True
else:
self.report['censored'] = False
headers = {}
headers['Host'] = [self.header]
return self.doRequest(self.DomainName, method="GET", headers=headers,
body_processor=process_body)
|
<commit_before><commit_msg>Add meek fronted requests test<commit_after>
|
# -*- encoding: utf-8 -*-
#
# :licence: see LICENSE
from twisted.python import usage
from ooni.templates import httpt
from ooni.utils import log
class UsageOptions(usage.Options):
optParameters = [ ['ExpectedBody', 'B',
'I’m just a happy little web server.\n',
'Expected body content from GET response'],
['DomainName', 'D', None,
'Specify a single fronted DomainName to test.'],
['HostHeader', 'H', None,
'Specify "inside" Host Header to test.']
]
class meekTest(httpt.HTTPTest):
"""
Performs a HTTP GET request to a list of fronted domains with the Host
Header of the "inside" meek-server. The meek-server handles a GET request
and response with: "I’m just a happy little web server.\n".
The input file should be formatted as (one per line):
"DomainName:HostHeader"
Some default meek DomainName and HostHeader combinations:
www.google.com:meek-reflect.appspot.com
ajax.aspnetcdn.com:az668014.vo.msecnd.net
a0.awsstatic.com:d2zfqthxsdq309.cloudfront.net
"""
name = "meek fronted requests test"
version = "0.0.1"
usageOptions = UsageOptions
inputFile = ['file', 'f', None,
"File containing the DomainName:HostHeader combinations to\
be tested, one per line."]
requiresRoot = False
requiresTor = False
def setUp(self):
"""
Check for inputs.
"""
if self.input:
self.DomainName, self.header = self.input.split(':')
elif (self.localOptions['DomainName'] and
self.localOptions['HostHeader']):
self.DomainName = self.localOptions['DomainName']
self.header = self.localOptions['HostHeader']
else:
raise Exception("No input specified")
self.ExpectedBody = self.localOptions['ExpectedBody']
self.DomainName = 'https://' + self.DomainName
def test_meek_response(self):
"""
Detects if the fronted request is blocked.
"""
log.msg("Testing fronted domain:%s with Host Header:%s"
% (self.DomainName, self.header))
def process_body(body):
if self.ExpectedBody != body:
self.report['censored'] = True
else:
self.report['censored'] = False
headers = {}
headers['Host'] = [self.header]
return self.doRequest(self.DomainName, method="GET", headers=headers,
body_processor=process_body)
|
Add meek fronted requests test# -*- encoding: utf-8 -*-
#
# :licence: see LICENSE
from twisted.python import usage
from ooni.templates import httpt
from ooni.utils import log
class UsageOptions(usage.Options):
optParameters = [ ['ExpectedBody', 'B',
'I’m just a happy little web server.\n',
'Expected body content from GET response'],
['DomainName', 'D', None,
'Specify a single fronted DomainName to test.'],
['HostHeader', 'H', None,
'Specify "inside" Host Header to test.']
]
class meekTest(httpt.HTTPTest):
"""
Performs a HTTP GET request to a list of fronted domains with the Host
Header of the "inside" meek-server. The meek-server handles a GET request
and response with: "I’m just a happy little web server.\n".
The input file should be formatted as (one per line):
"DomainName:HostHeader"
Some default meek DomainName and HostHeader combinations:
www.google.com:meek-reflect.appspot.com
ajax.aspnetcdn.com:az668014.vo.msecnd.net
a0.awsstatic.com:d2zfqthxsdq309.cloudfront.net
"""
name = "meek fronted requests test"
version = "0.0.1"
usageOptions = UsageOptions
inputFile = ['file', 'f', None,
"File containing the DomainName:HostHeader combinations to\
be tested, one per line."]
requiresRoot = False
requiresTor = False
def setUp(self):
"""
Check for inputs.
"""
if self.input:
self.DomainName, self.header = self.input.split(':')
elif (self.localOptions['DomainName'] and
self.localOptions['HostHeader']):
self.DomainName = self.localOptions['DomainName']
self.header = self.localOptions['HostHeader']
else:
raise Exception("No input specified")
self.ExpectedBody = self.localOptions['ExpectedBody']
self.DomainName = 'https://' + self.DomainName
def test_meek_response(self):
"""
Detects if the fronted request is blocked.
"""
log.msg("Testing fronted domain:%s with Host Header:%s"
% (self.DomainName, self.header))
def process_body(body):
if self.ExpectedBody != body:
self.report['censored'] = True
else:
self.report['censored'] = False
headers = {}
headers['Host'] = [self.header]
return self.doRequest(self.DomainName, method="GET", headers=headers,
body_processor=process_body)
|
<commit_before><commit_msg>Add meek fronted requests test<commit_after># -*- encoding: utf-8 -*-
#
# :licence: see LICENSE
from twisted.python import usage
from ooni.templates import httpt
from ooni.utils import log
class UsageOptions(usage.Options):
optParameters = [ ['ExpectedBody', 'B',
'I’m just a happy little web server.\n',
'Expected body content from GET response'],
['DomainName', 'D', None,
'Specify a single fronted DomainName to test.'],
['HostHeader', 'H', None,
'Specify "inside" Host Header to test.']
]
class meekTest(httpt.HTTPTest):
"""
Performs a HTTP GET request to a list of fronted domains with the Host
Header of the "inside" meek-server. The meek-server handles a GET request
and response with: "I’m just a happy little web server.\n".
The input file should be formatted as (one per line):
"DomainName:HostHeader"
Some default meek DomainName and HostHeader combinations:
www.google.com:meek-reflect.appspot.com
ajax.aspnetcdn.com:az668014.vo.msecnd.net
a0.awsstatic.com:d2zfqthxsdq309.cloudfront.net
"""
name = "meek fronted requests test"
version = "0.0.1"
usageOptions = UsageOptions
inputFile = ['file', 'f', None,
"File containing the DomainName:HostHeader combinations to\
be tested, one per line."]
requiresRoot = False
requiresTor = False
def setUp(self):
"""
Check for inputs.
"""
if self.input:
self.DomainName, self.header = self.input.split(':')
elif (self.localOptions['DomainName'] and
self.localOptions['HostHeader']):
self.DomainName = self.localOptions['DomainName']
self.header = self.localOptions['HostHeader']
else:
raise Exception("No input specified")
self.ExpectedBody = self.localOptions['ExpectedBody']
self.DomainName = 'https://' + self.DomainName
def test_meek_response(self):
"""
Detects if the fronted request is blocked.
"""
log.msg("Testing fronted domain:%s with Host Header:%s"
% (self.DomainName, self.header))
def process_body(body):
if self.ExpectedBody != body:
self.report['censored'] = True
else:
self.report['censored'] = False
headers = {}
headers['Host'] = [self.header]
return self.doRequest(self.DomainName, method="GET", headers=headers,
body_processor=process_body)
|
|
08f2f0dce2669bb0afd471dbf51c9aacc553989c
|
tests/test_experiments/test_utils.py
|
tests/test_experiments/test_utils.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from unittest.mock import patch
import os
from experiments.utils import (
create_experiment_logs_path,
get_experiment_logs_path,
delete_experiment_logs,
get_experiment_outputs_path,
create_experiment_outputs_path,
delete_experiment_outputs,
)
from factories.factory_experiments import ExperimentFactory
from tests.utils import BaseTest
class TestExperimentUtils(BaseTest):
def setUp(self):
super().setUp()
with patch('experiments.tasks.build_experiment.apply_async') as _:
self.experiment = ExperimentFactory()
def test_experiment_logs_path_creation_deletion(self):
experiment_logs_path = get_experiment_logs_path(self.experiment.unique_name)
# Should be true, created by the signal
assert os.path.exists(experiment_logs_path) is True
delete_experiment_logs(self.experiment.unique_name)
assert os.path.exists(experiment_logs_path) is False
create_experiment_logs_path(self.experiment.unique_name)
assert os.path.exists(experiment_logs_path) is True
def test_experiment_outputs_path_creation_deletion(self):
experiment_outputs_path = get_experiment_outputs_path(self.experiment.unique_name)
assert os.path.exists(experiment_outputs_path) is False
create_experiment_outputs_path(self.experiment.unique_name)
assert os.path.exists(experiment_outputs_path) is True
delete_experiment_outputs(self.experiment.unique_name)
assert os.path.exists(experiment_outputs_path) is False
|
Add experiment utils function tests
|
Add experiment utils function tests
|
Python
|
apache-2.0
|
polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon
|
Add experiment utils function tests
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from unittest.mock import patch
import os
from experiments.utils import (
create_experiment_logs_path,
get_experiment_logs_path,
delete_experiment_logs,
get_experiment_outputs_path,
create_experiment_outputs_path,
delete_experiment_outputs,
)
from factories.factory_experiments import ExperimentFactory
from tests.utils import BaseTest
class TestExperimentUtils(BaseTest):
def setUp(self):
super().setUp()
with patch('experiments.tasks.build_experiment.apply_async') as _:
self.experiment = ExperimentFactory()
def test_experiment_logs_path_creation_deletion(self):
experiment_logs_path = get_experiment_logs_path(self.experiment.unique_name)
# Should be true, created by the signal
assert os.path.exists(experiment_logs_path) is True
delete_experiment_logs(self.experiment.unique_name)
assert os.path.exists(experiment_logs_path) is False
create_experiment_logs_path(self.experiment.unique_name)
assert os.path.exists(experiment_logs_path) is True
def test_experiment_outputs_path_creation_deletion(self):
experiment_outputs_path = get_experiment_outputs_path(self.experiment.unique_name)
assert os.path.exists(experiment_outputs_path) is False
create_experiment_outputs_path(self.experiment.unique_name)
assert os.path.exists(experiment_outputs_path) is True
delete_experiment_outputs(self.experiment.unique_name)
assert os.path.exists(experiment_outputs_path) is False
|
<commit_before><commit_msg>Add experiment utils function tests<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from unittest.mock import patch
import os
from experiments.utils import (
create_experiment_logs_path,
get_experiment_logs_path,
delete_experiment_logs,
get_experiment_outputs_path,
create_experiment_outputs_path,
delete_experiment_outputs,
)
from factories.factory_experiments import ExperimentFactory
from tests.utils import BaseTest
class TestExperimentUtils(BaseTest):
def setUp(self):
super().setUp()
with patch('experiments.tasks.build_experiment.apply_async') as _:
self.experiment = ExperimentFactory()
def test_experiment_logs_path_creation_deletion(self):
experiment_logs_path = get_experiment_logs_path(self.experiment.unique_name)
# Should be true, created by the signal
assert os.path.exists(experiment_logs_path) is True
delete_experiment_logs(self.experiment.unique_name)
assert os.path.exists(experiment_logs_path) is False
create_experiment_logs_path(self.experiment.unique_name)
assert os.path.exists(experiment_logs_path) is True
def test_experiment_outputs_path_creation_deletion(self):
experiment_outputs_path = get_experiment_outputs_path(self.experiment.unique_name)
assert os.path.exists(experiment_outputs_path) is False
create_experiment_outputs_path(self.experiment.unique_name)
assert os.path.exists(experiment_outputs_path) is True
delete_experiment_outputs(self.experiment.unique_name)
assert os.path.exists(experiment_outputs_path) is False
|
Add experiment utils function tests# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from unittest.mock import patch
import os
from experiments.utils import (
create_experiment_logs_path,
get_experiment_logs_path,
delete_experiment_logs,
get_experiment_outputs_path,
create_experiment_outputs_path,
delete_experiment_outputs,
)
from factories.factory_experiments import ExperimentFactory
from tests.utils import BaseTest
class TestExperimentUtils(BaseTest):
def setUp(self):
super().setUp()
with patch('experiments.tasks.build_experiment.apply_async') as _:
self.experiment = ExperimentFactory()
def test_experiment_logs_path_creation_deletion(self):
experiment_logs_path = get_experiment_logs_path(self.experiment.unique_name)
# Should be true, created by the signal
assert os.path.exists(experiment_logs_path) is True
delete_experiment_logs(self.experiment.unique_name)
assert os.path.exists(experiment_logs_path) is False
create_experiment_logs_path(self.experiment.unique_name)
assert os.path.exists(experiment_logs_path) is True
def test_experiment_outputs_path_creation_deletion(self):
experiment_outputs_path = get_experiment_outputs_path(self.experiment.unique_name)
assert os.path.exists(experiment_outputs_path) is False
create_experiment_outputs_path(self.experiment.unique_name)
assert os.path.exists(experiment_outputs_path) is True
delete_experiment_outputs(self.experiment.unique_name)
assert os.path.exists(experiment_outputs_path) is False
|
<commit_before><commit_msg>Add experiment utils function tests<commit_after># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from unittest.mock import patch
import os
from experiments.utils import (
create_experiment_logs_path,
get_experiment_logs_path,
delete_experiment_logs,
get_experiment_outputs_path,
create_experiment_outputs_path,
delete_experiment_outputs,
)
from factories.factory_experiments import ExperimentFactory
from tests.utils import BaseTest
class TestExperimentUtils(BaseTest):
def setUp(self):
super().setUp()
with patch('experiments.tasks.build_experiment.apply_async') as _:
self.experiment = ExperimentFactory()
def test_experiment_logs_path_creation_deletion(self):
experiment_logs_path = get_experiment_logs_path(self.experiment.unique_name)
# Should be true, created by the signal
assert os.path.exists(experiment_logs_path) is True
delete_experiment_logs(self.experiment.unique_name)
assert os.path.exists(experiment_logs_path) is False
create_experiment_logs_path(self.experiment.unique_name)
assert os.path.exists(experiment_logs_path) is True
def test_experiment_outputs_path_creation_deletion(self):
experiment_outputs_path = get_experiment_outputs_path(self.experiment.unique_name)
assert os.path.exists(experiment_outputs_path) is False
create_experiment_outputs_path(self.experiment.unique_name)
assert os.path.exists(experiment_outputs_path) is True
delete_experiment_outputs(self.experiment.unique_name)
assert os.path.exists(experiment_outputs_path) is False
|
|
20bd20f84a37f732f097628b01a3bce4f46ef022
|
find_dupes.py
|
find_dupes.py
|
#!/usr/bin/env python3
import json
import os
import random
scriptpath = os.path.dirname(__file__)
data_dir = os.path.join(scriptpath, 'data')
all_json = [f for f in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, f))]
quotes = []
for f in all_json:
filename = os.path.join(data_dir, f)
with open(filename) as json_data:
quotes += json.load(json_data)['data']
uniq_authors = { quote['author'] for quote in quotes}
uniq_quotes = { quote['quote'] for quote in quotes}
print('Unique quotes: {}, authors: {}'.format(len(uniq_quotes), len(uniq_authors)))
seen = set()
dupes = sorted([x for x in quotes if x['quote'] in seen or seen.add(x['quote'])], key=lambda x:x['quote'])
print(*dupes, sep='\n')
|
Add a utility script to print duplicates
|
Add a utility script to print duplicates
Add a utility script to print duplicates
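One detail worth spelling out is the set-based one-liner the script uses to collect duplicates; a tiny self-contained illustration of that idiom:
# set.add() returns None, so "x in seen or seen.add(x)" is truthy only for
# values that were already seen -- the comprehension keeps duplicates while
# filling the set as a side effect.
seen = set()
values = ['a', 'b', 'a', 'c', 'b']
dupes = [x for x in values if x in seen or seen.add(x)]
print(dupes)  # ['a', 'b']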
|
Python
|
mit
|
mubaris/motivate,mubaris/motivate
|
Add a utility script to print duplicates
Add a utility script to print duplicates
|
#!/usr/bin/env python3
import json
import os
import random
scriptpath = os.path.dirname(__file__)
data_dir = os.path.join(scriptpath, 'data')
all_json = [f for f in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, f))]
quotes = []
for f in all_json:
filename = os.path.join(data_dir, f)
with open(filename) as json_data:
quotes += json.load(json_data)['data']
uniq_authors = { quote['author'] for quote in quotes}
uniq_quotes = { quote['quote'] for quote in quotes}
print('Unique quotes: {}, authors: {}'.format(len(uniq_quotes), len(uniq_authors)))
seen = set()
dupes = sorted([x for x in quotes if x['quote'] in seen or seen.add(x['quote'])], key=lambda x:x['quote'])
print(*dupes, sep='\n')
|
<commit_before><commit_msg>Add a utility script to print duplicates
Add a utility script to print duplicates<commit_after>
|
#!/usr/bin/env python3
import json
import os
import random
scriptpath = os.path.dirname(__file__)
data_dir = os.path.join(scriptpath, 'data')
all_json = [f for f in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, f))]
quotes = []
for f in all_json:
filename = os.path.join(data_dir, f)
with open(filename) as json_data:
quotes += json.load(json_data)['data']
uniq_authors = { quote['author'] for quote in quotes}
uniq_quotes = { quote['quote'] for quote in quotes}
print('Unique quotes: {}, authors: {}'.format(len(uniq_quotes), len(uniq_authors)))
seen = set()
dupes = sorted([x for x in quotes if x['quote'] in seen or seen.add(x['quote'])], key=lambda x:x['quote'])
print(*dupes, sep='\n')
|
Add a utility script to print duplicates
Add a utility script to print duplicates#!/usr/bin/env python3
import json
import os
import random
scriptpath = os.path.dirname(__file__)
data_dir = os.path.join(scriptpath, 'data')
all_json = [f for f in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, f))]
quotes = []
for f in all_json:
filename = os.path.join(data_dir, f)
with open(filename) as json_data:
quotes += json.load(json_data)['data']
uniq_authors = { quote['author'] for quote in quotes}
uniq_quotes = { quote['quote'] for quote in quotes}
print('Unique quotes: {}, authors: {}'.format(len(uniq_quotes), len(uniq_authors)))
seen = set()
dupes = sorted([x for x in quotes if x['quote'] in seen or seen.add(x['quote'])], key=lambda x:x['quote'])
print(*dupes, sep='\n')
|
<commit_before><commit_msg>Add a utility script to print duplicates
Add a utility script to print duplicates<commit_after>#!/usr/bin/env python3
import json
import os
import random
scriptpath = os.path.dirname(__file__)
data_dir = os.path.join(scriptpath, 'data')
all_json = [f for f in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, f))]
quotes = []
for f in all_json:
filename = os.path.join(data_dir, f)
with open(filename) as json_data:
quotes += json.load(json_data)['data']
uniq_authors = { quote['author'] for quote in quotes}
uniq_quotes = { quote['quote'] for quote in quotes}
print('Unique quotes: {}, authors: {}'.format(len(uniq_quotes), len(uniq_authors)))
seen = set()
dupes = sorted([x for x in quotes if x['quote'] in seen or seen.add(x['quote'])], key=lambda x:x['quote'])
print(*dupes, sep='\n')
|
|
7f9eea2f981b8e852ce049aea1cf8a911363335d
|
mutual_information.py
|
mutual_information.py
|
from skimage.viewer import ImageViewer
from skimage import data, img_as_float
from skimage.viewer.plugins.plotplugin import PlotPlugin
from skimage.viewer.widgets import Slider
from skimage.exposure import rescale_intensity
from skimage.transform import rotate
import numpy as np
import matplotlib.pyplot as plt
class RotatedImageViewer(ImageViewer):
def __init__(self, image, **kwargs):
super(RotatedImageViewer, self).__init__(image, **kwargs)
slider_kwds = dict(value=0, low=0, high=5, update_on='release',
callback=self.update_angle, value_type='float')
self.slider = Slider('angle', **slider_kwds)
self.layout.addWidget(self.slider)
        self.original_image = image
def update_angle(self, name, angle):
self.image = rotate(self.original_image, angle)
self.histogram.draw(angle=angle)
class Histogram(PlotPlugin):
name = 'Histogram'
def __init__(self, original_viewer, **kwargs):
super(Histogram, self).__init__(height=400, **kwargs)
self.bins = np.linspace(0, 1, 100)
self.mpl_image = None
self.original_viewer = original_viewer
def attach(self, image_viewer):
super(Histogram, self).attach(image_viewer)
self.rotated_viewer = image_viewer
self.ax.set_title('Histogram')
self.ax.set_xlabel('Value in image 1')
self.ax.set_ylabel('Value in image 2')
self.draw(angle=0)
def draw(self, angle=0):
image1 = self.original_viewer.image
image2 = self.rotated_viewer.image
hist, x_edges, y_edges = np.histogram2d(image1.flatten(),
image2.flatten(),
self.bins, normed=True)
hist = np.log(1 + hist)
hist = rescale_intensity(hist, in_range=(0, 3))
if self.mpl_image is None:
self.mpl_image = self.ax.imshow(hist, extent=[0, 1, 0, 1],
cmap=plt.cm.gray)
else:
self.mpl_image.set_data(hist)
self.ax.figure.canvas.draw()
return hist
image = img_as_float(data.camera())
viewer = ImageViewer(image)
rotated_viewer = RotatedImageViewer(image)
histogram = Histogram(viewer)
rotated_viewer += histogram
rotated_viewer.histogram = histogram
super(ImageViewer, viewer).show()
rotated_viewer.show()
|
Add example of a 2-D histogram, illustrating mutual information.
|
Add example of a 2-D histogram, illustrating mutual information.
|
Python
|
bsd-3-clause
|
scikit-image/skimage-demos
|
Add example of a 2-D histogram, illustrating mutual information.
|
from skimage.viewer import ImageViewer
from skimage import data, img_as_float
from skimage.viewer.plugins.plotplugin import PlotPlugin
from skimage.viewer.widgets import Slider
from skimage.exposure import rescale_intensity
from skimage.transform import rotate
import numpy as np
import matplotlib.pyplot as plt
class RotatedImageViewer(ImageViewer):
def __init__(self, image, **kwargs):
super(RotatedImageViewer, self).__init__(image, **kwargs)
slider_kwds = dict(value=0, low=0, high=5, update_on='release',
callback=self.update_angle, value_type='float')
self.slider = Slider('angle', **slider_kwds)
self.layout.addWidget(self.slider)
        self.original_image = image
def update_angle(self, name, angle):
self.image = rotate(self.original_image, angle)
self.histogram.draw(angle=angle)
class Histogram(PlotPlugin):
name = 'Histogram'
def __init__(self, original_viewer, **kwargs):
super(Histogram, self).__init__(height=400, **kwargs)
self.bins = np.linspace(0, 1, 100)
self.mpl_image = None
self.original_viewer = original_viewer
def attach(self, image_viewer):
super(Histogram, self).attach(image_viewer)
self.rotated_viewer = image_viewer
self.ax.set_title('Histogram')
self.ax.set_xlabel('Value in image 1')
self.ax.set_ylabel('Value in image 2')
self.draw(angle=0)
def draw(self, angle=0):
image1 = self.original_viewer.image
image2 = self.rotated_viewer.image
hist, x_edges, y_edges = np.histogram2d(image1.flatten(),
image2.flatten(),
self.bins, normed=True)
hist = np.log(1 + hist)
hist = rescale_intensity(hist, in_range=(0, 3))
if self.mpl_image is None:
self.mpl_image = self.ax.imshow(hist, extent=[0, 1, 0, 1],
cmap=plt.cm.gray)
else:
self.mpl_image.set_data(hist)
self.ax.figure.canvas.draw()
return hist
image = img_as_float(data.camera())
viewer = ImageViewer(image)
rotated_viewer = RotatedImageViewer(image)
histogram = Histogram(viewer)
rotated_viewer += histogram
rotated_viewer.histogram = histogram
super(ImageViewer, viewer).show()
rotated_viewer.show()
|
<commit_before><commit_msg>Add example of a 2-D histogram, illustrating mutual information.<commit_after>
|
from skimage.viewer import ImageViewer
from skimage import data, img_as_float
from skimage.viewer.plugins.plotplugin import PlotPlugin
from skimage.viewer.widgets import Slider
from skimage.exposure import rescale_intensity
from skimage.transform import rotate
import numpy as np
import matplotlib.pyplot as plt
class RotatedImageViewer(ImageViewer):
def __init__(self, image, **kwargs):
super(RotatedImageViewer, self).__init__(image, **kwargs)
slider_kwds = dict(value=0, low=0, high=5, update_on='release',
callback=self.update_angle, value_type='float')
self.slider = Slider('angle', **slider_kwds)
self.layout.addWidget(self.slider)
        self.original_image = image
def update_angle(self, name, angle):
self.image = rotate(self.original_image, angle)
self.histogram.draw(angle=angle)
class Histogram(PlotPlugin):
name = 'Histogram'
def __init__(self, original_viewer, **kwargs):
super(Histogram, self).__init__(height=400, **kwargs)
self.bins = np.linspace(0, 1, 100)
self.mpl_image = None
self.original_viewer = original_viewer
def attach(self, image_viewer):
super(Histogram, self).attach(image_viewer)
self.rotated_viewer = image_viewer
self.ax.set_title('Histogram')
self.ax.set_xlabel('Value in image 1')
self.ax.set_ylabel('Value in image 2')
self.draw(angle=0)
def draw(self, angle=0):
image1 = self.original_viewer.image
image2 = self.rotated_viewer.image
hist, x_edges, y_edges = np.histogram2d(image1.flatten(),
image2.flatten(),
self.bins, normed=True)
hist = np.log(1 + hist)
hist = rescale_intensity(hist, in_range=(0, 3))
if self.mpl_image is None:
self.mpl_image = self.ax.imshow(hist, extent=[0, 1, 0, 1],
cmap=plt.cm.gray)
else:
self.mpl_image.set_data(hist)
self.ax.figure.canvas.draw()
return hist
image = img_as_float(data.camera())
viewer = ImageViewer(image)
rotated_viewer = RotatedImageViewer(image)
histogram = Histogram(viewer)
rotated_viewer += histogram
rotated_viewer.histogram = histogram
super(ImageViewer, viewer).show()
rotated_viewer.show()
|
Add example of a 2-D histogram, illustrating mutual information.from skimage.viewer import ImageViewer
from skimage import data, img_as_float
from skimage.viewer.plugins.plotplugin import PlotPlugin
from skimage.viewer.widgets import Slider
from skimage.exposure import rescale_intensity
from skimage.transform import rotate
import numpy as np
import matplotlib.pyplot as plt
class RotatedImageViewer(ImageViewer):
def __init__(self, image, **kwargs):
super(RotatedImageViewer, self).__init__(image, **kwargs)
slider_kwds = dict(value=0, low=0, high=5, update_on='release',
callback=self.update_angle, value_type='float')
self.slider = Slider('angle', **slider_kwds)
self.layout.addWidget(self.slider)
        self.original_image = image
def update_angle(self, name, angle):
self.image = rotate(self.original_image, angle)
self.histogram.draw(angle=angle)
class Histogram(PlotPlugin):
name = 'Histogram'
def __init__(self, original_viewer, **kwargs):
super(Histogram, self).__init__(height=400, **kwargs)
self.bins = np.linspace(0, 1, 100)
self.mpl_image = None
self.original_viewer = original_viewer
def attach(self, image_viewer):
super(Histogram, self).attach(image_viewer)
self.rotated_viewer = image_viewer
self.ax.set_title('Histogram')
self.ax.set_xlabel('Value in image 1')
self.ax.set_ylabel('Value in image 2')
self.draw(angle=0)
def draw(self, angle=0):
image1 = self.original_viewer.image
image2 = self.rotated_viewer.image
hist, x_edges, y_edges = np.histogram2d(image1.flatten(),
image2.flatten(),
self.bins, normed=True)
hist = np.log(1 + hist)
hist = rescale_intensity(hist, in_range=(0, 3))
if self.mpl_image is None:
self.mpl_image = self.ax.imshow(hist, extent=[0, 1, 0, 1],
cmap=plt.cm.gray)
else:
self.mpl_image.set_data(hist)
self.ax.figure.canvas.draw()
return hist
image = img_as_float(data.camera())
viewer = ImageViewer(image)
rotated_viewer = RotatedImageViewer(image)
histogram = Histogram(viewer)
rotated_viewer += histogram
rotated_viewer.histogram = histogram
super(ImageViewer, viewer).show()
rotated_viewer.show()
|
<commit_before><commit_msg>Add example of a 2-D histogram, illustrating mutual information.<commit_after>from skimage.viewer import ImageViewer
from skimage import data, img_as_float
from skimage.viewer.plugins.plotplugin import PlotPlugin
from skimage.viewer.widgets import Slider
from skimage.exposure import rescale_intensity
from skimage.transform import rotate
import numpy as np
import matplotlib.pyplot as plt
class RotatedImageViewer(ImageViewer):
def __init__(self, image, **kwargs):
super(RotatedImageViewer, self).__init__(image, **kwargs)
slider_kwds = dict(value=0, low=0, high=5, update_on='release',
callback=self.update_angle, value_type='float')
self.slider = Slider('angle', **slider_kwds)
self.layout.addWidget(self.slider)
        self.original_image = image
def update_angle(self, name, angle):
self.image = rotate(self.original_image, angle)
self.histogram.draw(angle=angle)
class Histogram(PlotPlugin):
name = 'Histogram'
def __init__(self, original_viewer, **kwargs):
super(Histogram, self).__init__(height=400, **kwargs)
self.bins = np.linspace(0, 1, 100)
self.mpl_image = None
self.original_viewer = original_viewer
def attach(self, image_viewer):
super(Histogram, self).attach(image_viewer)
self.rotated_viewer = image_viewer
self.ax.set_title('Histogram')
self.ax.set_xlabel('Value in image 1')
self.ax.set_ylabel('Value in image 2')
self.draw(angle=0)
def draw(self, angle=0):
image1 = self.original_viewer.image
image2 = self.rotated_viewer.image
hist, x_edges, y_edges = np.histogram2d(image1.flatten(),
image2.flatten(),
self.bins, normed=True)
hist = np.log(1 + hist)
hist = rescale_intensity(hist, in_range=(0, 3))
if self.mpl_image is None:
self.mpl_image = self.ax.imshow(hist, extent=[0, 1, 0, 1],
cmap=plt.cm.gray)
else:
self.mpl_image.set_data(hist)
self.ax.figure.canvas.draw()
return hist
image = img_as_float(data.camera())
viewer = ImageViewer(image)
rotated_viewer = RotatedImageViewer(image)
histogram = Histogram(viewer)
rotated_viewer += histogram
rotated_viewer.histogram = histogram
super(ImageViewer, viewer).show()
rotated_viewer.show()
|
|
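The demo above only displays the joint intensity histogram; turning it into a mutual-information score takes one more step. A rough sketch of that step, assuming plain numpy and float images:

import numpy as np

def mutual_information(image1, image2, bins=100):
    # Joint intensity histogram, normalised into a probability table.
    hist, _, _ = np.histogram2d(image1.ravel(), image2.ravel(), bins=bins)
    pxy = hist / hist.sum()
    px = pxy.sum(axis=1, keepdims=True)  # marginal of image1
    py = pxy.sum(axis=0, keepdims=True)  # marginal of image2
    nz = pxy > 0                         # skip empty bins to avoid log(0)
    return float(np.sum(pxy[nz] * np.log(pxy[nz] / (px * py)[nz])))

# Expected behaviour: the score is maximal for identical images and drops as
# one image is rotated further away from the other, which is what the slider shows.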
ccda4d9c3e737161e0477c569e074ffb884a541c
|
src/sentry/api/authentication.py
|
src/sentry/api/authentication.py
|
from __future__ import absolute_import
from django.contrib.auth.models import AnonymousUser
from django.utils.crypto import constant_time_compare
from rest_framework.authentication import BasicAuthentication
from rest_framework.exceptions import AuthenticationFailed
from sentry.app import raven
from sentry.models import ApiKey, ProjectKey
class QuietBasicAuthentication(BasicAuthentication):
def authenticate_header(self, request):
return 'xBasic realm="%s"' % self.www_authenticate_realm
class ApiKeyAuthentication(QuietBasicAuthentication):
def authenticate_credentials(self, userid, password):
if password:
return
try:
key = ApiKey.objects.get_from_cache(key=userid)
except ApiKey.DoesNotExist:
return None
if not key.is_active:
raise AuthenticationFailed('Key is disabled')
raven.tags_context({
'api_key': userid,
})
return (AnonymousUser(), key)
class ProjectKeyAuthentication(QuietBasicAuthentication):
def authenticate_credentials(self, userid, password):
try:
pk = ProjectKey.objects.get_from_cache(public_key=userid)
except ProjectKey.DoesNotExist:
return None
if not constant_time_compare(pk.secret_key, password):
return None
if not pk.is_active:
raise AuthenticationFailed('Key is disabled')
if not pk.roles.api:
raise AuthenticationFailed('Key does not allow API access')
return (AnonymousUser(), pk)
|
from __future__ import absolute_import
from django.contrib.auth.models import AnonymousUser
from django.utils.crypto import constant_time_compare
from rest_framework.authentication import BasicAuthentication
from rest_framework.exceptions import AuthenticationFailed
from sentry.app import raven
from sentry.models import ApiKey, ProjectKey
class QuietBasicAuthentication(BasicAuthentication):
def authenticate_header(self, request):
return 'xBasic realm="%s"' % self.www_authenticate_realm
class ApiKeyAuthentication(QuietBasicAuthentication):
def authenticate_credentials(self, userid, password):
if password:
return
try:
key = ApiKey.objects.get_from_cache(key=userid)
except ApiKey.DoesNotExist:
raise AuthenticationFailed('API key is not valid')
if not key.is_active:
raise AuthenticationFailed('Key is disabled')
raven.tags_context({
'api_key': userid,
})
return (AnonymousUser(), key)
class ProjectKeyAuthentication(QuietBasicAuthentication):
def authenticate_credentials(self, userid, password):
try:
pk = ProjectKey.objects.get_from_cache(public_key=userid)
except ProjectKey.DoesNotExist:
return None
if not constant_time_compare(pk.secret_key, password):
return None
if not pk.is_active:
raise AuthenticationFailed('Key is disabled')
if not pk.roles.api:
raise AuthenticationFailed('Key does not allow API access')
return (AnonymousUser(), pk)
|
Raise hard error when API key is invalid
|
Raise hard error when API key is invalid
|
Python
|
bsd-3-clause
|
fotinakis/sentry,ifduyue/sentry,JamesMura/sentry,daevaorn/sentry,looker/sentry,gencer/sentry,JamesMura/sentry,fotinakis/sentry,fotinakis/sentry,gencer/sentry,zenefits/sentry,looker/sentry,jean/sentry,BuildingLink/sentry,mvaled/sentry,mvaled/sentry,mvaled/sentry,ifduyue/sentry,jean/sentry,nicholasserra/sentry,gencer/sentry,JamesMura/sentry,jean/sentry,beeftornado/sentry,looker/sentry,JackDanger/sentry,daevaorn/sentry,nicholasserra/sentry,looker/sentry,JackDanger/sentry,alexm92/sentry,alexm92/sentry,gencer/sentry,BuildingLink/sentry,zenefits/sentry,mitsuhiko/sentry,daevaorn/sentry,zenefits/sentry,JackDanger/sentry,nicholasserra/sentry,beeftornado/sentry,looker/sentry,gencer/sentry,JamesMura/sentry,jean/sentry,jean/sentry,ifduyue/sentry,mvaled/sentry,zenefits/sentry,daevaorn/sentry,BuildingLink/sentry,JamesMura/sentry,zenefits/sentry,fotinakis/sentry,BuildingLink/sentry,beeftornado/sentry,mvaled/sentry,alexm92/sentry,ifduyue/sentry,ifduyue/sentry,BuildingLink/sentry,mvaled/sentry,mitsuhiko/sentry
|
from __future__ import absolute_import
from django.contrib.auth.models import AnonymousUser
from django.utils.crypto import constant_time_compare
from rest_framework.authentication import BasicAuthentication
from rest_framework.exceptions import AuthenticationFailed
from sentry.app import raven
from sentry.models import ApiKey, ProjectKey
class QuietBasicAuthentication(BasicAuthentication):
def authenticate_header(self, request):
return 'xBasic realm="%s"' % self.www_authenticate_realm
class ApiKeyAuthentication(QuietBasicAuthentication):
def authenticate_credentials(self, userid, password):
if password:
return
try:
key = ApiKey.objects.get_from_cache(key=userid)
except ApiKey.DoesNotExist:
return None
if not key.is_active:
raise AuthenticationFailed('Key is disabled')
raven.tags_context({
'api_key': userid,
})
return (AnonymousUser(), key)
class ProjectKeyAuthentication(QuietBasicAuthentication):
def authenticate_credentials(self, userid, password):
try:
pk = ProjectKey.objects.get_from_cache(public_key=userid)
except ProjectKey.DoesNotExist:
return None
if not constant_time_compare(pk.secret_key, password):
return None
if not pk.is_active:
raise AuthenticationFailed('Key is disabled')
if not pk.roles.api:
raise AuthenticationFailed('Key does not allow API access')
return (AnonymousUser(), pk)
Raise hard error when API key is invalid
|
from __future__ import absolute_import
from django.contrib.auth.models import AnonymousUser
from django.utils.crypto import constant_time_compare
from rest_framework.authentication import BasicAuthentication
from rest_framework.exceptions import AuthenticationFailed
from sentry.app import raven
from sentry.models import ApiKey, ProjectKey
class QuietBasicAuthentication(BasicAuthentication):
def authenticate_header(self, request):
return 'xBasic realm="%s"' % self.www_authenticate_realm
class ApiKeyAuthentication(QuietBasicAuthentication):
def authenticate_credentials(self, userid, password):
if password:
return
try:
key = ApiKey.objects.get_from_cache(key=userid)
except ApiKey.DoesNotExist:
raise AuthenticationFailed('API key is not valid')
if not key.is_active:
raise AuthenticationFailed('Key is disabled')
raven.tags_context({
'api_key': userid,
})
return (AnonymousUser(), key)
class ProjectKeyAuthentication(QuietBasicAuthentication):
def authenticate_credentials(self, userid, password):
try:
pk = ProjectKey.objects.get_from_cache(public_key=userid)
except ProjectKey.DoesNotExist:
return None
if not constant_time_compare(pk.secret_key, password):
return None
if not pk.is_active:
raise AuthenticationFailed('Key is disabled')
if not pk.roles.api:
raise AuthenticationFailed('Key does not allow API access')
return (AnonymousUser(), pk)
|
<commit_before>from __future__ import absolute_import
from django.contrib.auth.models import AnonymousUser
from django.utils.crypto import constant_time_compare
from rest_framework.authentication import BasicAuthentication
from rest_framework.exceptions import AuthenticationFailed
from sentry.app import raven
from sentry.models import ApiKey, ProjectKey
class QuietBasicAuthentication(BasicAuthentication):
def authenticate_header(self, request):
return 'xBasic realm="%s"' % self.www_authenticate_realm
class ApiKeyAuthentication(QuietBasicAuthentication):
def authenticate_credentials(self, userid, password):
if password:
return
try:
key = ApiKey.objects.get_from_cache(key=userid)
except ApiKey.DoesNotExist:
return None
if not key.is_active:
raise AuthenticationFailed('Key is disabled')
raven.tags_context({
'api_key': userid,
})
return (AnonymousUser(), key)
class ProjectKeyAuthentication(QuietBasicAuthentication):
def authenticate_credentials(self, userid, password):
try:
pk = ProjectKey.objects.get_from_cache(public_key=userid)
except ProjectKey.DoesNotExist:
return None
if not constant_time_compare(pk.secret_key, password):
return None
if not pk.is_active:
raise AuthenticationFailed('Key is disabled')
if not pk.roles.api:
raise AuthenticationFailed('Key does not allow API access')
return (AnonymousUser(), pk)
<commit_msg>Raise hard error when API key is invalid<commit_after>
|
from __future__ import absolute_import
from django.contrib.auth.models import AnonymousUser
from django.utils.crypto import constant_time_compare
from rest_framework.authentication import BasicAuthentication
from rest_framework.exceptions import AuthenticationFailed
from sentry.app import raven
from sentry.models import ApiKey, ProjectKey
class QuietBasicAuthentication(BasicAuthentication):
def authenticate_header(self, request):
return 'xBasic realm="%s"' % self.www_authenticate_realm
class ApiKeyAuthentication(QuietBasicAuthentication):
def authenticate_credentials(self, userid, password):
if password:
return
try:
key = ApiKey.objects.get_from_cache(key=userid)
except ApiKey.DoesNotExist:
raise AuthenticationFailed('API key is not valid')
if not key.is_active:
raise AuthenticationFailed('Key is disabled')
raven.tags_context({
'api_key': userid,
})
return (AnonymousUser(), key)
class ProjectKeyAuthentication(QuietBasicAuthentication):
def authenticate_credentials(self, userid, password):
try:
pk = ProjectKey.objects.get_from_cache(public_key=userid)
except ProjectKey.DoesNotExist:
return None
if not constant_time_compare(pk.secret_key, password):
return None
if not pk.is_active:
raise AuthenticationFailed('Key is disabled')
if not pk.roles.api:
raise AuthenticationFailed('Key does not allow API access')
return (AnonymousUser(), pk)
|
from __future__ import absolute_import
from django.contrib.auth.models import AnonymousUser
from django.utils.crypto import constant_time_compare
from rest_framework.authentication import BasicAuthentication
from rest_framework.exceptions import AuthenticationFailed
from sentry.app import raven
from sentry.models import ApiKey, ProjectKey
class QuietBasicAuthentication(BasicAuthentication):
def authenticate_header(self, request):
return 'xBasic realm="%s"' % self.www_authenticate_realm
class ApiKeyAuthentication(QuietBasicAuthentication):
def authenticate_credentials(self, userid, password):
if password:
return
try:
key = ApiKey.objects.get_from_cache(key=userid)
except ApiKey.DoesNotExist:
return None
if not key.is_active:
raise AuthenticationFailed('Key is disabled')
raven.tags_context({
'api_key': userid,
})
return (AnonymousUser(), key)
class ProjectKeyAuthentication(QuietBasicAuthentication):
def authenticate_credentials(self, userid, password):
try:
pk = ProjectKey.objects.get_from_cache(public_key=userid)
except ProjectKey.DoesNotExist:
return None
if not constant_time_compare(pk.secret_key, password):
return None
if not pk.is_active:
raise AuthenticationFailed('Key is disabled')
if not pk.roles.api:
raise AuthenticationFailed('Key does not allow API access')
return (AnonymousUser(), pk)
Raise hard error when API key is invalidfrom __future__ import absolute_import
from django.contrib.auth.models import AnonymousUser
from django.utils.crypto import constant_time_compare
from rest_framework.authentication import BasicAuthentication
from rest_framework.exceptions import AuthenticationFailed
from sentry.app import raven
from sentry.models import ApiKey, ProjectKey
class QuietBasicAuthentication(BasicAuthentication):
def authenticate_header(self, request):
return 'xBasic realm="%s"' % self.www_authenticate_realm
class ApiKeyAuthentication(QuietBasicAuthentication):
def authenticate_credentials(self, userid, password):
if password:
return
try:
key = ApiKey.objects.get_from_cache(key=userid)
except ApiKey.DoesNotExist:
raise AuthenticationFailed('API key is not valid')
if not key.is_active:
raise AuthenticationFailed('Key is disabled')
raven.tags_context({
'api_key': userid,
})
return (AnonymousUser(), key)
class ProjectKeyAuthentication(QuietBasicAuthentication):
def authenticate_credentials(self, userid, password):
try:
pk = ProjectKey.objects.get_from_cache(public_key=userid)
except ProjectKey.DoesNotExist:
return None
if not constant_time_compare(pk.secret_key, password):
return None
if not pk.is_active:
raise AuthenticationFailed('Key is disabled')
if not pk.roles.api:
raise AuthenticationFailed('Key does not allow API access')
return (AnonymousUser(), pk)
|
<commit_before>from __future__ import absolute_import
from django.contrib.auth.models import AnonymousUser
from django.utils.crypto import constant_time_compare
from rest_framework.authentication import BasicAuthentication
from rest_framework.exceptions import AuthenticationFailed
from sentry.app import raven
from sentry.models import ApiKey, ProjectKey
class QuietBasicAuthentication(BasicAuthentication):
def authenticate_header(self, request):
return 'xBasic realm="%s"' % self.www_authenticate_realm
class ApiKeyAuthentication(QuietBasicAuthentication):
def authenticate_credentials(self, userid, password):
if password:
return
try:
key = ApiKey.objects.get_from_cache(key=userid)
except ApiKey.DoesNotExist:
return None
if not key.is_active:
raise AuthenticationFailed('Key is disabled')
raven.tags_context({
'api_key': userid,
})
return (AnonymousUser(), key)
class ProjectKeyAuthentication(QuietBasicAuthentication):
def authenticate_credentials(self, userid, password):
try:
pk = ProjectKey.objects.get_from_cache(public_key=userid)
except ProjectKey.DoesNotExist:
return None
if not constant_time_compare(pk.secret_key, password):
return None
if not pk.is_active:
raise AuthenticationFailed('Key is disabled')
if not pk.roles.api:
raise AuthenticationFailed('Key does not allow API access')
return (AnonymousUser(), pk)
<commit_msg>Raise hard error when API key is invalid<commit_after>from __future__ import absolute_import
from django.contrib.auth.models import AnonymousUser
from django.utils.crypto import constant_time_compare
from rest_framework.authentication import BasicAuthentication
from rest_framework.exceptions import AuthenticationFailed
from sentry.app import raven
from sentry.models import ApiKey, ProjectKey
class QuietBasicAuthentication(BasicAuthentication):
def authenticate_header(self, request):
return 'xBasic realm="%s"' % self.www_authenticate_realm
class ApiKeyAuthentication(QuietBasicAuthentication):
def authenticate_credentials(self, userid, password):
if password:
return
try:
key = ApiKey.objects.get_from_cache(key=userid)
except ApiKey.DoesNotExist:
raise AuthenticationFailed('API key is not valid')
if not key.is_active:
raise AuthenticationFailed('Key is disabled')
raven.tags_context({
'api_key': userid,
})
return (AnonymousUser(), key)
class ProjectKeyAuthentication(QuietBasicAuthentication):
def authenticate_credentials(self, userid, password):
try:
pk = ProjectKey.objects.get_from_cache(public_key=userid)
except ProjectKey.DoesNotExist:
return None
if not constant_time_compare(pk.secret_key, password):
return None
if not pk.is_active:
raise AuthenticationFailed('Key is disabled')
if not pk.roles.api:
raise AuthenticationFailed('Key does not allow API access')
return (AnonymousUser(), pk)
|
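In django-rest-framework, returning None from authenticate_credentials generally lets the next configured authenticator have a try, while raising AuthenticationFailed rejects the request outright, so with this change an unknown API key fails loudly instead of silently falling through. A hedged sketch of the header such a client would send (the key value is a placeholder, not a real key):

import base64

api_key = 'example-api-key'  # placeholder; real keys come from the ApiKey table
token = base64.b64encode('{0}:'.format(api_key).encode('ascii')).decode('ascii')
headers = {'Authorization': 'Basic {0}'.format(token)}  # key as username, empty password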
426b3cac7932289f24be7dc6dba6197a4d65740b
|
benchmarks/benchmarks/bench_scalar.py
|
benchmarks/benchmarks/bench_scalar.py
|
from .common import Benchmark, TYPES1
import numpy as np
class ScalarMath(Benchmark):
# Test scalar math, note that each of these is run repeatedly to offset
# the function call overhead to some degree.
params = [TYPES1]
param_names = ["type"]
def setup(self, typename):
self.num = np.dtype(typename).type(2)
def time_addition(self, typename):
n = self.num
res = n + n + n + n + n + n + n + n + n + n
def time_addition_pyint(self, typename):
n = self.num
res = n + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1
def time_multiplication(self, typename):
n = self.num
res = n * n * n * n * n * n * n * n * n * n
def time_power_of_two(self, typename):
n = self.num
res = n**2, n**2, n**2, n**2, n**2, n**2, n**2, n**2, n**2, n**2
def time_abs(self, typename):
n = self.num
res = abs(abs(abs(abs(abs(abs(abs(abs(abs(abs(n))))))))))
|
Add simple scalar math benchmarks
|
BENCH: Add simple scalar math benchmarks
|
Python
|
bsd-3-clause
|
mhvk/numpy,grlee77/numpy,simongibbons/numpy,mhvk/numpy,madphysicist/numpy,endolith/numpy,pbrod/numpy,grlee77/numpy,anntzer/numpy,anntzer/numpy,numpy/numpy,jakirkham/numpy,endolith/numpy,jakirkham/numpy,anntzer/numpy,simongibbons/numpy,mhvk/numpy,rgommers/numpy,abalkin/numpy,pbrod/numpy,mattip/numpy,seberg/numpy,simongibbons/numpy,pdebuyl/numpy,charris/numpy,mattip/numpy,mattip/numpy,seberg/numpy,rgommers/numpy,simongibbons/numpy,jakirkham/numpy,numpy/numpy,charris/numpy,rgommers/numpy,grlee77/numpy,madphysicist/numpy,endolith/numpy,madphysicist/numpy,madphysicist/numpy,seberg/numpy,charris/numpy,endolith/numpy,pdebuyl/numpy,pbrod/numpy,abalkin/numpy,pdebuyl/numpy,mhvk/numpy,numpy/numpy,pbrod/numpy,pbrod/numpy,seberg/numpy,abalkin/numpy,anntzer/numpy,charris/numpy,madphysicist/numpy,jakirkham/numpy,simongibbons/numpy,mhvk/numpy,jakirkham/numpy,numpy/numpy,rgommers/numpy,grlee77/numpy,grlee77/numpy,mattip/numpy,pdebuyl/numpy
|
BENCH: Add simple scalar math benchmarks
|
from .common import Benchmark, TYPES1
import numpy as np
class ScalarMath(Benchmark):
# Test scalar math, note that each of these is run repeatedly to offset
# the function call overhead to some degree.
params = [TYPES1]
param_names = ["type"]
def setup(self, typename):
self.num = np.dtype(typename).type(2)
def time_addition(self, typename):
n = self.num
res = n + n + n + n + n + n + n + n + n + n
def time_addition_pyint(self, typename):
n = self.num
res = n + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1
def time_multiplication(self, typename):
n = self.num
res = n * n * n * n * n * n * n * n * n * n
def time_power_of_two(self, typename):
n = self.num
res = n**2, n**2, n**2, n**2, n**2, n**2, n**2, n**2, n**2, n**2
def time_abs(self, typename):
n = self.num
res = abs(abs(abs(abs(abs(abs(abs(abs(abs(abs(n))))))))))
|
<commit_before><commit_msg>BENCH: Add simple scalar math benchmarks<commit_after>
|
from .common import Benchmark, TYPES1
import numpy as np
class ScalarMath(Benchmark):
# Test scalar math, note that each of these is run repeatedly to offset
# the function call overhead to some degree.
params = [TYPES1]
param_names = ["type"]
def setup(self, typename):
self.num = np.dtype(typename).type(2)
def time_addition(self, typename):
n = self.num
res = n + n + n + n + n + n + n + n + n + n
def time_addition_pyint(self, typename):
n = self.num
res = n + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1
def time_multiplication(self, typename):
n = self.num
res = n * n * n * n * n * n * n * n * n * n
def time_power_of_two(self, typename):
n = self.num
res = n**2, n**2, n**2, n**2, n**2, n**2, n**2, n**2, n**2, n**2
def time_abs(self, typename):
n = self.num
res = abs(abs(abs(abs(abs(abs(abs(abs(abs(abs(n))))))))))
|
BENCH: Add simple scalar math benchmarksfrom .common import Benchmark, TYPES1
import numpy as np
class ScalarMath(Benchmark):
# Test scalar math, note that each of these is run repeatedly to offset
# the function call overhead to some degree.
params = [TYPES1]
param_names = ["type"]
def setup(self, typename):
self.num = np.dtype(typename).type(2)
def time_addition(self, typename):
n = self.num
res = n + n + n + n + n + n + n + n + n + n
def time_addition_pyint(self, typename):
n = self.num
res = n + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1
def time_multiplication(self, typename):
n = self.num
res = n * n * n * n * n * n * n * n * n * n
def time_power_of_two(self, typename):
n = self.num
res = n**2, n**2, n**2, n**2, n**2, n**2, n**2, n**2, n**2, n**2
def time_abs(self, typename):
n = self.num
res = abs(abs(abs(abs(abs(abs(abs(abs(abs(abs(n))))))))))
|
<commit_before><commit_msg>BENCH: Add simple scalar math benchmarks<commit_after>from .common import Benchmark, TYPES1
import numpy as np
class ScalarMath(Benchmark):
# Test scalar math, note that each of these is run repeatedly to offset
# the function call overhead to some degree.
params = [TYPES1]
param_names = ["type"]
def setup(self, typename):
self.num = np.dtype(typename).type(2)
def time_addition(self, typename):
n = self.num
res = n + n + n + n + n + n + n + n + n + n
def time_addition_pyint(self, typename):
n = self.num
res = n + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1
def time_multiplication(self, typename):
n = self.num
res = n * n * n * n * n * n * n * n * n * n
def time_power_of_two(self, typename):
n = self.num
res = n**2, n**2, n**2, n**2, n**2, n**2, n**2, n**2, n**2, n**2
def time_abs(self, typename):
n = self.num
res = abs(abs(abs(abs(abs(abs(abs(abs(abs(abs(n))))))))))
|
|
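These classes follow the asv (airspeed velocity) convention used for numpy's benchmark suite: every time_* method is timed once per entry in params. A rough standalone spot check with timeit, assuming a hand-picked dtype list rather than the full TYPES1:

import timeit
import numpy as np

for typename in ['int16', 'int64', 'float32', 'float64', 'complex128']:
    n = np.dtype(typename).type(2)
    elapsed = timeit.timeit(lambda: n + n + n + n + n + n + n + n + n + n, number=100000)
    print('%10s: %.3f s for 100000 x 10 scalar additions' % (typename, elapsed))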
dda35fad4cb12b8b2fa4c54412f005d16d4a36ff
|
PDBMap.py
|
PDBMap.py
|
import gzip
from xml.parsers import expat
class PDBMap(object):
"""Parse PDBML file for mappings between indexes of auth_seq_num
+ pdb_ind_code (residue index in PDB) and seq_id (starts with 1
and includes unobserved residues).
"""
def __init__(self, pdb):
self.parser = expat.ParserCreate()
self.parser.StartElementHandler = self._start_handler
self.parser.EndElementHandler = self._end_handler
self.parser.CharacterDataHandler = self._data_handler
self.pdb = pdb.lower()
self.pdbml_path = '/usr2/pdb/data/structures/divided/XML-noatom/' + \
'%s/%s-noatom.xml.gz' % (self.pdb[1:3], self.pdb)
self._tag = None
self._chain = None
self._seq_idx = None
self._pdb_idx = None
self._inscode = ''
self._isordered = False
self.mapping = {}
with gzip.open(self.pdbml_path, 'rb') as fp:
self.parser.ParseFile(fp)
def _start_handler(self, name, attrs):
if name == 'PDBx:pdbx_poly_seq_scheme':
self._chain = attrs['asym_id']
self._seq_idx = attrs['seq_id']
if self._chain not in self.mapping:
self.mapping[self._chain] = {'seq': [], 'pdb': []}
elif self._chain and name == 'PDBx:auth_seq_num':
self._isordered = True
self._tag = 'auth_seq_num'
elif self._chain and name == 'PDBx:pdb_ins_code':
self._tag = 'pdb_ins_code'
def _data_handler(self, data):
if self._chain and self._tag == 'auth_seq_num':
self._pdb_idx = data
        elif self._chain and self._tag == 'pdb_ins_code':
self._inscode = data
def _end_handler(self, name):
if name == 'PDBx:pdbx_poly_seq_scheme':
if self._isordered:
self.mapping[self._chain]['seq'].append(self._seq_idx)
self.mapping[self._chain]['pdb'].append(self._pdb_idx +
self._inscode)
self._isordered = False
            self._seq_idx = None
self._pdb_idx = None
self._chain = None
self._inscode = ''
self._tag = None
def get_mapping(self, chain):
return self.mapping[chain]
# vim: ts=4 expandtab sw=4 sts=4 tw=78
|
Add parser to parse the mapping between seqid and author assigned PDB indexes from PDBML format.
|
Add parser to parse the mapping between seqid and author assigned PDB
indexes from PDBML format.
|
Python
|
mit
|
tryptochan/domain_utils
|
Add parser to parse the mapping between seqid and author assigned PDB
indexes from PDBML format.
|
import gzip
from xml.parsers import expat
class PDBMap(object):
"""Parse PDBML file for mappings between indexes of auth_seq_num
+ pdb_ind_code (residue index in PDB) and seq_id (starts with 1
and includes unobserved residues).
"""
def __init__(self, pdb):
self.parser = expat.ParserCreate()
self.parser.StartElementHandler = self._start_handler
self.parser.EndElementHandler = self._end_handler
self.parser.CharacterDataHandler = self._data_handler
self.pdb = pdb.lower()
self.pdbml_path = '/usr2/pdb/data/structures/divided/XML-noatom/' + \
'%s/%s-noatom.xml.gz' % (self.pdb[1:3], self.pdb)
self._tag = None
self._chain = None
self._seq_idx = None
self._pdb_idx = None
self._inscode = ''
self._isordered = False
self.mapping = {}
with gzip.open(self.pdbml_path, 'rb') as fp:
self.parser.ParseFile(fp)
def _start_handler(self, name, attrs):
if name == 'PDBx:pdbx_poly_seq_scheme':
self._chain = attrs['asym_id']
self._seq_idx = attrs['seq_id']
if self._chain not in self.mapping:
self.mapping[self._chain] = {'seq': [], 'pdb': []}
elif self._chain and name == 'PDBx:auth_seq_num':
self._isordered = True
self._tag = 'auth_seq_num'
elif self._chain and name == 'PDBx:pdb_ins_code':
self._tag = 'pdb_ins_code'
def _data_handler(self, data):
if self._chain and self._tag == 'auth_seq_num':
self._pdb_idx = data
        elif self._chain and self._tag == 'pdb_ins_code':
self._inscode = data
def _end_handler(self, name):
if name == 'PDBx:pdbx_poly_seq_scheme':
if self._isordered:
self.mapping[self._chain]['seq'].append(self._seq_idx)
self.mapping[self._chain]['pdb'].append(self._pdb_idx +
self._inscode)
self._isordered = False
            self._seq_idx = None
self._pdb_idx = None
self._chain = None
self._inscode = ''
self._tag = None
def get_mapping(self, chain):
return self.mapping[chain]
# vim: ts=4 expandtab sw=4 sts=4 tw=78
|
<commit_before><commit_msg>Add parser to parse the mapping between seqid and author assigned PDB
indexes from PDBML format.<commit_after>
|
import gzip
from xml.parsers import expat
class PDBMap(object):
"""Parse PDBML file for mappings between indexes of auth_seq_num
+ pdb_ind_code (residue index in PDB) and seq_id (starts with 1
and includes unobserved residues).
"""
def __init__(self, pdb):
self.parser = expat.ParserCreate()
self.parser.StartElementHandler = self._start_handler
self.parser.EndElementHandler = self._end_handler
self.parser.CharacterDataHandler = self._data_handler
self.pdb = pdb.lower()
self.pdbml_path = '/usr2/pdb/data/structures/divided/XML-noatom/' + \
'%s/%s-noatom.xml.gz' % (self.pdb[1:3], self.pdb)
self._tag = None
self._chain = None
self._seq_idx = None
self._pdb_idx = None
self._inscode = ''
self._isordered = False
self.mapping = {}
with gzip.open(self.pdbml_path, 'rb') as fp:
self.parser.ParseFile(fp)
def _start_handler(self, name, attrs):
if name == 'PDBx:pdbx_poly_seq_scheme':
self._chain = attrs['asym_id']
self._seq_idx = attrs['seq_id']
if self._chain not in self.mapping:
self.mapping[self._chain] = {'seq': [], 'pdb': []}
elif self._chain and name == 'PDBx:auth_seq_num':
self._isordered = True
self._tag = 'auth_seq_num'
elif self._chain and name == 'PDBx:pdb_ins_code':
self._tag = 'pdb_ins_code'
def _data_handler(self, data):
if self._chain and self._tag == 'auth_seq_num':
self._pdb_idx = data
        elif self._chain and self._tag == 'pdb_ins_code':
self._inscode = data
def _end_handler(self, name):
if name == 'PDBx:pdbx_poly_seq_scheme':
if self._isordered:
self.mapping[self._chain]['seq'].append(self._seq_idx)
self.mapping[self._chain]['pdb'].append(self._pdb_idx +
self._inscode)
self._isordered = False
            self._seq_idx = None
self._pdb_idx = None
self._chain = None
self._inscode = ''
self._tag = None
def get_mapping(self, chain):
return self.mapping[chain]
# vim: ts=4 expandtab sw=4 sts=4 tw=78
|
Add parser to parse the mapping between seqid and author assigned PDB
indexes from PDBML format.import gzip
from xml.parsers import expat
class PDBMap(object):
"""Parse PDBML file for mappings between indexes of auth_seq_num
+ pdb_ind_code (residue index in PDB) and seq_id (starts with 1
and includes unobserved residues).
"""
def __init__(self, pdb):
self.parser = expat.ParserCreate()
self.parser.StartElementHandler = self._start_handler
self.parser.EndElementHandler = self._end_handler
self.parser.CharacterDataHandler = self._data_handler
self.pdb = pdb.lower()
self.pdbml_path = '/usr2/pdb/data/structures/divided/XML-noatom/' + \
'%s/%s-noatom.xml.gz' % (self.pdb[1:3], self.pdb)
self._tag = None
self._chain = None
self._seq_idx = None
self._pdb_idx = None
self._inscode = ''
self._isordered = False
self.mapping = {}
with gzip.open(self.pdbml_path, 'rb') as fp:
self.parser.ParseFile(fp)
def _start_handler(self, name, attrs):
if name == 'PDBx:pdbx_poly_seq_scheme':
self._chain = attrs['asym_id']
self._seq_idx = attrs['seq_id']
if self._chain not in self.mapping:
self.mapping[self._chain] = {'seq': [], 'pdb': []}
elif self._chain and name == 'PDBx:auth_seq_num':
self._isordered = True
self._tag = 'auth_seq_num'
elif self._chain and name == 'PDBx:pdb_ins_code':
self._tag = 'pdb_ins_code'
def _data_handler(self, data):
if self._chain and self._tag == 'auth_seq_num':
self._pdb_idx = data
        elif self._chain and self._tag == 'pdb_ins_code':
self._inscode = data
def _end_handler(self, name):
if name == 'PDBx:pdbx_poly_seq_scheme':
if self._isordered:
self.mapping[self._chain]['seq'].append(self._seq_idx)
self.mapping[self._chain]['pdb'].append(self._pdb_idx +
self._inscode)
self._isordered = False
            self._seq_idx = None
self._pdb_idx = None
self._chain = None
self._inscode = ''
self._tag = None
def get_mapping(self, chain):
return self.mapping[chain]
# vim: ts=4 expandtab sw=4 sts=4 tw=78
|
<commit_before><commit_msg>Add parser to parse the mapping between seqid and author assigned PDB
indexes from PDBML format.<commit_after>import gzip
from xml.parsers import expat
class PDBMap(object):
"""Parse PDBML file for mappings between indexes of auth_seq_num
+ pdb_ind_code (residue index in PDB) and seq_id (starts with 1
and includes unobserved residues).
"""
def __init__(self, pdb):
self.parser = expat.ParserCreate()
self.parser.StartElementHandler = self._start_handler
self.parser.EndElementHandler = self._end_handler
self.parser.CharacterDataHandler = self._data_handler
self.pdb = pdb.lower()
self.pdbml_path = '/usr2/pdb/data/structures/divided/XML-noatom/' + \
'%s/%s-noatom.xml.gz' % (self.pdb[1:3], self.pdb)
self._tag = None
self._chain = None
self._seq_idx = None
self._pdb_idx = None
self._inscode = ''
self._isordered = False
self.mapping = {}
with gzip.open(self.pdbml_path, 'rb') as fp:
self.parser.ParseFile(fp)
def _start_handler(self, name, attrs):
if name == 'PDBx:pdbx_poly_seq_scheme':
self._chain = attrs['asym_id']
self._seq_idx = attrs['seq_id']
if self._chain not in self.mapping:
self.mapping[self._chain] = {'seq': [], 'pdb': []}
elif self._chain and name == 'PDBx:auth_seq_num':
self._isordered = True
self._tag = 'auth_seq_num'
elif self._chain and name == 'PDBx:pdb_ins_code':
self._tag = 'pdb_ins_code'
def _data_handler(self, data):
if self._chain and self._tag == 'auth_seq_num':
self._pdb_idx = data
        elif self._chain and self._tag == 'pdb_ins_code':
self._inscode = data
def _end_handler(self, name):
if name == 'PDBx:pdbx_poly_seq_scheme':
if self._isordered:
self.mapping[self._chain]['seq'].append(self._seq_idx)
self.mapping[self._chain]['pdb'].append(self._pdb_idx +
self._inscode)
self._isordered = False
self._seq_id = None
self._pdb_idx = None
self._chain = None
self._inscode = ''
self._tag = None
def get_mapping(self, chain):
return self.mapping[chain]
# vim: ts=4 expandtab sw=4 sts=4 tw=78
|
|
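A short usage sketch for the parser above; the PDB id and chain are placeholders, and the hard-coded /usr2/pdb mirror path has to exist on the machine running it:

# '1abc' and chain 'A' are placeholders for illustration only.
pdb_map = PDBMap('1abc')
mapping = pdb_map.get_mapping('A')
for seq_id, pdb_idx in zip(mapping['seq'], mapping['pdb']):
    # seq_id counts observed and unobserved residues; pdb_idx is auth_seq_num plus any insertion code.
    print(seq_id, pdb_idx)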
e3e6d2acfc94927f145b132c69597fb1883646a9
|
src/test/test_util.py
|
src/test/test_util.py
|
#! /usr/bin/python
import os
import sys
import unittest
sys.path.append(
os.path.join(
os.path.dirname(
os.path.realpath(
__file__)),
"..",
"lib"))
import util
def test_data_dir():
return os.path.join(
os.path.dirname(
os.path.realpath(
__file__)),
"test-data")
class UtilTestCase(unittest.TestCase):
def test__pass(self):
self.assertEqual(1, 1)
if __name__ == "__main__":
unittest.main()
|
Add skeleton unittest file for util.py .
|
Add skeleton unittest file for util.py .
|
Python
|
bsd-2-clause
|
rubasov/opensub-utils,rubasov/opensub-utils
|
Add skeleton unittest file for util.py .
|
#! /usr/bin/python
import os
import sys
import unittest
sys.path.append(
os.path.join(
os.path.dirname(
os.path.realpath(
__file__)),
"..",
"lib"))
import util
def test_data_dir():
return os.path.join(
os.path.dirname(
os.path.realpath(
__file__)),
"test-data")
class UtilTestCase(unittest.TestCase):
def test__pass(self):
self.assertEqual(1, 1)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add skeleton unittest file for util.py .<commit_after>
|
#! /usr/bin/python
import os
import sys
import unittest
sys.path.append(
os.path.join(
os.path.dirname(
os.path.realpath(
__file__)),
"..",
"lib"))
import util
def test_data_dir():
return os.path.join(
os.path.dirname(
os.path.realpath(
__file__)),
"test-data")
class UtilTestCase(unittest.TestCase):
def test__pass(self):
self.assertEqual(1, 1)
if __name__ == "__main__":
unittest.main()
|
Add skeleton unittest file for util.py .#! /usr/bin/python
import os
import sys
import unittest
sys.path.append(
os.path.join(
os.path.dirname(
os.path.realpath(
__file__)),
"..",
"lib"))
import util
def test_data_dir():
return os.path.join(
os.path.dirname(
os.path.realpath(
__file__)),
"test-data")
class UtilTestCase(unittest.TestCase):
def test__pass(self):
self.assertEqual(1, 1)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add skeleton unittest file for util.py .<commit_after>#! /usr/bin/python
import os
import sys
import unittest
sys.path.append(
os.path.join(
os.path.dirname(
os.path.realpath(
__file__)),
"..",
"lib"))
import util
def test_data_dir():
return os.path.join(
os.path.dirname(
os.path.realpath(
__file__)),
"test-data")
class UtilTestCase(unittest.TestCase):
def test__pass(self):
self.assertEqual(1, 1)
if __name__ == "__main__":
unittest.main()
|
|
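The skeleton above only asserts 1 == 1; a first concrete test can stick to names already defined in the file, since util.py's own API is not shown in this entry and nothing from it should be guessed at:

class TestDataDirTestCase(unittest.TestCase):
    def test__test_data_dir_is_a_directory(self):
        # Relies only on test_data_dir() defined above and the checked-in test-data folder.
        self.assertTrue(os.path.isdir(test_data_dir()))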
307e105ff075a4702169ca5b38e3f3307bfdad5a
|
tools/clean_submitted_plugins.py
|
tools/clean_submitted_plugins.py
|
"""Prints out submitted plugins that we don't already know about.
Also deletes any empty submissions.
"""
import json
import re
import rethinkdb as r
import db.github_repos
import db.util
r_conn = db.util.r_conn
_GITHUB_LINK_REGEX = re.compile(r'github.com/(.*?)/([^/?#]*)')
def delete_empty_submissions():
"""Delete submitted plugins that don't have enough info for us to act on.
Since we have no form validation, many submissions are just people who
click the "submit" button.
"""
deleted = r.table('submitted_plugins').filter({
'name': '',
'author': '',
'github-link': '',
'vimorg-link': '',
}).delete().run(r_conn())
print 'Deleted empty submissions:'
print deleted
def main():
delete_empty_submissions()
known_vimorg_plugins = []
known_github_plugins = []
new_plugins = []
unparseable_plugins = []
submissions = r.table('submitted_plugins').run(r_conn())
for submission in submissions:
if submission['vimorg-link']:
known_vimorg_plugins.append(submission)
continue
github_link = submission['github-link']
if github_link:
matches = _GITHUB_LINK_REGEX.findall(github_link)
if not matches:
unparseable_plugins.append(submission)
continue
repo_owner, repo_name = matches[0]
db_repo = db.github_repos.PluginGithubRepos.get_with_owner_repo(
repo_owner, repo_name)
if db_repo:
known_github_plugins.append(submission)
else:
new_plugins.append(submission)
print
print '%s submissions are known vim.org plugins' % len(
known_vimorg_plugins)
print '%s submissions are known github.com plugins' % len(
known_github_plugins)
print
print '%s submissions have unparseable github.com links:' % len(
unparseable_plugins)
for submission in unparseable_plugins:
print submission
print
print '%s submissions are new plugins:' % len(new_plugins)
for submission in new_plugins:
print json.dumps(submission, indent=2)
if __name__ == '__main__':
main()
|
Add a script to easily view new submitted plugins
|
Add a script to easily view new submitted plugins
Summary: That is, submitted plugins that we don't already know about and are non-empty.
Test Plan:
- imported the `submitted_plugins` table from prod
- ran `PYTHONPATH=. python tools/clean_submitted_plugins.py`
- saw the output looked reasonable
Reviewers: xymostech, spicyj
Reviewed By: spicyj
Differential Revision: http://phabricator.benalpert.com/D213
|
Python
|
mit
|
divad12/vim-awesome,vim-awesome/vim-awesome,divad12/vim-awesome,shaialon/vim-awesome,jonafato/vim-awesome,shaialon/vim-awesome,starcraftman/vim-awesome,jonafato/vim-awesome,vim-awesome/vim-awesome,vim-awesome/vim-awesome,divad12/vim-awesome,starcraftman/vim-awesome,shaialon/vim-awesome,jonafato/vim-awesome,starcraftman/vim-awesome,starcraftman/vim-awesome,vim-awesome/vim-awesome,shaialon/vim-awesome,vim-awesome/vim-awesome,jonafato/vim-awesome,divad12/vim-awesome
|
Add a script to easily view new submitted plugins
Summary: That is, submitted plugins that we don't already know about and are non-empty.
Test Plan:
- imported the `submitted_plugins` table from prod
- ran `PYTHONPATH=. python tools/clean_submitted_plugins.py`
- saw the output looked reasonable
Reviewers: xymostech, spicyj
Reviewed By: spicyj
Differential Revision: http://phabricator.benalpert.com/D213
|
"""Prints out submitted plugins that we don't already know about.
Also deletes any empty submissions.
"""
import json
import re
import rethinkdb as r
import db.github_repos
import db.util
r_conn = db.util.r_conn
_GITHUB_LINK_REGEX = re.compile(r'github.com/(.*?)/([^/?#]*)')
def delete_empty_submissions():
"""Delete submitted plugins that don't have enough info for us to act on.
Since we have no form validation, many submissions are just people who
click the "submit" button.
"""
deleted = r.table('submitted_plugins').filter({
'name': '',
'author': '',
'github-link': '',
'vimorg-link': '',
}).delete().run(r_conn())
print 'Deleted empty submissions:'
print deleted
def main():
delete_empty_submissions()
known_vimorg_plugins = []
known_github_plugins = []
new_plugins = []
unparseable_plugins = []
submissions = r.table('submitted_plugins').run(r_conn())
for submission in submissions:
if submission['vimorg-link']:
known_vimorg_plugins.append(submission)
continue
github_link = submission['github-link']
if github_link:
matches = _GITHUB_LINK_REGEX.findall(github_link)
if not matches:
unparseable_plugins.append(submission)
continue
repo_owner, repo_name = matches[0]
db_repo = db.github_repos.PluginGithubRepos.get_with_owner_repo(
repo_owner, repo_name)
if db_repo:
known_github_plugins.append(submission)
else:
new_plugins.append(submission)
print
print '%s submissions are known vim.org plugins' % len(
known_vimorg_plugins)
print '%s submissions are known github.com plugins' % len(
known_github_plugins)
print
print '%s submissions have unparseable github.com links:' % len(
unparseable_plugins)
for submission in unparseable_plugins:
print submission
print
print '%s submissions are new plugins:' % len(new_plugins)
for submission in new_plugins:
print json.dumps(submission, indent=2)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script to easily view new submitted plugins
Summary: That is, submitted plugins that we don't already know about and are non-empty.
Test Plan:
- imported the `submitted_plugins` table from prod
- ran `PYTHONPATH=. python tools/clean_submitted_plugins.py`
- saw the output looked reasonable
Reviewers: xymostech, spicyj
Reviewed By: spicyj
Differential Revision: http://phabricator.benalpert.com/D213<commit_after>
|
"""Prints out submitted plugins that we don't already know about.
Also deletes any empty submissions.
"""
import json
import re
import rethinkdb as r
import db.github_repos
import db.util
r_conn = db.util.r_conn
_GITHUB_LINK_REGEX = re.compile(r'github.com/(.*?)/([^/?#]*)')
def delete_empty_submissions():
"""Delete submitted plugins that don't have enough info for us to act on.
Since we have no form validation, many submissions are just people who
click the "submit" button.
"""
deleted = r.table('submitted_plugins').filter({
'name': '',
'author': '',
'github-link': '',
'vimorg-link': '',
}).delete().run(r_conn())
print 'Deleted empty submissions:'
print deleted
def main():
delete_empty_submissions()
known_vimorg_plugins = []
known_github_plugins = []
new_plugins = []
unparseable_plugins = []
submissions = r.table('submitted_plugins').run(r_conn())
for submission in submissions:
if submission['vimorg-link']:
known_vimorg_plugins.append(submission)
continue
github_link = submission['github-link']
if github_link:
matches = _GITHUB_LINK_REGEX.findall(github_link)
if not matches:
unparseable_plugins.append(submission)
continue
repo_owner, repo_name = matches[0]
db_repo = db.github_repos.PluginGithubRepos.get_with_owner_repo(
repo_owner, repo_name)
if db_repo:
known_github_plugins.append(submission)
else:
new_plugins.append(submission)
print
print '%s submissions are known vim.org plugins' % len(
known_vimorg_plugins)
print '%s submissions are known github.com plugins' % len(
known_github_plugins)
print
print '%s submissions have unparseable github.com links:' % len(
unparseable_plugins)
for submission in unparseable_plugins:
print submission
print
print '%s submissions are new plugins:' % len(new_plugins)
for submission in new_plugins:
print json.dumps(submission, indent=2)
if __name__ == '__main__':
main()
|
Add a script to easily view new submitted plugins
Summary: That is, submitted plugins that we don't already know about and are non-empty.
Test Plan:
- imported the `submitted_plugins` table from prod
- ran `PYTHONPATH=. python tools/clean_submitted_plugins.py`
- saw the output looked reasonable
Reviewers: xymostech, spicyj
Reviewed By: spicyj
Differential Revision: http://phabricator.benalpert.com/D213"""Prints out submitted plugins that we don't already know about.
Also deletes any empty submissions.
"""
import json
import re
import rethinkdb as r
import db.github_repos
import db.util
r_conn = db.util.r_conn
_GITHUB_LINK_REGEX = re.compile(r'github.com/(.*?)/([^/?#]*)')
def delete_empty_submissions():
"""Delete submitted plugins that don't have enough info for us to act on.
Since we have no form validation, many submissions are just people who
click the "submit" button.
"""
deleted = r.table('submitted_plugins').filter({
'name': '',
'author': '',
'github-link': '',
'vimorg-link': '',
}).delete().run(r_conn())
print 'Deleted empty submissions:'
print deleted
def main():
delete_empty_submissions()
known_vimorg_plugins = []
known_github_plugins = []
new_plugins = []
unparseable_plugins = []
submissions = r.table('submitted_plugins').run(r_conn())
for submission in submissions:
if submission['vimorg-link']:
known_vimorg_plugins.append(submission)
continue
github_link = submission['github-link']
if github_link:
matches = _GITHUB_LINK_REGEX.findall(github_link)
if not matches:
unparseable_plugins.append(submission)
continue
repo_owner, repo_name = matches[0]
db_repo = db.github_repos.PluginGithubRepos.get_with_owner_repo(
repo_owner, repo_name)
if db_repo:
known_github_plugins.append(submission)
else:
new_plugins.append(submission)
print
print '%s submissions are known vim.org plugins' % len(
known_vimorg_plugins)
print '%s submissions are known github.com plugins' % len(
known_github_plugins)
print
print '%s submissions have unparseable github.com links:' % len(
unparseable_plugins)
for submission in unparseable_plugins:
print submission
print
print '%s submissions are new plugins:' % len(new_plugins)
for submission in new_plugins:
print json.dumps(submission, indent=2)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script to easily view new submitted plugins
Summary: That is, submitted plugins that we don't already know about and are non-empty.
Test Plan:
- imported the `submitted_plugins` table from prod
- ran `PYTHONPATH=. python tools/clean_submitted_plugins.py`
- saw the output looked reasonable
Reviewers: xymostech, spicyj
Reviewed By: spicyj
Differential Revision: http://phabricator.benalpert.com/D213<commit_after>"""Prints out submitted plugins that we don't already know about.
Also deletes any empty submissions.
"""
import json
import re
import rethinkdb as r
import db.github_repos
import db.util
r_conn = db.util.r_conn
_GITHUB_LINK_REGEX = re.compile(r'github.com/(.*?)/([^/?#]*)')
def delete_empty_submissions():
"""Delete submitted plugins that don't have enough info for us to act on.
Since we have no form validation, many submissions are just people who
click the "submit" button.
"""
deleted = r.table('submitted_plugins').filter({
'name': '',
'author': '',
'github-link': '',
'vimorg-link': '',
}).delete().run(r_conn())
print 'Deleted empty submissions:'
print deleted
def main():
delete_empty_submissions()
known_vimorg_plugins = []
known_github_plugins = []
new_plugins = []
unparseable_plugins = []
submissions = r.table('submitted_plugins').run(r_conn())
for submission in submissions:
if submission['vimorg-link']:
known_vimorg_plugins.append(submission)
continue
github_link = submission['github-link']
if github_link:
matches = _GITHUB_LINK_REGEX.findall(github_link)
if not matches:
unparseable_plugins.append(submission)
continue
repo_owner, repo_name = matches[0]
db_repo = db.github_repos.PluginGithubRepos.get_with_owner_repo(
repo_owner, repo_name)
if db_repo:
known_github_plugins.append(submission)
else:
new_plugins.append(submission)
print
print '%s submissions are known vim.org plugins' % len(
known_vimorg_plugins)
print '%s submissions are known github.com plugins' % len(
known_github_plugins)
print
print '%s submissions have unparseable github.com links:' % len(
unparseable_plugins)
for submission in unparseable_plugins:
print submission
print
print '%s submissions are new plugins:' % len(new_plugins)
for submission in new_plugins:
print json.dumps(submission, indent=2)
if __name__ == '__main__':
main()
|
|
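A quick demonstration of what _GITHUB_LINK_REGEX extracts from a submission link; the URLs below are illustrative examples, not real submissions:

import re

pattern = re.compile(r'github.com/(.*?)/([^/?#]*)')
for link in ['https://github.com/tpope/vim-fugitive',
             'http://github.com/scrooloose/nerdtree.git?tab=readme']:
    print(pattern.findall(link))
# [('tpope', 'vim-fugitive')]
# [('scrooloose', 'nerdtree.git')]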
a6b6439e0c142936248e1be46b575595a4b951b9
|
custom/enikshay/management/commands/get_users_for_bets.py
|
custom/enikshay/management/commands/get_users_for_bets.py
|
import csv
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import SQLLocation
from corehq.apps.users.models import CommCareUser
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('domain')
def confirm(self):
return raw_input("Continue?\n(y/n)") == 'y'
def handle(self, domain, **options):
self.domain = domain
self.locations_by_id = {
loc.location_id: loc for loc in SQLLocation.objects.filter(domain=domain)
}
filename = 'agency_users.csv'
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow([
'user_id',
'username',
'first_name',
'last_name',
'virtual_location_id',
'assigned_location_id',
'location_id',
'location_name',
])
for user in CommCareUser.by_domain(domain):
self.add_user(user, writer)
print "Wrote to {}".format(filename)
def add_user(self, user, writer):
if user.user_data.get('usertype', None) not in ['pcp', 'pcc-chemist', 'plc', 'pac']:
return
        virtual_location_id = user.user_location_id
        assigned_location_id = user.location_id
        location_id = virtual_location_id or assigned_location_id
location = self.locations_by_id.get(location_id, None) if location_id else None
if not location:
print "user {} {} has no location".format(user.username, user._id)
return
writer.writerow([
user._id,
user.username,
user.first_name,
user.last_name,
virtual_location_id,
assigned_location_id,
location_id,
location.name,
])
|
Add one-off script to get a csv of agency data
|
Add one-off script to get a csv of agency data
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add one-off script to get a csv of agency data
|
import csv
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import SQLLocation
from corehq.apps.users.models import CommCareUser
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('domain')
def confirm(self):
return raw_input("Continue?\n(y/n)") == 'y'
def handle(self, domain, **options):
self.domain = domain
self.locations_by_id = {
loc.location_id: loc for loc in SQLLocation.objects.filter(domain=domain)
}
filename = 'agency_users.csv'
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow([
'user_id',
'username',
'first_name',
'last_name',
'virtual_location_id',
'assigned_location_id',
'location_id',
'location_name',
])
for user in CommCareUser.by_domain(domain):
self.add_user(user, writer)
print "Wrote to {}".format(filename)
def add_user(self, user, writer):
if user.user_data.get('usertype', None) not in ['pcp', 'pcc-chemist', 'plc', 'pac']:
return
        virtual_location_id = user.user_location_id
        assigned_location_id = user.location_id
        location_id = virtual_location_id or assigned_location_id
location = self.locations_by_id.get(location_id, None) if location_id else None
if not location:
print "user {} {} has no location".format(user.username, user._id)
return
writer.writerow([
user._id,
user.username,
user.first_name,
user.last_name,
virtual_location_id,
assigned_location_id,
location_id,
location.name,
])
|
<commit_before><commit_msg>Add one-off script to get a csv of agency data<commit_after>
|
import csv
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import SQLLocation
from corehq.apps.users.models import CommCareUser
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('domain')
def confirm(self):
return raw_input("Continue?\n(y/n)") == 'y'
def handle(self, domain, **options):
self.domain = domain
self.locations_by_id = {
loc.location_id: loc for loc in SQLLocation.objects.filter(domain=domain)
}
filename = 'agency_users.csv'
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow([
'user_id',
'username',
'first_name',
'last_name',
'virtual_location_id',
'assigned_location_id',
'location_id',
'location_name',
])
for user in CommCareUser.by_domain(domain):
self.add_user(user, writer)
print "Wrote to {}".format(filename)
def add_user(self, user, writer):
if user.user_data.get('usertype', None) not in ['pcp', 'pcc-chemist', 'plc', 'pac']:
return
        virtual_location_id = user.user_location_id
        assigned_location_id = user.location_id
        location_id = virtual_location_id or assigned_location_id
location = self.locations_by_id.get(location_id, None) if location_id else None
if not location:
print "user {} {} has no location".format(user.username, user._id)
return
writer.writerow([
user._id,
user.username,
user.first_name,
user.last_name,
virtual_location_id,
assigned_location_id,
location_id,
location.name,
])
|
Add one-off script to get a csv of agency dataimport csv
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import SQLLocation
from corehq.apps.users.models import CommCareUser
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('domain')
def confirm(self):
return raw_input("Continue?\n(y/n)") == 'y'
def handle(self, domain, **options):
self.domain = domain
self.locations_by_id = {
loc.location_id: loc for loc in SQLLocation.objects.filter(domain=domain)
}
filename = 'agency_users.csv'
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow([
'user_id',
'username',
'first_name',
'last_name',
'virtual_location_id',
'assigned_location_id',
'location_id',
'location_name',
])
for user in CommCareUser.by_domain(domain):
self.add_user(user, writer)
print "Wrote to {}".format(filename)
def add_user(self, user, writer):
if user.user_data.get('usertype', None) not in ['pcp', 'pcc-chemist', 'plc', 'pac']:
return
        virtual_location_id = user.user_location_id
        assigned_location_id = user.location_id
        location_id = virtual_location_id or assigned_location_id
location = self.locations_by_id.get(location_id, None) if location_id else None
if not location:
print "user {} {} has no location".format(user.username, user._id)
return
writer.writerow([
user._id,
user.username,
user.first_name,
user.last_name,
virtual_location_id,
assigned_location_id,
location_id,
location.name,
])
|
<commit_before><commit_msg>Add one-off script to get a csv of agency data<commit_after>import csv
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import SQLLocation
from corehq.apps.users.models import CommCareUser
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('domain')
def confirm(self):
return raw_input("Continue?\n(y/n)") == 'y'
def handle(self, domain, **options):
self.domain = domain
self.locations_by_id = {
loc.location_id: loc for loc in SQLLocation.objects.filter(domain=domain)
}
filename = 'agency_users.csv'
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow([
'user_id',
'username',
'first_name',
'last_name',
'virtual_location_id',
'assigned_location_id',
'location_id',
'location_name',
])
for user in CommCareUser.by_domain(domain):
self.add_user(user, writer)
print "Wrote to {}".format(filename)
def add_user(self, user, writer):
if user.user_data.get('usertype', None) not in ['pcp', 'pcc-chemist', 'plc', 'pac']:
return
        virtual_location_id = user.user_location_id
        assigned_location_id = user.location_id
        location_id = virtual_location_id or assigned_location_id
location = self.locations_by_id.get(location_id, None) if location_id else None
if not location:
print "user {} {} has no location".format(user.username, user._id)
return
writer.writerow([
user._id,
user.username,
user.first_name,
user.last_name,
virtual_location_id,
assigned_location_id,
location_id,
location.name,
])
|
|
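The add_user method resolves each agency user's location by preferring the virtual location id and only falling back to the assigned one when the former is empty. A minimal sketch of that fallback with invented ids (no real CommCareUser or SQLLocation objects involved):
locations_by_id = {'virt-1': 'Agency A', 'assigned-1': 'Agency B'}
def resolve_location(virtual_location_id, assigned_location_id):
    # Prefer the virtual location; fall back to the assigned one when it is missing.
    location_id = virtual_location_id or assigned_location_id
    return locations_by_id.get(location_id, None) if location_id else None
print(resolve_location('virt-1', 'assigned-1'))  # Agency A
print(resolve_location(None, 'assigned-1'))      # Agency B
print(resolve_location(None, None))              # None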
0771bb8d5e1747df6b929d2473f97e687d9e3963
|
functionaltests/common/cleanup.py
|
functionaltests/common/cleanup.py
|
"""
Copyright 2015 IBM
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class CleanUp(object):
def __init__(self, barbicanclient):
self.created_entities = {
'secret': [],
'container': [],
'order': []
}
self.barbicanclient = barbicanclient
def delete_all_entities(self):
"""Helper method to delete all containers and secrets used for
testing"""
self._delete_all_containers()
self._delete_all_orders()
self._delete_all_secrets()
def add_entity(self, entity):
"""Stores an entity in Barbican to be used for testing
and keeps track of entity for removal after tests are
run"""
entity_type = str(type(entity)).lower()
if 'secret' in entity_type:
entity_ref = entity.store()
entity_type = 'secret'
elif 'container' in entity_type:
entity_ref = entity.store()
entity_type = 'container'
else:
entity_ref = entity.submit()
entity_type = 'order'
self.created_entities[entity_type].append(entity_ref)
return entity_ref
def _delete_all_containers(self):
"""Helper method to delete all containers used for
testing"""
for container_ref in self.created_entities['container']:
self.barbicanclient.containers.delete(container_ref)
def _delete_all_secrets(self):
"""Helper method to delete all secrets used for testing"""
for secret_ref in self.created_entities['secret']:
self.barbicanclient.secrets.delete(secret_ref)
def _delete_all_orders(self):
"""Helper method to delete all orders and secrets used for testing"""
for order_ref in self.created_entities['order']:
order = self.barbicanclient.orders.get(order_ref)
if order.secret_ref:
self.barbicanclient.secrets.delete(order.secret_ref)
# see if containers are supported
container_attr_exists = getattr(order, "container_ref", None)
if container_attr_exists and order.container_ref:
self.barbicanclient.containers.delete(order.container_ref)
self.barbicanclient.orders.delete(order_ref)
|
Create Common functions used for cleaning up items used for testing
|
Create Common functions used for cleaning up items used for testing
Creates common functions used to cleanup secrets, containers, and
orders used in client functional tests.
Change-Id: I3c37ab78c8c2c1d9a6e5299f303d11d786f39b5e
|
Python
|
apache-2.0
|
openstack/python-barbicanclient
|
Create Common functions used for cleaning up items used for testing
Creates common functions used to cleanup secrets, containers, and
orders used in client functional tests.
Change-Id: I3c37ab78c8c2c1d9a6e5299f303d11d786f39b5e
|
"""
Copyright 2015 IBM
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class CleanUp(object):
def __init__(self, barbicanclient):
self.created_entities = {
'secret': [],
'container': [],
'order': []
}
self.barbicanclient = barbicanclient
def delete_all_entities(self):
"""Helper method to delete all containers and secrets used for
testing"""
self._delete_all_containers()
self._delete_all_orders()
self._delete_all_secrets()
def add_entity(self, entity):
"""Stores an entity in Barbican to be used for testing
and keeps track of entity for removal after tests are
run"""
entity_type = str(type(entity)).lower()
if 'secret' in entity_type:
entity_ref = entity.store()
entity_type = 'secret'
elif 'container' in entity_type:
entity_ref = entity.store()
entity_type = 'container'
else:
entity_ref = entity.submit()
entity_type = 'order'
self.created_entities[entity_type].append(entity_ref)
return entity_ref
def _delete_all_containers(self):
"""Helper method to delete all containers used for
testing"""
for container_ref in self.created_entities['container']:
self.barbicanclient.containers.delete(container_ref)
def _delete_all_secrets(self):
"""Helper method to delete all secrets used for testing"""
for secret_ref in self.created_entities['secret']:
self.barbicanclient.secrets.delete(secret_ref)
def _delete_all_orders(self):
"""Helper method to delete all orders and secrets used for testing"""
for order_ref in self.created_entities['order']:
order = self.barbicanclient.orders.get(order_ref)
if order.secret_ref:
self.barbicanclient.secrets.delete(order.secret_ref)
# see if containers are supported
container_attr_exists = getattr(order, "container_ref", None)
if container_attr_exists and order.container_ref:
self.barbicanclient.containers.delete(order.container_ref)
self.barbicanclient.orders.delete(order_ref)
|
<commit_before><commit_msg>Create Common functions used for cleaning up items used for testing
Creates common functions used to cleanup secrets, containers, and
orders used in client functional tests.
Change-Id: I3c37ab78c8c2c1d9a6e5299f303d11d786f39b5e<commit_after>
|
"""
Copyright 2015 IBM
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class CleanUp(object):
def __init__(self, barbicanclient):
self.created_entities = {
'secret': [],
'container': [],
'order': []
}
self.barbicanclient = barbicanclient
def delete_all_entities(self):
"""Helper method to delete all containers and secrets used for
testing"""
self._delete_all_containers()
self._delete_all_orders()
self._delete_all_secrets()
def add_entity(self, entity):
"""Stores an entity in Barbican to be used for testing
and keeps track of entity for removal after tests are
run"""
entity_type = str(type(entity)).lower()
if 'secret' in entity_type:
entity_ref = entity.store()
entity_type = 'secret'
elif 'container' in entity_type:
entity_ref = entity.store()
entity_type = 'container'
else:
entity_ref = entity.submit()
entity_type = 'order'
self.created_entities[entity_type].append(entity_ref)
return entity_ref
def _delete_all_containers(self):
"""Helper method to delete all containers used for
testing"""
for container_ref in self.created_entities['container']:
self.barbicanclient.containers.delete(container_ref)
def _delete_all_secrets(self):
"""Helper method to delete all secrets used for testing"""
for secret_ref in self.created_entities['secret']:
self.barbicanclient.secrets.delete(secret_ref)
def _delete_all_orders(self):
"""Helper method to delete all orders and secrets used for testing"""
for order_ref in self.created_entities['order']:
order = self.barbicanclient.orders.get(order_ref)
if order.secret_ref:
self.barbicanclient.secrets.delete(order.secret_ref)
# see if containers are supported
container_attr_exists = getattr(order, "container_ref", None)
if container_attr_exists and order.container_ref:
self.barbicanclient.containers.delete(order.container_ref)
self.barbicanclient.orders.delete(order_ref)
|
Create Common functions used for cleaning up items used for testing
Creates common functions used to cleanup secrets, containers, and
orders used in client functional tests.
Change-Id: I3c37ab78c8c2c1d9a6e5299f303d11d786f39b5e"""
Copyright 2015 IBM
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class CleanUp(object):
def __init__(self, barbicanclient):
self.created_entities = {
'secret': [],
'container': [],
'order': []
}
self.barbicanclient = barbicanclient
def delete_all_entities(self):
"""Helper method to delete all containers and secrets used for
testing"""
self._delete_all_containers()
self._delete_all_orders()
self._delete_all_secrets()
def add_entity(self, entity):
"""Stores an entity in Barbican to be used for testing
and keeps track of entity for removal after tests are
run"""
entity_type = str(type(entity)).lower()
if 'secret' in entity_type:
entity_ref = entity.store()
entity_type = 'secret'
elif 'container' in entity_type:
entity_ref = entity.store()
entity_type = 'container'
else:
entity_ref = entity.submit()
entity_type = 'order'
self.created_entities[entity_type].append(entity_ref)
return entity_ref
def _delete_all_containers(self):
"""Helper method to delete all containers used for
testing"""
for container_ref in self.created_entities['container']:
self.barbicanclient.containers.delete(container_ref)
def _delete_all_secrets(self):
"""Helper method to delete all secrets used for testing"""
for secret_ref in self.created_entities['secret']:
self.barbicanclient.secrets.delete(secret_ref)
def _delete_all_orders(self):
"""Helper method to delete all orders and secrets used for testing"""
for order_ref in self.created_entities['order']:
order = self.barbicanclient.orders.get(order_ref)
if order.secret_ref:
self.barbicanclient.secrets.delete(order.secret_ref)
# see if containers are supported
container_attr_exists = getattr(order, "container_ref", None)
if container_attr_exists and order.container_ref:
self.barbicanclient.containers.delete(order.container_ref)
self.barbicanclient.orders.delete(order_ref)
|
<commit_before><commit_msg>Create Common functions used for cleaning up items used for testing
Creates common functions used to cleanup secrets, containers, and
orders used in client functional tests.
Change-Id: I3c37ab78c8c2c1d9a6e5299f303d11d786f39b5e<commit_after>"""
Copyright 2015 IBM
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class CleanUp(object):
def __init__(self, barbicanclient):
self.created_entities = {
'secret': [],
'container': [],
'order': []
}
self.barbicanclient = barbicanclient
def delete_all_entities(self):
"""Helper method to delete all containers and secrets used for
testing"""
self._delete_all_containers()
self._delete_all_orders()
self._delete_all_secrets()
def add_entity(self, entity):
"""Stores an entity in Barbican to be used for testing
and keeps track of entity for removal after tests are
run"""
entity_type = str(type(entity)).lower()
if 'secret' in entity_type:
entity_ref = entity.store()
entity_type = 'secret'
elif 'container' in entity_type:
entity_ref = entity.store()
entity_type = 'container'
else:
entity_ref = entity.submit()
entity_type = 'order'
self.created_entities[entity_type].append(entity_ref)
return entity_ref
def _delete_all_containers(self):
"""Helper method to delete all containers used for
testing"""
for container_ref in self.created_entities['container']:
self.barbicanclient.containers.delete(container_ref)
def _delete_all_secrets(self):
"""Helper method to delete all secrets used for testing"""
for secret_ref in self.created_entities['secret']:
self.barbicanclient.secrets.delete(secret_ref)
def _delete_all_orders(self):
"""Helper method to delete all orders and secrets used for testing"""
for order_ref in self.created_entities['order']:
order = self.barbicanclient.orders.get(order_ref)
if order.secret_ref:
self.barbicanclient.secrets.delete(order.secret_ref)
# see if containers are supported
container_attr_exists = getattr(order, "container_ref", None)
if container_attr_exists and order.container_ref:
self.barbicanclient.containers.delete(order.container_ref)
self.barbicanclient.orders.delete(order_ref)
|
|
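In a functional test the helper would typically wrap entity creation so teardown can purge everything; a rough usage sketch, assuming an already configured barbicanclient instance and the secrets.create()/secrets.get() calls from python-barbicanclient:
cleanup = CleanUp(barbicanclient)
secret = barbicanclient.secrets.create(name='example-secret', payload='s3cr3t')
secret_ref = cleanup.add_entity(secret)  # stores the secret and records its ref
try:
    assert barbicanclient.secrets.get(secret_ref).name == 'example-secret'
finally:
    cleanup.delete_all_entities()  # deletes containers, orders, then secrets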
5847190f8954e7da2871a8419495014bab0e7326
|
examples/mnist_mlp.py
|
examples/mnist_mlp.py
|
from __future__ import print_function
import plac
from thinc.neural.vec2vec import ReLu, Softmax
from thinc.api import clone, chain
from thinc.extra import datasets
from thinc.neural.loss import categorical_crossentropy
from thinc.neural.util import score_model
import pickle
def main(depth=2, width=512, nb_epoch=20):
with Model.define_operators({'*': clone, '>>': chain}):
model = ReLu(width) * depth >> Softmax()
(train_X, train_Y), (dev_X, dev_Y), (test_X, test_Y) = datasets.mnist()
with model.begin_training(train_X, train_Y):
optimizer = Adam(0.001)
for i in range(nb_epoch):
for X, y in (train_X, train_Y):
yh, backprop = model.begin_update(X)
loss, d_loss = categorical_crossentropy(y, yh)
backprop(d_loss)
for name, param, d_param in model.weights:
if d_param is not None:
optimizer(param, d_param, key=name)
with model.use_params(optimizer.averages):
dev_acc_avg = model.evaluate(dev_X, dev_Y)
print('Avg dev.: %.3f' % dev_acc_avg)
with model.use_params(optimizer.averages):
print('Avg dev.: %.3f' % model.evaluate(dev_X, dev_Y))
print('Avg test.: %.3f' % model.evaluate(test_X, test_Y))
with open('out.pickle', 'wb') as file_:
pickle.dump(model, file_, -1)
if __name__ == '__main__':
plac.call(main)
|
Change MNIST example for future API
|
Change MNIST example for future API
|
Python
|
mit
|
spacy-io/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc,spacy-io/thinc,explosion/thinc,explosion/thinc
|
Change MNIST example for future API
|
from __future__ import print_function
import plac
from thinc.neural.vec2vec import ReLu, Softmax
from thinc.api import clone, chain
from thinc.extra import datasets
from thinc.neural.loss import categorical_crossentropy
from thinc.neural.util import score_model
import pickle
def main(depth=2, width=512, nb_epoch=20):
with Model.define_operators({'*': clone, '>>': chain}):
model = ReLu(width) * depth >> Softmax()
(train_X, train_Y), (dev_X, dev_Y), (test_X, test_Y) = datasets.mnist()
with model.begin_training(train_X, train_Y):
optimizer = Adam(0.001)
for i in range(nb_epoch):
for X, y in (train_X, train_Y):
yh, backprop = model.begin_update(X)
loss, d_loss = categorical_crossentropy(y, yh)
backprop(d_loss)
for name, param, d_param in model.weights:
if d_param is not None:
optimizer(param, d_param, key=name)
with model.use_params(optimizer.averages):
dev_acc_avg = model.evaluate(dev_X, dev_Y)
print('Avg dev.: %.3f' % dev_acc_avg)
with model.use_params(optimizer.averages):
print('Avg dev.: %.3f' % model.evaluate(dev_X, dev_Y))
print('Avg test.: %.3f' % model.evaluate(test_X, test_Y))
with open('out.pickle', 'wb') as file_:
pickle.dump(model, file_, -1)
if __name__ == '__main__':
plac.call(main)
|
<commit_before><commit_msg>Change MNIST example for future API<commit_after>
|
from __future__ import print_function
import plac
from thinc.neural.vec2vec import ReLu, Softmax
from thinc.api import clone, chain
from thinc.extra import datasets
from thinc.neural.loss import categorical_crossentropy
from thinc.neural.util import score_model
import pickle
def main(depth=2, width=512, nb_epoch=20):
with Model.define_operators({'*': clone, '>>': chain}):
model = ReLu(width) * depth >> Softmax()
(train_X, train_Y), (dev_X, dev_Y), (test_X, test_Y) = datasets.mnist()
with model.begin_training(train_X, train_Y):
optimizer = Adam(0.001)
for i in range(nb_epoch):
for X, y in (train_X, train_Y):
yh, backprop = model.begin_update(X)
loss, d_loss = categorical_crossentropy(y, yh)
backprop(d_loss)
for name, param, d_param in model.weights:
if d_param is not None:
optimizer(param, d_param, key=name)
with model.use_params(optimizer.averages):
dev_acc_avg = model.evaluate(dev_X, dev_Y)
print('Avg dev.: %.3f' % dev_acc_avg)
with model.use_params(optimizer.averages):
print('Avg dev.: %.3f' % model.evaluate(dev_X, dev_Y))
print('Avg test.: %.3f' % model.evaluate(test_X, test_Y))
with open('out.pickle', 'wb') as file_:
pickle.dump(model, file_, -1)
if __name__ == '__main__':
plac.call(main)
|
Change MNIST example for future APIfrom __future__ import print_function
import plac
from thinc.neural.vec2vec import ReLu, Softmax
from thinc.api import clone, chain
from thinc.extra import datasets
from thinc.neural.loss import categorical_crossentropy
from thinc.neural.util import score_model
import pickle
def main(depth=2, width=512, nb_epoch=20):
with Model.define_operators({'*': clone, '>>': chain}):
model = ReLu(width) * depth >> Softmax()
(train_X, train_Y), (dev_X, dev_Y), (test_X, test_Y) = datasets.mnist()
with model.begin_training(train_X, train_Y):
optimizer = Adam(0.001)
for i in range(nb_epoch):
for X, y in (train_X, train_Y):
yh, backprop = model.begin_update(X)
loss, d_loss = categorical_crossentropy(y, yh)
backprop(d_loss)
for name, param, d_param in model.weights:
if d_param is not None:
optimizer(param, d_param, key=name)
with model.use_params(optimizer.averages):
dev_acc_avg = model.evaluate(dev_X, dev_Y)
print('Avg dev.: %.3f' % dev_acc_avg)
with model.use_params(optimizer.averages):
print('Avg dev.: %.3f' % model.evaluate(dev_X, dev_Y))
print('Avg test.: %.3f' % model.evaluate(test_X, test_Y))
with open('out.pickle', 'wb') as file_:
pickle.dump(model, file_, -1)
if __name__ == '__main__':
plac.call(main)
|
<commit_before><commit_msg>Change MNIST example for future API<commit_after>from __future__ import print_function
import plac
from thinc.neural.vec2vec import ReLu, Softmax
from thinc.api import clone, chain
from thinc.extra import datasets
from thinc.neural.loss import categorical_crossentropy
from thinc.neural.util import score_model
import pickle
def main(depth=2, width=512, nb_epoch=20):
with Model.define_operators({'*': clone, '>>': chain}):
model = ReLu(width) * depth >> Softmax()
(train_X, train_Y), (dev_X, dev_Y), (test_X, test_Y) = datasets.mnist()
with model.begin_training(train_X, train_Y):
optimizer = Adam(0.001)
for i in range(nb_epoch):
for X, y in (train_X, train_Y):
yh, backprop = model.begin_update(X)
loss, d_loss = categorical_crossentropy(y, yh)
backprop(d_loss)
for name, param, d_param in model.weights:
if d_param is not None:
optimizer(param, d_param, key=name)
with model.use_params(optimizer.averages):
dev_acc_avg = model.evaluate(dev_X, dev_Y)
print('Avg dev.: %.3f' % dev_acc_avg)
with model.use_params(optimizer.averages):
print('Avg dev.: %.3f' % model.evaluate(dev_X, dev_Y))
print('Avg test.: %.3f' % model.evaluate(test_X, test_Y))
with open('out.pickle', 'wb') as file_:
pickle.dump(model, file_, -1)
if __name__ == '__main__':
plac.call(main)
|
|
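The inner loop is meant to consume matched (X, y) minibatches; a plain-NumPy sketch of a batching helper that would produce such pairs, independent of any thinc API and using made-up array shapes:
import numpy as np
def minibatches(X, Y, batch_size=128):
    # Yield successive slices so the training loop always sees images with their labels.
    for start in range(0, len(X), batch_size):
        yield X[start:start + batch_size], Y[start:start + batch_size]
train_X = np.random.rand(1000, 784).astype('float32')  # stand-in for MNIST pixels
train_Y = np.eye(10, dtype='float32')[np.random.randint(0, 10, 1000)]  # one-hot labels
for X, y in minibatches(train_X, train_Y):
    assert X.shape[0] == y.shape[0]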
a7449b94b4171e86cd0b5260464949bc4ece7883
|
src/sentry/api/serializers/models/project.py
|
src/sentry/api/serializers/models/project.py
|
from __future__ import absolute_import
from sentry.api.serializers import Serializer, register
from sentry.models import OrganizationMemberType, Project, Team
@register(Project)
class ProjectSerializer(Serializer):
def get_attrs(self, item_list, user):
organization = item_list[0].team.organization
team_map = dict(
(t.id, t) for t in Team.objects.get_for_user(
organization=organization,
user=user,
)
)
result = {}
for project in item_list:
try:
team = team_map[project.team_id]
except KeyError:
access_type = None
else:
access_type = team.access_type
result[project] = {
'access_type': access_type,
}
return result
def serialize(self, obj, attrs, user):
from sentry import features
feature_list = []
if features.has('projects:quotas', obj, actor=user):
feature_list.append('quotas')
if features.has('projects:user-reports', obj, actor=user):
feature_list.append('user-reports')
return {
'id': str(obj.id),
'slug': obj.slug,
'name': obj.name,
'isPublic': obj.public,
'dateCreated': obj.date_added,
'features': feature_list,
'permission': {
'owner': attrs['access_type'] <= OrganizationMemberType.OWNER,
'admin': attrs['access_type'] <= OrganizationMemberType.ADMIN,
},
}
|
from __future__ import absolute_import
from sentry.api.serializers import Serializer, register
from sentry.models import OrganizationMemberType, Project, Team
@register(Project)
class ProjectSerializer(Serializer):
def get_attrs(self, item_list, user):
organization = item_list[0].team.organization
team_map = dict(
(t.id, t) for t in Team.objects.get_for_user(
organization=organization,
user=user,
)
)
result = {}
for project in item_list:
try:
team = team_map[project.team_id]
except KeyError:
access_type = None
else:
access_type = team.access_type
result[project] = {
'access_type': access_type,
}
return result
def serialize(self, obj, attrs, user):
from sentry import features
feature_list = []
if features.has('projects:quotas', obj, actor=user):
feature_list.append('quotas')
if features.has('projects:user-reports', obj, actor=user):
feature_list.append('user-reports')
return {
'id': str(obj.id),
'slug': obj.slug,
'name': obj.name,
'isPublic': obj.public,
'dateCreated': obj.date_added,
'firstEvent': obj.first_event,
'features': feature_list,
'permission': {
'owner': attrs['access_type'] <= OrganizationMemberType.OWNER,
'admin': attrs['access_type'] <= OrganizationMemberType.ADMIN,
},
}
|
Add Project.first_event to API serializer
|
Add Project.first_event to API serializer
|
Python
|
bsd-3-clause
|
mvaled/sentry,fotinakis/sentry,JamesMura/sentry,mvaled/sentry,BuildingLink/sentry,imankulov/sentry,JackDanger/sentry,looker/sentry,zenefits/sentry,daevaorn/sentry,nicholasserra/sentry,looker/sentry,JamesMura/sentry,mitsuhiko/sentry,looker/sentry,nicholasserra/sentry,nicholasserra/sentry,BuildingLink/sentry,JackDanger/sentry,zenefits/sentry,ifduyue/sentry,ifduyue/sentry,BayanGroup/sentry,daevaorn/sentry,fotinakis/sentry,gencer/sentry,daevaorn/sentry,JamesMura/sentry,JamesMura/sentry,gencer/sentry,alexm92/sentry,mvaled/sentry,zenefits/sentry,beeftornado/sentry,zenefits/sentry,fotinakis/sentry,gencer/sentry,imankulov/sentry,BayanGroup/sentry,jean/sentry,gencer/sentry,zenefits/sentry,gencer/sentry,mvaled/sentry,alexm92/sentry,ifduyue/sentry,fotinakis/sentry,BuildingLink/sentry,ifduyue/sentry,looker/sentry,BuildingLink/sentry,BayanGroup/sentry,jean/sentry,looker/sentry,BuildingLink/sentry,mitsuhiko/sentry,JamesMura/sentry,jean/sentry,imankulov/sentry,mvaled/sentry,ifduyue/sentry,mvaled/sentry,jean/sentry,jean/sentry,beeftornado/sentry,JackDanger/sentry,daevaorn/sentry,beeftornado/sentry,alexm92/sentry
|
from __future__ import absolute_import
from sentry.api.serializers import Serializer, register
from sentry.models import OrganizationMemberType, Project, Team
@register(Project)
class ProjectSerializer(Serializer):
def get_attrs(self, item_list, user):
organization = item_list[0].team.organization
team_map = dict(
(t.id, t) for t in Team.objects.get_for_user(
organization=organization,
user=user,
)
)
result = {}
for project in item_list:
try:
team = team_map[project.team_id]
except KeyError:
access_type = None
else:
access_type = team.access_type
result[project] = {
'access_type': access_type,
}
return result
def serialize(self, obj, attrs, user):
from sentry import features
feature_list = []
if features.has('projects:quotas', obj, actor=user):
feature_list.append('quotas')
if features.has('projects:user-reports', obj, actor=user):
feature_list.append('user-reports')
return {
'id': str(obj.id),
'slug': obj.slug,
'name': obj.name,
'isPublic': obj.public,
'dateCreated': obj.date_added,
'features': feature_list,
'permission': {
'owner': attrs['access_type'] <= OrganizationMemberType.OWNER,
'admin': attrs['access_type'] <= OrganizationMemberType.ADMIN,
},
}
Add Project.first_event to API serializer
|
from __future__ import absolute_import
from sentry.api.serializers import Serializer, register
from sentry.models import OrganizationMemberType, Project, Team
@register(Project)
class ProjectSerializer(Serializer):
def get_attrs(self, item_list, user):
organization = item_list[0].team.organization
team_map = dict(
(t.id, t) for t in Team.objects.get_for_user(
organization=organization,
user=user,
)
)
result = {}
for project in item_list:
try:
team = team_map[project.team_id]
except KeyError:
access_type = None
else:
access_type = team.access_type
result[project] = {
'access_type': access_type,
}
return result
def serialize(self, obj, attrs, user):
from sentry import features
feature_list = []
if features.has('projects:quotas', obj, actor=user):
feature_list.append('quotas')
if features.has('projects:user-reports', obj, actor=user):
feature_list.append('user-reports')
return {
'id': str(obj.id),
'slug': obj.slug,
'name': obj.name,
'isPublic': obj.public,
'dateCreated': obj.date_added,
'firstEvent': obj.first_event,
'features': feature_list,
'permission': {
'owner': attrs['access_type'] <= OrganizationMemberType.OWNER,
'admin': attrs['access_type'] <= OrganizationMemberType.ADMIN,
},
}
|
<commit_before>from __future__ import absolute_import
from sentry.api.serializers import Serializer, register
from sentry.models import OrganizationMemberType, Project, Team
@register(Project)
class ProjectSerializer(Serializer):
def get_attrs(self, item_list, user):
organization = item_list[0].team.organization
team_map = dict(
(t.id, t) for t in Team.objects.get_for_user(
organization=organization,
user=user,
)
)
result = {}
for project in item_list:
try:
team = team_map[project.team_id]
except KeyError:
access_type = None
else:
access_type = team.access_type
result[project] = {
'access_type': access_type,
}
return result
def serialize(self, obj, attrs, user):
from sentry import features
feature_list = []
if features.has('projects:quotas', obj, actor=user):
feature_list.append('quotas')
if features.has('projects:user-reports', obj, actor=user):
feature_list.append('user-reports')
return {
'id': str(obj.id),
'slug': obj.slug,
'name': obj.name,
'isPublic': obj.public,
'dateCreated': obj.date_added,
'features': feature_list,
'permission': {
'owner': attrs['access_type'] <= OrganizationMemberType.OWNER,
'admin': attrs['access_type'] <= OrganizationMemberType.ADMIN,
},
}
<commit_msg>Add Project.first_event to API serializer<commit_after>
|
from __future__ import absolute_import
from sentry.api.serializers import Serializer, register
from sentry.models import OrganizationMemberType, Project, Team
@register(Project)
class ProjectSerializer(Serializer):
def get_attrs(self, item_list, user):
organization = item_list[0].team.organization
team_map = dict(
(t.id, t) for t in Team.objects.get_for_user(
organization=organization,
user=user,
)
)
result = {}
for project in item_list:
try:
team = team_map[project.team_id]
except KeyError:
access_type = None
else:
access_type = team.access_type
result[project] = {
'access_type': access_type,
}
return result
def serialize(self, obj, attrs, user):
from sentry import features
feature_list = []
if features.has('projects:quotas', obj, actor=user):
feature_list.append('quotas')
if features.has('projects:user-reports', obj, actor=user):
feature_list.append('user-reports')
return {
'id': str(obj.id),
'slug': obj.slug,
'name': obj.name,
'isPublic': obj.public,
'dateCreated': obj.date_added,
'firstEvent': obj.first_event,
'features': feature_list,
'permission': {
'owner': attrs['access_type'] <= OrganizationMemberType.OWNER,
'admin': attrs['access_type'] <= OrganizationMemberType.ADMIN,
},
}
|
from __future__ import absolute_import
from sentry.api.serializers import Serializer, register
from sentry.models import OrganizationMemberType, Project, Team
@register(Project)
class ProjectSerializer(Serializer):
def get_attrs(self, item_list, user):
organization = item_list[0].team.organization
team_map = dict(
(t.id, t) for t in Team.objects.get_for_user(
organization=organization,
user=user,
)
)
result = {}
for project in item_list:
try:
team = team_map[project.team_id]
except KeyError:
access_type = None
else:
access_type = team.access_type
result[project] = {
'access_type': access_type,
}
return result
def serialize(self, obj, attrs, user):
from sentry import features
feature_list = []
if features.has('projects:quotas', obj, actor=user):
feature_list.append('quotas')
if features.has('projects:user-reports', obj, actor=user):
feature_list.append('user-reports')
return {
'id': str(obj.id),
'slug': obj.slug,
'name': obj.name,
'isPublic': obj.public,
'dateCreated': obj.date_added,
'features': feature_list,
'permission': {
'owner': attrs['access_type'] <= OrganizationMemberType.OWNER,
'admin': attrs['access_type'] <= OrganizationMemberType.ADMIN,
},
}
Add Project.first_event to API serializerfrom __future__ import absolute_import
from sentry.api.serializers import Serializer, register
from sentry.models import OrganizationMemberType, Project, Team
@register(Project)
class ProjectSerializer(Serializer):
def get_attrs(self, item_list, user):
organization = item_list[0].team.organization
team_map = dict(
(t.id, t) for t in Team.objects.get_for_user(
organization=organization,
user=user,
)
)
result = {}
for project in item_list:
try:
team = team_map[project.team_id]
except KeyError:
access_type = None
else:
access_type = team.access_type
result[project] = {
'access_type': access_type,
}
return result
def serialize(self, obj, attrs, user):
from sentry import features
feature_list = []
if features.has('projects:quotas', obj, actor=user):
feature_list.append('quotas')
if features.has('projects:user-reports', obj, actor=user):
feature_list.append('user-reports')
return {
'id': str(obj.id),
'slug': obj.slug,
'name': obj.name,
'isPublic': obj.public,
'dateCreated': obj.date_added,
'firstEvent': obj.first_event,
'features': feature_list,
'permission': {
'owner': attrs['access_type'] <= OrganizationMemberType.OWNER,
'admin': attrs['access_type'] <= OrganizationMemberType.ADMIN,
},
}
|
<commit_before>from __future__ import absolute_import
from sentry.api.serializers import Serializer, register
from sentry.models import OrganizationMemberType, Project, Team
@register(Project)
class ProjectSerializer(Serializer):
def get_attrs(self, item_list, user):
organization = item_list[0].team.organization
team_map = dict(
(t.id, t) for t in Team.objects.get_for_user(
organization=organization,
user=user,
)
)
result = {}
for project in item_list:
try:
team = team_map[project.team_id]
except KeyError:
access_type = None
else:
access_type = team.access_type
result[project] = {
'access_type': access_type,
}
return result
def serialize(self, obj, attrs, user):
from sentry import features
feature_list = []
if features.has('projects:quotas', obj, actor=user):
feature_list.append('quotas')
if features.has('projects:user-reports', obj, actor=user):
feature_list.append('user-reports')
return {
'id': str(obj.id),
'slug': obj.slug,
'name': obj.name,
'isPublic': obj.public,
'dateCreated': obj.date_added,
'features': feature_list,
'permission': {
'owner': attrs['access_type'] <= OrganizationMemberType.OWNER,
'admin': attrs['access_type'] <= OrganizationMemberType.ADMIN,
},
}
<commit_msg>Add Project.first_event to API serializer<commit_after>from __future__ import absolute_import
from sentry.api.serializers import Serializer, register
from sentry.models import OrganizationMemberType, Project, Team
@register(Project)
class ProjectSerializer(Serializer):
def get_attrs(self, item_list, user):
organization = item_list[0].team.organization
team_map = dict(
(t.id, t) for t in Team.objects.get_for_user(
organization=organization,
user=user,
)
)
result = {}
for project in item_list:
try:
team = team_map[project.team_id]
except KeyError:
access_type = None
else:
access_type = team.access_type
result[project] = {
'access_type': access_type,
}
return result
def serialize(self, obj, attrs, user):
from sentry import features
feature_list = []
if features.has('projects:quotas', obj, actor=user):
feature_list.append('quotas')
if features.has('projects:user-reports', obj, actor=user):
feature_list.append('user-reports')
return {
'id': str(obj.id),
'slug': obj.slug,
'name': obj.name,
'isPublic': obj.public,
'dateCreated': obj.date_added,
'firstEvent': obj.first_event,
'features': feature_list,
'permission': {
'owner': attrs['access_type'] <= OrganizationMemberType.OWNER,
'admin': attrs['access_type'] <= OrganizationMemberType.ADMIN,
},
}
|
1b921e83d000d024e38b0d7f81984b699cb49fac
|
fmriprep/cli/sample_openfmri_tasks_list.py
|
fmriprep/cli/sample_openfmri_tasks_list.py
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
A tool to generate a tasks_list.sh file for running fmriprep
on subjects downloaded with datalad with sample_openfmri.py
"""
import os
import glob
CMDLINE = """\
{fmriprep_cmd} {bids_dir}/{dataset_dir} {dataset_dir}/out/ participant \
-w {dataset_dir}/work --participant_label {participant_label} \
--mem-mb 96000 --nthreads 68 --omp-nthreads 12\
"""
def get_parser():
"""Build parser object"""
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
parser = ArgumentParser(
description='OpenfMRI participants sampler, for FMRIPREP\'s testing purposes',
formatter_class=RawTextHelpFormatter)
parser.add_argument('openfmri_dir', action='store',
                        help='the root folder of the openfmri dataset')
parser.add_argument('sample_file', action='store',
help='a YAML file containing the subsample schedule')
# optional arguments
parser.add_argument('--anat-only', action='store_true', default=False,
help='run only anatomical workflow')
parser.add_argument('-o', '--output-file', default='tasks_list.sh',
action='store', help='write output file')
parser.add_argument('--cmd-call', action='store', help='command to be run')
return parser
def main():
"""Entry point"""
import yaml
opts = get_parser().parse_args()
with open(opts.sample_file) as sfh:
sampledict = yaml.load(sfh)
cmdline = CMDLINE
if opts.anat_only:
cmdline += ' --anat-only'
fmriprep_cmd = 'fmriprep'
if opts.cmd_call is None:
singularity_dir = os.getenv('SINGULARITY_BIN')
singularity_img = sorted(
glob.glob(os.path.join(singularity_dir, 'poldracklab_fmriprep_1*')))
if singularity_img:
fmriprep_cmd = 'singularity run %s' % singularity_img[-1]
task_cmds = []
for dset, sublist in sampledict.items():
os.mkdir(dset)
for sub in sublist:
cmd = cmdline.format(
fmriprep_cmd=fmriprep_cmd,
bids_dir=opts.openfmri_dir,
dataset_dir=dset,
participant_label=sub,
)
task_cmds.append(cmd)
with open(opts.output_file, 'w') as tlfile:
tlfile.write('\n'.join(task_cmds))
if __name__ == '__main__':
main()
|
Add simple script to write tasks_list file
|
[skip ci] Add simple script to write tasks_list file
|
Python
|
bsd-3-clause
|
poldracklab/fmriprep,poldracklab/preprocessing-workflow,oesteban/preprocessing-workflow,poldracklab/fmriprep,poldracklab/fmriprep,oesteban/fmriprep,oesteban/fmriprep,oesteban/fmriprep,poldracklab/preprocessing-workflow,oesteban/preprocessing-workflow
|
[skip ci] Add simple script to write tasks_list file
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
A tool to generate a tasks_list.sh file for running fmriprep
on subjects downloaded with datalad with sample_openfmri.py
"""
import os
import glob
CMDLINE = """\
{fmriprep_cmd} {bids_dir}/{dataset_dir} {dataset_dir}/out/ participant \
-w {dataset_dir}/work --participant_label {participant_label} \
--mem-mb 96000 --nthreads 68 --omp-nthreads 12\
"""
def get_parser():
"""Build parser object"""
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
parser = ArgumentParser(
description='OpenfMRI participants sampler, for FMRIPREP\'s testing purposes',
formatter_class=RawTextHelpFormatter)
parser.add_argument('openfmri_dir', action='store',
                        help='the root folder of the openfmri dataset')
parser.add_argument('sample_file', action='store',
help='a YAML file containing the subsample schedule')
# optional arguments
parser.add_argument('--anat-only', action='store_true', default=False,
help='run only anatomical workflow')
parser.add_argument('-o', '--output-file', default='tasks_list.sh',
action='store', help='write output file')
parser.add_argument('--cmd-call', action='store', help='command to be run')
return parser
def main():
"""Entry point"""
import yaml
opts = get_parser().parse_args()
with open(opts.sample_file) as sfh:
sampledict = yaml.load(sfh)
cmdline = CMDLINE
if opts.anat_only:
cmdline += ' --anat-only'
fmriprep_cmd = 'fmriprep'
if opts.cmd_call is None:
singularity_dir = os.getenv('SINGULARITY_BIN')
singularity_img = sorted(
glob.glob(os.path.join(singularity_dir, 'poldracklab_fmriprep_1*')))
if singularity_img:
fmriprep_cmd = 'singularity run %s' % singularity_img[-1]
task_cmds = []
for dset, sublist in sampledict.items():
os.mkdir(dset)
for sub in sublist:
cmd = cmdline.format(
fmriprep_cmd=fmriprep_cmd,
bids_dir=opts.openfmri_dir,
dataset_dir=dset,
participant_label=sub,
)
task_cmds.append(cmd)
with open(opts.output_file, 'w') as tlfile:
tlfile.write('\n'.join(task_cmds))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>[skip ci] Add simple script to write tasks_list file<commit_after>
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
A tool to generate a tasks_list.sh file for running fmriprep
on subjects downloaded with datalad with sample_openfmri.py
"""
import os
import glob
CMDLINE = """\
{fmriprep_cmd} {bids_dir}/{dataset_dir} {dataset_dir}/out/ participant \
-w {dataset_dir}/work --participant_label {participant_label} \
--mem-mb 96000 --nthreads 68 --omp-nthreads 12\
"""
def get_parser():
"""Build parser object"""
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
parser = ArgumentParser(
description='OpenfMRI participants sampler, for FMRIPREP\'s testing purposes',
formatter_class=RawTextHelpFormatter)
parser.add_argument('openfmri_dir', action='store',
                        help='the root folder of the openfmri dataset')
parser.add_argument('sample_file', action='store',
help='a YAML file containing the subsample schedule')
# optional arguments
parser.add_argument('--anat-only', action='store_true', default=False,
help='run only anatomical workflow')
parser.add_argument('-o', '--output-file', default='tasks_list.sh',
action='store', help='write output file')
parser.add_argument('--cmd-call', action='store', help='command to be run')
return parser
def main():
"""Entry point"""
import yaml
opts = get_parser().parse_args()
with open(opts.sample_file) as sfh:
sampledict = yaml.load(sfh)
cmdline = CMDLINE
if opts.anat_only:
cmdline += ' --anat-only'
fmriprep_cmd = 'fmriprep'
if opts.cmd_call is None:
singularity_dir = os.getenv('SINGULARITY_BIN')
singularity_img = sorted(
glob.glob(os.path.join(singularity_dir, 'poldracklab_fmriprep_1*')))
if singularity_img:
fmriprep_cmd = 'singularity run %s' % singularity_img[-1]
task_cmds = []
for dset, sublist in sampledict.items():
os.mkdir(dset)
for sub in sublist:
cmd = cmdline.format(
fmriprep_cmd=fmriprep_cmd,
bids_dir=opts.openfmri_dir,
dataset_dir=dset,
participant_label=sub,
)
task_cmds.append(cmd)
with open(opts.output_file, 'w') as tlfile:
tlfile.write('\n'.join(task_cmds))
if __name__ == '__main__':
main()
|
[skip ci] Add simple script to write tasks_list file# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
A tool to generate a tasks_list.sh file for running fmriprep
on subjects downloaded with datalad with sample_openfmri.py
"""
import os
import glob
CMDLINE = """\
{fmriprep_cmd} {bids_dir}/{dataset_dir} {dataset_dir}/out/ participant \
-w {dataset_dir}/work --participant_label {participant_label} \
--mem-mb 96000 --nthreads 68 --omp-nthreads 12\
"""
def get_parser():
"""Build parser object"""
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
parser = ArgumentParser(
description='OpenfMRI participants sampler, for FMRIPREP\'s testing purposes',
formatter_class=RawTextHelpFormatter)
parser.add_argument('openfmri_dir', action='store',
                        help='the root folder of the openfmri dataset')
parser.add_argument('sample_file', action='store',
help='a YAML file containing the subsample schedule')
# optional arguments
parser.add_argument('--anat-only', action='store_true', default=False,
help='run only anatomical workflow')
parser.add_argument('-o', '--output-file', default='tasks_list.sh',
action='store', help='write output file')
parser.add_argument('--cmd-call', action='store', help='command to be run')
return parser
def main():
"""Entry point"""
import yaml
opts = get_parser().parse_args()
with open(opts.sample_file) as sfh:
sampledict = yaml.load(sfh)
cmdline = CMDLINE
if opts.anat_only:
cmdline += ' --anat-only'
fmriprep_cmd = 'fmriprep'
if opts.cmd_call is None:
singularity_dir = os.getenv('SINGULARITY_BIN')
singularity_img = sorted(
glob.glob(os.path.join(singularity_dir, 'poldracklab_fmriprep_1*')))
if singularity_img:
fmriprep_cmd = 'singularity run %s' % singularity_img[-1]
task_cmds = []
for dset, sublist in sampledict.items():
os.mkdir(dset)
for sub in sublist:
cmd = cmdline.format(
fmriprep_cmd=fmriprep_cmd,
bids_dir=opts.openfmri_dir,
dataset_dir=dset,
participant_label=sub,
)
task_cmds.append(cmd)
with open(opts.output_file, 'w') as tlfile:
tlfile.write('\n'.join(task_cmds))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>[skip ci] Add simple script to write tasks_list file<commit_after># -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
A tool to generate a tasks_list.sh file for running fmriprep
on subjects downloaded with datalad with sample_openfmri.py
"""
import os
import glob
CMDLINE = """\
{fmriprep_cmd} {bids_dir}/{dataset_dir} {dataset_dir}/out/ participant \
-w {dataset_dir}/work --participant_label {participant_label} \
--mem-mb 96000 --nthreads 68 --omp-nthreads 12\
"""
def get_parser():
"""Build parser object"""
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
parser = ArgumentParser(
description='OpenfMRI participants sampler, for FMRIPREP\'s testing purposes',
formatter_class=RawTextHelpFormatter)
parser.add_argument('openfmri_dir', action='store',
                        help='the root folder of the openfmri dataset')
parser.add_argument('sample_file', action='store',
help='a YAML file containing the subsample schedule')
# optional arguments
parser.add_argument('--anat-only', action='store_true', default=False,
help='run only anatomical workflow')
parser.add_argument('-o', '--output-file', default='tasks_list.sh',
action='store', help='write output file')
parser.add_argument('--cmd-call', action='store', help='command to be run')
return parser
def main():
"""Entry point"""
import yaml
opts = get_parser().parse_args()
with open(opts.sample_file) as sfh:
sampledict = yaml.load(sfh)
cmdline = CMDLINE
if opts.anat_only:
cmdline += ' --anat-only'
fmriprep_cmd = 'fmriprep'
if opts.cmd_call is None:
singularity_dir = os.getenv('SINGULARITY_BIN')
singularity_img = sorted(
glob.glob(os.path.join(singularity_dir, 'poldracklab_fmriprep_1*')))
if singularity_img:
fmriprep_cmd = 'singularity run %s' % singularity_img[-1]
task_cmds = []
for dset, sublist in sampledict.items():
os.mkdir(dset)
for sub in sublist:
cmd = cmdline.format(
fmriprep_cmd=fmriprep_cmd,
bids_dir=opts.openfmri_dir,
dataset_dir=dset,
participant_label=sub,
)
task_cmds.append(cmd)
with open(opts.output_file, 'w') as tlfile:
tlfile.write('\n'.join(task_cmds))
if __name__ == '__main__':
main()
|
|
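The sample_file maps dataset directories to lists of participant labels; a hypothetical schedule and the command lines the CMDLINE template would emit for it (the dataset name, subjects, and bids_dir path are invented):
sampledict = {'ds000114': ['01', '02']}
CMDLINE = ('{fmriprep_cmd} {bids_dir}/{dataset_dir} {dataset_dir}/out/ participant '
           '-w {dataset_dir}/work --participant_label {participant_label} '
           '--mem-mb 96000 --nthreads 68 --omp-nthreads 12')
for dset, subs in sampledict.items():
    for sub in subs:
        print(CMDLINE.format(fmriprep_cmd='fmriprep', bids_dir='/data/openfmri',
                             dataset_dir=dset, participant_label=sub))
# e.g. fmriprep /data/openfmri/ds000114 ds000114/out/ participant -w ds000114/work --participant_label 01 ...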
f4eec09a414157b423ac4790dfe22bc09e55bcdf
|
misc/mac2lla.py
|
misc/mac2lla.py
|
def mac_to_lladdr(mac):
macpieces = []
mac = mac.replace('-', ':')
for byte in mac.split(':'):
macpieces.append(int(byte, 16))
macpieces[0] = macpieces[0] ^ 2
llapieces = [(macpieces[0] << 8) + macpieces[1], (macpieces[2] << 8) + 0xff, 0xfe00 + macpieces[3], (macpieces[4] << 8) + macpieces[5]]
return 'fe80::{:x}:{:x}:{:x}:{:x}'.format(*llapieces)
if __name__ == '__main__':
import sys
print(mac_to_lladdr(sys.argv[1]))
|
Add a sample for converting mac to lla
|
Add a sample for converting mac to lla
|
Python
|
apache-2.0
|
jjohnson42/confluent,xcat2/confluent,xcat2/confluent,xcat2/confluent,xcat2/confluent,jjohnson42/confluent,xcat2/confluent,jjohnson42/confluent,jjohnson42/confluent,jjohnson42/confluent
|
Add a sample for converting mac to lla
|
def mac_to_lladdr(mac):
macpieces = []
mac = mac.replace('-', ':')
for byte in mac.split(':'):
macpieces.append(int(byte, 16))
macpieces[0] = macpieces[0] ^ 2
llapieces = [(macpieces[0] << 8) + macpieces[1], (macpieces[2] << 8) + 0xff, 0xfe00 + macpieces[3], (macpieces[4] << 8) + macpieces[5]]
return 'fe80::{:x}:{:x}:{:x}:{:x}'.format(*llapieces)
if __name__ == '__main__':
import sys
print(mac_to_lladdr(sys.argv[1]))
|
<commit_before><commit_msg>Add a sample for converting mac to lla<commit_after>
|
def mac_to_lladdr(mac):
macpieces = []
mac = mac.replace('-', ':')
for byte in mac.split(':'):
macpieces.append(int(byte, 16))
macpieces[0] = macpieces[0] ^ 2
llapieces = [(macpieces[0] << 8) + macpieces[1], (macpieces[2] << 8) + 0xff, 0xfe00 + macpieces[3], (macpieces[4] << 8) + macpieces[5]]
return 'fe80::{:x}:{:x}:{:x}:{:x}'.format(*llapieces)
if __name__ == '__main__':
import sys
print(mac_to_lladdr(sys.argv[1]))
|
Add a sample for converting mac to lladef mac_to_lladdr(mac):
macpieces = []
mac = mac.replace('-', ':')
for byte in mac.split(':'):
macpieces.append(int(byte, 16))
macpieces[0] = macpieces[0] ^ 2
llapieces = [(macpieces[0] << 8) + macpieces[1], (macpieces[2] << 8) + 0xff, 0xfe00 + macpieces[3], (macpieces[4] << 8) + macpieces[5]]
return 'fe80::{:x}:{:x}:{:x}:{:x}'.format(*llapieces)
if __name__ == '__main__':
import sys
print(mac_to_lladdr(sys.argv[1]))
|
<commit_before><commit_msg>Add a sample for converting mac to lla<commit_after>def mac_to_lladdr(mac):
macpieces = []
mac = mac.replace('-', ':')
for byte in mac.split(':'):
macpieces.append(int(byte, 16))
macpieces[0] = macpieces[0] ^ 2
llapieces = [(macpieces[0] << 8) + macpieces[1], (macpieces[2] << 8) + 0xff, 0xfe00 + macpieces[3], (macpieces[4] << 8) + macpieces[5]]
return 'fe80::{:x}:{:x}:{:x}:{:x}'.format(*llapieces)
if __name__ == '__main__':
import sys
print(mac_to_lladdr(sys.argv[1]))
|
|
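The conversion is the usual modified-EUI-64 recipe: flip the universal/local bit of the first octet and splice ff:fe into the middle of the MAC. With mac_to_lladdr defined as above, for example:
print(mac_to_lladdr('52:54:00:12:34:56'))  # fe80::5054:ff:fe12:3456
print(mac_to_lladdr('52-54-00-12-34-56'))  # dashes are normalised to colons, same result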
0bca0dfa3fdde298f615e0cbd5239a25bb3cc410
|
fortin.py
|
fortin.py
|
from __future__ import absolute_import, print_function, division
from firedrake import *
mesh = UnitSquareMesh(2, 2)
RT = FiniteElement("RT", triangle, 1)
BRT = BrokenElement(RT)
U = FunctionSpace(mesh, RT)
Ud = FunctionSpace(mesh, BRT)
x = SpatialCoordinate(mesh)
fct = as_vector([x[0] ** 2, x[1] ** 2])
u0 = Function(U).project(fct)
u = Function(Ud).project(fct)
Tr = FunctionSpace(mesh, "HDiv Trace", 0)
gammar = TestFunction(Tr)
lambdar = TrialFunction(Tr)
n = FacetNormal(mesh)
a_dS = lambdar('+')*gammar('+')*dS + lambdar*gammar*ds
l_dS = gammar('+')*0.5*jump(dot(u, n))*dS + gammar*0.5*dot(u, n)*ds
f = Function(Tr)
# A = assemble(a_dS).M.values
# b = assemble(l_dS).dat.data
solve(a_dS == l_dS, f)
ubar = TrialFunction(U)
a = gammar('+')*dot(ubar, n)('+')*dS + gammar*dot(ubar, n)*ds
l = gammar('+')*f('+')*dS + gammar*f*ds
a_global = gammar*jump(ubar, n=n)*dS + gammar*dot(ubar, n)*ds
M = Tensor(a)
q = Tensor(l)
x = assemble(M.inv*q)
ans = Function(U)
# solve(a == l, ans)
count_code = """
for (int i = 0; i<count.ndofs; ++i) {
count[i][0] += 1.0;
}
"""
kernel_code = """
for (int i = 0; i<ubar.ndofs; ++i) {
ubar[i][0] += u[i][0]/one[i][0];
}
"""
One = Function(U)
par_loop(count_code, dx, {"count":(One, INC)})
par_loop(kernel_code, dx, {"ubar":(ans, INC), "u":(u, READ), "one":(One, READ)})
|
Add a custom kernel for repairing continuity of broken solutions
|
Add a custom kernel for repairing continuity of broken solutions
|
Python
|
mit
|
thomasgibson/firedrake-hybridization
|
Add a custom kernel for repairing continuity of broken solutions
|
from __future__ import absolute_import, print_function, division
from firedrake import *
mesh = UnitSquareMesh(2, 2)
RT = FiniteElement("RT", triangle, 1)
BRT = BrokenElement(RT)
U = FunctionSpace(mesh, RT)
Ud = FunctionSpace(mesh, BRT)
x = SpatialCoordinate(mesh)
fct = as_vector([x[0] ** 2, x[1] ** 2])
u0 = Function(U).project(fct)
u = Function(Ud).project(fct)
Tr = FunctionSpace(mesh, "HDiv Trace", 0)
gammar = TestFunction(Tr)
lambdar = TrialFunction(Tr)
n = FacetNormal(mesh)
a_dS = lambdar('+')*gammar('+')*dS + lambdar*gammar*ds
l_dS = gammar('+')*0.5*jump(dot(u, n))*dS + gammar*0.5*dot(u, n)*ds
f = Function(Tr)
# A = assemble(a_dS).M.values
# b = assemble(l_dS).dat.data
solve(a_dS == l_dS, f)
ubar = TrialFunction(U)
a = gammar('+')*dot(ubar, n)('+')*dS + gammar*dot(ubar, n)*ds
l = gammar('+')*f('+')*dS + gammar*f*ds
a_global = gammar*jump(ubar, n=n)*dS + gammar*dot(ubar, n)*ds
M = Tensor(a)
q = Tensor(l)
x = assemble(M.inv*q)
ans = Function(U)
# solve(a == l, ans)
count_code = """
for (int i = 0; i<count.ndofs; ++i) {
count[i][0] += 1.0;
}
"""
kernel_code = """
for (int i = 0; i<ubar.ndofs; ++i) {
ubar[i][0] += u[i][0]/one[i][0];
}
"""
One = Function(U)
par_loop(count_code, dx, {"count":(One, INC)})
par_loop(kernel_code, dx, {"ubar":(ans, INC), "u":(u, READ), "one":(One, READ)})
|
<commit_before><commit_msg>Add a custom kernel for repairing continuity of broken solutions<commit_after>
|
from __future__ import absolute_import, print_function, division
from firedrake import *
mesh = UnitSquareMesh(2, 2)
RT = FiniteElement("RT", triangle, 1)
BRT = BrokenElement(RT)
U = FunctionSpace(mesh, RT)
Ud = FunctionSpace(mesh, BRT)
x = SpatialCoordinate(mesh)
fct = as_vector([x[0] ** 2, x[1] ** 2])
u0 = Function(U).project(fct)
u = Function(Ud).project(fct)
Tr = FunctionSpace(mesh, "HDiv Trace", 0)
gammar = TestFunction(Tr)
lambdar = TrialFunction(Tr)
n = FacetNormal(mesh)
a_dS = lambdar('+')*gammar('+')*dS + lambdar*gammar*ds
l_dS = gammar('+')*0.5*jump(dot(u, n))*dS + gammar*0.5*dot(u, n)*ds
f = Function(Tr)
# A = assemble(a_dS).M.values
# b = assemble(l_dS).dat.data
solve(a_dS == l_dS, f)
ubar = TrialFunction(U)
a = gammar('+')*dot(ubar, n)('+')*dS + gammar*dot(ubar, n)*ds
l = gammar('+')*f('+')*dS + gammar*f*ds
a_global = gammar*jump(ubar, n=n)*dS + gammar*dot(ubar, n)*ds
M = Tensor(a)
q = Tensor(l)
x = assemble(M.inv*q)
ans = Function(U)
# solve(a == l, ans)
count_code = """
for (int i = 0; i<count.ndofs; ++i) {
count[i][0] += 1.0;
}
"""
kernel_code = """
for (int i = 0; i<ubar.ndofs; ++i) {
ubar[i][0] += u[i][0]/one[i][0];
}
"""
One = Function(U)
par_loop(count_code, dx, {"count":(One, INC)})
par_loop(kernel_code, dx, {"ubar":(ans, INC), "u":(u, READ), "one":(One, READ)})
|
Add a custom kernel for repairing continuity of broken solutionsfrom __future__ import absolute_import, print_function, division
from firedrake import *
mesh = UnitSquareMesh(2, 2)
RT = FiniteElement("RT", triangle, 1)
BRT = BrokenElement(RT)
U = FunctionSpace(mesh, RT)
Ud = FunctionSpace(mesh, BRT)
x = SpatialCoordinate(mesh)
fct = as_vector([x[0] ** 2, x[1] ** 2])
u0 = Function(U).project(fct)
u = Function(Ud).project(fct)
Tr = FunctionSpace(mesh, "HDiv Trace", 0)
gammar = TestFunction(Tr)
lambdar = TrialFunction(Tr)
n = FacetNormal(mesh)
a_dS = lambdar('+')*gammar('+')*dS + lambdar*gammar*ds
l_dS = gammar('+')*0.5*jump(dot(u, n))*dS + gammar*0.5*dot(u, n)*ds
f = Function(Tr)
# A = assemble(a_dS).M.values
# b = assemble(l_dS).dat.data
solve(a_dS == l_dS, f)
ubar = TrialFunction(U)
a = gammar('+')*dot(ubar, n)('+')*dS + gammar*dot(ubar, n)*ds
l = gammar('+')*f('+')*dS + gammar*f*ds
a_global = gammar*jump(ubar, n=n)*dS + gammar*dot(ubar, n)*ds
M = Tensor(a)
q = Tensor(l)
x = assemble(M.inv*q)
ans = Function(U)
# solve(a == l, ans)
count_code = """
for (int i = 0; i<count.ndofs; ++i) {
count[i][0] += 1.0;
}
"""
kernel_code = """
for (int i = 0; i<ubar.ndofs; ++i) {
ubar[i][0] += u[i][0]/one[i][0];
}
"""
One = Function(U)
par_loop(count_code, dx, {"count":(One, INC)})
par_loop(kernel_code, dx, {"ubar":(ans, INC), "u":(u, READ), "one":(One, READ)})
|
<commit_before><commit_msg>Add a custom kernel for repairing continuity of broken solutions<commit_after>from __future__ import absolute_import, print_function, division
from firedrake import *
mesh = UnitSquareMesh(2, 2)
RT = FiniteElement("RT", triangle, 1)
BRT = BrokenElement(RT)
U = FunctionSpace(mesh, RT)
Ud = FunctionSpace(mesh, BRT)
x = SpatialCoordinate(mesh)
fct = as_vector([x[0] ** 2, x[1] ** 2])
u0 = Function(U).project(fct)
u = Function(Ud).project(fct)
Tr = FunctionSpace(mesh, "HDiv Trace", 0)
gammar = TestFunction(Tr)
lambdar = TrialFunction(Tr)
n = FacetNormal(mesh)
a_dS = lambdar('+')*gammar('+')*dS + lambdar*gammar*ds
l_dS = gammar('+')*0.5*jump(dot(u, n))*dS + gammar*0.5*dot(u, n)*ds
f = Function(Tr)
# A = assemble(a_dS).M.values
# b = assemble(l_dS).dat.data
solve(a_dS == l_dS, f)
ubar = TrialFunction(U)
a = gammar('+')*dot(ubar, n)('+')*dS + gammar*dot(ubar, n)*ds
l = gammar('+')*f('+')*dS + gammar*f*ds
a_global = gammar*jump(ubar, n=n)*dS + gammar*dot(ubar, n)*ds
M = Tensor(a)
q = Tensor(l)
x = assemble(M.inv*q)
ans = Function(U)
# solve(a == l, ans)
count_code = """
for (int i = 0; i<count.ndofs; ++i) {
count[i][0] += 1.0;
}
"""
kernel_code = """
for (int i = 0; i<ubar.ndofs; ++i) {
ubar[i][0] += u[i][0]/one[i][0];
}
"""
One = Function(U)
par_loop(count_code, dx, {"count":(One, INC)})
par_loop(kernel_code, dx, {"ubar":(ans, INC), "u":(u, READ), "one":(One, READ)})
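A possible sanity check for the averaging repair above (an editor's sketch, not part of the original commit; it assumes u0 and ans are still in scope after the par_loops): the repaired field should sit close to the direct RT projection of the same expression.
err = errornorm(u0, ans)  # errornorm is brought in by `from firedrake import *`
print("L2 difference between direct projection and repaired field:", err)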
|
|
002925683bf5d1e8dae12923a248eca77db4e5c7
|
tests/azure_disk_integration_test.py
|
tests/azure_disk_integration_test.py
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for Azure scratch disks."""
import os
import unittest
from perfkitbenchmarker import pkb
from perfkitbenchmarker import test_util
MOUNT_POINT = '/scratch'
@unittest.skipUnless('PERFKIT_INTEGRATION' in os.environ,
'PERFKIT_INTEGRATION not in environment')
class AzureScratchDiskIntegrationTest(unittest.TestCase):
"""Integration tests for Azure disks.
Please see the section on integration testing in the README.
"""
def setUp(self):
pkb.SetUpPKB()
def testPremiumStorage(self):
test_util.assertDiskMounts({
'flags': {
'azure_storage_type': 'PLRS'
},
'vm_groups': {
'vm_group_1': {
'cloud': 'Azure',
'vm_spec': {
'Azure': {
'machine_type': 'Standard_DS2',
'zone': 'East US 2'
}
},
'disk_spec': {
'Azure': {
'disk_type': 'remote_ssd',
'disk_size': 10, # disk size must be between
# 10 and 1024 GB.
'mount_point': MOUNT_POINT
}
}
}
}
}, MOUNT_POINT)
def testLocalSSD(self):
test_util.assertDiskMounts({
'vm_groups': {
'vm_group_1': {
'cloud': 'Azure',
'vm_spec': {
'Azure': {
'machine_type': 'Standard_G1',
'zone': 'East US 2'
}
},
'disk_spec': {
'Azure': {
'disk_type': 'local',
'mount_point': MOUNT_POINT
}
}
}
}
}, MOUNT_POINT)
|
Add Azure disk integration test
|
Add Azure disk integration test
Test that we can actually create a disk in an Azure VM. This is very
similar to the GCP and AWS disk integration tests.
|
Python
|
apache-2.0
|
syed/PerfKitBenchmarker,AdamIsrael/PerfKitBenchmarker,GoogleCloudPlatform/PerfKitBenchmarker,GoogleCloudPlatform/PerfKitBenchmarker,syed/PerfKitBenchmarker,kivio/PerfKitBenchmarker,mateusz-blaszkowski/PerfKitBenchmarker,GoogleCloudPlatform/PerfKitBenchmarker,GoogleCloudPlatform/PerfKitBenchmarker,meteorfox/PerfKitBenchmarker,mateusz-blaszkowski/PerfKitBenchmarker,meteorfox/PerfKitBenchmarker,AdamIsrael/PerfKitBenchmarker,kivio/PerfKitBenchmarker
|
Add Azure disk integration test
Test that we can actually create a disk in an Azure VM. This is very
similar to the GCP and AWS disk integration tests.
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for Azure scratch disks."""
import os
import unittest
from perfkitbenchmarker import pkb
from perfkitbenchmarker import test_util
MOUNT_POINT = '/scratch'
@unittest.skipUnless('PERFKIT_INTEGRATION' in os.environ,
'PERFKIT_INTEGRATION not in environment')
class AzureScratchDiskIntegrationTest(unittest.TestCase):
"""Integration tests for Azure disks.
Please see the section on integration testing in the README.
"""
def setUp(self):
pkb.SetUpPKB()
def testPremiumStorage(self):
test_util.assertDiskMounts({
'flags': {
'azure_storage_type': 'PLRS'
},
'vm_groups': {
'vm_group_1': {
'cloud': 'Azure',
'vm_spec': {
'Azure': {
'machine_type': 'Standard_DS2',
'zone': 'East US 2'
}
},
'disk_spec': {
'Azure': {
'disk_type': 'remote_ssd',
'disk_size': 10, # disk size must be between
# 10 and 1024 GB.
'mount_point': MOUNT_POINT
}
}
}
}
}, MOUNT_POINT)
def testLocalSSD(self):
test_util.assertDiskMounts({
'vm_groups': {
'vm_group_1': {
'cloud': 'Azure',
'vm_spec': {
'Azure': {
'machine_type': 'Standard_G1',
'zone': 'East US 2'
}
},
'disk_spec': {
'Azure': {
'disk_type': 'local',
'mount_point': MOUNT_POINT
}
}
}
}
}, MOUNT_POINT)
|
<commit_before><commit_msg>Add Azure disk integration test
Test that we can actually create a disk in an Azure VM. This is very
similar to the GCP and AWS disk integration tests.<commit_after>
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for Azure scratch disks."""
import os
import unittest
from perfkitbenchmarker import pkb
from perfkitbenchmarker import test_util
MOUNT_POINT = '/scratch'
@unittest.skipUnless('PERFKIT_INTEGRATION' in os.environ,
'PERFKIT_INTEGRATION not in environment')
class AzureScratchDiskIntegrationTest(unittest.TestCase):
"""Integration tests for Azure disks.
Please see the section on integration testing in the README.
"""
def setUp(self):
pkb.SetUpPKB()
def testPremiumStorage(self):
test_util.assertDiskMounts({
'flags': {
'azure_storage_type': 'PLRS'
},
'vm_groups': {
'vm_group_1': {
'cloud': 'Azure',
'vm_spec': {
'Azure': {
'machine_type': 'Standard_DS2',
'zone': 'East US 2'
}
},
'disk_spec': {
'Azure': {
'disk_type': 'remote_ssd',
'disk_size': 10, # disk size must be between
# 10 and 1024 GB.
'mount_point': MOUNT_POINT
}
}
}
}
}, MOUNT_POINT)
def testLocalSSD(self):
test_util.assertDiskMounts({
'vm_groups': {
'vm_group_1': {
'cloud': 'Azure',
'vm_spec': {
'Azure': {
'machine_type': 'Standard_G1',
'zone': 'East US 2'
}
},
'disk_spec': {
'Azure': {
'disk_type': 'local',
'mount_point': MOUNT_POINT
}
}
}
}
}, MOUNT_POINT)
|
Add Azure disk integration test
Test that we can actually create a disk in an Azure VM. This is very
similar to the GCP and AWS disk integration tests.# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for Azure scratch disks."""
import os
import unittest
from perfkitbenchmarker import pkb
from perfkitbenchmarker import test_util
MOUNT_POINT = '/scratch'
@unittest.skipUnless('PERFKIT_INTEGRATION' in os.environ,
'PERFKIT_INTEGRATION not in environment')
class AzureScratchDiskIntegrationTest(unittest.TestCase):
"""Integration tests for Azure disks.
Please see the section on integration testing in the README.
"""
def setUp(self):
pkb.SetUpPKB()
def testPremiumStorage(self):
test_util.assertDiskMounts({
'flags': {
'azure_storage_type': 'PLRS'
},
'vm_groups': {
'vm_group_1': {
'cloud': 'Azure',
'vm_spec': {
'Azure': {
'machine_type': 'Standard_DS2',
'zone': 'East US 2'
}
},
'disk_spec': {
'Azure': {
'disk_type': 'remote_ssd',
'disk_size': 10, # disk size must be between
# 10 and 1024 GB.
'mount_point': MOUNT_POINT
}
}
}
}
}, MOUNT_POINT)
def testLocalSSD(self):
test_util.assertDiskMounts({
'vm_groups': {
'vm_group_1': {
'cloud': 'Azure',
'vm_spec': {
'Azure': {
'machine_type': 'Standard_G1',
'zone': 'East US 2'
}
},
'disk_spec': {
'Azure': {
'disk_type': 'local',
'mount_point': MOUNT_POINT
}
}
}
}
}, MOUNT_POINT)
|
<commit_before><commit_msg>Add Azure disk integration test
Test that we can actually create a disk in an Azure VM. This is very
similar to the GCP and AWS disk integration tests.<commit_after># Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for Azure scratch disks."""
import os
import unittest
from perfkitbenchmarker import pkb
from perfkitbenchmarker import test_util
MOUNT_POINT = '/scratch'
@unittest.skipUnless('PERFKIT_INTEGRATION' in os.environ,
'PERFKIT_INTEGRATION not in environment')
class AzureScratchDiskIntegrationTest(unittest.TestCase):
"""Integration tests for Azure disks.
Please see the section on integration testing in the README.
"""
def setUp(self):
pkb.SetUpPKB()
def testPremiumStorage(self):
test_util.assertDiskMounts({
'flags': {
'azure_storage_type': 'PLRS'
},
'vm_groups': {
'vm_group_1': {
'cloud': 'Azure',
'vm_spec': {
'Azure': {
'machine_type': 'Standard_DS2',
'zone': 'East US 2'
}
},
'disk_spec': {
'Azure': {
'disk_type': 'remote_ssd',
'disk_size': 10, # disk size must be between
# 10 and 1024 GB.
'mount_point': MOUNT_POINT
}
}
}
}
}, MOUNT_POINT)
def testLocalSSD(self):
test_util.assertDiskMounts({
'vm_groups': {
'vm_group_1': {
'cloud': 'Azure',
'vm_spec': {
'Azure': {
'machine_type': 'Standard_G1',
'zone': 'East US 2'
}
},
'disk_spec': {
'Azure': {
'disk_type': 'local',
'mount_point': MOUNT_POINT
}
}
}
}
}, MOUNT_POINT)
|
|
a79c82b58443082827d8817ec15ae1283e5c0e21
|
tests/test_string_template.py
|
tests/test_string_template.py
|
#!/usr/bin/env python
from __future__ import print_function
import unittest
import engines
HANDLE = 'string.Template'
class TestStringTemplate(unittest.TestCase):
def test_valid_engine(self):
self.assertIn(HANDLE, engines.engines)
engine = engines.engines[HANDLE]
assert issubclass(engine, engines.Engine)
def test_escape(self):
engine = engines.engines[HANDLE]
template = engine(
'Several escaped dollar signs:\n'
'$$ $$ $$$$$$\n',
)
result = template.apply({
'random': 'value',
'$': 'provocation',
})
self.assertMultiLineEqual(result,
'Several escaped dollar signs:\n'
'$ $ $$$\n'
)
def test_plain_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'$essen mit\n'
'$beilage.\n',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'Szegediner Gulasch mit\n'
'Kartoffeln.\n'
)
def test_curly_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'Szegediner Gulasch mit\n'
'Kartoffeln.\n'
)
def test_strict_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'$essen mit\n'
'${beilage}.\n',
)
self.assertRaises(Exception, template.apply, ({
'random': 'value',
}))
def test_tolerant_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'$essen mit\n'
'${beilage}.\n',
tolerant=True,
)
result = template.apply({
'random': 'value',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'$essen mit\n'
'${beilage}.\n'
)
if __name__ == '__main__':
unittest.main()
|
Add tests for string.Template engine.
|
Add tests for string.Template engine.
|
Python
|
mit
|
blubberdiblub/eztemplate
|
Add tests for string.Template engine.
|
#!/usr/bin/env python
from __future__ import print_function
import unittest
import engines
HANDLE = 'string.Template'
class TestStringTemplate(unittest.TestCase):
def test_valid_engine(self):
self.assertIn(HANDLE, engines.engines)
engine = engines.engines[HANDLE]
assert issubclass(engine, engines.Engine)
def test_escape(self):
engine = engines.engines[HANDLE]
template = engine(
'Several escaped dollar signs:\n'
'$$ $$ $$$$$$\n',
)
result = template.apply({
'random': 'value',
'$': 'provocation',
})
self.assertMultiLineEqual(result,
'Several escaped dollar signs:\n'
'$ $ $$$\n'
)
def test_plain_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'$essen mit\n'
'$beilage.\n',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'Szegediner Gulasch mit\n'
'Kartoffeln.\n'
)
def test_curly_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'Szegediner Gulasch mit\n'
'Kartoffeln.\n'
)
def test_strict_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'$essen mit\n'
'${beilage}.\n',
)
self.assertRaises(Exception, template.apply, ({
'random': 'value',
}))
def test_tolerant_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'$essen mit\n'
'${beilage}.\n',
tolerant=True,
)
result = template.apply({
'random': 'value',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'$essen mit\n'
'${beilage}.\n'
)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for string.Template engine.<commit_after>
|
#!/usr/bin/env python
from __future__ import print_function
import unittest
import engines
HANDLE = 'string.Template'
class TestStringTemplate(unittest.TestCase):
def test_valid_engine(self):
self.assertIn(HANDLE, engines.engines)
engine = engines.engines[HANDLE]
assert issubclass(engine, engines.Engine)
def test_escape(self):
engine = engines.engines[HANDLE]
template = engine(
'Several escaped dollar signs:\n'
'$$ $$ $$$$$$\n',
)
result = template.apply({
'random': 'value',
'$': 'provocation',
})
self.assertMultiLineEqual(result,
'Several escaped dollar signs:\n'
'$ $ $$$\n'
)
def test_plain_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'$essen mit\n'
'$beilage.\n',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'Szegediner Gulasch mit\n'
'Kartoffeln.\n'
)
def test_curly_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'Szegediner Gulasch mit\n'
'Kartoffeln.\n'
)
def test_strict_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'$essen mit\n'
'${beilage}.\n',
)
self.assertRaises(Exception, template.apply, ({
'random': 'value',
}))
def test_tolerant_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'$essen mit\n'
'${beilage}.\n',
tolerant=True,
)
result = template.apply({
'random': 'value',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'$essen mit\n'
'${beilage}.\n'
)
if __name__ == '__main__':
unittest.main()
|
Add tests for string.Template engine.#!/usr/bin/env python
from __future__ import print_function
import unittest
import engines
HANDLE = 'string.Template'
class TestStringTemplate(unittest.TestCase):
def test_valid_engine(self):
self.assertIn(HANDLE, engines.engines)
engine = engines.engines[HANDLE]
assert issubclass(engine, engines.Engine)
def test_escape(self):
engine = engines.engines[HANDLE]
template = engine(
'Several escaped dollar signs:\n'
'$$ $$ $$$$$$\n',
)
result = template.apply({
'random': 'value',
'$': 'provocation',
})
self.assertMultiLineEqual(result,
'Several escaped dollar signs:\n'
'$ $ $$$\n'
)
def test_plain_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'$essen mit\n'
'$beilage.\n',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'Szegediner Gulasch mit\n'
'Kartoffeln.\n'
)
def test_curly_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'Szegediner Gulasch mit\n'
'Kartoffeln.\n'
)
def test_strict_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'$essen mit\n'
'${beilage}.\n',
)
self.assertRaises(Exception, template.apply, ({
'random': 'value',
}))
def test_tolerant_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'$essen mit\n'
'${beilage}.\n',
tolerant=True,
)
result = template.apply({
'random': 'value',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'$essen mit\n'
'${beilage}.\n'
)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for string.Template engine.<commit_after>#!/usr/bin/env python
from __future__ import print_function
import unittest
import engines
HANDLE = 'string.Template'
class TestStringTemplate(unittest.TestCase):
def test_valid_engine(self):
self.assertIn(HANDLE, engines.engines)
engine = engines.engines[HANDLE]
assert issubclass(engine, engines.Engine)
def test_escape(self):
engine = engines.engines[HANDLE]
template = engine(
'Several escaped dollar signs:\n'
'$$ $$ $$$$$$\n',
)
result = template.apply({
'random': 'value',
'$': 'provocation',
})
self.assertMultiLineEqual(result,
'Several escaped dollar signs:\n'
'$ $ $$$\n'
)
def test_plain_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'$essen mit\n'
'$beilage.\n',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'Szegediner Gulasch mit\n'
'Kartoffeln.\n'
)
def test_curly_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'Szegediner Gulasch mit\n'
'Kartoffeln.\n'
)
def test_strict_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'$essen mit\n'
'${beilage}.\n',
)
self.assertRaises(Exception, template.apply, ({
'random': 'value',
}))
def test_tolerant_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'$essen mit\n'
'${beilage}.\n',
tolerant=True,
)
result = template.apply({
'random': 'value',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'$essen mit\n'
'${beilage}.\n'
)
if __name__ == '__main__':
unittest.main()
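The strict/tolerant split exercised above can be illustrated with the standard library alone; this is an editor's sketch and it assumes the engines wrapper maps tolerant=True onto Template.safe_substitute, which the commit itself does not confirm.
from string import Template

t = Template('Heute gibt es $essen mit ${beilage}.')
# tolerant behaviour: unknown placeholders are left untouched
print(t.safe_substitute(essen='Szegediner Gulasch'))
# strict behaviour: a missing identifier raises KeyError
try:
    t.substitute(essen='Szegediner Gulasch')
except KeyError as missing:
    print('missing identifier:', missing)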
|
|
22a96b46a5fdcd0359febdeb8372e6655087674d
|
tests/test_filters.py
|
tests/test_filters.py
|
from nose.tools import assert_raises
from helpers import TempEnvironmentHelper
class TestFilters(TempEnvironmentHelper):
    def test_pyscss(self):
        """This filter used to make trouble because it required
Environment.url and Environment.directory to be set.
"""
self.create_files({'foo': ''})
bundle = self.mkbundle('foo', filters='pyscss', output='out')
# By default we'd get an error, because the filter can't use
# any good defaults.
assert_raises(EnvironmentError, bundle.build)
        # If we set a directory/url pair, it works.
self.env.config['PYSCSS_STATIC_ROOT'] = 'a'
self.env.config['PYSCSS_STATIC_URL'] = 'b'
bundle.build()
|
Add a test for the PyScss filter.
|
Add a test for the PyScss filter.
The new webassets master adds configuration values that make the filter
usable with Flask-Assets.
|
Python
|
bsd-2-clause
|
miracle2k/flask-assets,0x1997/flask-assets,0x1997/flask-assets,miracle2k/flask-assets
|
Add a test for the PyScss filter.
The new webassets master adds configuration values that make the filter
usable with Flask-Assets.
|
from nose.tools import assert_raises
from helpers import TempEnvironmentHelper
class TestFilters(TempEnvironmentHelper):
    def test_pyscss(self):
        """This filter used to make trouble because it required
Environment.url and Environment.directory to be set.
"""
self.create_files({'foo': ''})
bundle = self.mkbundle('foo', filters='pyscss', output='out')
# By default we'd get an error, because the filter can't use
# any good defaults.
assert_raises(EnvironmentError, bundle.build)
        # If we set a directory/url pair, it works.
self.env.config['PYSCSS_STATIC_ROOT'] = 'a'
self.env.config['PYSCSS_STATIC_URL'] = 'b'
bundle.build()
|
<commit_before><commit_msg>Add a test for the PyScss filter.
The new webassets master adds configuration values that make the filter
usable with Flask-Assets.<commit_after>
|
from nose.tools import assert_raises
from helpers import TempEnvironmentHelper
class TestFilters(TempEnvironmentHelper):
    def test_pyscss(self):
        """This filter used to make trouble because it required
Environment.url and Environment.directory to be set.
"""
self.create_files({'foo': ''})
bundle = self.mkbundle('foo', filters='pyscss', output='out')
# By default we'd get an error, because the filter can't use
# any good defaults.
assert_raises(EnvironmentError, bundle.build)
        # If we set a directory/url pair, it works.
self.env.config['PYSCSS_STATIC_ROOT'] = 'a'
self.env.config['PYSCSS_STATIC_URL'] = 'b'
bundle.build()
|
Add a test for the PyScss filter.
The new webassets master adds configuration values that make the filter
usable with Flask-Assets.from nose.tools import assert_raises
from helpers import TempEnvironmentHelper
class TestFilters(TempEnvironmentHelper):
    def test_pyscss(self):
        """This filter used to make trouble because it required
Environment.url and Environment.directory to be set.
"""
self.create_files({'foo': ''})
bundle = self.mkbundle('foo', filters='pyscss', output='out')
# By default we'd get an error, because the filter can't use
# any good defaults.
assert_raises(EnvironmentError, bundle.build)
        # If we set a directory/url pair, it works.
self.env.config['PYSCSS_STATIC_ROOT'] = 'a'
self.env.config['PYSCSS_STATIC_URL'] = 'b'
bundle.build()
|
<commit_before><commit_msg>Add a test for the PyScss filter.
The new webassets master adds configuration values that make the filter
usable with Flask-Assets.<commit_after>from nose.tools import assert_raises
from helpers import TempEnvironmentHelper
class TestFilters(TempEnvironmentHelper):
    def test_pyscss(self):
        """This filter used to make trouble because it required
Environment.url and Environment.directory to be set.
"""
self.create_files({'foo': ''})
bundle = self.mkbundle('foo', filters='pyscss', output='out')
# By default we'd get an error, because the filter can't use
# any good defaults.
assert_raises(EnvironmentError, bundle.build)
        # If we set a directory/url pair, it works.
self.env.config['PYSCSS_STATIC_ROOT'] = 'a'
self.env.config['PYSCSS_STATIC_URL'] = 'b'
bundle.build()
|
|
b826571c35de75a62d23b2b92530508a9466b7d0
|
tests/test_simdata.py
|
tests/test_simdata.py
|
import nose
import cle
import os
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.path.join('..', '..', 'binaries', 'tests'))
def test_progname():
filename = os.path.join(test_location, 'x86_64', 'cat')
ld = cle.Loader(filename, auto_load_libs=False)
progname_ptr_symbol = ld.find_symbol('__progname')
progname_ptr = ld.memory.unpack_word(progname_ptr_symbol.rebased_addr)
nose.tools.assert_not_equal(progname_ptr, 0)
progname = ld.memory.load(progname_ptr, 8)
nose.tools.assert_equal(progname, b'program\0')
def test_got_relocation():
filename = os.path.join(test_location, 'x86_64', 'multiarch_main_main.o')
ld = cle.Loader(filename)
reloc = ld.main_object.relocs[1]
nose.tools.assert_equal(reloc.symbol.name, 'vex_failure_exit') # this should never fail
nose.tools.assert_equal(reloc.symbol.resolvedby.name, 'got.vex_failure_exit')
ptr = ld.memory.unpack_word(reloc.symbol.resolvedby.rebased_addr)
final_symbol = ld.find_symbol(ptr)
nose.tools.assert_is_not(final_symbol, None)
nose.tools.assert_equal(final_symbol.name, 'vex_failure_exit')
nose.tools.assert_true(final_symbol.is_extern)
if __name__ == '__main__':
test_progname()
test_got_relocation()
|
Add test for simdata and GOT relocations
|
Add test for simdata and GOT relocations
|
Python
|
bsd-2-clause
|
angr/cle
|
Add test for simdata and GOT relocations
|
import nose
import cle
import os
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.path.join('..', '..', 'binaries', 'tests'))
def test_progname():
filename = os.path.join(test_location, 'x86_64', 'cat')
ld = cle.Loader(filename, auto_load_libs=False)
progname_ptr_symbol = ld.find_symbol('__progname')
progname_ptr = ld.memory.unpack_word(progname_ptr_symbol.rebased_addr)
nose.tools.assert_not_equal(progname_ptr, 0)
progname = ld.memory.load(progname_ptr, 8)
nose.tools.assert_equal(progname, b'program\0')
def test_got_relocation():
filename = os.path.join(test_location, 'x86_64', 'multiarch_main_main.o')
ld = cle.Loader(filename)
reloc = ld.main_object.relocs[1]
nose.tools.assert_equal(reloc.symbol.name, 'vex_failure_exit') # this should never fail
nose.tools.assert_equal(reloc.symbol.resolvedby.name, 'got.vex_failure_exit')
ptr = ld.memory.unpack_word(reloc.symbol.resolvedby.rebased_addr)
final_symbol = ld.find_symbol(ptr)
nose.tools.assert_is_not(final_symbol, None)
nose.tools.assert_equal(final_symbol.name, 'vex_failure_exit')
nose.tools.assert_true(final_symbol.is_extern)
if __name__ == '__main__':
test_progname()
test_got_relocation()
|
<commit_before><commit_msg>Add test for simdata and GOT relocations<commit_after>
|
import nose
import cle
import os
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.path.join('..', '..', 'binaries', 'tests'))
def test_progname():
filename = os.path.join(test_location, 'x86_64', 'cat')
ld = cle.Loader(filename, auto_load_libs=False)
progname_ptr_symbol = ld.find_symbol('__progname')
progname_ptr = ld.memory.unpack_word(progname_ptr_symbol.rebased_addr)
nose.tools.assert_not_equal(progname_ptr, 0)
progname = ld.memory.load(progname_ptr, 8)
nose.tools.assert_equal(progname, b'program\0')
def test_got_relocation():
filename = os.path.join(test_location, 'x86_64', 'multiarch_main_main.o')
ld = cle.Loader(filename)
reloc = ld.main_object.relocs[1]
nose.tools.assert_equal(reloc.symbol.name, 'vex_failure_exit') # this should never fail
nose.tools.assert_equal(reloc.symbol.resolvedby.name, 'got.vex_failure_exit')
ptr = ld.memory.unpack_word(reloc.symbol.resolvedby.rebased_addr)
final_symbol = ld.find_symbol(ptr)
nose.tools.assert_is_not(final_symbol, None)
nose.tools.assert_equal(final_symbol.name, 'vex_failure_exit')
nose.tools.assert_true(final_symbol.is_extern)
if __name__ == '__main__':
test_progname()
test_got_relocation()
|
Add test for simdata and GOT relocationsimport nose
import cle
import os
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.path.join('..', '..', 'binaries', 'tests'))
def test_progname():
filename = os.path.join(test_location, 'x86_64', 'cat')
ld = cle.Loader(filename, auto_load_libs=False)
progname_ptr_symbol = ld.find_symbol('__progname')
progname_ptr = ld.memory.unpack_word(progname_ptr_symbol.rebased_addr)
nose.tools.assert_not_equal(progname_ptr, 0)
progname = ld.memory.load(progname_ptr, 8)
nose.tools.assert_equal(progname, b'program\0')
def test_got_relocation():
filename = os.path.join(test_location, 'x86_64', 'multiarch_main_main.o')
ld = cle.Loader(filename)
reloc = ld.main_object.relocs[1]
nose.tools.assert_equal(reloc.symbol.name, 'vex_failure_exit') # this should never fail
nose.tools.assert_equal(reloc.symbol.resolvedby.name, 'got.vex_failure_exit')
ptr = ld.memory.unpack_word(reloc.symbol.resolvedby.rebased_addr)
final_symbol = ld.find_symbol(ptr)
nose.tools.assert_is_not(final_symbol, None)
nose.tools.assert_equal(final_symbol.name, 'vex_failure_exit')
nose.tools.assert_true(final_symbol.is_extern)
if __name__ == '__main__':
test_progname()
test_got_relocation()
|
<commit_before><commit_msg>Add test for simdata and GOT relocations<commit_after>import nose
import cle
import os
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.path.join('..', '..', 'binaries', 'tests'))
def test_progname():
filename = os.path.join(test_location, 'x86_64', 'cat')
ld = cle.Loader(filename, auto_load_libs=False)
progname_ptr_symbol = ld.find_symbol('__progname')
progname_ptr = ld.memory.unpack_word(progname_ptr_symbol.rebased_addr)
nose.tools.assert_not_equal(progname_ptr, 0)
progname = ld.memory.load(progname_ptr, 8)
nose.tools.assert_equal(progname, b'program\0')
def test_got_relocation():
filename = os.path.join(test_location, 'x86_64', 'multiarch_main_main.o')
ld = cle.Loader(filename)
reloc = ld.main_object.relocs[1]
nose.tools.assert_equal(reloc.symbol.name, 'vex_failure_exit') # this should never fail
nose.tools.assert_equal(reloc.symbol.resolvedby.name, 'got.vex_failure_exit')
ptr = ld.memory.unpack_word(reloc.symbol.resolvedby.rebased_addr)
final_symbol = ld.find_symbol(ptr)
nose.tools.assert_is_not(final_symbol, None)
nose.tools.assert_equal(final_symbol.name, 'vex_failure_exit')
nose.tools.assert_true(final_symbol.is_extern)
if __name__ == '__main__':
test_progname()
test_got_relocation()
|
|
abb2bf0cfff38a94392d32a68cec912da09f2f79
|
test/part_gpt.py
|
test/part_gpt.py
|
#!/usr/bin/env python
#
# Windows Azure Linux Agent
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Implements parts of RFC 2131, 1541, 1497 and
# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx
# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx
#
import subprocess
if __name__ == '__main__':
    subprocess.call(["umount", '/mnt/resource'])
    subprocess.call(["umount", '/mnt'])
    subprocess.call(['parted', '/dev/sdb', 'print'])
    subprocess.call(['parted', '/dev/sdb', 'rm', '1'])
    subprocess.call(['parted', '/dev/sdb', 'mklabel', 'gpt'])
    subprocess.call(['parted', '/dev/sdb', 'mkpart', 'primary', '0%', '50%'])
    subprocess.call(['parted', '/dev/sdb', 'mkpart', 'primary', '50%', '100%'])
|
Add test script to create gpt partition for test
|
Add test script to create gpt partition for test
|
Python
|
apache-2.0
|
yuezh/WALinuxAgent,thomas1206/WALinuxAgent,yuezh/WALinuxAgent,lizzha/WALinuxAgent,imikushin/WALinuxAgent,jerickso/WALinuxAgent,karataliu/WALinuxAgent,SuperScottz/WALinuxAgent,AbelHu/WALinuxAgent,AbelHu/WALinuxAgent,thomas1206/WALinuxAgent,SuperScottz/WALinuxAgent,fieryorc/WALinuxAgent,jerickso/WALinuxAgent,lizzha/WALinuxAgent,ryanmiao/WALinuxAgent,fieryorc/WALinuxAgent,karataliu/WALinuxAgent,imikushin/WALinuxAgent,ryanmiao/WALinuxAgent
|
Add test script to create gpt partition for test
|
#!/usr/bin/env python
#
# Windows Azure Linux Agent
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Implements parts of RFC 2131, 1541, 1497 and
# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx
# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx
#
import subprocess
if __name__ == '__main__':
    subprocess.call(["umount", '/mnt/resource'])
    subprocess.call(["umount", '/mnt'])
    subprocess.call(['parted', '/dev/sdb', 'print'])
    subprocess.call(['parted', '/dev/sdb', 'rm', '1'])
    subprocess.call(['parted', '/dev/sdb', 'mklabel', 'gpt'])
    subprocess.call(['parted', '/dev/sdb', 'mkpart', 'primary', '0%', '50%'])
    subprocess.call(['parted', '/dev/sdb', 'mkpart', 'primary', '50%', '100%'])
|
<commit_before><commit_msg>Add test script to create gpt partition for test<commit_after>
|
#!/usr/bin/env python
#
# Windows Azure Linux Agent
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Implements parts of RFC 2131, 1541, 1497 and
# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx
# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx
#
import subprocess
if __name__ == '__main__':
    subprocess.call(["umount", '/mnt/resource'])
    subprocess.call(["umount", '/mnt'])
    subprocess.call(['parted', '/dev/sdb', 'print'])
    subprocess.call(['parted', '/dev/sdb', 'rm', '1'])
    subprocess.call(['parted', '/dev/sdb', 'mklabel', 'gpt'])
    subprocess.call(['parted', '/dev/sdb', 'mkpart', 'primary', '0%', '50%'])
    subprocess.call(['parted', '/dev/sdb', 'mkpart', 'primary', '50%', '100%'])
|
Add test script to create gpt partition for test#!/usr/bin/env python
#
# Windows Azure Linux Agent
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Implements parts of RFC 2131, 1541, 1497 and
# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx
# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx
#
import subprocess
if __name__ == '__main__':
    subprocess.call(["umount", '/mnt/resource'])
    subprocess.call(["umount", '/mnt'])
    subprocess.call(['parted', '/dev/sdb', 'print'])
    subprocess.call(['parted', '/dev/sdb', 'rm', '1'])
    subprocess.call(['parted', '/dev/sdb', 'mklabel', 'gpt'])
    subprocess.call(['parted', '/dev/sdb', 'mkpart', 'primary', '0%', '50%'])
    subprocess.call(['parted', '/dev/sdb', 'mkpart', 'primary', '50%', '100%'])
|
<commit_before><commit_msg>Add test script to create gpt partition for test<commit_after>#!/usr/bin/env python
#
# Windows Azure Linux Agent
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Implements parts of RFC 2131, 1541, 1497 and
# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx
# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx
#
import subprocess
if __name__ == '__main__':
    subprocess.call(["umount", '/mnt/resource'])
    subprocess.call(["umount", '/mnt'])
    subprocess.call(['parted', '/dev/sdb', 'print'])
    subprocess.call(['parted', '/dev/sdb', 'rm', '1'])
    subprocess.call(['parted', '/dev/sdb', 'mklabel', 'gpt'])
    subprocess.call(['parted', '/dev/sdb', 'mkpart', 'primary', '0%', '50%'])
    subprocess.call(['parted', '/dev/sdb', 'mkpart', 'primary', '50%', '100%'])
|
|
61b7ccb6b48e8894ab9212d88c58a49423aacc33
|
nap/utils/ripper.py
|
nap/utils/ripper.py
|
'''
Extremely light-weight serialiser for very simple cases.
'''
from operator import attrgetter
from collections import namedtuple
class Ripper(object):
def __init__(self, *args, **kwargs):
for arg in args:
kwargs.setdefault(arg, arg)
        self.getter = attrgetter(*kwargs.values())
self.tup = namedtuple('tup', kwargs.keys())
def __call__(self, obj):
return self.tup._make(self.getter(obj))._asdict()
|
Add Ripper class to utils
|
Add Ripper class to utils
|
Python
|
bsd-3-clause
|
limbera/django-nap
|
Add Ripper class to utils
|
'''
Extremely light-weight serialiser for very simple cases.
'''
from operator import attrgetter
from collections import namedtuple
class Ripper(object):
def __init__(self, *args, **kwargs):
for arg in args:
kwargs.setdefault(arg, arg)
        self.getter = attrgetter(*kwargs.values())
self.tup = namedtuple('tup', kwargs.keys())
def __call__(self, obj):
return self.tup._make(self.getter(obj))._asdict()
|
<commit_before><commit_msg>Add Ripper class to utils<commit_after>
|
'''
Extremely light-weight serialiser for very simple cases.
'''
from operator import attrgetter
from collections import namedtuple
class Ripper(object):
def __init__(self, *args, **kwargs):
for arg in args:
kwargs.setdefault(arg, arg)
        self.getter = attrgetter(*kwargs.values())
self.tup = namedtuple('tup', kwargs.keys())
def __call__(self, obj):
return self.tup._make(self.getter(obj))._asdict()
|
Add Ripper class to utils'''
Extremely light-weight serialiser for very simple cases.
'''
from operator import attrgetter
from collections import namedtuple
class Ripper(object):
def __init__(self, *args, **kwargs):
for arg in args:
kwargs.setdefault(arg, arg)
        self.getter = attrgetter(*kwargs.values())
self.tup = namedtuple('tup', kwargs.keys())
def __call__(self, obj):
return self.tup._make(self.getter(obj))._asdict()
|
<commit_before><commit_msg>Add Ripper class to utils<commit_after>'''
Extremely light-weight serialiser for very simple cases.
'''
from operator import attrgetter
from collections import namedtuple
class Ripper(object):
def __init__(self, *args, **kwargs):
for arg in args:
kwargs.setdefault(arg, arg)
        self.getter = attrgetter(*kwargs.values())
self.tup = namedtuple('tup', kwargs.keys())
def __call__(self, obj):
return self.tup._make(self.getter(obj))._asdict()
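A hypothetical usage sketch for the Ripper above (the Book type and its field names are illustrative, not from the commit, and the Ripper class is assumed to be in scope): positional names are ripped under their own name, keyword arguments rename the attribute in the resulting mapping.
from collections import namedtuple

Book = namedtuple('Book', ['title', 'isbn', 'author'])
rip = Ripper('title', 'isbn', written_by='author')
print(rip(Book('Dune', '978-0441013593', 'Frank Herbert')))
# prints an OrderedDict with the keys title, isbn and written_by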
|
|
739be6f9d80b95fc4c0918f64be8438eb04736a1
|
manage.py
|
manage.py
|
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import db, create_app
from app.models import User, BucketList, Item
app = create_app("development")
manager = Manager(app)
migrate = Migrate(app, db)
@manager.command
def createdb():
db.create_all()
print("database tables created successfully")
@manager.command
def dropdb():
db.drop_all()
manager.add_command('db', MigrateCommand)
if __name__ == "__main__":
manager.run()
|
Add database migrations and creation script
|
Add database migrations and creation script
|
Python
|
mit
|
brayoh/bucket-list-api
|
Add database migrations and creation script
|
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import db, create_app
from app.models import User, BucketList, Item
app = create_app("development")
manager = Manager(app)
migrate = Migrate(app, db)
@manager.command
def createdb():
db.create_all()
print("database tables created successfully")
@manager.command
def dropdb():
db.drop_all()
manager.add_command('db', MigrateCommand)
if __name__ == "__main__":
manager.run()
|
<commit_before><commit_msg>Add database migrations and creation script<commit_after>
|
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import db, create_app
from app.models import User, BucketList, Item
app = create_app("development")
manager = Manager(app)
migrate = Migrate(app, db)
@manager.command
def createdb():
db.create_all()
print("database tables created successfully")
@manager.command
def dropdb():
db.drop_all()
manager.add_command('db', MigrateCommand)
if __name__ == "__main__":
manager.run()
|
Add database migrations and creation scriptfrom flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import db, create_app
from app.models import User, BucketList, Item
app = create_app("development")
manager = Manager(app)
migrate = Migrate(app, db)
@manager.command
def createdb():
db.create_all()
print("database tables created successfully")
@manager.command
def dropdb():
db.drop_all()
manager.add_command('db', MigrateCommand)
if __name__ == "__main__":
manager.run()
|
<commit_before><commit_msg>Add database migrations and creation script<commit_after>from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import db, create_app
from app.models import User, BucketList, Item
app = create_app("development")
manager = Manager(app)
migrate = Migrate(app, db)
@manager.command
def createdb():
db.create_all()
print("database tables created successfully")
@manager.command
def dropdb():
db.drop_all()
manager.add_command('db', MigrateCommand)
if __name__ == "__main__":
manager.run()
|
|
f61ad947c8fc38201da452d2cfd7d4ce9540912f
|
scripts/export_glclient_error_codes.py
|
scripts/export_glclient_error_codes.py
|
import inspect
import string
import sys
import os
globaleaks_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
sys.path.append(globaleaks_path)
from globaleaks.rest import errors
def return_exception(klass):
reason_string = ""
arguments = inspect.getargspec(klass.__init__).args
arguments.remove("self")
if len(arguments) > 0:
dummy_args = []
for _ in arguments:
dummy_args.append("REPLACE_ME")
kinstance = klass(*dummy_args)
reason_string = kinstance.reason
else:
reason_string = klass.reason
return {klass.error_code: reason_string}
exceptions = []
for attr in dir(errors):
klass = getattr(errors, attr)
if inspect.isclass(klass) and issubclass(klass, errors.GLException):
exceptions.append(return_exception(klass))
switch_case = """
<div ng-switch on="error.code">
%s
</div>
"""
switch_cases = ""
for exception in exceptions:
code, exception_str = exception.items()[0]
exception_strings = ""
translate = '{{ "%s" | translate }}'
argument_count = len(exception_str.split("REPLACE_ME")) - 1
idx = 0
for reason_string in exception_str.split("REPLACE_ME"):
reason_string = reason_string.strip()
skip = False
        if reason_string == "":
skip = True
if not skip and any(c in string.ascii_letters for c in reason_string):
exception_strings += translate % reason_string
elif not skip:
exception_strings += reason_string
if argument_count > idx:
exception_strings += "{{error.arguments[%s]}}" % idx
idx += 1
switch_cases += '<div ng-switch-when="%s">\n%s\n</div>\n\n' % (code, exception_strings)
print switch_case % switch_cases
|
Add script for generating error code handling in GLClient
|
Add script for generating error code handling in GLClient
|
Python
|
agpl-3.0
|
vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks
|
Add script for generating error code handling in GLClient
|
import inspect
import string
import sys
import os
globaleaks_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
sys.path.append(globaleaks_path)
from globaleaks.rest import errors
def return_exception(klass):
reason_string = ""
arguments = inspect.getargspec(klass.__init__).args
arguments.remove("self")
if len(arguments) > 0:
dummy_args = []
for _ in arguments:
dummy_args.append("REPLACE_ME")
kinstance = klass(*dummy_args)
reason_string = kinstance.reason
else:
reason_string = klass.reason
return {klass.error_code: reason_string}
exceptions = []
for attr in dir(errors):
klass = getattr(errors, attr)
if inspect.isclass(klass) and issubclass(klass, errors.GLException):
exceptions.append(return_exception(klass))
switch_case = """
<div ng-switch on="error.code">
%s
</div>
"""
switch_cases = ""
for exception in exceptions:
code, exception_str = exception.items()[0]
exception_strings = ""
translate = '{{ "%s" | translate }}'
argument_count = len(exception_str.split("REPLACE_ME")) - 1
idx = 0
for reason_string in exception_str.split("REPLACE_ME"):
reason_string = reason_string.strip()
skip = False
        if reason_string == "":
skip = True
if not skip and any(c in string.ascii_letters for c in reason_string):
exception_strings += translate % reason_string
elif not skip:
exception_strings += reason_string
if argument_count > idx:
exception_strings += "{{error.arguments[%s]}}" % idx
idx += 1
switch_cases += '<div ng-switch-when="%s">\n%s\n</div>\n\n' % (code, exception_strings)
print switch_case % switch_cases
|
<commit_before><commit_msg>Add script for generating error code handling in GLCLient<commit_after>
|
import inspect
import string
import sys
import os
globaleaks_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
sys.path.append(globaleaks_path)
from globaleaks.rest import errors
def return_exception(klass):
reason_string = ""
arguments = inspect.getargspec(klass.__init__).args
arguments.remove("self")
if len(arguments) > 0:
dummy_args = []
for _ in arguments:
dummy_args.append("REPLACE_ME")
kinstance = klass(*dummy_args)
reason_string = kinstance.reason
else:
reason_string = klass.reason
return {klass.error_code: reason_string}
exceptions = []
for attr in dir(errors):
klass = getattr(errors, attr)
if inspect.isclass(klass) and issubclass(klass, errors.GLException):
exceptions.append(return_exception(klass))
switch_case = """
<div ng-switch on="error.code">
%s
</div>
"""
switch_cases = ""
for exception in exceptions:
code, exception_str = exception.items()[0]
exception_strings = ""
translate = '{{ "%s" | translate }}'
argument_count = len(exception_str.split("REPLACE_ME")) - 1
idx = 0
for reason_string in exception_str.split("REPLACE_ME"):
reason_string = reason_string.strip()
skip = False
        if reason_string == "":
skip = True
if not skip and any(c in string.ascii_letters for c in reason_string):
exception_strings += translate % reason_string
elif not skip:
exception_strings += reason_string
if argument_count > idx:
exception_strings += "{{error.arguments[%s]}}" % idx
idx += 1
switch_cases += '<div ng-switch-when="%s">\n%s\n</div>\n\n' % (code, exception_strings)
print switch_case % switch_cases
|
Add script for generating error code handling in GLClientimport inspect
import string
import sys
import os
globaleaks_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
sys.path.append(globaleaks_path)
from globaleaks.rest import errors
def return_exception(klass):
reason_string = ""
arguments = inspect.getargspec(klass.__init__).args
arguments.remove("self")
if len(arguments) > 0:
dummy_args = []
for _ in arguments:
dummy_args.append("REPLACE_ME")
kinstance = klass(*dummy_args)
reason_string = kinstance.reason
else:
reason_string = klass.reason
return {klass.error_code: reason_string}
exceptions = []
for attr in dir(errors):
klass = getattr(errors, attr)
if inspect.isclass(klass) and issubclass(klass, errors.GLException):
exceptions.append(return_exception(klass))
switch_case = """
<div ng-switch on="error.code">
%s
</div>
"""
switch_cases = ""
for exception in exceptions:
code, exception_str = exception.items()[0]
exception_strings = ""
translate = '{{ "%s" | translate }}'
argument_count = len(exception_str.split("REPLACE_ME")) - 1
idx = 0
for reason_string in exception_str.split("REPLACE_ME"):
reason_string = reason_string.strip()
skip = False
        if reason_string == "":
skip = True
if not skip and any(c in string.ascii_letters for c in reason_string):
exception_strings += translate % reason_string
elif not skip:
exception_strings += reason_string
if argument_count > idx:
exception_strings += "{{error.arguments[%s]}}" % idx
idx += 1
switch_cases += '<div ng-switch-when="%s">\n%s\n</div>\n\n' % (code, exception_strings)
print switch_case % switch_cases
|
<commit_before><commit_msg>Add script for generating error code handling in GLClient<commit_after>import inspect
import string
import sys
import os
globaleaks_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
sys.path.append(globaleaks_path)
from globaleaks.rest import errors
def return_exception(klass):
reason_string = ""
arguments = inspect.getargspec(klass.__init__).args
arguments.remove("self")
if len(arguments) > 0:
dummy_args = []
for _ in arguments:
dummy_args.append("REPLACE_ME")
kinstance = klass(*dummy_args)
reason_string = kinstance.reason
else:
reason_string = klass.reason
return {klass.error_code: reason_string}
exceptions = []
for attr in dir(errors):
klass = getattr(errors, attr)
if inspect.isclass(klass) and issubclass(klass, errors.GLException):
exceptions.append(return_exception(klass))
switch_case = """
<div ng-switch on="error.code">
%s
</div>
"""
switch_cases = ""
for exception in exceptions:
code, exception_str = exception.items()[0]
exception_strings = ""
translate = '{{ "%s" | translate }}'
argument_count = len(exception_str.split("REPLACE_ME")) - 1
idx = 0
for reason_string in exception_str.split("REPLACE_ME"):
reason_string = reason_string.strip()
skip = False
        if reason_string == "":
skip = True
if not skip and any(c in string.ascii_letters for c in reason_string):
exception_strings += translate % reason_string
elif not skip:
exception_strings += reason_string
if argument_count > idx:
exception_strings += "{{error.arguments[%s]}}" % idx
idx += 1
switch_cases += '<div ng-switch-when="%s">\n%s\n</div>\n\n' % (code, exception_strings)
print switch_case % switch_cases
|
|
0b95e92c41cf9a0a266912e9db081aa7f6bc99d7
|
scripts/lwtnn-test-keras-functional.py
|
scripts/lwtnn-test-keras-functional.py
|
#!/usr/bin/env python3
"""
Run Keras as a compainion to lwtnn-test-lightweight-graph
"""
_help_arch_file = "NN archetecture file from Keras"
_help_vars_file = "Variable description file"
_help_hdf5_file = "Weights file from Keras"
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from numpy import linspace
import numpy as np
import json
def _get_args():
parser = ArgumentParser(
description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('archetecture_file', help=_help_arch_file)
parser.add_argument('variables_file', help=_help_vars_file)
parser.add_argument('hdf5_file', help=_help_hdf5_file)
return parser.parse_args()
def run():
args = _get_args()
# keras loads slow, do the loading here
from keras.models import model_from_json
with open(args.archetecture_file) as arch:
model = model_from_json(''.join(arch.readlines()))
model.load_weights(args.hdf5_file)
with open(args.variables_file) as variables_file:
inputs = json.loads(''.join(variables_file.readlines()))
full_test_pattern = []
for in_node, in_spec in zip(model.inputs, inputs['inputs']):
n_inputs = in_node.shape[1]
assert n_inputs == len(in_spec['variables'])
test_pattern = linspace(-1,1,n_inputs)[None,:]
full_test_pattern.append(test_pattern)
outputs = model.predict(full_test_pattern)
for out_node, out_spec in zip(outputs, inputs['outputs']):
out_pairs = sorted(zip(out_spec['labels'], out_node))
print('{}:'.format(out_spec['name']))
for name, val in out_pairs:
print('{} {}'.format(name, val))
if __name__ == '__main__':
run()
|
Add a script to test keras functional models
|
Add a script to test keras functional models
We have a C++ script to test functional models using lwtnn, this adds
a short script to test the same model in Keras. At the moment it only
tests a dummy "ramp" pattern, i.e. np.linspace(-1, 1, n_features).
|
Python
|
mit
|
lwtnn/lwtnn,lwtnn/lwtnn,lwtnn/lwtnn
|
Add a script to test keras functional models
We have a C++ script to test functional models using lwtnn, this adds
a short script to test the same model in Keras. At the moment it only
tests a dummy "ramp" pattern, i.e. np.linspace(-1, 1, n_features).
|
#!/usr/bin/env python3
"""
Run Keras as a compainion to lwtnn-test-lightweight-graph
"""
_help_arch_file = "NN archetecture file from Keras"
_help_vars_file = "Variable description file"
_help_hdf5_file = "Weights file from Keras"
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from numpy import linspace
import numpy as np
import json
def _get_args():
parser = ArgumentParser(
description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('archetecture_file', help=_help_arch_file)
parser.add_argument('variables_file', help=_help_vars_file)
parser.add_argument('hdf5_file', help=_help_hdf5_file)
return parser.parse_args()
def run():
args = _get_args()
# keras loads slow, do the loading here
from keras.models import model_from_json
with open(args.archetecture_file) as arch:
model = model_from_json(''.join(arch.readlines()))
model.load_weights(args.hdf5_file)
with open(args.variables_file) as variables_file:
inputs = json.loads(''.join(variables_file.readlines()))
full_test_pattern = []
for in_node, in_spec in zip(model.inputs, inputs['inputs']):
n_inputs = in_node.shape[1]
assert n_inputs == len(in_spec['variables'])
test_pattern = linspace(-1,1,n_inputs)[None,:]
full_test_pattern.append(test_pattern)
outputs = model.predict(full_test_pattern)
for out_node, out_spec in zip(outputs, inputs['outputs']):
out_pairs = sorted(zip(out_spec['labels'], out_node))
print('{}:'.format(out_spec['name']))
for name, val in out_pairs:
print('{} {}'.format(name, val))
if __name__ == '__main__':
run()
|
<commit_before><commit_msg>Add a script to test keras functional models
We have a C++ script to test functional models using lwtnn, this adds
a short script to test the same model in Keras. At the moment it only
tests a dummy "ramp" pattern, i.e. np.linspace(-1, 1, n_features).<commit_after>
|
#!/usr/bin/env python3
"""
Run Keras as a companion to lwtnn-test-lightweight-graph
"""
_help_arch_file = "NN archetecture file from Keras"
_help_vars_file = "Variable description file"
_help_hdf5_file = "Weights file from Keras"
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from numpy import linspace
import numpy as np
import json
def _get_args():
parser = ArgumentParser(
description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('archetecture_file', help=_help_arch_file)
parser.add_argument('variables_file', help=_help_vars_file)
parser.add_argument('hdf5_file', help=_help_hdf5_file)
return parser.parse_args()
def run():
args = _get_args()
# keras loads slow, do the loading here
from keras.models import model_from_json
with open(args.archetecture_file) as arch:
model = model_from_json(''.join(arch.readlines()))
model.load_weights(args.hdf5_file)
with open(args.variables_file) as variables_file:
inputs = json.loads(''.join(variables_file.readlines()))
full_test_pattern = []
for in_node, in_spec in zip(model.inputs, inputs['inputs']):
n_inputs = in_node.shape[1]
assert n_inputs == len(in_spec['variables'])
test_pattern = linspace(-1,1,n_inputs)[None,:]
full_test_pattern.append(test_pattern)
outputs = model.predict(full_test_pattern)
for out_node, out_spec in zip(outputs, inputs['outputs']):
out_pairs = sorted(zip(out_spec['labels'], out_node))
print('{}:'.format(out_spec['name']))
for name, val in out_pairs:
print('{} {}'.format(name, val))
if __name__ == '__main__':
run()
|
Add a script to test keras functional models
We have a C++ script to test functional models using lwtnn, this adds
a short script to test the same model in Keras. At the moment it only
tests a dummy "ramp" pattern, i.e. np.linspace(-1, 1, n_features).#!/usr/bin/env python3
"""
Run Keras as a companion to lwtnn-test-lightweight-graph
"""
_help_arch_file = "NN archetecture file from Keras"
_help_vars_file = "Variable description file"
_help_hdf5_file = "Weights file from Keras"
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from numpy import linspace
import numpy as np
import json
def _get_args():
parser = ArgumentParser(
description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('archetecture_file', help=_help_arch_file)
parser.add_argument('variables_file', help=_help_vars_file)
parser.add_argument('hdf5_file', help=_help_hdf5_file)
return parser.parse_args()
def run():
args = _get_args()
# keras loads slow, do the loading here
from keras.models import model_from_json
with open(args.archetecture_file) as arch:
model = model_from_json(''.join(arch.readlines()))
model.load_weights(args.hdf5_file)
with open(args.variables_file) as variables_file:
inputs = json.loads(''.join(variables_file.readlines()))
full_test_pattern = []
for in_node, in_spec in zip(model.inputs, inputs['inputs']):
n_inputs = in_node.shape[1]
assert n_inputs == len(in_spec['variables'])
test_pattern = linspace(-1,1,n_inputs)[None,:]
full_test_pattern.append(test_pattern)
outputs = model.predict(full_test_pattern)
for out_node, out_spec in zip(outputs, inputs['outputs']):
out_pairs = sorted(zip(out_spec['labels'], out_node))
print('{}:'.format(out_spec['name']))
for name, val in out_pairs:
print('{} {}'.format(name, val))
if __name__ == '__main__':
run()
|
<commit_before><commit_msg>Add a script to test keras functional models
We have a C++ script to test functional models using lwtnn, this adds
a short script to test the same model in Keras. At the moment it only
tests a dummy "ramp" pattern, i.e. np.linspace(-1, 1, n_features).<commit_after>#!/usr/bin/env python3
"""
Run Keras as a companion to lwtnn-test-lightweight-graph
"""
_help_arch_file = "NN archetecture file from Keras"
_help_vars_file = "Variable description file"
_help_hdf5_file = "Weights file from Keras"
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from numpy import linspace
import numpy as np
import json
def _get_args():
parser = ArgumentParser(
description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('archetecture_file', help=_help_arch_file)
parser.add_argument('variables_file', help=_help_vars_file)
parser.add_argument('hdf5_file', help=_help_hdf5_file)
return parser.parse_args()
def run():
args = _get_args()
# keras loads slow, do the loading here
from keras.models import model_from_json
with open(args.archetecture_file) as arch:
model = model_from_json(''.join(arch.readlines()))
model.load_weights(args.hdf5_file)
with open(args.variables_file) as variables_file:
inputs = json.loads(''.join(variables_file.readlines()))
full_test_pattern = []
for in_node, in_spec in zip(model.inputs, inputs['inputs']):
n_inputs = in_node.shape[1]
assert n_inputs == len(in_spec['variables'])
test_pattern = linspace(-1,1,n_inputs)[None,:]
full_test_pattern.append(test_pattern)
outputs = model.predict(full_test_pattern)
for out_node, out_spec in zip(outputs, inputs['outputs']):
out_pairs = sorted(zip(out_spec['labels'], out_node))
print('{}:'.format(out_spec['name']))
for name, val in out_pairs:
print('{} {}'.format(name, val))
if __name__ == '__main__':
run()
|
|
9e5b505cfa4c3f1bfca09588c14219fc1d71c009
|
modules/probablity_manipulation.py
|
modules/probablity_manipulation.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed May 3 12:12:53 2017
@author: daniele
"""
import numpy as np
def minmaxlog(value):
"""
Clamps the value as max(min(value, 1-10^{-15}), 10^{-15}), wrapped by the
natural logarithm. This is useful for calculating the cross-entropy on
(regulated) probabilities.
"""
return np.log(max(min(value, 1-10**(-15)), 10**(-15)))
def repeat_array(array_to_repeat, num_repeats):
"""
Helper function to oversample_array. Concatenates array_to_repeat to
itself, num_repeats times.
"""
return np.concatenate(tuple([array_to_repeat
for kk in range(num_repeats)]))
def agnosticize(probs, times):
"""
Average probability with completely agnostic probabilities, a
user-specified number of times.
Input:
probs: probabilities to average with agnostic probabilities
times: number of times to average
Returns:
mean_probs: the averaged probabilities
"""
agnostic_prob = np.full(len(probs), 1. / len(probs))
probs_to_average = np.concatenate((repeat_array([agnostic_prob], times),
[probs]))
mean_probs = average_probabilities(probs_to_average)
return mean_probs
def average_probabilities(list_of_probabilities):
"""
Convenience function that takes a list of probabilities of the form
[probabilities1, probabilities2,...] and returns their average probability.
"""
return np.mean(list_of_probabilities, axis=0)
def multiply_probabilities(list_of_probabilities):
"""
Multiplies label-probabilities together elementwise, and then normalizes
them. E.g. two input-probabilities, each with a single element,
[[[0.1, 0.2, 0.7]], [[0.8, 0.1, 0.1]]] gives [[0.47059, 0.11765, 0.41176]].
"""
multiplied_probs = reduce(lambda x, y: x*y, list_of_probabilities)
sum_probs = np.sum(multiplied_probs, axis=1)
normalized_probs = np.array([prob / psum
for prob, psum in zip(multiplied_probs,
sum_probs)])
return normalized_probs
def compute_loss(probabilities, labels):
"""
Computes the mean cross-entropy loss given probabilities and the correct
labels.
"""
vlog = np.vectorize(minmaxlog)
log_probas = vlog(probabilities)
loss = - np.mean(log_probas * labels)
return loss
|
Include module for probability manipulations
|
feat: Include module for probability manipulations
|
Python
|
mit
|
dangall/Kaggle-MobileODT-Cancer-Screening
|
feat: Include module for probability manipulations
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed May 3 12:12:53 2017
@author: daniele
"""
import numpy as np
def minmaxlog(value):
"""
Clamps the value as max(min(value, 1-10^{-15}), 10^{-15}), wrapped by the
natural logarithm. This is useful for calculating the cross-entropy on
(regulated) probabilities.
"""
return np.log(max(min(value, 1-10**(-15)), 10**(-15)))
def repeat_array(array_to_repeat, num_repeats):
"""
Helper function to oversample_array. Concatenates array_to_repeat to
itself, num_repeats times.
"""
return np.concatenate(tuple([array_to_repeat
for kk in range(num_repeats)]))
def agnosticize(probs, times):
"""
Average probability with completely agnostic probabilities, a
user-specified number of times.
Input:
probs: probabilities to average with agnostic probabilities
times: number of times to average
Returns:
mean_probs: the averaged probabilities
"""
agnostic_prob = np.full(len(probs), 1. / len(probs))
probs_to_average = np.concatenate((repeat_array([agnostic_prob], times),
[probs]))
mean_probs = average_probabilities(probs_to_average)
return mean_probs
def average_probabilities(list_of_probabilities):
"""
Convenience function that takes a list of probabilities of the form
[probabilities1, probabilities2,...] and returns their average probability.
"""
return np.mean(list_of_probabilities, axis=0)
def multiply_probabilities(list_of_probabilities):
"""
Multiplies label-probabilities together elementwise, and then normalizes
them. E.g. two input-probabilities, each with a single element,
[[[0.1, 0.2, 0.7]], [[0.8, 0.1, 0.1]]] gives [[0.47059, 0.11765, 0.41176]].
"""
multiplied_probs = reduce(lambda x, y: x*y, list_of_probabilities)
sum_probs = np.sum(multiplied_probs, axis=1)
normalized_probs = np.array([prob / psum
for prob, psum in zip(multiplied_probs,
sum_probs)])
return normalized_probs
def compute_loss(probabilities, labels):
"""
Computes the mean cross-entropy loss given probabilities and the correct
labels.
"""
vlog = np.vectorize(minmaxlog)
log_probas = vlog(probabilities)
loss = - np.mean(log_probas * labels)
return loss
|
<commit_before><commit_msg>feat: Include module for probability manipulations<commit_after>
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed May 3 12:12:53 2017
@author: daniele
"""
import numpy as np
def minmaxlog(value):
"""
Clamps the value as max(min(value, 1-10^{-15}), 10^{-15}), wrapped by the
natural logarithm. This is useful for calculating the cross-entropy on
(regulated) probabilities.
"""
return np.log(max(min(value, 1-10**(-15)), 10**(-15)))
def repeat_array(array_to_repeat, num_repeats):
"""
Helper function to oversample_array. Concatenates array_to_repeat to
itself, num_repeats times.
"""
return np.concatenate(tuple([array_to_repeat
for kk in range(num_repeats)]))
def agnosticize(probs, times):
"""
Average probability with completely agnostic probabilities, a
user-specified number of times.
Input:
probs: probabilities to average with agnostic probabilities
times: number of times to average
Returns:
mean_probs: the averaged probabilities
"""
agnostic_prob = np.full(len(probs), 1. / len(probs))
probs_to_average = np.concatenate((repeat_array([agnostic_prob], times),
[probs]))
mean_probs = average_probabilities(probs_to_average)
return mean_probs
def average_probabilities(list_of_probabilities):
"""
Convenience function that takes a list of probabilities of the form
[probabilities1, probabilities2,...] and returns their average probability.
"""
return np.mean(list_of_probabilities, axis=0)
def multiply_probabilities(list_of_probabilities):
"""
Multiplies label-probabilities together elementwise, and then normalizes
them. E.g. two input-probabilities, each with a single element,
[[[0.1, 0.2, 0.7]], [[0.8, 0.1, 0.1]]] gives [[0.47059, 0.11765, 0.41176]].
"""
multiplied_probs = reduce(lambda x, y: x*y, list_of_probabilities)
sum_probs = np.sum(multiplied_probs, axis=1)
normalized_probs = np.array([prob / psum
for prob, psum in zip(multiplied_probs,
sum_probs)])
return normalized_probs
def compute_loss(probabilities, labels):
"""
Computes the mean cross-entropy loss given probabilities and the correct
labels.
"""
vlog = np.vectorize(minmaxlog)
log_probas = vlog(probabilities)
loss = - np.mean(log_probas * labels)
return loss
|
feat: Include module for probability manipulations#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed May 3 12:12:53 2017
@author: daniele
"""
import numpy as np
def minmaxlog(value):
"""
Clamps the value as max(min(value, 1-10^{-15}), 10^{-15}), wrapped by the
natural logarithm. This is useful for calculating the cross-entropy on
(regulated) probabilities.
"""
return np.log(max(min(value, 1-10**(-15)), 10**(-15)))
def repeat_array(array_to_repeat, num_repeats):
"""
Helper function to oversample_array. Concatenates array_to_repeat to
itself, num_repeats times.
"""
return np.concatenate(tuple([array_to_repeat
for kk in range(num_repeats)]))
def agnosticize(probs, times):
"""
Average probability with completely agnostic probabilities, a
user-specified number of times.
Input:
probs: probabilities to average with agnostic probabilities
times: number of times to average
Returns:
mean_probs: the averaged probabilities
"""
agnostic_prob = np.full(len(probs), 1. / len(probs))
probs_to_average = np.concatenate((repeat_array([agnostic_prob], times),
[probs]))
mean_probs = average_probabilities(probs_to_average)
return mean_probs
def average_probabilities(list_of_probabilities):
"""
Convenience function that takes a list of probabilities of the form
[probabilities1, probabilities2,...] and returns their average probability.
"""
return np.mean(list_of_probabilities, axis=0)
def multiply_probabilities(list_of_probabilities):
"""
Multiplies label-probabilities together elementwise, and then normalizes
them. E.g. two input-probabilities, each with a single element,
[[[0.1, 0.2, 0.7]], [[0.8, 0.1, 0.1]]] gives [[0.47059, 0.11765, 0.41176]].
"""
multiplied_probs = reduce(lambda x, y: x*y, list_of_probabilities)
sum_probs = np.sum(multiplied_probs, axis=1)
normalized_probs = np.array([prob / psum
for prob, psum in zip(multiplied_probs,
sum_probs)])
return normalized_probs
def compute_loss(probabilities, labels):
"""
Computes the mean cross-entropy loss given probabilities and the correct
labels.
"""
vlog = np.vectorize(minmaxlog)
log_probas = vlog(probabilities)
loss = - np.mean(log_probas * labels)
return loss
|
<commit_before><commit_msg>feat: Include module for probability manipulations<commit_after>#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed May 3 12:12:53 2017
@author: daniele
"""
import numpy as np
def minmaxlog(value):
"""
Clamps the value as max(min(value, 1-10^{-15}), 10^{-15}), wrapped by the
natural logarithm. This is useful for calculating the cross-entropy on
(regulated) probabilities.
"""
return np.log(max(min(value, 1-10**(-15)), 10**(-15)))
def repeat_array(array_to_repeat, num_repeats):
"""
Helper function to oversample_array. Concatenates array_to_repeat to
itself, num_repeats times.
"""
return np.concatenate(tuple([array_to_repeat
for kk in range(num_repeats)]))
def agnosticize(probs, times):
"""
Average probability with completely agnostic probabilities, a
user-specified number of times.
Input:
probs: probabilities to average with agnostic probabilities
times: number of times to average
Returns:
mean_probs: the averaged probabilities
"""
agnostic_prob = np.full(len(probs), 1. / len(probs))
probs_to_average = np.concatenate((repeat_array([agnostic_prob], times),
[probs]))
mean_probs = average_probabilities(probs_to_average)
return mean_probs
def average_probabilities(list_of_probabilities):
"""
Convenience function that takes a list of probabilities of the form
[probabilities1, probabilities2,...] and returns their average probability.
"""
return np.mean(list_of_probabilities, axis=0)
def multiply_probabilities(list_of_probabilities):
"""
Multiplies label-probabilities together elementwise, and then normalizes
them. E.g. two input-probabilities, each with a single element,
[[[0.1, 0.2, 0.7]], [[0.8, 0.1, 0.1]]] gives [[0.47059, 0.11765, 0.41176]].
"""
multiplied_probs = reduce(lambda x, y: x*y, list_of_probabilities)
sum_probs = np.sum(multiplied_probs, axis=1)
normalized_probs = np.array([prob / psum
for prob, psum in zip(multiplied_probs,
sum_probs)])
return normalized_probs
def compute_loss(probabilities, labels):
"""
Computes the mean cross-entropy loss given probabilities and the correct
labels.
"""
vlog = np.vectorize(minmaxlog)
log_probas = vlog(probabilities)
loss = - np.mean(log_probas * labels)
return loss
|
|
ad7cd889a9346763baa802248ab92c771dbd9c6f
|
test/util/test_pivoted_cholesky.py
|
test/util/test_pivoted_cholesky.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import unittest
from torch.autograd import Variable
from gpytorch.utils import pivoted_cholesky, approx_equal
from gpytorch.kernels import RBFKernel
class TestPivotedCholesky(unittest.TestCase):
def test_pivoted_cholesky(self):
size = 100
train_x = Variable(torch.linspace(0, 1, size))
covar_matrix = RBFKernel()(train_x, train_x).data
piv_chol = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
covar_approx = piv_chol.t().matmul(piv_chol)
self.assertTrue(approx_equal(covar_approx, covar_matrix))
def test_solve(self):
size = 100
train_x = Variable(torch.linspace(0, 1, size))
covar_matrix = RBFKernel()(train_x, train_x).data
piv_chol = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
woodbury_factor = pivoted_cholesky.woodbury_factor(piv_chol, 1)
rhs_vector = torch.randn(100)
shifted_covar_matrix = covar_matrix + torch.eye(size)
real_solve = shifted_covar_matrix.inverse().matmul(rhs_vector)
approx_solve = pivoted_cholesky.woodbury_solve(rhs_vector, piv_chol, woodbury_factor, 1)
self.assertTrue(approx_equal(approx_solve, real_solve))
if __name__ == '__main__':
unittest.main()
|
Add unit test for pivoted cholesky
|
Add unit test for pivoted cholesky
|
Python
|
mit
|
jrg365/gpytorch,jrg365/gpytorch,jrg365/gpytorch
|
Add unit test for pivoted cholesky
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import unittest
from torch.autograd import Variable
from gpytorch.utils import pivoted_cholesky, approx_equal
from gpytorch.kernels import RBFKernel
class TestPivotedCholesky(unittest.TestCase):
def test_pivoted_cholesky(self):
size = 100
train_x = Variable(torch.linspace(0, 1, size))
covar_matrix = RBFKernel()(train_x, train_x).data
piv_chol = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
covar_approx = piv_chol.t().matmul(piv_chol)
self.assertTrue(approx_equal(covar_approx, covar_matrix))
def test_solve(self):
size = 100
train_x = Variable(torch.linspace(0, 1, size))
covar_matrix = RBFKernel()(train_x, train_x).data
piv_chol = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
woodbury_factor = pivoted_cholesky.woodbury_factor(piv_chol, 1)
rhs_vector = torch.randn(100)
shifted_covar_matrix = covar_matrix + torch.eye(size)
real_solve = shifted_covar_matrix.inverse().matmul(rhs_vector)
approx_solve = pivoted_cholesky.woodbury_solve(rhs_vector, piv_chol, woodbury_factor, 1)
self.assertTrue(approx_equal(approx_solve, real_solve))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for pivoted cholesky<commit_after>
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import unittest
from torch.autograd import Variable
from gpytorch.utils import pivoted_cholesky, approx_equal
from gpytorch.kernels import RBFKernel
class TestPivotedCholesky(unittest.TestCase):
def test_pivoted_cholesky(self):
size = 100
train_x = Variable(torch.linspace(0, 1, size))
covar_matrix = RBFKernel()(train_x, train_x).data
piv_chol = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
covar_approx = piv_chol.t().matmul(piv_chol)
self.assertTrue(approx_equal(covar_approx, covar_matrix))
def test_solve(self):
size = 100
train_x = Variable(torch.linspace(0, 1, size))
covar_matrix = RBFKernel()(train_x, train_x).data
piv_chol = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
woodbury_factor = pivoted_cholesky.woodbury_factor(piv_chol, 1)
rhs_vector = torch.randn(100)
shifted_covar_matrix = covar_matrix + torch.eye(size)
real_solve = shifted_covar_matrix.inverse().matmul(rhs_vector)
approx_solve = pivoted_cholesky.woodbury_solve(rhs_vector, piv_chol, woodbury_factor, 1)
self.assertTrue(approx_equal(approx_solve, real_solve))
if __name__ == '__main__':
unittest.main()
|
Add unit test for pivoted choleskyfrom __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import unittest
from torch.autograd import Variable
from gpytorch.utils import pivoted_cholesky, approx_equal
from gpytorch.kernels import RBFKernel
class TestPivotedCholesky(unittest.TestCase):
def test_pivoted_cholesky(self):
size = 100
train_x = Variable(torch.linspace(0, 1, size))
covar_matrix = RBFKernel()(train_x, train_x).data
piv_chol = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
covar_approx = piv_chol.t().matmul(piv_chol)
self.assertTrue(approx_equal(covar_approx, covar_matrix))
def test_solve(self):
size = 100
train_x = Variable(torch.linspace(0, 1, size))
covar_matrix = RBFKernel()(train_x, train_x).data
piv_chol = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
woodbury_factor = pivoted_cholesky.woodbury_factor(piv_chol, 1)
rhs_vector = torch.randn(100)
shifted_covar_matrix = covar_matrix + torch.eye(size)
real_solve = shifted_covar_matrix.inverse().matmul(rhs_vector)
approx_solve = pivoted_cholesky.woodbury_solve(rhs_vector, piv_chol, woodbury_factor, 1)
self.assertTrue(approx_equal(approx_solve, real_solve))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for pivoted cholesky<commit_after>from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import unittest
from torch.autograd import Variable
from gpytorch.utils import pivoted_cholesky, approx_equal
from gpytorch.kernels import RBFKernel
class TestPivotedCholesky(unittest.TestCase):
def test_pivoted_cholesky(self):
size = 100
train_x = Variable(torch.linspace(0, 1, size))
covar_matrix = RBFKernel()(train_x, train_x).data
piv_chol = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
covar_approx = piv_chol.t().matmul(piv_chol)
self.assertTrue(approx_equal(covar_approx, covar_matrix))
def test_solve(self):
size = 100
train_x = Variable(torch.linspace(0, 1, size))
covar_matrix = RBFKernel()(train_x, train_x).data
piv_chol = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
woodbury_factor = pivoted_cholesky.woodbury_factor(piv_chol, 1)
rhs_vector = torch.randn(100)
shifted_covar_matrix = covar_matrix + torch.eye(size)
real_solve = shifted_covar_matrix.inverse().matmul(rhs_vector)
approx_solve = pivoted_cholesky.woodbury_solve(rhs_vector, piv_chol, woodbury_factor, 1)
self.assertTrue(approx_equal(approx_solve, real_solve))
if __name__ == '__main__':
unittest.main()
|
|
8798a525cbb0c38f1c31a86dff81c6b2bae1ac42
|
pony_barn/build-compressor.py
|
pony_barn/build-compressor.py
|
import sys
from base import BaseBuild
from pony_build import client as pony
class PonyBuild(BaseBuild):
def __init__(self):
super(PonyBuild, self).__init__()
self.repo_url = "git://github.com/mintchaos/django_compressor.git"
self.name = "django_compressor"
self.required = ['django']
def define_commands(self):
self.commands = [ pony.GitClone(self.repo_url),
pony.TestCommand([self.context.python, 'tests/manage.py', 'test', '--settings', 'tests.settings'], name='run tests')
]
if __name__ == '__main__':
build = PonyBuild()
sys.exit(build.execute(sys.argv))
|
Add a build for django_compressor.
|
Add a build for django_compressor.
|
Python
|
mit
|
ericholscher/pony_barn,ericholscher/pony_barn
|
Add a build for django_compressor.
|
import sys
from base import BaseBuild
from pony_build import client as pony
class PonyBuild(BaseBuild):
def __init__(self):
super(PonyBuild, self).__init__()
self.repo_url = "git://github.com/mintchaos/django_compressor.git"
self.name = "django_compressor"
self.required = ['django']
def define_commands(self):
self.commands = [ pony.GitClone(self.repo_url),
pony.TestCommand([self.context.python, 'tests/manage.py', 'test', '--settings', 'tests.settings'], name='run tests')
]
if __name__ == '__main__':
build = PonyBuild()
sys.exit(build.execute(sys.argv))
|
<commit_before><commit_msg>Add a build for django_compressor.<commit_after>
|
import sys
from base import BaseBuild
from pony_build import client as pony
class PonyBuild(BaseBuild):
def __init__(self):
super(PonyBuild, self).__init__()
self.repo_url = "git://github.com/mintchaos/django_compressor.git"
self.name = "django_compressor"
self.required = ['django']
def define_commands(self):
self.commands = [ pony.GitClone(self.repo_url),
pony.TestCommand([self.context.python, 'tests/manage.py', 'test', '--settings', 'tests.settings'], name='run tests')
]
if __name__ == '__main__':
build = PonyBuild()
sys.exit(build.execute(sys.argv))
|
Add a build for django_compressor.import sys
from base import BaseBuild
from pony_build import client as pony
class PonyBuild(BaseBuild):
def __init__(self):
super(PonyBuild, self).__init__()
self.repo_url = "git://github.com/mintchaos/django_compressor.git"
self.name = "django_compressor"
self.required = ['django']
def define_commands(self):
self.commands = [ pony.GitClone(self.repo_url),
pony.TestCommand([self.context.python, 'tests/manage.py', 'test', '--settings', 'tests.settings'], name='run tests')
]
if __name__ == '__main__':
build = PonyBuild()
sys.exit(build.execute(sys.argv))
|
<commit_before><commit_msg>Add a build for django_compressor.<commit_after>import sys
from base import BaseBuild
from pony_build import client as pony
class PonyBuild(BaseBuild):
def __init__(self):
super(PonyBuild, self).__init__()
self.repo_url = "git://github.com/mintchaos/django_compressor.git"
self.name = "django_compressor"
self.required = ['django']
def define_commands(self):
self.commands = [ pony.GitClone(self.repo_url),
pony.TestCommand([self.context.python, 'tests/manage.py', 'test', '--settings', 'tests.settings'], name='run tests')
]
if __name__ == '__main__':
build = PonyBuild()
sys.exit(build.execute(sys.argv))
|
|
982182bef55925eb02e15e819fa7e6c41ba3c12f
|
demos/ant.core/02-capabilities-USB.py
|
demos/ant.core/02-capabilities-USB.py
|
"""
Interrogate stick for supported capabilities.
"""
import sys
from ant.core import driver
from ant.core import node
from config import *
# Initialize
stick = driver.USB2Driver(SERIAL, log=LOG, debug=DEBUG)
antnode = node.Node(stick)
antnode.start()
# Interrogate stick
# Note: This method will return immediately, as the stick's capabilities are
# interrogated on node initialization (node.start()) in order to set proper
# internal Node instance state.
capabilities = antnode.getCapabilities()
print 'Maximum channels:', capabilities[0]
print 'Maximum network keys:', capabilities[1]
print 'Standard options: %X' % capabilities[2][0]
print 'Advanced options: %X' % capabilities[2][1]
# Shutdown
antnode.stop()
|
Add an example of how to use the USB driver
|
Add an example of how to use the USB driver
|
Python
|
mit
|
mvillalba/python-ant,baderj/python-ant,ramunasd/python-ant,mch/python-ant,mch/python-ant,ramunasd/python-ant,tomwardill/python-ant,SamyCookie/python-ant,SamyCookie/python-ant,mvillalba/python-ant,baderj/python-ant
|
Add an example of how to use the USB driver
|
"""
Interrogate stick for supported capabilities.
"""
import sys
from ant.core import driver
from ant.core import node
from config import *
# Initialize
stick = driver.USB2Driver(SERIAL, log=LOG, debug=DEBUG)
antnode = node.Node(stick)
antnode.start()
# Interrogate stick
# Note: This method will return immediately, as the stick's capabilities are
# interrogated on node initialization (node.start()) in order to set proper
# internal Node instance state.
capabilities = antnode.getCapabilities()
print 'Maximum channels:', capabilities[0]
print 'Maximum network keys:', capabilities[1]
print 'Standard options: %X' % capabilities[2][0]
print 'Advanced options: %X' % capabilities[2][1]
# Shutdown
antnode.stop()
|
<commit_before><commit_msg>Add an example of how to use the USB driver<commit_after>
|
"""
Interrogate stick for supported capabilities.
"""
import sys
from ant.core import driver
from ant.core import node
from config import *
# Initialize
stick = driver.USB2Driver(SERIAL, log=LOG, debug=DEBUG)
antnode = node.Node(stick)
antnode.start()
# Interrogate stick
# Note: This method will return immediately, as the stick's capabilities are
# interrogated on node initialization (node.start()) in order to set proper
# internal Node instance state.
capabilities = antnode.getCapabilities()
print 'Maximum channels:', capabilities[0]
print 'Maximum network keys:', capabilities[1]
print 'Standard options: %X' % capabilities[2][0]
print 'Advanced options: %X' % capabilities[2][1]
# Shutdown
antnode.stop()
|
Add an example of how to use the USB driver"""
Interrogate stick for supported capabilities.
"""
import sys
from ant.core import driver
from ant.core import node
from config import *
# Initialize
stick = driver.USB2Driver(SERIAL, log=LOG, debug=DEBUG)
antnode = node.Node(stick)
antnode.start()
# Interrogate stick
# Note: This method will return immediately, as the stick's capabilities are
# interrogated on node initialization (node.start()) in order to set proper
# internal Node instance state.
capabilities = antnode.getCapabilities()
print 'Maximum channels:', capabilities[0]
print 'Maximum network keys:', capabilities[1]
print 'Standard options: %X' % capabilities[2][0]
print 'Advanced options: %X' % capabilities[2][1]
# Shutdown
antnode.stop()
|
<commit_before><commit_msg>Add an example of how to use the USB driver<commit_after>"""
Interrogate stick for supported capabilities.
"""
import sys
from ant.core import driver
from ant.core import node
from config import *
# Initialize
stick = driver.USB2Driver(SERIAL, log=LOG, debug=DEBUG)
antnode = node.Node(stick)
antnode.start()
# Interrogate stick
# Note: This method will return immediately, as the stick's capabilities are
# interrogated on node initialization (node.start()) in order to set proper
# internal Node instance state.
capabilities = antnode.getCapabilities()
print 'Maximum channels:', capabilities[0]
print 'Maximum network keys:', capabilities[1]
print 'Standard options: %X' % capabilities[2][0]
print 'Advanced options: %X' % capabilities[2][1]
# Shutdown
antnode.stop()
|
|
afcf211801e3b84dd6a1b1c069b7e020db053e46
|
scripts/top_couplings.py
|
scripts/top_couplings.py
|
#!/usr/bin/env python
import numpy as np
import optparse
def main():
parser = optparse.OptionParser(usage="%prog [options] coupling_matrix")
parser.add_option("-s", "--min-separation", type=int, default=7, help="Set the minimum sequence separation of pairs to be outputted [default: %default]")
parser.add_option("-n", "--num-contacts", type=int, default=30, help="Set the number of pairs to output [default: %default]")
opt, args = parser.parse_args()
if len(args) != 1:
parser.error("Need positional argument!")
# load coupling matrix
mat = np.loadtxt(args[0])
# find top-scoring pairs with sufficient separation
top = get_top_pairs(mat, opt.num_contacts, opt.min_separation)
print("#i\tj\tconfidence")
for i, j, coupling in zip(top[0], top[1], mat[top]):
print("{0}\t{1}\t{2}".format(i, j, coupling))
def get_top_pairs(mat, num_contacts, min_separation):
"""Get the top-scoring contacts"""
idx_delta = np.arange(mat.shape[1])[np.newaxis, :] - np.arange(mat.shape[0])[:, np.newaxis]
mask = idx_delta < min_separation
mat_masked = np.copy(mat)
mat_masked[mask] = float("-inf")
top = mat_masked.argsort(axis=None)[::-1][:(num_contacts)]
top = (top % mat.shape[0]).astype(np.uint16), np.floor(top / mat.shape[0]).astype(np.uint16)
return top
if __name__ == '__main__':
main()
|
Add script to extract top couplings
|
Add script to extract top couplings
|
Python
|
agpl-3.0
|
soedinglab/CCMpred,soedinglab/CCMpred,soedinglab/CCMpred,soedinglab/CCMpred
|
Add script to extract top couplings
|
#!/usr/bin/env python
import numpy as np
import optparse
def main():
parser = optparse.OptionParser(usage="%prog [options] coupling_matrix")
parser.add_option("-s", "--min-separation", type=int, default=7, help="Set the minimum sequence separation of pairs to be outputted [default: %default]")
parser.add_option("-n", "--num-contacts", type=int, default=30, help="Set the number of pairs to output [default: %default]")
opt, args = parser.parse_args()
if len(args) != 1:
parser.error("Need positional argument!")
# load coupling matrix
mat = np.loadtxt(args[0])
# find top-scoring pairs with sufficient separation
top = get_top_pairs(mat, opt.num_contacts, opt.min_separation)
print("#i\tj\tconfidence")
for i, j, coupling in zip(top[0], top[1], mat[top]):
print("{0}\t{1}\t{2}".format(i, j, coupling))
def get_top_pairs(mat, num_contacts, min_separation):
"""Get the top-scoring contacts"""
idx_delta = np.arange(mat.shape[1])[np.newaxis, :] - np.arange(mat.shape[0])[:, np.newaxis]
mask = idx_delta < min_separation
mat_masked = np.copy(mat)
mat_masked[mask] = float("-inf")
top = mat_masked.argsort(axis=None)[::-1][:(num_contacts)]
top = (top % mat.shape[0]).astype(np.uint16), np.floor(top / mat.shape[0]).astype(np.uint16)
return top
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to extract top couplings<commit_after>
|
#!/usr/bin/env python
import numpy as np
import optparse
def main():
parser = optparse.OptionParser(usage="%prog [options] coupling_matrix")
parser.add_option("-s", "--min-separation", type=int, default=7, help="Set the minimum sequence separation of pairs to be outputted [default: %default]")
parser.add_option("-n", "--num-contacts", type=int, default=30, help="Set the number of pairs to output [default: %default]")
opt, args = parser.parse_args()
if len(args) != 1:
parser.error("Need positional argument!")
# load coupling matrix
mat = np.loadtxt(args[0])
# find top-scoring pairs with sufficient separation
top = get_top_pairs(mat, opt.num_contacts, opt.min_separation)
print("#i\tj\tconfidence")
for i, j, coupling in zip(top[0], top[1], mat[top]):
print("{0}\t{1}\t{2}".format(i, j, coupling))
def get_top_pairs(mat, num_contacts, min_separation):
"""Get the top-scoring contacts"""
idx_delta = np.arange(mat.shape[1])[np.newaxis, :] - np.arange(mat.shape[0])[:, np.newaxis]
mask = idx_delta < min_separation
mat_masked = np.copy(mat)
mat_masked[mask] = float("-inf")
top = mat_masked.argsort(axis=None)[::-1][:(num_contacts)]
top = (top % mat.shape[0]).astype(np.uint16), np.floor(top / mat.shape[0]).astype(np.uint16)
return top
if __name__ == '__main__':
main()
|
Add script to extract top couplings#!/usr/bin/env python
import numpy as np
import optparse
def main():
parser = optparse.OptionParser(usage="%prog [options] coupling_matrix")
parser.add_option("-s", "--min-separation", type=int, default=7, help="Set the minimum sequence separation of pairs to be outputted [default: %default]")
parser.add_option("-n", "--num-contacts", type=int, default=30, help="Set the number of pairs to output [default: %default]")
opt, args = parser.parse_args()
if len(args) != 1:
parser.error("Need positional argument!")
# load coupling matrix
mat = np.loadtxt(args[0])
# find top-scoring pairs with sufficient separation
top = get_top_pairs(mat, opt.num_contacts, opt.min_separation)
print("#i\tj\tconfidence")
for i, j, coupling in zip(top[0], top[1], mat[top]):
print("{0}\t{1}\t{2}".format(i, j, coupling))
def get_top_pairs(mat, num_contacts, min_separation):
"""Get the top-scoring contacts"""
idx_delta = np.arange(mat.shape[1])[np.newaxis, :] - np.arange(mat.shape[0])[:, np.newaxis]
mask = idx_delta < min_separation
mat_masked = np.copy(mat)
mat_masked[mask] = float("-inf")
top = mat_masked.argsort(axis=None)[::-1][:(num_contacts)]
top = (top % mat.shape[0]).astype(np.uint16), np.floor(top / mat.shape[0]).astype(np.uint16)
return top
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to extract top couplings<commit_after>#!/usr/bin/env python
import numpy as np
import optparse
def main():
parser = optparse.OptionParser(usage="%prog [options] coupling_matrix")
parser.add_option("-s", "--min-separation", type=int, default=7, help="Set the minimum sequence separation of pairs to be outputted [default: %default]")
parser.add_option("-n", "--num-contacts", type=int, default=30, help="Set the number of pairs to output [default: %default]")
opt, args = parser.parse_args()
if len(args) != 1:
parser.error("Need positional argument!")
# load coupling matrix
mat = np.loadtxt(args[0])
# find top-scoring pairs with sufficient separation
top = get_top_pairs(mat, opt.num_contacts, opt.min_separation)
print("#i\tj\tconfidence")
for i, j, coupling in zip(top[0], top[1], mat[top]):
print("{0}\t{1}\t{2}".format(i, j, coupling))
def get_top_pairs(mat, num_contacts, min_separation):
"""Get the top-scoring contacts"""
idx_delta = np.arange(mat.shape[1])[np.newaxis, :] - np.arange(mat.shape[0])[:, np.newaxis]
mask = idx_delta < min_separation
mat_masked = np.copy(mat)
mat_masked[mask] = float("-inf")
top = mat_masked.argsort(axis=None)[::-1][:(num_contacts)]
top = (top % mat.shape[0]).astype(np.uint16), np.floor(top / mat.shape[0]).astype(np.uint16)
return top
if __name__ == '__main__':
main()
|
|
8ed78d362cc24d10a0b692ad673c9cd55080b01c
|
gostop.py
|
gostop.py
|
import sys
import random
import argparse
from gostop import GameState, HumanAgent, RandomAgent
def main(argv):
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-f', '--fixed-random-seed', dest='fixed_random_seed',
help='Use a fixed random seed to play a predictable game')
args = parser.parse_args()
if args.fixed_random_seed:
random.seed(args.fixed_random_seed)
players = [HumanAgent('Human'), RandomAgent('Deep Pink')]
state = GameState.new_game()
while True:
current_player = players[state.current_player]
print('*** {0}'.format(current_player))
print(state)
possible_actions = state.get_possible_actions()
if len(possible_actions) == 0:
raise Exception('No more actions')
action = current_player.get_action(state, possible_actions)
print('*** {0} takes action {1}'.format(current_player, str(action)))
last_player = state.current_player
state = state.generate_successor(action)
if (state.get_result(last_player) == 1):
print('*** {0} wins!'.format(players[last_player]))
break
if __name__ == '__main__':
main(sys.argv[1:])
|
Add basic state machine and game mechanics.
|
Add basic state machine and game mechanics.
|
Python
|
mit
|
reidlindsay/gostop
|
Add basic state machine and game mechanics.
|
import sys
import random
import argparse
from gostop import GameState, HumanAgent, RandomAgent
def main(argv):
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-f', '--fixed-random-seed', dest='fixed_random_seed',
help='Use a fixed random seed to play a predictable game')
args = parser.parse_args()
if args.fixed_random_seed:
random.seed(args.fixed_random_seed)
players = [HumanAgent('Human'), RandomAgent('Deep Pink')]
state = GameState.new_game()
while True:
current_player = players[state.current_player]
print('*** {0}'.format(current_player))
print(state)
possible_actions = state.get_possible_actions()
if len(possible_actions) == 0:
raise Exception('No more actions')
action = current_player.get_action(state, possible_actions)
print('*** {0} takes action {1}'.format(current_player, str(action)))
last_player = state.current_player
state = state.generate_successor(action)
if (state.get_result(last_player) == 1):
print('*** {0} wins!'.format(players[last_player]))
break
if __name__ == '__main__':
main(sys.argv[1:])
|
<commit_before><commit_msg>Add basic state machine and game mechanics.<commit_after>
|
import sys
import random
import argparse
from gostop import GameState, HumanAgent, RandomAgent
def main(argv):
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-f', '--fixed-random-seed', dest='fixed_random_seed',
help='Use a fixed random seed to play a predictable game')
args = parser.parse_args()
if args.fixed_random_seed:
random.seed(args.fixed_random_seed)
players = [HumanAgent('Human'), RandomAgent('Deep Pink')]
state = GameState.new_game()
while True:
current_player = players[state.current_player]
print('*** {0}'.format(current_player))
print(state)
possible_actions = state.get_possible_actions()
if len(possible_actions) == 0:
raise Exception('No more actions')
action = current_player.get_action(state, possible_actions)
print('*** {0} takes action {1}'.format(current_player, str(action)))
last_player = state.current_player
state = state.generate_successor(action)
if (state.get_result(last_player) == 1):
print('*** {0} wins!'.format(players[last_player]))
break
if __name__ == '__main__':
main(sys.argv[1:])
|
Add basic state machine and game mechanics.import sys
import random
import argparse
from gostop import GameState, HumanAgent, RandomAgent
def main(argv):
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-f', '--fixed-random-seed', dest='fixed_random_seed',
help='Use a fixed random seed to play a predictable game')
args = parser.parse_args()
if args.fixed_random_seed:
random.seed(args.fixed_random_seed)
players = [HumanAgent('Human'), RandomAgent('Deep Pink')]
state = GameState.new_game()
while True:
current_player = players[state.current_player]
print('*** {0}'.format(current_player))
print(state)
possible_actions = state.get_possible_actions()
if len(possible_actions) == 0:
raise Exception('No more actions')
action = current_player.get_action(state, possible_actions)
print('*** {0} takes action {1}'.format(current_player, str(action)))
last_player = state.current_player
state = state.generate_successor(action)
if (state.get_result(last_player) == 1):
print('*** {0} wins!'.format(players[last_player]))
break
if __name__ == '__main__':
main(sys.argv[1:])
|
<commit_before><commit_msg>Add basic state machine and game mechanics.<commit_after>import sys
import random
import argparse
from gostop import GameState, HumanAgent, RandomAgent
def main(argv):
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-f', '--fixed-random-seed', dest='fixed_random_seed',
help='Use a fixed random seed to play a predictable game')
args = parser.parse_args()
if args.fixed_random_seed:
random.seed(args.fixed_random_seed)
players = [HumanAgent('Human'), RandomAgent('Deep Pink')]
state = GameState.new_game()
while True:
current_player = players[state.current_player]
print('*** {0}'.format(current_player))
print(state)
possible_actions = state.get_possible_actions()
if len(possible_actions) == 0:
raise Exception('No more actions')
action = current_player.get_action(state, possible_actions)
print('*** {0} takes action {1}'.format(current_player, str(action)))
last_player = state.current_player
state = state.generate_successor(action)
if (state.get_result(last_player) == 1):
print('*** {0} wins!'.format(players[last_player]))
break
if __name__ == '__main__':
main(sys.argv[1:])
|
|
a7f763d19a28be34d7a9a745a536c97d59720b24
|
checks/check_canny.py
|
checks/check_canny.py
|
from __future__ import print_function, division
import imgaug as ia
import imgaug.augmenters as iaa
def main():
black_and_white = iaa.RandomColorsBinaryImageColorizer(
color_fg=255, color_bg=0)
print("alpha=1.0, black and white")
image = ia.quokka_square((128, 128))
aug = iaa.Canny(alpha=1.0, colorizer=black_and_white)
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
print("alpha=1.0, random color")
image = ia.quokka_square((128, 128))
aug = iaa.Canny(alpha=1.0)
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
print("alpha=1.0, sobel ksize=[3, 13], black and white")
image = ia.quokka_square((128, 128))
aug = iaa.Canny(alpha=1.0, sobel_kernel_size=[3, 7],
colorizer=black_and_white)
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
print("alpha=1.0, sobel ksize=3, black and white")
image = ia.quokka_square((128, 128))
aug = iaa.Canny(alpha=1.0, sobel_kernel_size=3,
colorizer=black_and_white)
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
print("fully random")
image = ia.quokka_square((128, 128))
aug = iaa.Canny()
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
if __name__ == "__main__":
main()
|
Add check script for canny
|
Add check script for canny
|
Python
|
mit
|
aleju/ImageAugmenter,aleju/imgaug,aleju/imgaug
|
Add check script for canny
|
from __future__ import print_function, division
import imgaug as ia
import imgaug.augmenters as iaa
def main():
black_and_white = iaa.RandomColorsBinaryImageColorizer(
color_fg=255, color_bg=0)
print("alpha=1.0, black and white")
image = ia.quokka_square((128, 128))
aug = iaa.Canny(alpha=1.0, colorizer=black_and_white)
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
print("alpha=1.0, random color")
image = ia.quokka_square((128, 128))
aug = iaa.Canny(alpha=1.0)
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
print("alpha=1.0, sobel ksize=[3, 13], black and white")
image = ia.quokka_square((128, 128))
aug = iaa.Canny(alpha=1.0, sobel_kernel_size=[3, 7],
colorizer=black_and_white)
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
print("alpha=1.0, sobel ksize=3, black and white")
image = ia.quokka_square((128, 128))
aug = iaa.Canny(alpha=1.0, sobel_kernel_size=3,
colorizer=black_and_white)
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
print("fully random")
image = ia.quokka_square((128, 128))
aug = iaa.Canny()
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add check script for canny<commit_after>
|
from __future__ import print_function, division
import imgaug as ia
import imgaug.augmenters as iaa
def main():
black_and_white = iaa.RandomColorsBinaryImageColorizer(
color_fg=255, color_bg=0)
print("alpha=1.0, black and white")
image = ia.quokka_square((128, 128))
aug = iaa.Canny(alpha=1.0, colorizer=black_and_white)
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
print("alpha=1.0, random color")
image = ia.quokka_square((128, 128))
aug = iaa.Canny(alpha=1.0)
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
print("alpha=1.0, sobel ksize=[3, 13], black and white")
image = ia.quokka_square((128, 128))
aug = iaa.Canny(alpha=1.0, sobel_kernel_size=[3, 7],
colorizer=black_and_white)
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
print("alpha=1.0, sobel ksize=3, black and white")
image = ia.quokka_square((128, 128))
aug = iaa.Canny(alpha=1.0, sobel_kernel_size=3,
colorizer=black_and_white)
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
print("fully random")
image = ia.quokka_square((128, 128))
aug = iaa.Canny()
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
if __name__ == "__main__":
main()
|
Add check script for cannyfrom __future__ import print_function, division
import imgaug as ia
import imgaug.augmenters as iaa
def main():
black_and_white = iaa.RandomColorsBinaryImageColorizer(
color_fg=255, color_bg=0)
print("alpha=1.0, black and white")
image = ia.quokka_square((128, 128))
aug = iaa.Canny(alpha=1.0, colorizer=black_and_white)
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
print("alpha=1.0, random color")
image = ia.quokka_square((128, 128))
aug = iaa.Canny(alpha=1.0)
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
print("alpha=1.0, sobel ksize=[3, 13], black and white")
image = ia.quokka_square((128, 128))
aug = iaa.Canny(alpha=1.0, sobel_kernel_size=[3, 7],
colorizer=black_and_white)
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
print("alpha=1.0, sobel ksize=3, black and white")
image = ia.quokka_square((128, 128))
aug = iaa.Canny(alpha=1.0, sobel_kernel_size=3,
colorizer=black_and_white)
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
print("fully random")
image = ia.quokka_square((128, 128))
aug = iaa.Canny()
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add check script for canny<commit_after>from __future__ import print_function, division
import imgaug as ia
import imgaug.augmenters as iaa
def main():
black_and_white = iaa.RandomColorsBinaryImageColorizer(
color_fg=255, color_bg=0)
print("alpha=1.0, black and white")
image = ia.quokka_square((128, 128))
aug = iaa.Canny(alpha=1.0, colorizer=black_and_white)
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
print("alpha=1.0, random color")
image = ia.quokka_square((128, 128))
aug = iaa.Canny(alpha=1.0)
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
print("alpha=1.0, sobel ksize=[3, 13], black and white")
image = ia.quokka_square((128, 128))
aug = iaa.Canny(alpha=1.0, sobel_kernel_size=[3, 7],
colorizer=black_and_white)
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
print("alpha=1.0, sobel ksize=3, black and white")
image = ia.quokka_square((128, 128))
aug = iaa.Canny(alpha=1.0, sobel_kernel_size=3,
colorizer=black_and_white)
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
print("fully random")
image = ia.quokka_square((128, 128))
aug = iaa.Canny()
ia.imshow(ia.draw_grid(aug(images=[image] * (5*5))))
if __name__ == "__main__":
main()
|
|
e8d56263545966520a1479e09e53836122b69923
|
models.py
|
models.py
|
import datetime
from flask import url_for
from Simpoll import db
class Poll(db.Document):
created_at = db.DateTimeField(default=datetime.datetime.now, required=True)
question = db.StringField(max_length=255, required=True)
option1 = db.StringField(max_length=255, required=True)
option2 = db.StringField(max_length=255, required=True)
option1upvotes = db.IntField(required=True)
option1downvotes = db.IntField(required=True)
option2upvotes = db.IntField(required=True)
option2downvotes = db.IntField(required=True)
def get_absolute_url(self):
# it's okay to use the first 7 bytes for url
# because first 4 bytes are time and next 3 are
# a machine id
return url_for('post', kwargs={"slug": self._id[0:6]})
def __unicode__(self):
return self.question
meta = {
'allow_inheritance': True,
'indexes': ['-created_at', 'slug'],
'ordering': ['-created_at']
}
|
Add model for polls db
|
Add model for polls db
|
Python
|
mit
|
dpuleri/simpoll_backend,dpuleri/simpoll_backend,dpuleri/simpoll_backend,dpuleri/simpoll_backend
|
Add model for polls db
|
import datetime
from flask import url_for
from Simpoll import db
class Poll(db.Document):
created_at = db.DateTimeField(default=datetime.datetime.now, required=True)
question = db.StringField(max_length=255, required=True)
option1 = db.StringField(max_length=255, required=True)
option2 = db.StringField(max_length=255, required=True)
option1upvotes = db.IntField(required=True)
option1downvotes = db.IntField(required=True)
option2upvotes = db.IntField(required=True)
option2downvotes = db.IntField(required=True)
def get_absolute_url(self):
# it's okay to use the first 7 bytes for url
# because first 4 bytes are time and next 3 are
# a machine id
return url_for('post', kwargs={"slug": self._id[0:6]})
def __unicode__(self):
return self.question
meta = {
'allow_inheritance': True,
'indexes': ['-created_at', 'slug'],
'ordering': ['-created_at']
}
|
<commit_before><commit_msg>Add model for polls db<commit_after>
|
import datetime
from flask import url_for
from Simpoll import db
class Poll(db.Document):
created_at = db.DateTimeField(default=datetime.datetime.now, required=True)
question = db.StringField(max_length=255, required=True)
option1 = db.StringField(max_length=255, required=True)
option2 = db.StringField(max_length=255, required=True)
option1upvotes = db.IntField(required=True)
option1downvotes = db.IntField(required=True)
option2upvotes = db.IntField(required=True)
option2downvotes = db.IntField(required=True)
def get_absolute_url(self):
# it's okay to use the first 7 bytes for url
# because first 4 bytes are time and next 3 are
# a machine id
return url_for('post', kwargs={"slug": self._id[0:6]})
def __unicode__(self):
return self.question
meta = {
'allow_inheritance': True,
'indexes': ['-created_at', 'slug'],
'ordering': ['-created_at']
}
|
Add model for polls dbimport datetime
from flask import url_for
from Simpoll import db
class Poll(db.Document):
created_at = db.DateTimeField(default=datetime.datetime.now, required=True)
question = db.StringField(max_length=255, required=True)
option1 = db.StringField(max_length=255, required=True)
option2 = db.StringField(max_length=255, required=True)
option1upvotes = db.IntField(required=True)
option1downvotes = db.IntField(required=True)
option2upvotes = db.IntField(required=True)
option2downvotes = db.IntField(required=True)
def get_absolute_url(self):
# it's okay to use the first 7 bytes for url
# because first 4 bytes are time and next 3 are
# a machine id
return url_for('post', kwargs={"slug": self._id[0:6]})
def __unicode__(self):
return self.question
meta = {
'allow_inheritance': True,
'indexes': ['-created_at', 'slug'],
'ordering': ['-created_at']
}
|
<commit_before><commit_msg>Add model for polls db<commit_after>import datetime
from flask import url_for
from Simpoll import db
class Poll(db.Document):
created_at = db.DateTimeField(default=datetime.datetime.now, required=True)
question = db.StringField(max_length=255, required=True)
option1 = db.StringField(max_length=255, required=True)
option2 = db.StringField(max_length=255, required=True)
option1upvotes = db.IntField(required=True)
option1downvotes = db.IntField(required=True)
option2upvotes = db.IntField(required=True)
option2downvotes = db.IntField(required=True)
def get_absolute_url(self):
# it's okay to use the first 7 bytes for url
# because first 4 bytes are time and next 3 are
# a machine id
return url_for('post', kwargs={"slug": self._id[0:6]})
def __unicode__(self):
return self.question
meta = {
'allow_inheritance': True,
'indexes': ['-created_at', 'slug'],
'ordering': ['-created_at']
}
|
|
b2e91b9f05b7556c224c4b74566caed953075f95
|
open-hackathon-server/test/unittest/hk8s/test_k8s_expr_starter.py
|
open-hackathon-server/test/unittest/hk8s/test_k8s_expr_starter.py
|
# -*- coding: utf-8 -*-
"""
This file is covered by the LICENSING file in the root of this project.
"""
import unittest
from mock import Mock
# setup import path
try:
import hackathon # noqa
except ImportError:
import os
import sys
BASE_DIR = os.path.dirname(__file__)
sys.path.append(os.path.realpath(os.path.join(BASE_DIR, "..", "..", "..", "src")))
from hackathon import Component, RequiredFeature, Context
from hackathon.expr.k8s_expr_starter import K8SExprStarter
from hackathon import Context
import test_k8s_conf
class K8SExprStarterTest(unittest.TestCase):
def setUp(self):
hackathon_manager = RequiredFeature("hackathon_manager")
template_library = RequiredFeature("template_library")
self.service = K8SExprStarter()
#prerequisites:
#1, mock a hackathon record in db
#2, mock an experiment record in db
#@unittest.skip("skip test_start_expr")
def test_start_expr(self):
#TODO: mock record in db
ctx = Context(template = template, user = user, hackathon = hackathon)
self.assertTrue(self.service.start_expr(ctx))
|
Add unit test cases for k8s expr starter
|
Add unit test cases for k8s expr starter
1, start_expr
Signed-off-by: chensong <ae7d166f23a6fc29809e39146fc64585e5c46680@linuxep.com>
|
Python
|
mit
|
juniwang/open-hackathon,juniwang/open-hackathon,juniwang/open-hackathon,msopentechcn/open-hackathon,juniwang/open-hackathon,msopentechcn/open-hackathon,msopentechcn/open-hackathon,msopentechcn/open-hackathon,juniwang/open-hackathon,msopentechcn/open-hackathon,msopentechcn/open-hackathon,juniwang/open-hackathon
|
Add unit test cases for k8s expr starter
1, start_expr
Signed-off-by: chensong <ae7d166f23a6fc29809e39146fc64585e5c46680@linuxep.com>
|
# -*- coding: utf-8 -*-
"""
This file is covered by the LICENSING file in the root of this project.
"""
import unittest
from mock import Mock
# setup import path
try:
import hackathon # noqa
except ImportError:
import os
import sys
BASE_DIR = os.path.dirname(__file__)
sys.path.append(os.path.realpath(os.path.join(BASE_DIR, "..", "..", "..", "src")))
from hackathon import Component, RequiredFeature, Context
from hackathon.expr.k8s_expr_starter import K8SExprStarter
from hackathon import Context
import test_k8s_conf
class K8SExprStarterTest(unittest.TestCase):
def setUp(self):
hackathon_manager = RequiredFeature("hackathon_manager")
template_library = RequiredFeature("template_library")
self.service = K8SExprStarter()
#prerequisites:
#1, mock a hackathon record in db
#2, mock an experiment record in db
#@unittest.skip("skip test_start_expr")
def test_start_expr(self):
#TODO: mock record in db
ctx = Context(template = template, user = user, hackathon = hackathon)
self.assertTrue(self.service.start_expr(ctx))
|
<commit_before><commit_msg>Add unit test cases for k8s expr starter
1, start_expr
Signed-off-by: chensong <ae7d166f23a6fc29809e39146fc64585e5c46680@linuxep.com><commit_after>
|
# -*- coding: utf-8 -*-
"""
This file is covered by the LICENSING file in the root of this project.
"""
import unittest
from mock import Mock
# setup import path
try:
import hackathon # noqa
except ImportError:
import os
import sys
BASE_DIR = os.path.dirname(__file__)
sys.path.append(os.path.realpath(os.path.join(BASE_DIR, "..", "..", "..", "src")))
from hackathon import Component, RequiredFeature, Context
from hackathon.expr.k8s_expr_starter import K8SExprStarter
from hackathon import Context
import test_k8s_conf
class K8SExprStarterTest(unittest.TestCase):
def setUp(self):
hackathon_manager = RequiredFeature("hackathon_manager")
template_library = RequiredFeature("template_library")
self.service = K8SExprStarter()
#prerequisites:
#1, mock a hackathon record in db
#2, mock an experiment record in db
#@unittest.skip("skip test_start_expr")
def test_start_expr(self):
#TODO: mock record in db
ctx = Context(template = template, user = user, hackathon = hackathon)
self.assertTrue(self.service.start_expr(ctx))
|
Add unit test cases for k8s expr starter
1, start_expr
Signed-off-by: chensong <ae7d166f23a6fc29809e39146fc64585e5c46680@linuxep.com># -*- coding: utf-8 -*-
"""
This file is covered by the LICENSING file in the root of this project.
"""
import unittest
from mock import Mock
# setup import path
try:
import hackathon # noqa
except ImportError:
import os
import sys
BASE_DIR = os.path.dirname(__file__)
sys.path.append(os.path.realpath(os.path.join(BASE_DIR, "..", "..", "..", "src")))
from hackathon import Component, RequiredFeature, Context
from hackathon.expr.k8s_expr_starter import K8SExprStarter
from hackathon import Context
import test_k8s_conf
class K8SExprStarterTest(unittest.TestCase):
def setUp(self):
hackathon_manager = RequiredFeature("hackathon_manager")
template_library = RequiredFeature("template_library")
self.service = K8SExprStarter()
#prerequisites:
#1, mock a hackathon record in db
#2, mock an experiment record in db
#@unittest.skip("skip test_start_expr")
def test_start_expr(self):
#TODO: mock record in db
ctx = Context(template = template, user = user, hackathon = hackathon)
self.assertTrue(self.service.start_expr(ctx))
|
<commit_before><commit_msg>Add unit test cases for k8s expr starter
1, start_expr
Signed-off-by: chensong <ae7d166f23a6fc29809e39146fc64585e5c46680@linuxep.com><commit_after># -*- coding: utf-8 -*-
"""
This file is covered by the LICENSING file in the root of this project.
"""
import unittest
from mock import Mock
# setup import path
try:
import hackathon # noqa
except ImportError:
import os
import sys
BASE_DIR = os.path.dirname(__file__)
sys.path.append(os.path.realpath(os.path.join(BASE_DIR, "..", "..", "..", "src")))
from hackathon import Component, RequiredFeature, Context
from hackathon.expr.k8s_expr_starter import K8SExprStarter
from hackathon import Context
import test_k8s_conf
class K8SExprStarterTest(unittest.TestCase):
def setUp(self):
hackathon_manager = RequiredFeature("hackathon_manager")
template_library = RequiredFeature("template_library")
self.service = K8SExprStarter()
#prerequisites:
#1, mock a hackathon record in db
#2, mock an experiment record in db
#@unittest.skip("skip test_start_expr")
def test_start_expr(self):
#TODO: mock record in db
ctx = Context(template = template, user = user, hackathon = hackathon)
self.assertTrue(self.service.start_expr(ctx))
|
|
317c1b8fcc64bb591e87bf3838c068ef979488c3
|
tools_webrtc/coverage/generate_coverage_command.py
|
tools_webrtc/coverage/generate_coverage_command.py
|
#!/usr/bin/env python
# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Generates a command-line for coverage.py. Useful for manual coverage runs.
Before running the generated command line, do this:
gn gen out/coverage --args='use_clang_coverage=true is_component_build=false'
"""
import sys
TESTS = [
'video_capture_tests',
'webrtc_nonparallel_tests',
'video_engine_tests',
'tools_unittests',
'test_support_unittests',
'system_wrappers_unittests',
'rtc_unittests',
'rtc_stats_unittests',
'rtc_pc_unittests',
'rtc_media_unittests',
'peerconnection_unittests',
'ortc_unittests',
'modules_unittests',
'modules_tests',
'low_bandwidth_audio_test',
'common_video_unittests',
'common_audio_unittests',
'audio_decoder_unittests'
]
def main():
cmd = ([sys.executable, 'tools/code_coverage/coverage.py'] + TESTS +
['-b out/coverage', '-o out/report'] +
['-i=\'.*/out/.*|.*/third_party/.*\''] +
['-c \'out/coverage/%s\'' % t for t in TESTS])
def WithXvfb(binary):
return '-c \'%s testing/xvfb.py %s\'' % (sys.executable, binary)
modules_unittests = 'out/coverage/modules_unittests'
cmd[cmd.index('-c \'%s\'' % modules_unittests)] = WithXvfb(modules_unittests)
print ' '.join(cmd)
return 0
if __name__ == '__main__':
sys.exit(main())
|
Add script for generating coverage.py commands.
|
Add script for generating coverage.py commands.
It's only correct for Linux for now.
Bug: chromium:844647
Change-Id: I8fce28d88a4d060553e30fe0862d34815988ba61
Reviewed-on: https://webrtc-review.googlesource.com/79149
Reviewed-by: Mirko Bonadei <d2c43c210eae6feef04f53bae50885e8152edcca@webrtc.org>
Commit-Queue: Patrik Höglund <b486136ee68458b09c92c86c39f451c87d6ef6a1@webrtc.org>
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#23418}
|
Python
|
bsd-3-clause
|
TimothyGu/libilbc,TimothyGu/libilbc,ShiftMediaProject/libilbc,TimothyGu/libilbc,ShiftMediaProject/libilbc,TimothyGu/libilbc,ShiftMediaProject/libilbc,ShiftMediaProject/libilbc,ShiftMediaProject/libilbc,TimothyGu/libilbc
|
Add script for generating coverage.py commands.
It's only correct for Linux for now.
Bug: chromium:844647
Change-Id: I8fce28d88a4d060553e30fe0862d34815988ba61
Reviewed-on: https://webrtc-review.googlesource.com/79149
Reviewed-by: Mirko Bonadei <d2c43c210eae6feef04f53bae50885e8152edcca@webrtc.org>
Commit-Queue: Patrik Höglund <b486136ee68458b09c92c86c39f451c87d6ef6a1@webrtc.org>
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#23418}
|
#!/usr/bin/env python
# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Generates a command-line for coverage.py. Useful for manual coverage runs.
Before running the generated command line, do this:
gn gen out/coverage --args='use_clang_coverage=true is_component_build=false'
"""
import sys
TESTS = [
'video_capture_tests',
'webrtc_nonparallel_tests',
'video_engine_tests',
'tools_unittests',
'test_support_unittests',
'system_wrappers_unittests',
'rtc_unittests',
'rtc_stats_unittests',
'rtc_pc_unittests',
'rtc_media_unittests',
'peerconnection_unittests',
'ortc_unittests',
'modules_unittests',
'modules_tests',
'low_bandwidth_audio_test',
'common_video_unittests',
'common_audio_unittests',
'audio_decoder_unittests'
]
def main():
cmd = ([sys.executable, 'tools/code_coverage/coverage.py'] + TESTS +
['-b out/coverage', '-o out/report'] +
['-i=\'.*/out/.*|.*/third_party/.*\''] +
['-c \'out/coverage/%s\'' % t for t in TESTS])
def WithXvfb(binary):
return '-c \'%s testing/xvfb.py %s\'' % (sys.executable, binary)
modules_unittests = 'out/coverage/modules_unittests'
cmd[cmd.index('-c \'%s\'' % modules_unittests)] = WithXvfb(modules_unittests)
print ' '.join(cmd)
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add script for generating coverage.py commands.
It's only correct for Linux for now.
Bug: chromium:844647
Change-Id: I8fce28d88a4d060553e30fe0862d34815988ba61
Reviewed-on: https://webrtc-review.googlesource.com/79149
Reviewed-by: Mirko Bonadei <d2c43c210eae6feef04f53bae50885e8152edcca@webrtc.org>
Commit-Queue: Patrik Höglund <b486136ee68458b09c92c86c39f451c87d6ef6a1@webrtc.org>
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#23418}<commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Generates a command-line for coverage.py. Useful for manual coverage runs.
Before running the generated command line, do this:
gn gen out/coverage --args='use_clang_coverage=true is_component_build=false'
"""
import sys
TESTS = [
'video_capture_tests',
'webrtc_nonparallel_tests',
'video_engine_tests',
'tools_unittests',
'test_support_unittests',
'system_wrappers_unittests',
'rtc_unittests',
'rtc_stats_unittests',
'rtc_pc_unittests',
'rtc_media_unittests',
'peerconnection_unittests',
'ortc_unittests',
'modules_unittests',
'modules_tests',
'low_bandwidth_audio_test',
'common_video_unittests',
'common_audio_unittests',
'audio_decoder_unittests'
]
def main():
cmd = ([sys.executable, 'tools/code_coverage/coverage.py'] + TESTS +
['-b out/coverage', '-o out/report'] +
['-i=\'.*/out/.*|.*/third_party/.*\''] +
['-c \'out/coverage/%s\'' % t for t in TESTS])
def WithXvfb(binary):
return '-c \'%s testing/xvfb.py %s\'' % (sys.executable, binary)
modules_unittests = 'out/coverage/modules_unittests'
cmd[cmd.index('-c \'%s\'' % modules_unittests)] = WithXvfb(modules_unittests)
print ' '.join(cmd)
return 0
if __name__ == '__main__':
sys.exit(main())
|
Add script for generating coverage.py commands.
It's only correct for Linux for now.
Bug: chromium:844647
Change-Id: I8fce28d88a4d060553e30fe0862d34815988ba61
Reviewed-on: https://webrtc-review.googlesource.com/79149
Reviewed-by: Mirko Bonadei <d2c43c210eae6feef04f53bae50885e8152edcca@webrtc.org>
Commit-Queue: Patrik Höglund <b486136ee68458b09c92c86c39f451c87d6ef6a1@webrtc.org>
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#23418}#!/usr/bin/env python
# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Generates a command-line for coverage.py. Useful for manual coverage runs.
Before running the generated command line, do this:
gn gen out/coverage --args='use_clang_coverage=true is_component_build=false'
"""
import sys
TESTS = [
'video_capture_tests',
'webrtc_nonparallel_tests',
'video_engine_tests',
'tools_unittests',
'test_support_unittests',
'system_wrappers_unittests',
'rtc_unittests',
'rtc_stats_unittests',
'rtc_pc_unittests',
'rtc_media_unittests',
'peerconnection_unittests',
'ortc_unittests',
'modules_unittests',
'modules_tests',
'low_bandwidth_audio_test',
'common_video_unittests',
'common_audio_unittests',
'audio_decoder_unittests'
]
def main():
cmd = ([sys.executable, 'tools/code_coverage/coverage.py'] + TESTS +
['-b out/coverage', '-o out/report'] +
['-i=\'.*/out/.*|.*/third_party/.*\''] +
['-c \'out/coverage/%s\'' % t for t in TESTS])
def WithXvfb(binary):
return '-c \'%s testing/xvfb.py %s\'' % (sys.executable, binary)
modules_unittests = 'out/coverage/modules_unittests'
cmd[cmd.index('-c \'%s\'' % modules_unittests)] = WithXvfb(modules_unittests)
print ' '.join(cmd)
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add script for generating coverage.py commands.
It's only correct for Linux for now.
Bug: chromium:844647
Change-Id: I8fce28d88a4d060553e30fe0862d34815988ba61
Reviewed-on: https://webrtc-review.googlesource.com/79149
Reviewed-by: Mirko Bonadei <d2c43c210eae6feef04f53bae50885e8152edcca@webrtc.org>
Commit-Queue: Patrik Höglund <b486136ee68458b09c92c86c39f451c87d6ef6a1@webrtc.org>
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#23418}<commit_after>#!/usr/bin/env python
# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Generates a command-line for coverage.py. Useful for manual coverage runs.
Before running the generated command line, do this:
gn gen out/coverage --args='use_clang_coverage=true is_component_build=false'
"""
import sys
TESTS = [
'video_capture_tests',
'webrtc_nonparallel_tests',
'video_engine_tests',
'tools_unittests',
'test_support_unittests',
'system_wrappers_unittests',
'rtc_unittests',
'rtc_stats_unittests',
'rtc_pc_unittests',
'rtc_media_unittests',
'peerconnection_unittests',
'ortc_unittests',
'modules_unittests',
'modules_tests',
'low_bandwidth_audio_test',
'common_video_unittests',
'common_audio_unittests',
'audio_decoder_unittests'
]
def main():
cmd = ([sys.executable, 'tools/code_coverage/coverage.py'] + TESTS +
['-b out/coverage', '-o out/report'] +
['-i=\'.*/out/.*|.*/third_party/.*\''] +
['-c \'out/coverage/%s\'' % t for t in TESTS])
def WithXvfb(binary):
return '-c \'%s testing/xvfb.py %s\'' % (sys.executable, binary)
modules_unittests = 'out/coverage/modules_unittests'
cmd[cmd.index('-c \'%s\'' % modules_unittests)] = WithXvfb(modules_unittests)
print ' '.join(cmd)
return 0
if __name__ == '__main__':
sys.exit(main())
|
|
8c2350db0106853b06d8889e873992873f4bf00a
|
smt_markdown.py
|
smt_markdown.py
|
#import pylab
#import numpy as npf
from sumatra.projects import load_project
from texttable import Texttable
from sumatra.formatting import HTMLFormatter
def markdown_table(records):
fields = ['label', 'timestamp', 'reason', 'duration']
table = Texttable()
table.set_cols_dtype(['t'] * len(fields))
rows = [fields]
for record in records:
rows.append([str(getattr(record, field)) for field in fields])
table.add_rows(rows)
out = table.draw().replace('=', '-')
out = out.replace('\n+-', '\n|-')
out = '|' + out[1:-1] + '|'
return out
def get_records(tags=[], parameters={}):
project = load_project()
records_list = []
for r in project.record_store.list(project.name):
if set(tags).issubset(set(r.tags)):
if set(parameters.items()).issubset(set(r.parameters.as_dict().items())):
records_list.append(r)
return records_list
if __name__ == '__main__':
records = get_records(tags=['serialnumber10'], parameters={'kPlus' : 100.0})
print markdown_table(records)
#print HTMLFormatter(records).table()
|
Add module to create markdown/HTML table entry and update ipynb.
|
Add module to create markdown/HTML table entry and update ipynb.
|
Python
|
mit
|
wd15/extremefill2D,wd15/extremefill2D
|
Add module to create markdown/HTML table entry and update ipynb.
|
#import pylab
#import numpy as npf
from sumatra.projects import load_project
from texttable import Texttable
from sumatra.formatting import HTMLFormatter
def markdown_table(records):
fields = ['label', 'timestamp', 'reason', 'duration']
table = Texttable()
table.set_cols_dtype(['t'] * len(fields))
rows = [fields]
for record in records:
rows.append([str(getattr(record, field)) for field in fields])
table.add_rows(rows)
out = table.draw().replace('=', '-')
out = out.replace('\n+-', '\n|-')
out = '|' + out[1:-1] + '|'
return out
def get_records(tags=[], parameters={}):
project = load_project()
records_list = []
for r in project.record_store.list(project.name):
if set(tags).issubset(set(r.tags)):
if set(parameters.items()).issubset(set(r.parameters.as_dict().items())):
records_list.append(r)
return records_list
if __name__ == '__main__':
records = get_records(tags=['serialnumber10'], parameters={'kPlus' : 100.0})
print markdown_table(records)
#print HTMLFormatter(records).table()
|
<commit_before><commit_msg>Add module to create markdown/HTML table entry and update ipynb.<commit_after>
|
#import pylab
#import numpy as npf
from sumatra.projects import load_project
from texttable import Texttable
from sumatra.formatting import HTMLFormatter
def markdown_table(records):
fields = ['label', 'timestamp', 'reason', 'duration']
table = Texttable()
table.set_cols_dtype(['t'] * len(fields))
rows = [fields]
for record in records:
rows.append([str(getattr(record, field)) for field in fields])
table.add_rows(rows)
out = table.draw().replace('=', '-')
out = out.replace('\n+-', '\n|-')
out = '|' + out[1:-1] + '|'
return out
def get_records(tags=[], parameters={}):
project = load_project()
records_list = []
for r in project.record_store.list(project.name):
if set(tags).issubset(set(r.tags)):
if set(parameters.items()).issubset(set(r.parameters.as_dict().items())):
records_list.append(r)
return records_list
if __name__ == '__main__':
records = get_records(tags=['serialnumber10'], parameters={'kPlus' : 100.0})
print markdown_table(records)
#print HTMLFormatter(records).table()
|
Add module to create markdown/HTML table entry and update ipynb.#import pylab
#import numpy as npf
from sumatra.projects import load_project
from texttable import Texttable
from sumatra.formatting import HTMLFormatter
def markdown_table(records):
fields = ['label', 'timestamp', 'reason', 'duration']
table = Texttable()
table.set_cols_dtype(['t'] * len(fields))
rows = [fields]
for record in records:
rows.append([str(getattr(record, field)) for field in fields])
table.add_rows(rows)
out = table.draw().replace('=', '-')
out = out.replace('\n+-', '\n|-')
out = '|' + out[1:-1] + '|'
return out
def get_records(tags=[], parameters={}):
project = load_project()
records_list = []
for r in project.record_store.list(project.name):
if set(tags).issubset(set(r.tags)):
if set(parameters.items()).issubset(set(r.parameters.as_dict().items())):
records_list.append(r)
return records_list
if __name__ == '__main__':
records = get_records(tags=['serialnumber10'], parameters={'kPlus' : 100.0})
print markdown_table(records)
#print HTMLFormatter(records).table()
|
<commit_before><commit_msg>Add module to create markdown/HTML table entry and update ipynb.<commit_after>#import pylab
#import numpy as npf
from sumatra.projects import load_project
from texttable import Texttable
from sumatra.formatting import HTMLFormatter
def markdown_table(records):
fields = ['label', 'timestamp', 'reason', 'duration']
table = Texttable()
table.set_cols_dtype(['t'] * len(fields))
rows = [fields]
for record in records:
rows.append([str(getattr(record, field)) for field in fields])
table.add_rows(rows)
out = table.draw().replace('=', '-')
out = out.replace('\n+-', '\n|-')
out = '|' + out[1:-1] + '|'
return out
def get_records(tags=[], parameters={}):
project = load_project()
records_list = []
for r in project.record_store.list(project.name):
if set(tags).issubset(set(r.tags)):
if set(parameters.items()).issubset(set(r.parameters.as_dict().items())):
records_list.append(r)
return records_list
if __name__ == '__main__':
records = get_records(tags=['serialnumber10'], parameters={'kPlus' : 100.0})
print markdown_table(records)
#print HTMLFormatter(records).table()
|
|
aa64072c2cc08e1b5fee872216377d14a567a223
|
metric.py
|
metric.py
|
import string
pct = string.punctuation
articles = ['a','an','the']
def _clean_string(x):
# lowercase, remove articles, remove punctuation,
# and return as a single string without whitespace
toks = filter(lambda t:t not in articles, x.lower().split())
return ''.join([''.join(filter(lambda c:c not in pct, list(t)))
for t in toks])
def exact_match(x1, x2):
xc1 = _clean_string(x1)
xc2 = _clean_string(x2)
return xc1==xc2
def f1_match(x1, x2):
# x1 is treated as Ground Truth.
tok1 = set(map(lambda t:_clean_string(t), x1.split()))
tok2 = set(map(lambda t:_clean_string(t), x2.split()))
l1 = len(tok1)
l2 = len(tok2)
ovr = len(tok1.intersection(tok2))
if ovr==0: return 0.
prec = float(ovr)/l2
rec = float(ovr)/l1
return 2*prec*rec/(prec+rec)
|
Add script computing exact_match and f1_match for Quasar-T
|
Add script computing exact_match and f1_match for Quasar-T
|
Python
|
bsd-2-clause
|
bdhingra/quasar
|
Add script computing exact_match and f1_match for Quasar-T
|
import string
pct = string.punctuation
articles = ['a','an','the']
def _clean_string(x):
# lowercase, remove articles, remove punctuation,
# and return as a single string without whitespace
toks = filter(lambda t:t not in articles, x.lower().split())
return ''.join([''.join(filter(lambda c:c not in pct, list(t)))
for t in toks])
def exact_match(x1, x2):
xc1 = _clean_string(x1)
xc2 = _clean_string(x2)
return xc1==xc2
def f1_match(x1, x2):
# x1 is treated as Ground Truth.
tok1 = set(map(lambda t:_clean_string(t), x1.split()))
tok2 = set(map(lambda t:_clean_string(t), x2.split()))
l1 = len(tok1)
l2 = len(tok2)
ovr = len(tok1.intersection(tok2))
if ovr==0: return 0.
prec = float(ovr)/l2
rec = float(ovr)/l1
return 2*prec*rec/(prec+rec)
|
<commit_before><commit_msg>Add script computing exact_match and f1_match for Quasar-T<commit_after>
|
import string
pct = string.punctuation
articles = ['a','an','the']
def _clean_string(x):
# lowercase, remove articles, remove punctuation,
# and return as a single string without whitespace
toks = filter(lambda t:t not in articles, x.lower().split())
return ''.join([''.join(filter(lambda c:c not in pct, list(t)))
for t in toks])
def exact_match(x1, x2):
xc1 = _clean_string(x1)
xc2 = _clean_string(x2)
return xc1==xc2
def f1_match(x1, x2):
# x1 is treated as Ground Truth.
tok1 = set(map(lambda t:_clean_string(t), x1.split()))
tok2 = set(map(lambda t:_clean_string(t), x2.split()))
l1 = len(tok1)
l2 = len(tok2)
ovr = len(tok1.intersection(tok2))
if ovr==0: return 0.
prec = float(ovr)/l2
rec = float(ovr)/l1
return 2*prec*rec/(prec+rec)
|
Add script computing exact_match and f1_match for Quasar-Timport string
pct = string.punctuation
articles = ['a','an','the']
def _clean_string(x):
# lowercase, remove articles, remove punctuation,
# and return as a single string without whitespace
toks = filter(lambda t:t not in articles, x.lower().split())
return ''.join([''.join(filter(lambda c:c not in pct, list(t)))
for t in toks])
def exact_match(x1, x2):
xc1 = _clean_string(x1)
xc2 = _clean_string(x2)
return xc1==xc2
def f1_match(x1, x2):
# x1 is treated as Ground Truth.
tok1 = set(map(lambda t:_clean_string(t), x1.split()))
tok2 = set(map(lambda t:_clean_string(t), x2.split()))
l1 = len(tok1)
l2 = len(tok2)
ovr = len(tok1.intersection(tok2))
if ovr==0: return 0.
prec = float(ovr)/l2
rec = float(ovr)/l1
return 2*prec*rec/(prec+rec)
|
<commit_before><commit_msg>Add script computing exact_match and f1_match for Quasar-T<commit_after>import string
pct = string.punctuation
articles = ['a','an','the']
def _clean_string(x):
# lowercase, remove articles, remove punctuation,
# and return as a single string without whitespace
toks = filter(lambda t:t not in articles, x.lower().split())
return ''.join([''.join(filter(lambda c:c not in pct, list(t)))
for t in toks])
def exact_match(x1, x2):
xc1 = _clean_string(x1)
xc2 = _clean_string(x2)
return xc1==xc2
def f1_match(x1, x2):
# x1 is treated as Ground Truth.
tok1 = set(map(lambda t:_clean_string(t), x1.split()))
tok2 = set(map(lambda t:_clean_string(t), x2.split()))
l1 = len(tok1)
l2 = len(tok2)
ovr = len(tok1.intersection(tok2))
if ovr==0: return 0.
prec = float(ovr)/l2
rec = float(ovr)/l1
return 2*prec*rec/(prec+rec)
|
|
87456ed686e3ed55ce8abca68647f71653de709d
|
proximitySwTest.py
|
proximitySwTest.py
|
import RPi.GPIO as GPIO
import time
# Use GPIO numbers, not pin numbers (alt=BOARD for the numbers on the GPIO header pins)
GPIO.setmode(GPIO.BCM)
#set up the GPIO BCM channel 17 for input
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(22, GPIO.IN, pull_up_down=GPIO.PUD_UP)
lv=True;
cv=True;
while (True):
limiter_val = GPIO.input(17)
cam_val = GPIO.input(22)
if limiter_val and not lv:
print("Limiter switch closed")
if not limiter_val and lv:
print("Limiter switch opened")
lv = limiter_val
if cam_val and not cv:
print("Cam switch closed")
if not cam_val and cv:
print("Cam switch opened")
cv = cam_val
time.sleep(0.1)
|
Test script for mechanical and optical limit switches on GPIO
|
Test script for mechanical and optical limit switches on GPIO
|
Python
|
agpl-3.0
|
NohWayJose/PanCam
|
Test script for mechanical and optical limit switches on GPIO
|
import RPi.GPIO as GPIO
import time
# Use GPIO numbers, not pin numbers (alt=BOARD for the numbers on the GPIO header pins)
GPIO.setmode(GPIO.BCM)
#set up the GPIO BCM channel 17 for input
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(22, GPIO.IN, pull_up_down=GPIO.PUD_UP)
lv=True;
cv=True;
while (True):
limiter_val = GPIO.input(17)
cam_val = GPIO.input(22)
if limiter_val and not lv:
print("Limiter switch closed")
if not limiter_val and lv:
print("Limiter switch opened")
lv = limiter_val
if cam_val and not cv:
print("Cam switch closed")
if not cam_val and cv:
print("Cam switch opened")
cv = cam_val
time.sleep(0.1)
|
<commit_before><commit_msg>Test script for mechanical and optical limit switches on GPIO<commit_after>
|
import RPi.GPIO as GPIO
import time
# Use GPIO numbers, not pin numbers (alt=BOARD for the numbers on the GPIO header pins)
GPIO.setmode(GPIO.BCM)
#set up the GPIO BCM channel 17 for input
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(22, GPIO.IN, pull_up_down=GPIO.PUD_UP)
lv=True;
cv=True;
while (True):
limiter_val = GPIO.input(17)
cam_val = GPIO.input(22)
if limiter_val and not lv:
print("Limiter switch closed")
if not limiter_val and lv:
print("Limiter switch opened")
lv = limiter_val
if cam_val and not cv:
print("Cam switch closed")
if not cam_val and cv:
print("Cam switch opened")
cv = cam_val
time.sleep(0.1)
|
Test script for mechanical and optical limit switches on GPIOimport RPi.GPIO as GPIO
import time
# Use GPIO numbers, not pin numbers (alt=BOARD for the numbers on the GPIO header pins)
GPIO.setmode(GPIO.BCM)
#set up the GPIO BCM channel 17 for input
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(22, GPIO.IN, pull_up_down=GPIO.PUD_UP)
lv=True;
cv=True;
while (True):
limiter_val = GPIO.input(17)
cam_val = GPIO.input(22)
if limiter_val and not lv:
print("Limiter switch closed")
if not limiter_val and lv:
print("Limiter switch opened")
lv = limiter_val
if cam_val and not cv:
print("Cam switch closed")
if not cam_val and cv:
print("Cam switch opened")
cv = cam_val
time.sleep(0.1)
|
<commit_before><commit_msg>Test script for mechanical and optical limit switches on GPIO<commit_after>import RPi.GPIO as GPIO
import time
# Use GPIO numbers, not pin numbers (alt=BOARD for the numbers on the GPIO header pins)
GPIO.setmode(GPIO.BCM)
#set up the GPIO BCM channel 17 for input
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(22, GPIO.IN, pull_up_down=GPIO.PUD_UP)
lv=True;
cv=True;
while (True):
limiter_val = GPIO.input(17)
cam_val = GPIO.input(22)
if limiter_val and not lv:
print("Limiter switch closed")
if not limiter_val and lv:
print("Limiter switch opened")
lv = limiter_val
if cam_val and not cv:
print("Cam switch closed")
if not cam_val and cv:
print("Cam switch opened")
cv = cam_val
time.sleep(0.1)
|
|
01edb715a7716627fe3c73af74fa3c5bdd30995e
|
acq4/modules/MultiPatch/tests/test_logfile.py
|
acq4/modules/MultiPatch/tests/test_logfile.py
|
import numpy as np
from acq4.modules.MultiPatch.logfile import MultiPatchLog, IrregularTimeSeries
def test_timeseries_index():
ts1 = [
(10, 0.5),
(12, 13.4),
(29.8, 5),
(29.9, 6),
(30.0, 7),
(30.1, 8),
(35, 0),
]
ts2 = [
(10, (0.5, 13.4)),
(12, (13.4, 5)),
(29.8, (5, 0)),
(29.9, (6, -102.7)),
(30.0, (7, 23.)),
(30.1, (8, 0)),
(35, (0, 0)),
]
ts3 = [
(10, 'a'),
(12, 'b'),
(29.8, 'c'),
(29.9, 'd'),
(30.0, 'e'),
(30.1, 'f'),
(35, 'g'),
]
def lookup(t, ts):
# inefficient (but easier to test) method for doing timeseries lookup
# for comparison
low = None
for i,ev in enumerate(ts.events):
if ev[0] <= t:
low = i
else:
break
if low is None:
return None
if low+1 >= len(ts.events) or ts.interpolate is False:
return ts.events[low][1]
else:
t1, v1 = ts.events[low]
t2, v2 = ts.events[low+1]
return ts._interpolate(t, v1, v2, t1, t2)
for tsdata in (ts1, ts2, ts3):
for interp in (True, False):
if interp and isinstance(tsdata[0][1], str):
# don't test interpolation on strings
continue
for res in (0.1, 1.0, 10.0):
ts = IrregularTimeSeries(interpolate=interp, resolution=res)
for t,v in tsdata:
ts[t] = v
for t in np.arange(-1, 40, 0.1):
assert ts[t] == lookup(t, ts)
|
Add multipatch logfile unit tests
|
Add multipatch logfile unit tests
|
Python
|
mit
|
pbmanis/acq4,meganbkratz/acq4,meganbkratz/acq4,meganbkratz/acq4,acq4/acq4,pbmanis/acq4,pbmanis/acq4,pbmanis/acq4,acq4/acq4,campagnola/acq4,acq4/acq4,campagnola/acq4,acq4/acq4,campagnola/acq4,meganbkratz/acq4,campagnola/acq4
|
Add multipatch logfile unit tests
|
import numpy as np
from acq4.modules.MultiPatch.logfile import MultiPatchLog, IrregularTimeSeries
def test_timeseries_index():
ts1 = [
(10, 0.5),
(12, 13.4),
(29.8, 5),
(29.9, 6),
(30.0, 7),
(30.1, 8),
(35, 0),
]
ts2 = [
(10, (0.5, 13.4)),
(12, (13.4, 5)),
(29.8, (5, 0)),
(29.9, (6, -102.7)),
(30.0, (7, 23.)),
(30.1, (8, 0)),
(35, (0, 0)),
]
ts3 = [
(10, 'a'),
(12, 'b'),
(29.8, 'c'),
(29.9, 'd'),
(30.0, 'e'),
(30.1, 'f'),
(35, 'g'),
]
def lookup(t, ts):
# inefficient (but easier to test) method for doing timeseries lookup
# for comparison
low = None
for i,ev in enumerate(ts.events):
if ev[0] <= t:
low = i
else:
break
if low is None:
return None
if low+1 >= len(ts.events) or ts.interpolate is False:
return ts.events[low][1]
else:
t1, v1 = ts.events[low]
t2, v2 = ts.events[low+1]
return ts._interpolate(t, v1, v2, t1, t2)
for tsdata in (ts1, ts2, ts3):
for interp in (True, False):
if interp and isinstance(tsdata[0][1], str):
# don't test interpolation on strings
continue
for res in (0.1, 1.0, 10.0):
ts = IrregularTimeSeries(interpolate=interp, resolution=res)
for t,v in tsdata:
ts[t] = v
for t in np.arange(-1, 40, 0.1):
assert ts[t] == lookup(t, ts)
|
<commit_before><commit_msg>Add multipatch logfile unit tests<commit_after>
|
import numpy as np
from acq4.modules.MultiPatch.logfile import MultiPatchLog, IrregularTimeSeries
def test_timeseries_index():
ts1 = [
(10, 0.5),
(12, 13.4),
(29.8, 5),
(29.9, 6),
(30.0, 7),
(30.1, 8),
(35, 0),
]
ts2 = [
(10, (0.5, 13.4)),
(12, (13.4, 5)),
(29.8, (5, 0)),
(29.9, (6, -102.7)),
(30.0, (7, 23.)),
(30.1, (8, 0)),
(35, (0, 0)),
]
ts3 = [
(10, 'a'),
(12, 'b'),
(29.8, 'c'),
(29.9, 'd'),
(30.0, 'e'),
(30.1, 'f'),
(35, 'g'),
]
def lookup(t, ts):
# inefficient (but easier to test) method for doing timeseries lookup
# for comparison
low = None
for i,ev in enumerate(ts.events):
if ev[0] <= t:
low = i
else:
break
if low is None:
return None
if low+1 >= len(ts.events) or ts.interpolate is False:
return ts.events[low][1]
else:
t1, v1 = ts.events[low]
t2, v2 = ts.events[low+1]
return ts._interpolate(t, v1, v2, t1, t2)
for tsdata in (ts1, ts2, ts3):
for interp in (True, False):
if interp and isinstance(tsdata[0][1], str):
# don't test interpolation on strings
continue
for res in (0.1, 1.0, 10.0):
ts = IrregularTimeSeries(interpolate=interp, resolution=res)
for t,v in tsdata:
ts[t] = v
for t in np.arange(-1, 40, 0.1):
assert ts[t] == lookup(t, ts)
|
Add multipatch logfile unit testsimport numpy as np
from acq4.modules.MultiPatch.logfile import MultiPatchLog, IrregularTimeSeries
def test_timeseries_index():
ts1 = [
(10, 0.5),
(12, 13.4),
(29.8, 5),
(29.9, 6),
(30.0, 7),
(30.1, 8),
(35, 0),
]
ts2 = [
(10, (0.5, 13.4)),
(12, (13.4, 5)),
(29.8, (5, 0)),
(29.9, (6, -102.7)),
(30.0, (7, 23.)),
(30.1, (8, 0)),
(35, (0, 0)),
]
ts3 = [
(10, 'a'),
(12, 'b'),
(29.8, 'c'),
(29.9, 'd'),
(30.0, 'e'),
(30.1, 'f'),
(35, 'g'),
]
def lookup(t, ts):
# inefficient (but easier to test) method for doing timeseries lookup
# for comparison
low = None
for i,ev in enumerate(ts.events):
if ev[0] <= t:
low = i
else:
break
if low is None:
return None
if low+1 >= len(ts.events) or ts.interpolate is False:
return ts.events[low][1]
else:
t1, v1 = ts.events[low]
t2, v2 = ts.events[low+1]
return ts._interpolate(t, v1, v2, t1, t2)
for tsdata in (ts1, ts2, ts3):
for interp in (True, False):
if interp and isinstance(tsdata[0][1], str):
# don't test interpolation on strings
continue
for res in (0.1, 1.0, 10.0):
ts = IrregularTimeSeries(interpolate=interp, resolution=res)
for t,v in tsdata:
ts[t] = v
for t in np.arange(-1, 40, 0.1):
assert ts[t] == lookup(t, ts)
|
<commit_before><commit_msg>Add multipatch logfile unit tests<commit_after>import numpy as np
from acq4.modules.MultiPatch.logfile import MultiPatchLog, IrregularTimeSeries
def test_timeseries_index():
ts1 = [
(10, 0.5),
(12, 13.4),
(29.8, 5),
(29.9, 6),
(30.0, 7),
(30.1, 8),
(35, 0),
]
ts2 = [
(10, (0.5, 13.4)),
(12, (13.4, 5)),
(29.8, (5, 0)),
(29.9, (6, -102.7)),
(30.0, (7, 23.)),
(30.1, (8, 0)),
(35, (0, 0)),
]
ts3 = [
(10, 'a'),
(12, 'b'),
(29.8, 'c'),
(29.9, 'd'),
(30.0, 'e'),
(30.1, 'f'),
(35, 'g'),
]
def lookup(t, ts):
# inefficient (but easier to test) method for doing timeseries lookup
# for comparison
low = None
for i,ev in enumerate(ts.events):
if ev[0] <= t:
low = i
else:
break
if low is None:
return None
if low+1 >= len(ts.events) or ts.interpolate is False:
return ts.events[low][1]
else:
t1, v1 = ts.events[low]
t2, v2 = ts.events[low+1]
return ts._interpolate(t, v1, v2, t1, t2)
for tsdata in (ts1, ts2, ts3):
for interp in (True, False):
if interp and isinstance(tsdata[0][1], str):
# don't test interpolation on strings
continue
for res in (0.1, 1.0, 10.0):
ts = IrregularTimeSeries(interpolate=interp, resolution=res)
for t,v in tsdata:
ts[t] = v
for t in np.arange(-1, 40, 0.1):
assert ts[t] == lookup(t, ts)
|
|
6f9f87547ed193d01d00a6c0e6f3e3ad5206e4ed
|
sprotbot/notify.py
|
sprotbot/notify.py
|
import boto3
import os
def get_aws_creds():
"""
Pulls the AWS credentials from the current environment.
This assumes the following two environment variables are set:
- AWS_ACCESS_KEY_ID -- the public part of the AWS credential
- AWS_SECRET_ACCESS_KEY -- the private part of the AWS credential
It is also assumed that the AWS account to which these credentials belong
has write access to the SES service.
"""
try:
key_id = os.environ["AWS_ACCESS_KEY_ID"]
key_secret = os.environ["AWS_SECRET_ACCESS_KEY"]
except KeyError as ex:
raise AWSCredentialsError("AWS credentials not available in environment variables.")
return key_id, key_secret
class AWSCredentialsError(Exception):
pass
def send_email(sender, recipient, subject, message):
"""
Sends an email to a single recipient with a given subject and message.
Addresses can be formatted according to the pattern:
"Joe Bloggs <joe.bloggs@example.net>"
Message sent as plain text.
Assumptions:
1. sender needs to be verified in AWS.
2. if recipient not verified, then AWS account must not be in SES Sandbox.
"""
access_id, access_secret = get_aws_creds()
client = boto3.client("ses",
aws_access_key_id = access_id,
aws_secret_access_key = access_secret)
client.send_email(Source=sender,
Destination={"ToAddresses": [recipient]},
Message={"Subject": {"Data": subject},
"Body": {"Text": {"Data": message}}})
|
Add code for the mechanical aspects of sending the emails via AWS.
|
Add code for the mechanical aspects of sending the emails via AWS.
|
Python
|
mit
|
SUWS/sprotbot
|
Add code for the mechanical aspects of sending the emails via AWS.
|
import boto3
import os
def get_aws_creds():
"""
Pulls the AWS credentials from the current environment.
This assumes the following two environment variables are set:
- AWS_ACCESS_KEY_ID -- the public part of the AWS credential
- AWS_SECRET_ACCESS_KEY -- the private part of the AWS credential
It is also assumed that the AWS account to which these credentials belong
has write access to the SES service.
"""
try:
key_id = os.environ["AWS_ACCESS_KEY_ID"]
key_secret = os.environ["AWS_SECRET_ACCESS_KEY"]
except KeyError as ex:
raise AWSCredentialsError("AWS credentials not available in environment variables.")
return key_id, key_secret
class AWSCredentialsError(Exception):
pass
def send_email(sender, recipient, subject, message):
"""
Sends an email to a single recipient with a given subject and message.
Addresses can be formatted according to the pattern:
"Joe Bloggs <joe.bloggs@example.net>"
Message sent as plain text.
Assumptions:
1. sender needs to be verified in AWS.
2. if recipient not verified, then AWS account must not be in SES Sandbox.
"""
access_id, access_secret = get_aws_creds()
client = boto3.client("ses",
aws_access_key_id = access_id,
aws_secret_access_key = access_secret)
client.send_email(Source=sender,
Destination={"ToAddresses": [recipient]},
Message={"Subject": {"Data": subject},
"Body": {"Text": {"Data": message}}})
|
<commit_before><commit_msg>Add code for the mechanical aspects of sending the emails via AWS.<commit_after>
|
import boto3
import os
def get_aws_creds():
"""
Pulls the AWS credentials from the current environment.
This assumes the following two environment variables are set:
- AWS_ACCESS_KEY_ID -- the public part of the AWS credential
- AWS_SECRET_ACCESS_KEY -- the private part of the AWS credential
It is also assumed that the AWS account to which these credentials belong
has write access to the SES service.
"""
try:
key_id = os.environ["AWS_ACCESS_KEY_ID"]
key_secret = os.environ["AWS_SECRET_ACCESS_KEY"]
except KeyError as ex:
raise AWSCredentialsError("AWS credentials not available in environment variables.")
return key_id, key_secret
class AWSCredentialsError(Exception):
pass
def send_email(sender, recipient, subject, message):
"""
Sends an email to a single recipient with a given subject and message.
Addresses can be formatted according to the pattern:
"Joe Bloggs <joe.bloggs@example.net>"
Message sent as plain text.
Assumptions:
1. sender needs to be verified in AWS.
2. if recipient not verified, then AWS account must not be in SES Sandbox.
"""
access_id, access_secret = get_aws_creds()
client = boto3.client("ses",
aws_access_key_id = access_id,
aws_secret_access_key = access_secret)
client.send_email(Source=sender,
Destination={"ToAddresses": [recipient]},
Message={"Subject": {"Data": subject},
"Body": {"Text": {"Data": message}}})
|
Add code for the mechanical aspects of sending the emails via AWS.
import boto3
import os
def get_aws_creds():
"""
Pulls the AWS credentials from the current environment.
This assumes the following two environment variables are set:
- AWS_ACCESS_KEY_ID -- the public part of the AWS credential
- AWS_SECRET_ACCESS_KEY -- the private part of the AWS credential
It is also assumed that the AWS account to which these credentials belong
has write access to the SES service.
"""
try:
key_id = os.environ["AWS_ACCESS_KEY_ID"]
key_secret = os.environ["AWS_SECRET_ACCESS_KEY"]
except KeyError as ex:
raise AWSCredentialsError("AWS credentials not available in environment variables.")
return key_id, key_secret
class AWSCredentialsError(Exception):
pass
def send_email(sender, recipient, subject, message):
"""
Sends an email to a single recipient with a given subject and message.
Addresses can be formatted according to the pattern:
"Joe Bloggs <joe.bloggs@example.net>"
Message sent as plain text.
Assumptions:
1. sender needs to be verified in AWS.
2. if recipient not verified, then AWS account must not be in SES Sandbox.
"""
access_id, access_secret = get_aws_creds()
client = boto3.client("ses",
aws_access_key_id = access_id,
aws_secret_access_key = access_secret)
client.send_email(Source=sender,
Destination={"ToAddresses": [recipient]},
Message={"Subject": {"Data": subject},
"Body": {"Text": {"Data": message}}})
|
<commit_before><commit_msg>Add code for the mechanical aspects of sending the emails via AWS.<commit_after>
import boto3
import os
def get_aws_creds():
"""
Pulls the AWS credentials from the current environment.
This assumes the following two environment variables are set:
- AWS_ACCESS_KEY_ID -- the public part of the AWS credential
- AWS_SECRET_ACCESS_KEY -- the private part of the AWS credential
It is also assumed that the AWS account to which these credentials belong
has write access to the SES service.
"""
try:
key_id = os.environ["AWS_ACCESS_KEY_ID"]
key_secret = os.environ["AWS_SECRET_ACCESS_KEY"]
except KeyError as ex:
raise AWSCredentialsError("AWS credentials not available in environment variables.")
return key_id, key_secret
class AWSCredentialsError(Exception):
pass
def send_email(sender, recipient, subject, message):
"""
Sends an email to a single recipient with a given subject and message.
Addresses can be formatted according to the pattern:
"Joe Bloggs <joe.bloggs@example.net>"
Message sent as plain text.
Assumptions:
1. sender needs to be verified in AWS.
2. if recipient not verified, then AWS account must not be in SES Sandbox.
"""
access_id, access_secret = get_aws_creds()
client = boto3.client("ses",
aws_access_key_id = access_id,
aws_secret_access_key = access_secret)
client.send_email(Source=sender,
Destination={"ToAddresses": [recipient]},
Message={"Subject": {"Data": subject},
"Body": {"Text": {"Data": message}}})
|
|
25cb6af32c79466989b7d5404b885c3586740b53
|
zerver/management/commands/create_default_stream_groups.py
|
zerver/management/commands/create_default_stream_groups.py
|
from typing import Any
from argparse import ArgumentParser
from confirmation.models import Confirmation, create_confirmation_link
from zerver.lib.management import ZulipBaseCommand
from zerver.lib.actions import create_stream_if_needed
from zerver.models import MultiuseInvite, DefaultStream, DefaultStreamGroup
class Command(ZulipBaseCommand):
help = """
Create default stream groups which the users can choose during sign up.
./manage.py create_default_stream_groups -s gsoc-1,gsoc-2,gsoc-3 -d "Google summer of code" -r zulip
"""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
self.add_realm_args(parser, True)
parser.add_argument(
'-s', '--streams',
dest='streams',
type=str,
required=True,
help='A comma-separated list of stream names.')
parser.add_argument(
'-d', '--default-stream-group',
dest='default_stream_group',
type=str,
required=True,
help='Name of the group you want to create.'
)
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
realm = self.get_realm(options)
assert realm is not None # Should be ensured by parser
streams = []
stream_names = set([stream.strip() for stream in options["streams"].split(",")])
for stream_name in set(stream_names):
stream, _ = create_stream_if_needed(realm, stream_name)
streams.append(stream)
try:
default_stream_group = DefaultStreamGroup.objects.get(name=options["default_stream_group"], realm=realm)
except DefaultStreamGroup.DoesNotExist:
default_stream_group = DefaultStreamGroup.objects.create(name=options["default_stream_group"], realm=realm)
default_stream_group.streams = streams
default_stream_group.save()
default_stream_groups = DefaultStreamGroup.objects.all()
for default_stream_group in default_stream_groups:
print(default_stream_group.name)
for stream in default_stream_group.streams.all():
print(stream.name)
|
Create command for creating default stream groups.
|
management: Create command for creating default stream groups.
|
Python
|
apache-2.0
|
jackrzhang/zulip,brainwane/zulip,showell/zulip,punchagan/zulip,hackerkid/zulip,hackerkid/zulip,brainwane/zulip,shubhamdhama/zulip,tommyip/zulip,shubhamdhama/zulip,rht/zulip,Galexrt/zulip,eeshangarg/zulip,synicalsyntax/zulip,brainwane/zulip,brockwhittaker/zulip,hackerkid/zulip,timabbott/zulip,synicalsyntax/zulip,Galexrt/zulip,eeshangarg/zulip,kou/zulip,synicalsyntax/zulip,zulip/zulip,jackrzhang/zulip,dhcrzf/zulip,rishig/zulip,shubhamdhama/zulip,timabbott/zulip,zulip/zulip,mahim97/zulip,timabbott/zulip,eeshangarg/zulip,punchagan/zulip,rht/zulip,kou/zulip,rht/zulip,dhcrzf/zulip,Galexrt/zulip,rht/zulip,jackrzhang/zulip,brockwhittaker/zulip,eeshangarg/zulip,tommyip/zulip,dhcrzf/zulip,zulip/zulip,dhcrzf/zulip,showell/zulip,punchagan/zulip,rishig/zulip,hackerkid/zulip,hackerkid/zulip,zulip/zulip,shubhamdhama/zulip,timabbott/zulip,brockwhittaker/zulip,mahim97/zulip,rht/zulip,andersk/zulip,kou/zulip,showell/zulip,shubhamdhama/zulip,mahim97/zulip,tommyip/zulip,mahim97/zulip,eeshangarg/zulip,Galexrt/zulip,synicalsyntax/zulip,synicalsyntax/zulip,timabbott/zulip,andersk/zulip,dhcrzf/zulip,hackerkid/zulip,jackrzhang/zulip,andersk/zulip,timabbott/zulip,hackerkid/zulip,brainwane/zulip,showell/zulip,jackrzhang/zulip,mahim97/zulip,punchagan/zulip,eeshangarg/zulip,shubhamdhama/zulip,punchagan/zulip,kou/zulip,andersk/zulip,dhcrzf/zulip,rishig/zulip,zulip/zulip,tommyip/zulip,zulip/zulip,Galexrt/zulip,rishig/zulip,rht/zulip,Galexrt/zulip,rht/zulip,rishig/zulip,jackrzhang/zulip,Galexrt/zulip,andersk/zulip,brainwane/zulip,showell/zulip,jackrzhang/zulip,showell/zulip,punchagan/zulip,punchagan/zulip,kou/zulip,eeshangarg/zulip,rishig/zulip,showell/zulip,timabbott/zulip,synicalsyntax/zulip,zulip/zulip,kou/zulip,brainwane/zulip,rishig/zulip,brockwhittaker/zulip,dhcrzf/zulip,synicalsyntax/zulip,kou/zulip,tommyip/zulip,brainwane/zulip,brockwhittaker/zulip,andersk/zulip,tommyip/zulip,brockwhittaker/zulip,andersk/zulip,mahim97/zulip,tommyip/zulip,shubhamdhama/zulip
|
management: Create command for creating default stream groups.
|
from typing import Any
from argparse import ArgumentParser
from confirmation.models import Confirmation, create_confirmation_link
from zerver.lib.management import ZulipBaseCommand
from zerver.lib.actions import create_stream_if_needed
from zerver.models import MultiuseInvite, DefaultStream, DefaultStreamGroup
class Command(ZulipBaseCommand):
help = """
Create default stream groups which the users can choose during sign up.
./manage.py create_default_stream_groups -s gsoc-1,gsoc-2,gsoc-3 -d "Google summer of code" -r zulip
"""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
self.add_realm_args(parser, True)
parser.add_argument(
'-s', '--streams',
dest='streams',
type=str,
required=True,
help='A comma-separated list of stream names.')
parser.add_argument(
'-d', '--default-stream-group',
dest='default_stream_group',
type=str,
required=True,
help='Name of the group you want to create.'
)
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
realm = self.get_realm(options)
assert realm is not None # Should be ensured by parser
streams = []
stream_names = set([stream.strip() for stream in options["streams"].split(",")])
for stream_name in set(stream_names):
stream, _ = create_stream_if_needed(realm, stream_name)
streams.append(stream)
try:
default_stream_group = DefaultStreamGroup.objects.get(name=options["default_stream_group"], realm=realm)
except DefaultStreamGroup.DoesNotExist:
default_stream_group = DefaultStreamGroup.objects.create(name=options["default_stream_group"], realm=realm)
default_stream_group.streams = streams
default_stream_group.save()
default_stream_groups = DefaultStreamGroup.objects.all()
for default_stream_group in default_stream_groups:
print(default_stream_group.name)
for stream in default_stream_group.streams.all():
print(stream.name)
|
<commit_before><commit_msg>management: Create command for creating default stream groups.<commit_after>
|
from typing import Any
from argparse import ArgumentParser
from confirmation.models import Confirmation, create_confirmation_link
from zerver.lib.management import ZulipBaseCommand
from zerver.lib.actions import create_stream_if_needed
from zerver.models import MultiuseInvite, DefaultStream, DefaultStreamGroup
class Command(ZulipBaseCommand):
help = """
Create default stream groups which the users can choose during sign up.
./manage.py create_default_stream_groups -s gsoc-1,gsoc-2,gsoc-3 -d "Google summer of code" -r zulip
"""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
self.add_realm_args(parser, True)
parser.add_argument(
'-s', '--streams',
dest='streams',
type=str,
required=True,
help='A comma-separated list of stream names.')
parser.add_argument(
'-d', '--default-stream-group',
dest='default_stream_group',
type=str,
required=True,
help='Name of the group you want to create.'
)
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
realm = self.get_realm(options)
assert realm is not None # Should be ensured by parser
streams = []
stream_names = set([stream.strip() for stream in options["streams"].split(",")])
for stream_name in set(stream_names):
stream, _ = create_stream_if_needed(realm, stream_name)
streams.append(stream)
try:
default_stream_group = DefaultStreamGroup.objects.get(name=options["default_stream_group"], realm=realm)
except DefaultStreamGroup.DoesNotExist:
default_stream_group = DefaultStreamGroup.objects.create(name=options["default_stream_group"], realm=realm)
default_stream_group.streams = streams
default_stream_group.save()
default_stream_groups = DefaultStreamGroup.objects.all()
for default_stream_group in default_stream_groups:
print(default_stream_group.name)
for stream in default_stream_group.streams.all():
print(stream.name)
|
management: Create command for creating default stream groups.
from typing import Any
from argparse import ArgumentParser
from confirmation.models import Confirmation, create_confirmation_link
from zerver.lib.management import ZulipBaseCommand
from zerver.lib.actions import create_stream_if_needed
from zerver.models import MultiuseInvite, DefaultStream, DefaultStreamGroup
class Command(ZulipBaseCommand):
help = """
Create default stream groups which the users can choose during sign up.
./manage.py create_default_stream_groups -s gsoc-1,gsoc-2,gsoc-3 -d "Google summer of code" -r zulip
"""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
self.add_realm_args(parser, True)
parser.add_argument(
'-s', '--streams',
dest='streams',
type=str,
required=True,
help='A comma-separated list of stream names.')
parser.add_argument(
'-d', '--default-stream-group',
dest='default_stream_group',
type=str,
required=True,
help='Name of the group you want to create.'
)
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
realm = self.get_realm(options)
assert realm is not None # Should be ensured by parser
streams = []
stream_names = set([stream.strip() for stream in options["streams"].split(",")])
for stream_name in set(stream_names):
stream, _ = create_stream_if_needed(realm, stream_name)
streams.append(stream)
try:
default_stream_group = DefaultStreamGroup.objects.get(name=options["default_stream_group"], realm=realm)
except DefaultStreamGroup.DoesNotExist:
default_stream_group = DefaultStreamGroup.objects.create(name=options["default_stream_group"], realm=realm)
default_stream_group.streams = streams
default_stream_group.save()
default_stream_groups = DefaultStreamGroup.objects.all()
for default_stream_group in default_stream_groups:
print(default_stream_group.name)
for stream in default_stream_group.streams.all():
print(stream.name)
|
<commit_before><commit_msg>management: Create command for creating default stream groups.<commit_after>
from typing import Any
from argparse import ArgumentParser
from confirmation.models import Confirmation, create_confirmation_link
from zerver.lib.management import ZulipBaseCommand
from zerver.lib.actions import create_stream_if_needed
from zerver.models import MultiuseInvite, DefaultStream, DefaultStreamGroup
class Command(ZulipBaseCommand):
help = """
Create default stream groups which the users can choose during sign up.
./manage.py create_default_stream_groups -s gsoc-1,gsoc-2,gsoc-3 -d "Google summer of code" -r zulip
"""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
self.add_realm_args(parser, True)
parser.add_argument(
'-s', '--streams',
dest='streams',
type=str,
required=True,
help='A comma-separated list of stream names.')
parser.add_argument(
'-d', '--default-stream-group',
dest='default_stream_group',
type=str,
required=True,
help='Name of the group you want to create.'
)
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
realm = self.get_realm(options)
assert realm is not None # Should be ensured by parser
streams = []
stream_names = set([stream.strip() for stream in options["streams"].split(",")])
for stream_name in set(stream_names):
stream, _ = create_stream_if_needed(realm, stream_name)
streams.append(stream)
try:
default_stream_group = DefaultStreamGroup.objects.get(name=options["default_stream_group"], realm=realm)
except DefaultStreamGroup.DoesNotExist:
default_stream_group = DefaultStreamGroup.objects.create(name=options["default_stream_group"], realm=realm)
default_stream_group.streams = streams
default_stream_group.save()
default_stream_groups = DefaultStreamGroup.objects.all()
for default_stream_group in default_stream_groups:
print(default_stream_group.name)
for stream in default_stream_group.streams.all():
print(stream.name)
|
|
262ec846a2ab961329c5a08a6f059cefb2c27de8
|
shapes.py
|
shapes.py
|
import tensorflow as tf
def shape(tt):
"""Returns the shape of a TensorTrain.
This operation returns a 1-D integer tensor representing the shape of
the input. For TT-matrices the shape would have two values, see raw_shape for
the tensor shape.
Args:
tt: `TensorTrain` object.
Returns:
A `Tensor`
"""
tt_raw_shape = raw_shape(tt)
if tt.is_tt_matrix():
return tf.reduce_prod(raw_shape, axis=1)
else:
print(tt_raw_shape.get_shape())
return tt_raw_shape[0]
def raw_shape(tt):
"""Returns the shape of a TensorTrain.
This operation returns a 1-D integer tensor representing the shape of
the input. For TT-matrices the shape would have two values, see raw_shape for
the tensor shape.
Args:
tt: `TensorTrain` object.
Returns:
A 2D `Tensor` of size ndims() x 1 or x 2
"""
num_dims = tt.ndims()
num_tensor_axis = len(tt.get_raw_shape())
final_raw_shape = []
for ax in range(num_tensor_axis):
curr_raw_shape = []
for core_idx in range(num_dims):
curr_raw_shape.append(tf.shape(tt.tt_cores[core_idx])[ax + 1])
# print('s', tf.stack(curr_raw_shape, axis=0).get_shape())
final_raw_shape.append(tf.stack(curr_raw_shape, axis=0))
# print('f', tf.stack(final_raw_shape, axis=0).get_shape())
return tf.stack(final_raw_shape, axis=0)
|
Implement dynamic shape and raw_shape
|
Implement dynamic shape and raw_shape
|
Python
|
mit
|
Bihaqo/t3f
|
Implement dynamic shape and raw_shape
|
import tensorflow as tf
def shape(tt):
"""Returns the shape of a TensorTrain.
This operation returns a 1-D integer tensor representing the shape of
the input. For TT-matrices the shape would have two values, see raw_shape for
the tensor shape.
Args:
tt: `TensorTrain` object.
Returns:
A `Tensor`
"""
tt_raw_shape = raw_shape(tt)
if tt.is_tt_matrix():
return tf.reduce_prod(raw_shape, axis=1)
else:
print(tt_raw_shape.get_shape())
return tt_raw_shape[0]
def raw_shape(tt):
"""Returns the shape of a TensorTrain.
This operation returns a 1-D integer tensor representing the shape of
the input. For TT-matrices the shape would have two values, see raw_shape for
the tensor shape.
Args:
tt: `TensorTrain` object.
Returns:
A 2D `Tensor` of size ndims() x 1 or x 2
"""
num_dims = tt.ndims()
num_tensor_axis = len(tt.get_raw_shape())
final_raw_shape = []
for ax in range(num_tensor_axis):
curr_raw_shape = []
for core_idx in range(num_dims):
curr_raw_shape.append(tf.shape(tt.tt_cores[core_idx])[ax + 1])
# print('s', tf.stack(curr_raw_shape, axis=0).get_shape())
final_raw_shape.append(tf.stack(curr_raw_shape, axis=0))
# print('f', tf.stack(final_raw_shape, axis=0).get_shape())
return tf.stack(final_raw_shape, axis=0)
|
<commit_before><commit_msg>Implement dynamic shape and raw_shape<commit_after>
|
import tensorflow as tf
def shape(tt):
"""Returns the shape of a TensorTrain.
This operation returns a 1-D integer tensor representing the shape of
the input. For TT-matrices the shape would have two values, see raw_shape for
the tensor shape.
Args:
tt: `TensorTrain` object.
Returns:
A `Tensor`
"""
tt_raw_shape = raw_shape(tt)
if tt.is_tt_matrix():
return tf.reduce_prod(raw_shape, axis=1)
else:
print(tt_raw_shape.get_shape())
return tt_raw_shape[0]
def raw_shape(tt):
"""Returns the shape of a TensorTrain.
This operation returns a 1-D integer tensor representing the shape of
the input. For TT-matrices the shape would have two values, see raw_shape for
the tensor shape.
Args:
tt: `TensorTrain` object.
Returns:
A 2D `Tensor` of size ndims() x 1 or x 2
"""
num_dims = tt.ndims()
num_tensor_axis = len(tt.get_raw_shape())
final_raw_shape = []
for ax in range(num_tensor_axis):
curr_raw_shape = []
for core_idx in range(num_dims):
curr_raw_shape.append(tf.shape(tt.tt_cores[core_idx])[ax + 1])
# print('s', tf.stack(curr_raw_shape, axis=0).get_shape())
final_raw_shape.append(tf.stack(curr_raw_shape, axis=0))
# print('f', tf.stack(final_raw_shape, axis=0).get_shape())
return tf.stack(final_raw_shape, axis=0)
|
Implement dynamic shape and raw_shapeimport tensorflow as tf
def shape(tt):
"""Returns the shape of a TensorTrain.
This operation returns a 1-D integer tensor representing the shape of
the input. For TT-matrices the shape would have two values, see raw_shape for
the tensor shape.
Args:
tt: `TensorTrain` object.
Returns:
A `Tensor`
"""
tt_raw_shape = raw_shape(tt)
if tt.is_tt_matrix():
return tf.reduce_prod(raw_shape, axis=1)
else:
print(tt_raw_shape.get_shape())
return tt_raw_shape[0]
def raw_shape(tt):
"""Returns the shape of a TensorTrain.
This operation returns a 1-D integer tensor representing the shape of
the input. For TT-matrices the shape would have two values, see raw_shape for
the tensor shape.
Args:
tt: `TensorTrain` object.
Returns:
A 2D `Tensor` of size ndims() x 1 or x 2
"""
num_dims = tt.ndims()
num_tensor_axis = len(tt.get_raw_shape())
final_raw_shape = []
for ax in range(num_tensor_axis):
curr_raw_shape = []
for core_idx in range(num_dims):
curr_raw_shape.append(tf.shape(tt.tt_cores[core_idx])[ax + 1])
# print('s', tf.stack(curr_raw_shape, axis=0).get_shape())
final_raw_shape.append(tf.stack(curr_raw_shape, axis=0))
# print('f', tf.stack(final_raw_shape, axis=0).get_shape())
return tf.stack(final_raw_shape, axis=0)
|
<commit_before><commit_msg>Implement dynamic shape and raw_shape<commit_after>import tensorflow as tf
def shape(tt):
"""Returns the shape of a TensorTrain.
This operation returns a 1-D integer tensor representing the shape of
the input. For TT-matrices the shape would have two values, see raw_shape for
the tensor shape.
Args:
tt: `TensorTrain` object.
Returns:
A `Tensor`
"""
tt_raw_shape = raw_shape(tt)
if tt.is_tt_matrix():
return tf.reduce_prod(raw_shape, axis=1)
else:
print(tt_raw_shape.get_shape())
return tt_raw_shape[0]
def raw_shape(tt):
"""Returns the shape of a TensorTrain.
This operation returns a 1-D integer tensor representing the shape of
the input. For TT-matrices the shape would have two values, see raw_shape for
the tensor shape.
Args:
tt: `TensorTrain` object.
Returns:
A 2D `Tensor` of size ndims() x 1 or x 2
"""
num_dims = tt.ndims()
num_tensor_axis = len(tt.get_raw_shape())
final_raw_shape = []
for ax in range(num_tensor_axis):
curr_raw_shape = []
for core_idx in range(num_dims):
curr_raw_shape.append(tf.shape(tt.tt_cores[core_idx])[ax + 1])
# print('s', tf.stack(curr_raw_shape, axis=0).get_shape())
final_raw_shape.append(tf.stack(curr_raw_shape, axis=0))
# print('f', tf.stack(final_raw_shape, axis=0).get_shape())
return tf.stack(final_raw_shape, axis=0)
|
|
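A hypothetical sketch (not t3f API; the core shapes are invented for illustration) of the same per-axis stacking idea used in raw_shape, applied to a plain list of TT-cores with shape (rank_left, mode_size, rank_right):

import tensorflow as tf

# Cores of a TT decomposition of a 4 x 5 x 6 tensor with TT-ranks (1, 2, 3, 1).
cores = [tf.zeros((1, 4, 2)), tf.zeros((2, 5, 3)), tf.zeros((3, 6, 1))]
# Axis 1 of every core holds its mode size, so stacking tf.shape(core)[1]
# over the cores recovers the dynamic shape [4, 5, 6] of the underlying tensor.
mode_sizes = tf.stack([tf.shape(core)[1] for core in cores], axis=0)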
d7d07ca5416e78c4c5eccc8cf80a9e3620beadba
|
tests/test_reuse.py
|
tests/test_reuse.py
|
from django.template import TemplateSyntaxError
from django.template.loader import get_template
from django.test import SimpleTestCase
from .utils import TemplateTestMixin
class TestReuse(TemplateTestMixin, SimpleTestCase):
TEMPLATES = {
'base': '''{% block main %}{% endblock %}''',
'reuse': '''{% extends 'base' %}{% load sniplates %}{% block true %}true{% endblock %}{% block main %}{% reuse 'true' %}{% endblock %}''',
}
def test_reuse(self):
tmpl = get_template('reuse')
output = tmpl.render(self.ctx)
self.assertEqual(output, 'true')
|
Add simple test for reuse
|
Add simple test for reuse
|
Python
|
mit
|
funkybob/django-sniplates,wengole/django-sniplates,sergei-maertens/django-sniplates,funkybob/django-sniplates,sergei-maertens/django-sniplates,kezabelle/django-sniplates,wengole/django-sniplates,kezabelle/django-sniplates,sergei-maertens/django-sniplates,kezabelle/django-sniplates
|
Add simple test for reuse
|
from django.template import TemplateSyntaxError
from django.template.loader import get_template
from django.test import SimpleTestCase
from .utils import TemplateTestMixin
class TestReuse(TemplateTestMixin, SimpleTestCase):
TEMPLATES = {
'base': '''{% block main %}{% endblock %}''',
'reuse': '''{% extends 'base' %}{% load sniplates %}{% block true %}true{% endblock %}{% block main %}{% reuse 'true' %}{% endblock %}''',
}
def test_reuse(self):
tmpl = get_template('reuse')
output = tmpl.render(self.ctx)
self.assertEqual(output, 'true')
|
<commit_before><commit_msg>Add simple test for reuse<commit_after>
|
from django.template import TemplateSyntaxError
from django.template.loader import get_template
from django.test import SimpleTestCase
from .utils import TemplateTestMixin
class TestReuse(TemplateTestMixin, SimpleTestCase):
TEMPLATES = {
'base': '''{% block main %}{% endblock %}''',
'reuse': '''{% extends 'base' %}{% load sniplates %}{% block true %}true{% endblock %}{% block main %}{% reuse 'true' %}{% endblock %}''',
}
def test_reuse(self):
tmpl = get_template('reuse')
output = tmpl.render(self.ctx)
self.assertEqual(output, 'true')
|
Add simple test for reuse
from django.template import TemplateSyntaxError
from django.template.loader import get_template
from django.test import SimpleTestCase
from .utils import TemplateTestMixin
class TestReuse(TemplateTestMixin, SimpleTestCase):
TEMPLATES = {
'base': '''{% block main %}{% endblock %}''',
'reuse': '''{% extends 'base' %}{% load sniplates %}{% block true %}true{% endblock %}{% block main %}{% reuse 'true' %}{% endblock %}''',
}
def test_reuse(self):
tmpl = get_template('reuse')
output = tmpl.render(self.ctx)
self.assertEqual(output, 'true')
|
<commit_before><commit_msg>Add simple test for reuse<commit_after>
from django.template import TemplateSyntaxError
from django.template.loader import get_template
from django.test import SimpleTestCase
from .utils import TemplateTestMixin
class TestReuse(TemplateTestMixin, SimpleTestCase):
TEMPLATES = {
'base': '''{% block main %}{% endblock %}''',
'reuse': '''{% extends 'base' %}{% load sniplates %}{% block true %}true{% endblock %}{% block main %}{% reuse 'true' %}{% endblock %}''',
}
def test_reuse(self):
tmpl = get_template('reuse')
output = tmpl.render(self.ctx)
self.assertEqual(output, 'true')
|
|
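For context, a self-contained sketch of rendering a Django template string outside the TemplateTestMixin harness used above; configuring settings inline is an assumption about running standalone rather than under the project test runner.

import django
from django.conf import settings

if not settings.configured:
    settings.configure(TEMPLATES=[
        {"BACKEND": "django.template.backends.django.DjangoTemplates"},
    ])
    django.setup()

from django.template import Context, Template

# Render a small template string directly and check its output.
assert Template("{% if ok %}true{% endif %}").render(Context({"ok": True})) == "true"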
e5495db9f28b3fc1bd44c56c21e3aedb9a19e777
|
string.py
|
string.py
|
# Basic string exercises
# Fill in the definitions for the required functions. The main functions and the testing
# has been handled, so when you run a program, you will get an output of how many testcases
# passed and how many didn't.
# A. Odd Rotation
def odd_rotation(number, word):
"""
The function takes a number and a word as its arguments.
If the number is even, return the first three characters of the string.
If the number is odd, return a word formed by a concatenation of the last 2 letters
and the first 2 letters of the word.
For example,
odd_rotation(2, 'apple') -> 'app'
odd_rotation(5, 'cabbage') -> 'geca'
"""
# Add your code here
return
# B. Flip Case
def flip_case(word):
"""
The function takes a word as its argument.
Return a word that has all lowercase characters swapped to uppercase and vice versa.
"""
# Add your code here
return
# C. And/Or ?
def and_or(word):
"""
The function takes a word as its argument.
If the word starts with 'a', return the index where the substring 'and' appears.
Otherwise, if the word ends with 'r', return the index where the substring 'or' appears.
"""
# Add your code here
return
# D. sin + cos
def sincos(number):
"""
The function takes a number as its argument.
Return a string formed by taking the first 5 characters of the result of the expression -
sin(number) + cos(number)
HINT : Look into the math module in more detail
"""
# Add your code here
return
|
Add first set of exercises
|
Add first set of exercises
|
Python
|
mit
|
AlexMathew/csipy-exercises
|
Add first set of exercises
|
# Basic string exercises
# Fill in the definitions for the required functions. The main functions and the testing
# has been handled, so when you run a program, you will get an output of how many testcases
# passed and how many didn't.
# A. Odd Rotation
def odd_rotation(number, word):
"""
The function takes a number and a word as its arguments.
If the number is even, return the first three characters of the string.
If the number is odd, return a word formed by a concatenation of the last 2 letters
and the first 2 letters of the word.
For example,
odd_rotation(2, 'apple') -> 'app'
odd_rotation(5, 'cabbage') -> 'geca'
"""
# Add your code here
return
# B. Flip Case
def flip_case(word):
"""
The function takes a word as its argument.
Return a word that has all lowercase characters swapped to uppercase and vice versa.
"""
# Add your code here
return
# C. And/Or ?
def and_or(word):
"""
The function takes a word as its argument.
If the word starts with 'a', return the index where the substring 'and' appears.
Otherwise, if the word ends with 'r', return the index where the substring 'or' appears.
"""
# Add your code here
return
# D. sin + cos
def sincos(number):
"""
The function takes a number as its argument.
Return a string formed by taking the first 5 characters of the result of the expression -
sin(number) + cos(number)
HINT : Look into the math module in more detail
"""
# Add your code here
return
|
<commit_before><commit_msg>Add first set of exercises<commit_after>
|
# Basic string exercises
# Fill in the definitions for the required functions. The main functions and the testing
# has been handled, so when you run a program, you will get an output of how many testcases
# passed and how many didn't.
# A. Odd Rotation
def odd_rotation(number, word):
"""
The function takes a number and a word as its arguments.
If the number is even, return the first three characters of the string.
If the number is odd, return a word formed by a concatenation of the last 2 letters
and the first 2 letters of the word.
For example,
odd_rotation(2, 'apple') -> 'app'
odd_rotation(5, 'cabbage') -> 'geca'
"""
# Add your code here
return
# B. Flip Case
def flip_case(word):
"""
The function takes a word as its argument.
Return a word that has all lowercase characters swapped to uppercase and vice versa.
"""
# Add your code here
return
# C. And/Or ?
def and_or(word):
"""
The function takes a word as its argument.
If the word starts with 'a', return the index where the substring 'and' appears.
Otherwise, if the word ends with 'r', return the index where the substring 'or' appears.
"""
# Add your code here
return
# D. sin + cos
def sincos(number):
"""
The function takes a number as its argument.
Return a string formed by taking the first 5 characters of the result of the expression -
sin(number) + cos(number)
HINT : Look into the math module in more detail
"""
# Add your code here
return
|
Add first set of exercises# Basic string exercises
# Fill in the definitions for the required functions. The main functions and the testing
# has been handled, so when you run a program, you will get an output of how many testcases
# passed and how many didn't.
# A. Odd Rotation
def odd_rotation(number, word):
"""
The function takes a number and a word as its arguments.
If the number is even, return the first three characters of the string.
If the number is odd, return a word formed by a concatenation of the last 2 letters
and the first 2 letters of the word.
For example,
odd_rotation(2, 'apple') -> 'app'
odd_rotation(5, 'cabbage') -> 'geca'
"""
# Add your code here
return
# B. Flip Case
def flip_case(word):
"""
The function takes a word as its argument.
Return a word that has all lowercase characters swapped to uppercase and vice versa.
"""
# Add your code here
return
# C. And/Or ?
def and_or(word):
"""
The function takes a word as its argument.
If the word starts with 'a', return the index where the substring 'and' appears.
Otherwise, if the word ends with 'r', return the index where the substring 'or' appears.
"""
# Add your code here
return
# D. sin + cos
def sincos(number):
"""
The function takes a number as its argument.
Return a string formed by taking the first 5 characters of the result of the expression -
sin(number) + cos(number)
HINT : Look into the math module in more detail
"""
# Add your code here
return
|
<commit_before><commit_msg>Add first set of exercises<commit_after># Basic string exercises
# Fill in the definitions for the required functions. The main functions and the testing
# has been handled, so when you run a program, you will get an output of how many testcases
# passed and how many didn't.
# A. Odd Rotation
def odd_rotation(number, word):
"""
The function takes a number and a word as its arguments.
If the number is even, return the first three characters of the string.
If the number is odd, return a word formed by a concatenation of the last 2 letters
and the first 2 letters of the word.
For example,
odd_rotation(2, 'apple') -> 'app'
odd_rotation(5, 'cabbage') -> 'geca'
"""
# Add your code here
return
# B. Flip Case
def flip_case(word):
"""
The function takes a word as its argument.
Return a word that has all lowercase characters swapped to uppercase and vice versa.
"""
# Add your code here
return
# C. And/Or ?
def and_or(word):
"""
The function takes a word as its argument.
If the word starts with 'a', return the index where the substring 'and' appears.
Otherwise, if the word ends with 'r', return the index where the substring 'or' appears.
"""
# Add your code here
return
# D. sin + cos
def sincos(number):
"""
The function takes a number as its argument.
Return a string formed by taking the first 5 characters of the result of the expression -
sin(number) + cos(number)
HINT : Look into the math module in more detail
"""
# Add your code here
return
|
|
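Possible solutions for two of the exercises above, shown only as illustrative sketches (one of several valid approaches, not an official answer key):

def odd_rotation(number, word):
    # Even number: first three characters; odd: last two letters + first two letters.
    return word[:3] if number % 2 == 0 else word[-2:] + word[:2]

def flip_case(word):
    # str.swapcase already swaps lowercase and uppercase characters.
    return word.swapcase()

assert odd_rotation(2, 'apple') == 'app'
assert odd_rotation(5, 'cabbage') == 'geca'
assert flip_case('PyThOn') == 'pYtHoN'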
a11bca7772de655a6629d0070181cd4e027fc0d4
|
accelerator/tests/test_utils.py
|
accelerator/tests/test_utils.py
|
from bullet_train import BulletTrain
from mock import patch
from django.test import TestCase
from ..utils import flag_smith_has_feature
class TestUtils(TestCase):
def test_flag_smith_has_feature(self):
with patch("bullet_train.BulletTrain.has_feature") as mock_function:
flag_smith_has_feature("feature_key")
mock_function.assert_called_with("feature_key")
|
Add quick unit test for flag_smith_has_feature
|
[AC-8173] Add quick unit test for flag_smith_has_feature
|
Python
|
mit
|
masschallenge/django-accelerator,masschallenge/django-accelerator
|
[AC-8173] Add quick unit test for flag_smith_has_feature
|
from bullet_train import BulletTrain
from mock import patch
from django.test import TestCase
from ..utils import flag_smith_has_feature
class TestUtils(TestCase):
def test_flag_smith_has_feature(self):
with patch("bullet_train.BulletTrain.has_feature") as mock_function:
flag_smith_has_feature("feature_key")
mock_function.assert_called_with("feature_key")
|
<commit_before><commit_msg>[AC-8173] Add quick unit test for flag_smith_has_feature<commit_after>
|
from bullet_train import BulletTrain
from mock import patch
from django.test import TestCase
from ..utils import flag_smith_has_feature
class TestUtils(TestCase):
def test_flag_smith_has_feature(self):
with patch("bullet_train.BulletTrain.has_feature") as mock_function:
flag_smith_has_feature("feature_key")
mock_function.assert_called_with("feature_key")
|
[AC-8173] Add quick unit test for flag_smith_has_featurefrom bullet_train import BulletTrain
from mock import patch
from django.test import TestCase
from ..utils import flag_smith_has_feature
class TestUtils(TestCase):
def test_flag_smith_has_feature(self):
with patch("bullet_train.BulletTrain.has_feature") as mock_function:
flag_smith_has_feature("feature_key")
mock_function.assert_called_with("feature_key")
|
<commit_before><commit_msg>[AC-8173] Add quick unit test for flag_smith_has_feature<commit_after>from bullet_train import BulletTrain
from mock import patch
from django.test import TestCase
from ..utils import flag_smith_has_feature
class TestUtils(TestCase):
def test_flag_smith_has_feature(self):
with patch("bullet_train.BulletTrain.has_feature") as mock_function:
flag_smith_has_feature("feature_key")
mock_function.assert_called_with("feature_key")
|
|
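The test above is the standard patch-and-assert pattern; a dependency-free illustration with json.dumps standing in for the patched collaborator (purely hypothetical stand-ins, no Flagsmith or BulletTrain involved):

import json
from unittest import mock

def has_feature(key):
    # Looks up json.dumps at call time, so a patch on json.dumps is observed.
    return json.dumps(key)

with mock.patch("json.dumps") as mocked:
    has_feature("feature_key")
    mocked.assert_called_with("feature_key")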
2e7bff01ce3b15b943691a82e766ceffda0cc355
|
fourth_edition/ch2_linked_lists/python/2.1.py
|
fourth_edition/ch2_linked_lists/python/2.1.py
|
'''
Write code to remove duplicates from an unsorted linked list.
FOLLOW UP
How would you solve this problem if a temporary buffer is not allowed?
'''
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next_node = next_node
def get_data(self):
return self.data
def set_data(self, data):
self.data = data
def get_next_node(self):
return self.next_node
def set_next_node(self, next_node):
self.next_node = next_node
dup_data = dict()
class LinkedList(object):
def __init__(self, head=None):
self.head = head
def insert_node(self, data):
new_node = Node(data, self.head)
self.head = new_node
# print(data, " inserted!")
def traversal(self):
curr_node = self.head
while curr_node:
data_ = curr_node.get_data()
print(data_, end=" ")
curr_node = curr_node.get_next_node()
print("")
def build_dup_hashset(self):
global dup_data
curr_node = self.head
while curr_node:
data_ = curr_node.get_data()
if data_ in dup_data:
count = dup_data[data_]
dup_data[data_] = count + 1
else:
dup_data[data_] = 1
curr_node = curr_node.get_next_node()
def del_node(self, data):
curr_node = self.head
prev_node = None
while curr_node:
if curr_node.get_data() == data:
if prev_node:
prev_node.set_next_node(curr_node.get_next_node())
else:
self.head = curr_node.get_next_node()
print(data, " deleted!")
return
else:
prev_node = curr_node
curr_node = curr_node.get_next_node()
def del_dups(self):
global dup_data
self.build_dup_hashset()
for k, v in dup_data.items():
if v > 1:
for _ in range(v-1):
self.del_node(k)
dup_data[k] = v - 1
myLL = LinkedList()
myLL.insert_node(10)
myLL.insert_node(20)
myLL.insert_node(20)
myLL.insert_node(20)
myLL.insert_node(30)
myLL.insert_node(50)
myLL.insert_node(50)
myLL.insert_node(50)
myLL.insert_node(60)
myLL.insert_node(60)
print("Traversing the original linked list")
myLL.traversal()
print("Deleting duplicate data")
print(myLL.del_dups())
print("Traversing the de-duplicated linked list")
myLL.traversal()
|
Remove duplicates from an unsorted linked list
|
Remove duplicates from an unsorted linked list
|
Python
|
mit
|
roommen/CtCI
|
Remove duplicates from an unsorted linked list
|
'''
Write code to remove duplicates from an unsorted linked list.
FOLLOW UP
How would you solve this problem if a temporary buffer is not allowed?
'''
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next_node = next_node
def get_data(self):
return self.data
def set_data(self, data):
self.data = data
def get_next_node(self):
return self.next_node
def set_next_node(self, next_node):
self.next_node = next_node
dup_data = dict()
class LinkedList(object):
def __init__(self, head=None):
self.head = head
def insert_node(self, data):
new_node = Node(data, self.head)
self.head = new_node
# print(data, " inserted!")
def traversal(self):
curr_node = self.head
while curr_node:
data_ = curr_node.get_data()
print(data_, end=" ")
curr_node = curr_node.get_next_node()
print("")
def build_dup_hashset(self):
global dup_data
curr_node = self.head
while curr_node:
data_ = curr_node.get_data()
if data_ in dup_data:
count = dup_data[data_]
dup_data[data_] = count + 1
else:
dup_data[data_] = 1
curr_node = curr_node.get_next_node()
def del_node(self, data):
curr_node = self.head
prev_node = None
while curr_node:
if curr_node.get_data() == data:
if prev_node:
prev_node.set_next_node(curr_node.get_next_node())
else:
self.head = curr_node.get_next_node()
print(data, " deleted!")
return
else:
prev_node = curr_node
curr_node = curr_node.get_next_node()
def del_dups(self):
global dup_data
self.build_dup_hashset()
for k, v in dup_data.items():
if v > 1:
for _ in range(v-1):
self.del_node(k)
dup_data[k] = v - 1
myLL = LinkedList()
myLL.insert_node(10)
myLL.insert_node(20)
myLL.insert_node(20)
myLL.insert_node(20)
myLL.insert_node(30)
myLL.insert_node(50)
myLL.insert_node(50)
myLL.insert_node(50)
myLL.insert_node(60)
myLL.insert_node(60)
print("Traversing the original linked list")
myLL.traversal()
print("Deleting duplicate data")
print(myLL.del_dups())
print("Traversing the de-duplicated linked list")
myLL.traversal()
|
<commit_before><commit_msg>Remove duplicates from an unsorted linked list<commit_after>
|
'''
Write code to remove duplicates from an unsorted linked list.
FOLLOW UP
How would you solve this problem if a temporary buffer is not allowed?
'''
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next_node = next_node
def get_data(self):
return self.data
def set_data(self, data):
self.data = data
def get_next_node(self):
return self.next_node
def set_next_node(self, next_node):
self.next_node = next_node
dup_data = dict()
class LinkedList(object):
def __init__(self, head=None):
self.head = head
def insert_node(self, data):
new_node = Node(data, self.head)
self.head = new_node
# print(data, " inserted!")
def traversal(self):
curr_node = self.head
while curr_node:
data_ = curr_node.get_data()
print(data_, end=" ")
curr_node = curr_node.get_next_node()
print("")
def build_dup_hashset(self):
global dup_data
curr_node = self.head
while curr_node:
data_ = curr_node.get_data()
if data_ in dup_data:
count = dup_data[data_]
dup_data[data_] = count + 1
else:
dup_data[data_] = 1
curr_node = curr_node.get_next_node()
def del_node(self, data):
curr_node = self.head
prev_node = None
while curr_node:
if curr_node.get_data() == data:
if prev_node:
prev_node.set_next_node(curr_node.get_next_node())
else:
self.head = curr_node.get_next_node()
print(data, " deleted!")
return
else:
prev_node = curr_node
curr_node = curr_node.get_next_node()
def del_dups(self):
global dup_data
self.build_dup_hashset()
for k, v in dup_data.items():
if v > 1:
for _ in range(v-1):
self.del_node(k)
dup_data[k] = v - 1
myLL = LinkedList()
myLL.insert_node(10)
myLL.insert_node(20)
myLL.insert_node(20)
myLL.insert_node(20)
myLL.insert_node(30)
myLL.insert_node(50)
myLL.insert_node(50)
myLL.insert_node(50)
myLL.insert_node(60)
myLL.insert_node(60)
print("Traversing the original linked list")
myLL.traversal()
print("Deleting duplicate data")
print(myLL.del_dups())
print("Traversing the de-duplicated linked list")
myLL.traversal()
|
Remove duplicates from an unsorted linked list'''
Write code to remove duplicates from an unsorted linked list.
FOLLOW UP
How would you solve this problem if a temporary buffer is not allowed?
'''
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next_node = next_node
def get_data(self):
return self.data
def set_data(self, data):
self.data = data
def get_next_node(self):
return self.next_node
def set_next_node(self, next_node):
self.next_node = next_node
dup_data = dict()
class LinkedList(object):
def __init__(self, head=None):
self.head = head
def insert_node(self, data):
new_node = Node(data, self.head)
self.head = new_node
# print(data, " inserted!")
def traversal(self):
curr_node = self.head
while curr_node:
data_ = curr_node.get_data()
print(data_, end=" ")
curr_node = curr_node.get_next_node()
print("")
def build_dup_hashset(self):
global dup_data
curr_node = self.head
while curr_node:
data_ = curr_node.get_data()
if data_ in dup_data:
count = dup_data[data_]
dup_data[data_] = count + 1
else:
dup_data[data_] = 1
curr_node = curr_node.get_next_node()
def del_node(self, data):
curr_node = self.head
prev_node = None
while curr_node:
if curr_node.get_data() == data:
if prev_node:
prev_node.set_next_node(curr_node.get_next_node())
else:
self.head = curr_node.get_next_node()
print(data, " deleted!")
return
else:
prev_node = curr_node
curr_node = curr_node.get_next_node()
def del_dups(self):
global dup_data
self.build_dup_hashset()
for k, v in dup_data.items():
if v > 1:
for _ in range(v-1):
self.del_node(k)
dup_data[k] = v - 1
myLL = LinkedList()
myLL.insert_node(10)
myLL.insert_node(20)
myLL.insert_node(20)
myLL.insert_node(20)
myLL.insert_node(30)
myLL.insert_node(50)
myLL.insert_node(50)
myLL.insert_node(50)
myLL.insert_node(60)
myLL.insert_node(60)
print("Traversing the original linked list")
myLL.traversal()
print("Deleting duplicate data")
print(myLL.del_dups())
print("Traversing the de-duplicated linked list")
myLL.traversal()
|
<commit_before><commit_msg>Remove duplicates from an unsorted linked list<commit_after>'''
Write code to remove duplicates from an unsorted linked list.
FOLLOW UP
How would you solve this problem if a temporary buffer is not allowed?
'''
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next_node = next_node
def get_data(self):
return self.data
def set_data(self, data):
self.data = data
def get_next_node(self):
return self.next_node
def set_next_node(self, next_node):
self.next_node = next_node
dup_data = dict()
class LinkedList(object):
def __init__(self, head=None):
self.head = head
def insert_node(self, data):
new_node = Node(data, self.head)
self.head = new_node
# print(data, " inserted!")
def traversal(self):
curr_node = self.head
while curr_node:
data_ = curr_node.get_data()
print(data_, end=" ")
curr_node = curr_node.get_next_node()
print("")
def build_dup_hashset(self):
global dup_data
curr_node = self.head
while curr_node:
data_ = curr_node.get_data()
if data_ in dup_data:
count = dup_data[data_]
dup_data[data_] = count + 1
else:
dup_data[data_] = 1
curr_node = curr_node.get_next_node()
def del_node(self, data):
curr_node = self.head
prev_node = None
while curr_node:
if curr_node.get_data() == data:
if prev_node:
prev_node.set_next_node(curr_node.get_next_node())
else:
self.head = curr_node.get_next_node()
print(data, " deleted!")
return
else:
prev_node = curr_node
curr_node = curr_node.get_next_node()
def del_dups(self):
global dup_data
self.build_dup_hashset()
for k, v in dup_data.items():
if v > 1:
for _ in range(v-1):
self.del_node(k)
dup_data[k] = v - 1
myLL = LinkedList()
myLL.insert_node(10)
myLL.insert_node(20)
myLL.insert_node(20)
myLL.insert_node(20)
myLL.insert_node(30)
myLL.insert_node(50)
myLL.insert_node(50)
myLL.insert_node(50)
myLL.insert_node(60)
myLL.insert_node(60)
print("Traversing the original linked list")
myLL.traversal()
print("Deleting duplicate data")
print(myLL.del_dups())
print("Traversing the de-duplicated linked list")
myLL.traversal()
|
|
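The FOLLOW UP in the docstring asks for a solution without a temporary buffer; a sketch of the usual runner-pointer answer (O(n^2) time, O(1) extra space), using a simplified node type rather than the classes above:

class _Node:
    def __init__(self, data, next_node=None):
        self.data = data
        self.next_node = next_node

def dedup_no_buffer(head):
    current = head
    while current:
        runner = current
        while runner.next_node:
            if runner.next_node.data == current.data:
                runner.next_node = runner.next_node.next_node  # unlink duplicate
            else:
                runner = runner.next_node
        current = current.next_node
    return head

# quick check: 1 -> 2 -> 2 -> 3 -> 1 becomes 1 -> 2 -> 3
head = _Node(1, _Node(2, _Node(2, _Node(3, _Node(1)))))
dedup_no_buffer(head)
values, node = [], head
while node:
    values.append(node.data)
    node = node.next_node
assert values == [1, 2, 3]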
3e51aa99f27f5e61737cc900cbbdfe53bd8c212b
|
docstamp/filenames.py
|
docstamp/filenames.py
|
# coding=utf-8
# -------------------------------------------------------------------------------
# Author: Alexandre Manhaes Savio <alexsavio@gmail.com>
# Grupo de Inteligencia Computational <www.ehu.es/ccwintco>
# Universidad del Pais Vasco UPV/EHU
#
# 2015, Alexandre Manhaes Savio
# Use this at your own risk!
# -------------------------------------------------------------------------------
import tempfile
import os.path as op
import logging
from .config import get_temp_dir
log = logging.getLogger(__name__)
def get_extension(filepath, check_if_exists=False):
"""Return the extension of fpath.
Parameters
----------
fpath: string
File name or path
check_if_exists: bool
Returns
-------
str
The extension of the file name or path
"""
if check_if_exists:
if not op.exists(filepath):
err = 'File not found: ' + filepath
log.error(err)
raise IOError(err)
try:
rest, ext = op.splitext(filepath)
except:
raise
else:
return ext
def add_extension_if_needed(filepath, ext, check_if_exists=False):
"""Add the extension ext to fpath if it doesn't have it.
Parameters
----------
filepath: str
File name or path
ext: str
File extension
check_if_exists: bool
Returns
-------
File name or path with extension added, if needed.
"""
if not filepath.endswith(ext):
filepath += ext
if check_if_exists:
if not op.exists(filepath):
err = 'File not found: ' + filepath
log.error(err)
raise IOError(err)
return filepath
def remove_ext(filepath):
"""Removes the extension of the file.
Parameters
----------
filepath: str
File path or name
Returns
-------
str
File path or name without extension
"""
return filepath[:filepath.rindex(get_extension(filepath))]
def get_tempfile(suffix='.txt'):
"""
Parameters
----------
Returns
-------
"""
return tempfile.NamedTemporaryFile(suffix=suffix, dir=get_temp_dir())
|
Add helpers to manage file names
|
Add helpers to manage file names
|
Python
|
apache-2.0
|
PythonSanSebastian/docstamp
|
Add helpers to manage file names
|
# coding=utf-8
# -------------------------------------------------------------------------------
# Author: Alexandre Manhaes Savio <alexsavio@gmail.com>
# Grupo de Inteligencia Computational <www.ehu.es/ccwintco>
# Universidad del Pais Vasco UPV/EHU
#
# 2015, Alexandre Manhaes Savio
# Use this at your own risk!
# -------------------------------------------------------------------------------
import tempfile
import os.path as op
import logging
from .config import get_temp_dir
log = logging.getLogger(__name__)
def get_extension(filepath, check_if_exists=False):
"""Return the extension of fpath.
Parameters
----------
fpath: string
File name or path
check_if_exists: bool
Returns
-------
str
The extension of the file name or path
"""
if check_if_exists:
if not op.exists(filepath):
err = 'File not found: ' + filepath
log.error(err)
raise IOError(err)
try:
rest, ext = op.splitext(filepath)
except:
raise
else:
return ext
def add_extension_if_needed(filepath, ext, check_if_exists=False):
"""Add the extension ext to fpath if it doesn't have it.
Parameters
----------
filepath: str
File name or path
ext: str
File extension
check_if_exists: bool
Returns
-------
File name or path with extension added, if needed.
"""
if not filepath.endswith(ext):
filepath += ext
if check_if_exists:
if not op.exists(filepath):
err = 'File not found: ' + filepath
log.error(err)
raise IOError(err)
return filepath
def remove_ext(filepath):
"""Removes the extension of the file.
Parameters
----------
filepath: str
File path or name
Returns
-------
str
File path or name without extension
"""
return filepath[:filepath.rindex(get_extension(filepath))]
def get_tempfile(suffix='.txt'):
"""
Parameters
----------
Returns
-------
"""
return tempfile.NamedTemporaryFile(suffix=suffix, dir=get_temp_dir())
|
<commit_before><commit_msg>Add helpers to manage file names<commit_after>
|
# coding=utf-8
# -------------------------------------------------------------------------------
# Author: Alexandre Manhaes Savio <alexsavio@gmail.com>
# Grupo de Inteligencia Computational <www.ehu.es/ccwintco>
# Universidad del Pais Vasco UPV/EHU
#
# 2015, Alexandre Manhaes Savio
# Use this at your own risk!
# -------------------------------------------------------------------------------
import tempfile
import os.path as op
import logging
from .config import get_temp_dir
log = logging.getLogger(__name__)
def get_extension(filepath, check_if_exists=False):
"""Return the extension of fpath.
Parameters
----------
fpath: string
File name or path
check_if_exists: bool
Returns
-------
str
The extension of the file name or path
"""
if check_if_exists:
if not op.exists(filepath):
err = 'File not found: ' + filepath
log.error(err)
raise IOError(err)
try:
rest, ext = op.splitext(filepath)
except:
raise
else:
return ext
def add_extension_if_needed(filepath, ext, check_if_exists=False):
"""Add the extension ext to fpath if it doesn't have it.
Parameters
----------
filepath: str
File name or path
ext: str
File extension
check_if_exists: bool
Returns
-------
File name or path with extension added, if needed.
"""
if not filepath.endswith(ext):
filepath += ext
if check_if_exists:
if not op.exists(filepath):
err = 'File not found: ' + filepath
log.error(err)
raise IOError(err)
return filepath
def remove_ext(filepath):
"""Removes the extension of the file.
Parameters
----------
filepath: str
File path or name
Returns
-------
str
File path or name without extension
"""
return filepath[:filepath.rindex(get_extension(filepath))]
def get_tempfile(suffix='.txt'):
"""
Parameters
----------
Returns
-------
"""
return tempfile.NamedTemporaryFile(suffix=suffix, dir=get_temp_dir())
|
Add helpers to manage file names# coding=utf-8
# -------------------------------------------------------------------------------
# Author: Alexandre Manhaes Savio <alexsavio@gmail.com>
# Grupo de Inteligencia Computational <www.ehu.es/ccwintco>
# Universidad del Pais Vasco UPV/EHU
#
# 2015, Alexandre Manhaes Savio
# Use this at your own risk!
# -------------------------------------------------------------------------------
import tempfile
import os.path as op
import logging
from .config import get_temp_dir
log = logging.getLogger(__name__)
def get_extension(filepath, check_if_exists=False):
"""Return the extension of fpath.
Parameters
----------
fpath: string
File name or path
check_if_exists: bool
Returns
-------
str
The extension of the file name or path
"""
if check_if_exists:
if not op.exists(filepath):
err = 'File not found: ' + filepath
log.error(err)
raise IOError(err)
try:
rest, ext = op.splitext(filepath)
except:
raise
else:
return ext
def add_extension_if_needed(filepath, ext, check_if_exists=False):
"""Add the extension ext to fpath if it doesn't have it.
Parameters
----------
filepath: str
File name or path
ext: str
File extension
check_if_exists: bool
Returns
-------
File name or path with extension added, if needed.
"""
if not filepath.endswith(ext):
filepath += ext
if check_if_exists:
if not op.exists(filepath):
err = 'File not found: ' + filepath
log.error(err)
raise IOError(err)
return filepath
def remove_ext(filepath):
"""Removes the extension of the file.
Parameters
----------
filepath: str
File path or name
Returns
-------
str
File path or name without extension
"""
return filepath[:filepath.rindex(get_extension(filepath))]
def get_tempfile(suffix='.txt'):
"""
Parameters
----------
Returns
-------
"""
return tempfile.NamedTemporaryFile(suffix=suffix, dir=get_temp_dir())
|
<commit_before><commit_msg>Add helpers to manage file names<commit_after># coding=utf-8
# -------------------------------------------------------------------------------
# Author: Alexandre Manhaes Savio <alexsavio@gmail.com>
# Grupo de Inteligencia Computational <www.ehu.es/ccwintco>
# Universidad del Pais Vasco UPV/EHU
#
# 2015, Alexandre Manhaes Savio
# Use this at your own risk!
# -------------------------------------------------------------------------------
import tempfile
import os.path as op
import logging
from .config import get_temp_dir
log = logging.getLogger(__name__)
def get_extension(filepath, check_if_exists=False):
"""Return the extension of fpath.
Parameters
----------
fpath: string
File name or path
check_if_exists: bool
Returns
-------
str
The extension of the file name or path
"""
if check_if_exists:
if not op.exists(filepath):
err = 'File not found: ' + filepath
log.error(err)
raise IOError(err)
try:
rest, ext = op.splitext(filepath)
except:
raise
else:
return ext
def add_extension_if_needed(filepath, ext, check_if_exists=False):
"""Add the extension ext to fpath if it doesn't have it.
Parameters
----------
filepath: str
File name or path
ext: str
File extension
check_if_exists: bool
Returns
-------
File name or path with extension added, if needed.
"""
if not filepath.endswith(ext):
filepath += ext
if check_if_exists:
if not op.exists(filepath):
err = 'File not found: ' + filepath
log.error(err)
raise IOError(err)
return filepath
def remove_ext(filepath):
"""Removes the extension of the file.
Parameters
----------
filepath: str
File path or name
Returns
-------
str
File path or name without extension
"""
return filepath[:filepath.rindex(get_extension(filepath))]
def get_tempfile(suffix='.txt'):
"""
Parameters
----------
Returns
-------
"""
return tempfile.NamedTemporaryFile(suffix=suffix, dir=get_temp_dir())
|
|
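A usage sketch for the helpers above; the expected values simply restate what os.path.splitext and str.rindex do, so no docstamp import is needed:

import os.path as op

path = 'report.final.pdf'
assert op.splitext(path)[1] == '.pdf'                 # what get_extension(path) returns
assert path[:path.rindex('.pdf')] == 'report.final'   # what remove_ext(path) computes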
b1b0f1b58c0f14e4778a5a3f5342ee8064e805e7
|
src/htmgenerator.py
|
src/htmgenerator.py
|
'''
Created on 06 Jan 2016
Copyright (c) 2014 Brendan Gray and Sylvermyst Technologies
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
if __name__ == '__main__':
pass
|
Add empty module for html generation
|
Add empty module for html generation
|
Python
|
mit
|
AlphaSheep/ZBLL-Sorter,AlphaSheep/ZBLL-Sorter,AlphaSheep/ZBLL-Sorter
|
Add empty module for html generation
|
'''
Created on 06 Jan 2016
Copyright (c) 2014 Brendan Gray and Sylvermyst Technologies
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
if __name__ == '__main__':
pass
|
<commit_before><commit_msg>Add empty module for html generation<commit_after>
|
'''
Created on 06 Jan 2016
Copyright (c) 2014 Brendan Gray and Sylvermyst Technologies
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
if __name__ == '__main__':
pass
|
Add empty module for html generation'''
Created on 06 Jan 2016
Copyright (c) 2014 Brendan Gray and Sylvermyst Technologies
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
if __name__ == '__main__':
pass
|
<commit_before><commit_msg>Add empty module for html generation<commit_after>'''
Created on 06 Jan 2016
Copyright (c) 2014 Brendan Gray and Sylvermyst Technologies
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
if __name__ == '__main__':
pass
|
|
c0fb4702f091e83f190ef84bb4f66c7521e83436
|
wunder.py
|
wunder.py
|
#!/usr/bin/env python
'''
Wunderground API command line script.
'''
import sys
import requests
# todo: raise error if no config file
from config import WUAPI
class Wunder(object):
''' Wunderground API class. '''
lookup_zip = 'http://api.wunderground.com/api/{}/geolookup/q/{}.json'
us_city_current_conditions = ('http://api.wunderground.com/api/{}/'
'conditions/q/{}/{}.json')
@classmethod
def check_call(cls, response):
''' Check that the call status is 200. '''
# todo: raise error if missing certain dict keys
if response.status_code != 200:
print response.text
raise Exception('Return status code is not 200.')
@classmethod
def conditions(cls, us_state, us_city):
''' Lookup weather given location. '''
us_city = us_city.replace(' ', '_')
call = cls.us_city_current_conditions.format(WUAPI, us_state, us_city)
response = requests.get(call)
cls.check_call(response)
info = response.json()
current_obs = info['current_observation']
fields = ['station_id', 'observation_time',
'temp_f', 'relative_humidity']
# todo: spruce-up output
for field in fields:
print current_obs[field]
# todo: expand command line api to lookup and list locations
@classmethod
def lookup_zipcode(cls, zipcode):
''' Lookup location and then lookup weather with first find. '''
call = cls.lookup_zip.format(WUAPI, zipcode)
response = requests.get(call)
cls.check_call(response)
info = response.json()
print info
if __name__ == '__main__':
if len(sys.argv) < 3:
print 'usage: wunder.py [state] [city]'
else:
STATE, CITY = (sys.argv[1], sys.argv[2])
Wunder.conditions(STATE, CITY)
|
Add basic Wunderground API wrapper.
|
Add basic Wunderground API wrapper.
|
Python
|
mit
|
abshinn/weather
|
Add basic Wunderground API wrapper.
|
#!/usr/bin/env python
'''
Wunderground API command line script.
'''
import sys
import requests
# todo: raise error if no config file
from config import WUAPI
class Wunder(object):
''' Wunderground API class. '''
lookup_zip = 'http://api.wunderground.com/api/{}/geolookup/q/{}.json'
us_city_current_conditions = ('http://api.wunderground.com/api/{}/'
'conditions/q/{}/{}.json')
@classmethod
def check_call(cls, response):
''' Check that the call status is 200. '''
# todo: raise error if missing certain dict keys
if response.status_code != 200:
print response.text
raise Exception('Return status code is not 200.')
@classmethod
def conditions(cls, us_state, us_city):
''' Lookup weather given location. '''
us_city = us_city.replace(' ', '_')
call = cls.us_city_current_conditions.format(WUAPI, us_state, us_city)
response = requests.get(call)
cls.check_call(response)
info = response.json()
current_obs = info['current_observation']
fields = ['station_id', 'observation_time',
'temp_f', 'relative_humidity']
# todo: spruce-up output
for field in fields:
print current_obs[field]
# todo: expand command line api to lookup and list locations
@classmethod
def lookup_zipcode(cls, zipcode):
''' Lookup location and then lookup weather with first find. '''
call = cls.lookup_zip.format(WUAPI, zipcode)
response = requests.get(call)
cls.check_call(response)
info = response.json()
print info
if __name__ == '__main__':
if len(sys.argv) < 3:
print 'usage: wunder.py [state] [city]'
else:
STATE, CITY = (sys.argv[1], sys.argv[2])
Wunder.conditions(STATE, CITY)
|
<commit_before><commit_msg>Add basic Wunderground API wrapper.<commit_after>
|
#!/usr/bin/env python
'''
Wunderground API command line script.
'''
import sys
import requests
# todo: raise error if no config file
from config import WUAPI
class Wunder(object):
''' Wunderground API class. '''
lookup_zip = 'http://api.wunderground.com/api/{}/geolookup/q/{}.json'
us_city_current_conditions = ('http://api.wunderground.com/api/{}/'
'conditions/q/{}/{}.json')
@classmethod
def check_call(cls, response):
''' Check that the call status is 200. '''
# todo: raise error if missing certain dict keys
if response.status_code != 200:
print response.text
raise Exception('Return status code is not 200.')
@classmethod
def conditions(cls, us_state, us_city):
''' Lookup weather given location. '''
us_city = us_city.replace(' ', '_')
call = cls.us_city_current_conditions.format(WUAPI, us_state, us_city)
response = requests.get(call)
cls.check_call(response)
info = response.json()
current_obs = info['current_observation']
fields = ['station_id', 'observation_time',
'temp_f', 'relative_humidity']
# todo: spruce-up output
for field in fields:
print current_obs[field]
# todo: expand command line api to lookup and list locations
@classmethod
def lookup_zipcode(cls, zipcode):
''' Lookup location and then lookup weather with first find. '''
call = cls.lookup_zip.format(WUAPI, zipcode)
response = requests.get(call)
cls.check_call(response)
info = response.json()
print info
if __name__ == '__main__':
if len(sys.argv) < 3:
print 'usage: wunder.py [state] [city]'
else:
STATE, CITY = (sys.argv[1], sys.argv[2])
Wunder.conditions(STATE, CITY)
|
Add basic Wunderground API wrapper.#!/usr/bin/env python
'''
Wunderground API command line script.
'''
import sys
import requests
# todo: raise error if no config file
from config import WUAPI
class Wunder(object):
''' Wunderground API class. '''
lookup_zip = 'http://api.wunderground.com/api/{}/geolookup/q/{}.json'
us_city_current_conditions = ('http://api.wunderground.com/api/{}/'
'conditions/q/{}/{}.json')
@classmethod
def check_call(cls, response):
''' Check that the call status is 200. '''
# todo: raise error if missing certain dict keys
if response.status_code != 200:
print response.text
raise Exception('Return status code is not 200.')
@classmethod
def conditions(cls, us_state, us_city):
''' Lookup weather given location. '''
us_city = us_city.replace(' ', '_')
call = cls.us_city_current_conditions.format(WUAPI, us_state, us_city)
response = requests.get(call)
cls.check_call(response)
info = response.json()
current_obs = info['current_observation']
fields = ['station_id', 'observation_time',
'temp_f', 'relative_humidity']
# todo: spruce-up output
for field in fields:
print current_obs[field]
# todo: expand command line api to lookup and list locations
@classmethod
def lookup_zipcode(cls, zipcode):
''' Lookup location and then lookup weather with first find. '''
call = cls.lookup_zip.format(WUAPI, zipcode)
response = requests.get(call)
cls.check_call(response)
info = response.json()
print info
if __name__ == '__main__':
if len(sys.argv) < 3:
print 'usage: wunder.py [state] [city]'
else:
STATE, CITY = (sys.argv[1], sys.argv[2])
Wunder.conditions(STATE, CITY)
|
<commit_before><commit_msg>Add basic Wunderground API wrapper.<commit_after>#!/usr/bin/env python
'''
Wunderground API command line script.
'''
import sys
import requests
# todo: raise error if no config file
from config import WUAPI
class Wunder(object):
''' Wunderground API class. '''
lookup_zip = 'http://api.wunderground.com/api/{}/geolookup/q/{}.json'
us_city_current_conditions = ('http://api.wunderground.com/api/{}/'
'conditions/q/{}/{}.json')
@classmethod
def check_call(cls, response):
''' Check that the call status is 200. '''
# todo: raise error if missing certain dict keys
if response.status_code != 200:
print response.text
raise Exception('Return status code is not 200.')
@classmethod
def conditions(cls, us_state, us_city):
''' Lookup weather given location. '''
us_city = us_city.replace(' ', '_')
call = cls.us_city_current_conditions.format(WUAPI, us_state, us_city)
response = requests.get(call)
cls.check_call(response)
info = response.json()
current_obs = info['current_observation']
fields = ['station_id', 'observation_time',
'temp_f', 'relative_humidity']
# todo: spruce-up output
for field in fields:
print current_obs[field]
# todo: expand command line api to lookup and list locations
@classmethod
def lookup_zipcode(cls, zipcode):
''' Lookup location and then lookup weather with first find. '''
call = cls.lookup_zip.format(WUAPI, zipcode)
response = requests.get(call)
cls.check_call(response)
info = response.json()
print info
if __name__ == '__main__':
if len(sys.argv) < 3:
print 'usage: wunder.py [state] [city]'
else:
STATE, CITY = (sys.argv[1], sys.argv[2])
Wunder.conditions(STATE, CITY)
|
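An invocation sketch for the wrapper above; the API key and location are placeholders, only the URL formatting already present in the class is exercised, and no network call is made:

WUAPI = 'your_api_key'  # placeholder; the real key comes from config.py
url_template = 'http://api.wunderground.com/api/{}/conditions/q/{}/{}.json'
call = url_template.format(WUAPI, 'CA', 'San_Francisco')
# requests.get(call).json()['current_observation'] would then hold the fields
# printed by Wunder.conditions().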