| column | type / length stats |
|---|---|
| commit | stringlengths 40-40 |
| old_file | stringlengths 4-118 |
| new_file | stringlengths 4-118 |
| old_contents | stringlengths 0-2.94k |
| new_contents | stringlengths 1-4.43k |
| subject | stringlengths 15-444 |
| message | stringlengths 16-3.45k |
| lang | stringclasses 1 value |
| license | stringclasses 13 values |
| repos | stringlengths 5-43.2k |
| prompt | stringlengths 17-4.58k |
| response | stringlengths 1-4.43k |
| prompt_tagged | stringlengths 58-4.62k |
| response_tagged | stringlengths 1-4.43k |
| text | stringlengths 132-7.29k |
| text_tagged | stringlengths 173-7.33k |
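The entries below are raw sample rows with the columns listed above; each row repeats the file contents across the prompt, response, and text columns, which is why the rows are long. As a minimal sketch (assuming this dump corresponds to a Hugging Face-style dataset; the dataset path used here is a hypothetical placeholder, not the real name), one row could be loaded and inspected like this:

```python
# Minimal sketch: inspect one row of a dataset with the columns listed above.
# "user/commit-message-dataset" is a hypothetical placeholder path.
from datasets import load_dataset

ds = load_dataset("user/commit-message-dataset", split="train")

row = ds[0]
print(row["commit"])        # 40-character commit hash
print(row["subject"])       # short commit subject line
print(row["new_contents"])  # file contents after the commit
```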
cd2e527f86427257de0686269e6b3a9d74314249
|
thunderdome_logging/utils.py
|
thunderdome_logging/utils.py
|
# Copyright (c) 2012-2013 SHIFT.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import thunderdome
from thunderdome import OUT
from thunderdome_logging.graph_handler import Errors
def get_errors(vid, max_num_errors=5):
"""
Returns the last max_num_errors for the vertex with the given vid.
:param vid: The UUID uniquely identifying the vertex in the graph
:type vid: str
:param max_num_errors: The maximum number of errors to be returned
:type max_num_errors: int
:rtype: list
"""
vert = thunderdome.Vertex.get(vid)
q = vert.query()
return q.labels(Errors).direction(OUT).limit(max_num_errors).vertices()
|
Add Utility Method For Retrieving Error Vertices
|
Add Utility Method For Retrieving Error Vertices
|
Python
|
mit
|
StartTheShift/thunderdome-logging,StartTheShift/thunderdome-logging
|
Add Utility Method For Retrieving Error Vertices
|
# Copyright (c) 2012-2013 SHIFT.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import thunderdome
from thunderdome import OUT
from thunderdome_logging.graph_handler import Errors
def get_errors(vid, max_num_errors=5):
"""
Returns the last max_num_errors for the vertex with the given vid.
:param vid: The UUID uniquely identifying the vertex in the graph
:type vid: str
:param max_num_errors: The maximum number of errors to be returned
:type max_num_errors: int
:rtype: list
"""
vert = thunderdome.Vertex.get(vid)
q = vert.query()
return q.labels(Errors).direction(OUT).limit(max_num_errors).vertices()
|
<commit_before><commit_msg>Add Utility Method For Retrieving Error Vertices<commit_after>
|
# Copyright (c) 2012-2013 SHIFT.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import thunderdome
from thunderdome import OUT
from thunderdome_logging.graph_handler import Errors
def get_errors(vid, max_num_errors=5):
"""
Returns the last max_num_errors for the vertex with the given vid.
:param vid: The UUID uniquely identifying the vertex in the graph
:type vid: str
:param max_num_errors: The maximum number of errors to be returned
:type max_num_errors: int
:rtype: list
"""
vert = thunderdome.Vertex.get(vid)
q = vert.query()
return q.labels(Errors).direction(OUT).limit(max_num_errors).vertices()
|
Add Utility Method For Retrieving Error Vertices# Copyright (c) 2012-2013 SHIFT.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import thunderdome
from thunderdome import OUT
from thunderdome_logging.graph_handler import Errors
def get_errors(vid, max_num_errors=5):
"""
Returns the last max_num_errors for the vertex with the given vid.
:param vid: The UUID uniquely identifying the vertex in the graph
:type vid: str
:param max_num_errors: The maximum number of errors to be returned
:type max_num_errors: int
:rtype: list
"""
vert = thunderdome.Vertex.get(vid)
q = vert.query()
return q.labels(Errors).direction(OUT).limit(max_num_errors).vertices()
|
<commit_before><commit_msg>Add Utility Method For Retrieving Error Vertices<commit_after># Copyright (c) 2012-2013 SHIFT.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import thunderdome
from thunderdome import OUT
from thunderdome_logging.graph_handler import Errors
def get_errors(vid, max_num_errors=5):
"""
Returns the last max_num_errors for the vertex with the given vid.
:param vid: The UUID uniquely identifying the vertex in the graph
:type vid: str
:param max_num_errors: The maximum number of errors to be returned
:type max_num_errors: int
:rtype: list
"""
vert = thunderdome.Vertex.get(vid)
q = vert.query()
return q.labels(Errors).direction(OUT).limit(max_num_errors).vertices()
|
|
8f72e083e436198adcf113cb0abaa8af96f8caf1
|
vigir_ltl_specification/test/unit/ts_specification_test.py
|
vigir_ltl_specification/test/unit/ts_specification_test.py
|
#!/usr/bin/env python
import unittest
from vigir_ltl_specification.ts_specification import *
class SpecificationConstructionTests(unittest.TestCase):
"""Test the generation of Activation-Outcomes formulas"""
def setUp(self):
"""Gets called before every test case."""
self.spec_name = 'test'
self.ts = {'r1': ['r1', 'r2', 'r3'],
'r2': ['r2'],
'r3': ['r3', 'r1']}
self.spec = TransitionSystemSpecification(name = self.spec_name,
ts = self.ts)
print("Setting up a new specification construction test.")
def tearDown(self):
"""Gets called after every test case."""
print("Cleaning up after latest test ...")
del self.ts, self.spec
def test_input_to_object(self):
self.assertEqual(self.spec_name, self.spec.spec_name)
self.assertItemsEqual(self.ts, self.spec.ts)
def test_ts_of_interest(self):
props = ['r1', 'r3']
ts_of_interest = {'r1': ['r1', 'r3'],
'r3': ['r3', 'r1']}
self.spec = TransitionSystemSpecification(ts = self.ts,
props_of_interest = props)
self.assertItemsEqual(ts_of_interest, self.spec.ts)
# =============================================================================
# Entry point
# =============================================================================
if __name__ == '__main__':
# Run all tests
unittest.main()
|
Add unit tests for ts_specification
|
[vigir_ltl_specification][test] Add unit tests for ts_specification
|
Python
|
bsd-3-clause
|
team-vigir/vigir_behavior_synthesis,team-vigir/vigir_behavior_synthesis
|
[vigir_ltl_specification][test] Add unit tests for ts_specification
|
#!/usr/bin/env python
import unittest
from vigir_ltl_specification.ts_specification import *
class SpecificationConstructionTests(unittest.TestCase):
"""Test the generation of Activation-Outcomes formulas"""
def setUp(self):
"""Gets called before every test case."""
self.spec_name = 'test'
self.ts = {'r1': ['r1', 'r2', 'r3'],
'r2': ['r2'],
'r3': ['r3', 'r1']}
self.spec = TransitionSystemSpecification(name = self.spec_name,
ts = self.ts)
print("Setting up a new specification construction test.")
def tearDown(self):
"""Gets called after every test case."""
print("Cleaning up after latest test ...")
del self.ts, self.spec
def test_input_to_object(self):
self.assertEqual(self.spec_name, self.spec.spec_name)
self.assertItemsEqual(self.ts, self.spec.ts)
def test_ts_of_interest(self):
props = ['r1', 'r3']
ts_of_interest = {'r1': ['r1', 'r3'],
'r3': ['r3', 'r1']}
self.spec = TransitionSystemSpecification(ts = self.ts,
props_of_interest = props)
self.assertItemsEqual(ts_of_interest, self.spec.ts)
# =============================================================================
# Entry point
# =============================================================================
if __name__ == '__main__':
# Run all tests
unittest.main()
|
<commit_before><commit_msg>[vigir_ltl_specification][test] Add unit tests for ts_specification<commit_after>
|
#!/usr/bin/env python
import unittest
from vigir_ltl_specification.ts_specification import *
class SpecificationConstructionTests(unittest.TestCase):
"""Test the generation of Activation-Outcomes formulas"""
def setUp(self):
"""Gets called before every test case."""
self.spec_name = 'test'
self.ts = {'r1': ['r1', 'r2', 'r3'],
'r2': ['r2'],
'r3': ['r3', 'r1']}
self.spec = TransitionSystemSpecification(name = self.spec_name,
ts = self.ts)
print("Setting up a new specification construction test.")
def tearDown(self):
"""Gets called after every test case."""
print("Cleaning up after latest test ...")
del self.ts, self.spec
def test_input_to_object(self):
self.assertEqual(self.spec_name, self.spec.spec_name)
self.assertItemsEqual(self.ts, self.spec.ts)
def test_ts_of_interest(self):
props = ['r1', 'r3']
ts_of_interest = {'r1': ['r1', 'r3'],
'r3': ['r3', 'r1']}
self.spec = TransitionSystemSpecification(ts = self.ts,
props_of_interest = props)
self.assertItemsEqual(ts_of_interest, self.spec.ts)
# =============================================================================
# Entry point
# =============================================================================
if __name__ == '__main__':
# Run all tests
unittest.main()
|
[vigir_ltl_specification][test] Add unit tests for ts_specification#!/usr/bin/env python
import unittest
from vigir_ltl_specification.ts_specification import *
class SpecificationConstructionTests(unittest.TestCase):
"""Test the generation of Activation-Outcomes formulas"""
def setUp(self):
"""Gets called before every test case."""
self.spec_name = 'test'
self.ts = {'r1': ['r1', 'r2', 'r3'],
'r2': ['r2'],
'r3': ['r3', 'r1']}
self.spec = TransitionSystemSpecification(name = self.spec_name,
ts = self.ts)
print("Setting up a new specification construction test.")
def tearDown(self):
"""Gets called after every test case."""
print("Cleaning up after latest test ...")
del self.ts, self.spec
def test_input_to_object(self):
self.assertEqual(self.spec_name, self.spec.spec_name)
self.assertItemsEqual(self.ts, self.spec.ts)
def test_ts_of_interest(self):
props = ['r1', 'r3']
ts_of_interest = {'r1': ['r1', 'r3'],
'r3': ['r3', 'r1']}
self.spec = TransitionSystemSpecification(ts = self.ts,
props_of_interest = props)
self.assertItemsEqual(ts_of_interest, self.spec.ts)
# =============================================================================
# Entry point
# =============================================================================
if __name__ == '__main__':
# Run all tests
unittest.main()
|
<commit_before><commit_msg>[vigir_ltl_specification][test] Add unit tests for ts_specification<commit_after>#!/usr/bin/env python
import unittest
from vigir_ltl_specification.ts_specification import *
class SpecificationConstructionTests(unittest.TestCase):
"""Test the generation of Activation-Outcomes formulas"""
def setUp(self):
"""Gets called before every test case."""
self.spec_name = 'test'
self.ts = {'r1': ['r1', 'r2', 'r3'],
'r2': ['r2'],
'r3': ['r3', 'r1']}
self.spec = TransitionSystemSpecification(name = self.spec_name,
ts = self.ts)
print("Setting up a new specification construction test.")
def tearDown(self):
"""Gets called after every test case."""
print("Cleaning up after latest test ...")
del self.ts, self.spec
def test_input_to_object(self):
self.assertEqual(self.spec_name, self.spec.spec_name)
self.assertItemsEqual(self.ts, self.spec.ts)
def test_ts_of_interest(self):
props = ['r1', 'r3']
ts_of_interest = {'r1': ['r1', 'r3'],
'r3': ['r3', 'r1']}
self.spec = TransitionSystemSpecification(ts = self.ts,
props_of_interest = props)
self.assertItemsEqual(ts_of_interest, self.spec.ts)
# =============================================================================
# Entry point
# =============================================================================
if __name__ == '__main__':
# Run all tests
unittest.main()
|
|
4601de20b49245f088bdab69d5e7d429841cf345
|
popit/migrations/0058_auto_20170418_0745.py
|
popit/migrations/0058_auto_20170418_0745.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-04-18 07:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('popit', '0057_auto_20170315_0222'),
]
operations = [
migrations.AlterField(
model_name='relation',
name='object',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='relations_as_object', to='popit.Person', verbose_name='object'),
),
migrations.AlterField(
model_name='relation',
name='subject',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='relations_as_subject', to='popit.Person', verbose_name='subject'),
),
]
|
Add forgotten migration for relation related_name change
|
Add forgotten migration for relation related_name change
|
Python
|
agpl-3.0
|
Sinar/popit_ng,Sinar/popit_ng
|
Add forgotten migration for relation related_name change
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-04-18 07:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('popit', '0057_auto_20170315_0222'),
]
operations = [
migrations.AlterField(
model_name='relation',
name='object',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='relations_as_object', to='popit.Person', verbose_name='object'),
),
migrations.AlterField(
model_name='relation',
name='subject',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='relations_as_subject', to='popit.Person', verbose_name='subject'),
),
]
|
<commit_before><commit_msg>Add forgotten migration for relation related_name change<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-04-18 07:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('popit', '0057_auto_20170315_0222'),
]
operations = [
migrations.AlterField(
model_name='relation',
name='object',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='relations_as_object', to='popit.Person', verbose_name='object'),
),
migrations.AlterField(
model_name='relation',
name='subject',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='relations_as_subject', to='popit.Person', verbose_name='subject'),
),
]
|
Add forgotten migration for relation related_name change# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-04-18 07:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('popit', '0057_auto_20170315_0222'),
]
operations = [
migrations.AlterField(
model_name='relation',
name='object',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='relations_as_object', to='popit.Person', verbose_name='object'),
),
migrations.AlterField(
model_name='relation',
name='subject',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='relations_as_subject', to='popit.Person', verbose_name='subject'),
),
]
|
<commit_before><commit_msg>Add forgotten migration for relation related_name change<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-04-18 07:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('popit', '0057_auto_20170315_0222'),
]
operations = [
migrations.AlterField(
model_name='relation',
name='object',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='relations_as_object', to='popit.Person', verbose_name='object'),
),
migrations.AlterField(
model_name='relation',
name='subject',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='relations_as_subject', to='popit.Person', verbose_name='subject'),
),
]
|
|
e0d71d17ef62bb4e515462c2a53a5dd7172cd15d
|
DilipadTopicModelling/experiment_number_of_topics.py
|
DilipadTopicModelling/experiment_number_of_topics.py
|
import logging
import glob
from multiprocessing import Process
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
def run_sampler(corpus, nTopics, nIter, beta, out_dir):
sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter,
alpha=(50.0/n), beta=beta, beta_o=beta,
out_dir=out_dir.format(nTopics))
sampler._initialize()
sampler.run()
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#logger.setLevel(logging.INFO)
files = glob.glob('/home/jvdzwaan/data/tmp/test/*')
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'
corpus = CPTCorpus(files, testSplit=20)
corpus.filter_dictionaries(minFreq=5, removeTopTF=100, removeTopDF=100)
corpus.save_dictionaries(directory=out_dir.format(''))
corpus.save(out_dir.format('corpus.json'))
#corpus = CPTCorpus.CPTCorpus.load('{}corpus.json'.format(out_dir),
# topicDict='{}/topicDict.dict'.format(out_dir),
# opinionDict='{}/opinionDict.dict'.format(out_dir))
nIter = 200
beta = 0.02
nTopics = range(20, 201, 20)
logger.info('running Gibbs sampler for {} configurations'.format(len(nTopics)))
processes = [Process(target=run_sampler,
args=(corpus, n, nIter, beta, out_dir))
for n in nTopics]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
|
Add script to run a series of experiments
|
Add script to run a series of experiments
The script does Gibbs sampling for different values of nTopics.
|
Python
|
apache-2.0
|
NLeSC/cptm,NLeSC/cptm
|
Add script to run a series of experiments
The script does Gibbs sampling for different values of nTopics.
|
import logging
import glob
from multiprocessing import Process
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
def run_sampler(corpus, nTopics, nIter, beta, out_dir):
sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter,
alpha=(50.0/n), beta=beta, beta_o=beta,
out_dir=out_dir.format(nTopics))
sampler._initialize()
sampler.run()
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#logger.setLevel(logging.INFO)
files = glob.glob('/home/jvdzwaan/data/tmp/test/*')
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'
corpus = CPTCorpus(files, testSplit=20)
corpus.filter_dictionaries(minFreq=5, removeTopTF=100, removeTopDF=100)
corpus.save_dictionaries(directory=out_dir.format(''))
corpus.save(out_dir.format('corpus.json'))
#corpus = CPTCorpus.CPTCorpus.load('{}corpus.json'.format(out_dir),
# topicDict='{}/topicDict.dict'.format(out_dir),
# opinionDict='{}/opinionDict.dict'.format(out_dir))
nIter = 200
beta = 0.02
nTopics = range(20, 201, 20)
logger.info('running Gibbs sampler for {} configurations'.format(len(nTopics)))
processes = [Process(target=run_sampler,
args=(corpus, n, nIter, beta, out_dir))
for n in nTopics]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
|
<commit_before><commit_msg>Add script to run a series of experiments
The script does Gibbs sampling for different values of nTopics.<commit_after>
|
import logging
import glob
from multiprocessing import Process
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
def run_sampler(corpus, nTopics, nIter, beta, out_dir):
sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter,
alpha=(50.0/n), beta=beta, beta_o=beta,
out_dir=out_dir.format(nTopics))
sampler._initialize()
sampler.run()
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#logger.setLevel(logging.INFO)
files = glob.glob('/home/jvdzwaan/data/tmp/test/*')
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'
corpus = CPTCorpus(files, testSplit=20)
corpus.filter_dictionaries(minFreq=5, removeTopTF=100, removeTopDF=100)
corpus.save_dictionaries(directory=out_dir.format(''))
corpus.save(out_dir.format('corpus.json'))
#corpus = CPTCorpus.CPTCorpus.load('{}corpus.json'.format(out_dir),
# topicDict='{}/topicDict.dict'.format(out_dir),
# opinionDict='{}/opinionDict.dict'.format(out_dir))
nIter = 200
beta = 0.02
nTopics = range(20, 201, 20)
logger.info('running Gibbs sampler for {} configurations'.format(len(nTopics)))
processes = [Process(target=run_sampler,
args=(corpus, n, nIter, beta, out_dir))
for n in nTopics]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
|
Add script to run a series of experiments
The script does Gibbs sampling for different values of nTopics.import logging
import glob
from multiprocessing import Process
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
def run_sampler(corpus, nTopics, nIter, beta, out_dir):
sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter,
alpha=(50.0/n), beta=beta, beta_o=beta,
out_dir=out_dir.format(nTopics))
sampler._initialize()
sampler.run()
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#logger.setLevel(logging.INFO)
files = glob.glob('/home/jvdzwaan/data/tmp/test/*')
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'
corpus = CPTCorpus(files, testSplit=20)
corpus.filter_dictionaries(minFreq=5, removeTopTF=100, removeTopDF=100)
corpus.save_dictionaries(directory=out_dir.format(''))
corpus.save(out_dir.format('corpus.json'))
#corpus = CPTCorpus.CPTCorpus.load('{}corpus.json'.format(out_dir),
# topicDict='{}/topicDict.dict'.format(out_dir),
# opinionDict='{}/opinionDict.dict'.format(out_dir))
nIter = 200
beta = 0.02
nTopics = range(20, 201, 20)
logger.info('running Gibbs sampler for {} configurations'.format(len(nTopics)))
processes = [Process(target=run_sampler,
args=(corpus, n, nIter, beta, out_dir))
for n in nTopics]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
|
<commit_before><commit_msg>Add script to run a series of experiments
The script does Gibbs sampling for different values of nTopics.<commit_after>import logging
import glob
from multiprocessing import Process
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
def run_sampler(corpus, nTopics, nIter, beta, out_dir):
sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter,
alpha=(50.0/n), beta=beta, beta_o=beta,
out_dir=out_dir.format(nTopics))
sampler._initialize()
sampler.run()
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#logger.setLevel(logging.INFO)
files = glob.glob('/home/jvdzwaan/data/tmp/test/*')
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'
corpus = CPTCorpus(files, testSplit=20)
corpus.filter_dictionaries(minFreq=5, removeTopTF=100, removeTopDF=100)
corpus.save_dictionaries(directory=out_dir.format(''))
corpus.save(out_dir.format('corpus.json'))
#corpus = CPTCorpus.CPTCorpus.load('{}corpus.json'.format(out_dir),
# topicDict='{}/topicDict.dict'.format(out_dir),
# opinionDict='{}/opinionDict.dict'.format(out_dir))
nIter = 200
beta = 0.02
nTopics = range(20, 201, 20)
logger.info('running Gibbs sampler for {} configurations'.format(len(nTopics)))
processes = [Process(target=run_sampler,
args=(corpus, n, nIter, beta, out_dir))
for n in nTopics]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
|
|
fafb2c00597a99947f3e7a344e97551a390bda08
|
alembic/versions/5563ca9e7626_create_request_searc.py
|
alembic/versions/5563ca9e7626_create_request_searc.py
|
"""Create request search column and trigger.
Revision ID: 5563ca9e7626
Revises: 30d3af507801
Create Date: 2014-03-06 13:13:52.831868
"""
# revision identifiers, used by Alembic.
revision = '5563ca9e7626'
down_revision = '30d3af507801'
from alembic import op
import sqlalchemy as sa
def upgrade():
# TODO(cj@postcode.io): This should probably be rewritten using Alembic and SQLAlchemy classes,
# but I couldn't find the docs on using `to_tsvector` in this context. Le sigh.
op.execute("CREATE INDEX request_search_index ON request USING gin(to_tsvector('english', text))")
def downgrade():
op.drop_index("request_search_index", "request")
|
Create a searchable index on the request text field.
|
Create a searchable index on the request text field.
|
Python
|
apache-2.0
|
CityOfNewYork/NYCOpenRecords,CityOfNewYork/NYCOpenRecords,CityOfNewYork/NYCOpenRecords,CityOfNewYork/NYCOpenRecords,CityOfNewYork/NYCOpenRecords
|
Create a searchable index on the request text field.
|
"""Create request search column and trigger.
Revision ID: 5563ca9e7626
Revises: 30d3af507801
Create Date: 2014-03-06 13:13:52.831868
"""
# revision identifiers, used by Alembic.
revision = '5563ca9e7626'
down_revision = '30d3af507801'
from alembic import op
import sqlalchemy as sa
def upgrade():
# TODO(cj@postcode.io): This should probably be rewritten using Alembic and SQLAlchemy classes,
# but I couldn't find the docs on using `to_tsvector` in this context. Le sigh.
op.execute("CREATE INDEX request_search_index ON request USING gin(to_tsvector('english', text))")
def downgrade():
op.drop_index("request_search_index", "request")
|
<commit_before><commit_msg>Create a searchable index on the request text field.<commit_after>
|
"""Create request search column and trigger.
Revision ID: 5563ca9e7626
Revises: 30d3af507801
Create Date: 2014-03-06 13:13:52.831868
"""
# revision identifiers, used by Alembic.
revision = '5563ca9e7626'
down_revision = '30d3af507801'
from alembic import op
import sqlalchemy as sa
def upgrade():
# TODO(cj@postcode.io): This should probably be rewritten using Alembic and SQLAlchemy classes,
# but I couldn't find the docs on using `to_tsvector` in this context. Le sigh.
op.execute("CREATE INDEX request_search_index ON request USING gin(to_tsvector('english', text))")
def downgrade():
op.drop_index("request_search_index", "request")
|
Create a searchable index on the request text field."""Create request search column and trigger.
Revision ID: 5563ca9e7626
Revises: 30d3af507801
Create Date: 2014-03-06 13:13:52.831868
"""
# revision identifiers, used by Alembic.
revision = '5563ca9e7626'
down_revision = '30d3af507801'
from alembic import op
import sqlalchemy as sa
def upgrade():
# TODO(cj@postcode.io): This should probably be rewritten using Alembic and SQLAlchemy classes,
# but I couldn't find the docs on using `to_tsvector` in this context. Le sigh.
op.execute("CREATE INDEX request_search_index ON request USING gin(to_tsvector('english', text))")
def downgrade():
op.drop_index("request_search_index", "request")
|
<commit_before><commit_msg>Create a searchable index on the request text field.<commit_after>"""Create request search column and trigger.
Revision ID: 5563ca9e7626
Revises: 30d3af507801
Create Date: 2014-03-06 13:13:52.831868
"""
# revision identifiers, used by Alembic.
revision = '5563ca9e7626'
down_revision = '30d3af507801'
from alembic import op
import sqlalchemy as sa
def upgrade():
# TODO(cj@postcode.io): This should probably be rewritten using Alembic and SQLAlchemy classes,
# but I couldn't find the docs on using `to_tsvector` in this context. Le sigh.
op.execute("CREATE INDEX request_search_index ON request USING gin(to_tsvector('english', text))")
def downgrade():
op.drop_index("request_search_index", "request")
|
|
67153408c30726728a1bf9ec9e03ff869b306174
|
conf_site/proposals/tests/test_proposal_management.py
|
conf_site/proposals/tests/test_proposal_management.py
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.utils.crypto import get_random_string
from symposion.speakers.models import Speaker
from conf_site.proposals.tests import ProposalTestCase
class ProposalSpeakerManageViewTestCase(ProposalTestCase):
"""Automated test cases for symposion's proposal_speaker_manage view."""
def setUp(self):
super(ProposalSpeakerManageViewTestCase, self).setUp()
user_model = get_user_model()
USER_EMAIL = "example@example.com"
USER_PASSWORD = get_random_string()
self.user = user_model.objects.create_user(
username="user", email=USER_EMAIL, password=USER_PASSWORD
)
speaker = Speaker.objects.create(name="Nancy Pelosi")
speaker.user = self.user
speaker.save()
# Overwrite speaker for this case's proposal - sorry, Paul Ryan.
self.proposal.speaker = speaker
self.proposal.save()
self.assertTrue(
self.client.login(username=USER_EMAIL, password=USER_PASSWORD)
)
def test_verify_proposal_jacking_does_not_work(self):
"""Verify that you can't manage a proposal that is not yours."""
# Create a new speaker and change ownership of this
# test case's Proposal to said speaker.
other_speaker = Speaker.objects.create(name="Other Speaker")
self.proposal.speaker = other_speaker
self.proposal.save()
response = self.client.get(
reverse("proposal_speaker_manage", args=[self.proposal.pk])
)
self.assertEqual(response.status_code, 404)
|
Add automated tests for proposal_speaker_manage.
|
Add automated tests for proposal_speaker_manage.
|
Python
|
mit
|
pydata/conf_site,pydata/conf_site,pydata/conf_site
|
Add automated tests for proposal_speaker_manage.
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.utils.crypto import get_random_string
from symposion.speakers.models import Speaker
from conf_site.proposals.tests import ProposalTestCase
class ProposalSpeakerManageViewTestCase(ProposalTestCase):
"""Automated test cases for symposion's proposal_speaker_manage view."""
def setUp(self):
super(ProposalSpeakerManageViewTestCase, self).setUp()
user_model = get_user_model()
USER_EMAIL = "example@example.com"
USER_PASSWORD = get_random_string()
self.user = user_model.objects.create_user(
username="user", email=USER_EMAIL, password=USER_PASSWORD
)
speaker = Speaker.objects.create(name="Nancy Pelosi")
speaker.user = self.user
speaker.save()
# Overwrite speaker for this case's proposal - sorry, Paul Ryan.
self.proposal.speaker = speaker
self.proposal.save()
self.assertTrue(
self.client.login(username=USER_EMAIL, password=USER_PASSWORD)
)
def test_verify_proposal_jacking_does_not_work(self):
"""Verify that you can't manage a proposal that is not yours."""
# Create a new speaker and change ownership of this
# test case's Proposal to said speaker.
other_speaker = Speaker.objects.create(name="Other Speaker")
self.proposal.speaker = other_speaker
self.proposal.save()
response = self.client.get(
reverse("proposal_speaker_manage", args=[self.proposal.pk])
)
self.assertEqual(response.status_code, 404)
|
<commit_before><commit_msg>Add automated tests for proposal_speaker_manage.<commit_after>
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.utils.crypto import get_random_string
from symposion.speakers.models import Speaker
from conf_site.proposals.tests import ProposalTestCase
class ProposalSpeakerManageViewTestCase(ProposalTestCase):
"""Automated test cases for symposion's proposal_speaker_manage view."""
def setUp(self):
super(ProposalSpeakerManageViewTestCase, self).setUp()
user_model = get_user_model()
USER_EMAIL = "example@example.com"
USER_PASSWORD = get_random_string()
self.user = user_model.objects.create_user(
username="user", email=USER_EMAIL, password=USER_PASSWORD
)
speaker = Speaker.objects.create(name="Nancy Pelosi")
speaker.user = self.user
speaker.save()
# Overwrite speaker for this case's proposal - sorry, Paul Ryan.
self.proposal.speaker = speaker
self.proposal.save()
self.assertTrue(
self.client.login(username=USER_EMAIL, password=USER_PASSWORD)
)
def test_verify_proposal_jacking_does_not_work(self):
"""Verify that you can't manage a proposal that is not yours."""
# Create a new speaker and change ownership of this
# test case's Proposal to said speaker.
other_speaker = Speaker.objects.create(name="Other Speaker")
self.proposal.speaker = other_speaker
self.proposal.save()
response = self.client.get(
reverse("proposal_speaker_manage", args=[self.proposal.pk])
)
self.assertEqual(response.status_code, 404)
|
Add automated tests for proposal_speaker_manage.from django.contrib.auth import get_user_model
from django.urls import reverse
from django.utils.crypto import get_random_string
from symposion.speakers.models import Speaker
from conf_site.proposals.tests import ProposalTestCase
class ProposalSpeakerManageViewTestCase(ProposalTestCase):
"""Automated test cases for symposion's proposal_speaker_manage view."""
def setUp(self):
super(ProposalSpeakerManageViewTestCase, self).setUp()
user_model = get_user_model()
USER_EMAIL = "example@example.com"
USER_PASSWORD = get_random_string()
self.user = user_model.objects.create_user(
username="user", email=USER_EMAIL, password=USER_PASSWORD
)
speaker = Speaker.objects.create(name="Nancy Pelosi")
speaker.user = self.user
speaker.save()
# Overwrite speaker for this case's proposal - sorry, Paul Ryan.
self.proposal.speaker = speaker
self.proposal.save()
self.assertTrue(
self.client.login(username=USER_EMAIL, password=USER_PASSWORD)
)
def test_verify_proposal_jacking_does_not_work(self):
"""Verify that you can't manage a proposal that is not yours."""
# Create a new speaker and change ownership of this
# test case's Proposal to said speaker.
other_speaker = Speaker.objects.create(name="Other Speaker")
self.proposal.speaker = other_speaker
self.proposal.save()
response = self.client.get(
reverse("proposal_speaker_manage", args=[self.proposal.pk])
)
self.assertEqual(response.status_code, 404)
|
<commit_before><commit_msg>Add automated tests for proposal_speaker_manage.<commit_after>from django.contrib.auth import get_user_model
from django.urls import reverse
from django.utils.crypto import get_random_string
from symposion.speakers.models import Speaker
from conf_site.proposals.tests import ProposalTestCase
class ProposalSpeakerManageViewTestCase(ProposalTestCase):
"""Automated test cases for symposion's proposal_speaker_manage view."""
def setUp(self):
super(ProposalSpeakerManageViewTestCase, self).setUp()
user_model = get_user_model()
USER_EMAIL = "example@example.com"
USER_PASSWORD = get_random_string()
self.user = user_model.objects.create_user(
username="user", email=USER_EMAIL, password=USER_PASSWORD
)
speaker = Speaker.objects.create(name="Nancy Pelosi")
speaker.user = self.user
speaker.save()
# Overwrite speaker for this case's proposal - sorry, Paul Ryan.
self.proposal.speaker = speaker
self.proposal.save()
self.assertTrue(
self.client.login(username=USER_EMAIL, password=USER_PASSWORD)
)
def test_verify_proposal_jacking_does_not_work(self):
"""Verify that you can't manage a proposal that is not yours."""
# Create a new speaker and change ownership of this
# test case's Proposal to said speaker.
other_speaker = Speaker.objects.create(name="Other Speaker")
self.proposal.speaker = other_speaker
self.proposal.save()
response = self.client.get(
reverse("proposal_speaker_manage", args=[self.proposal.pk])
)
self.assertEqual(response.status_code, 404)
|
|
1fa31c04dbd323af36a5b0cb606aa49e0b1c0359
|
genome_designer/debug/modify_jbrowse_track_config.py
|
genome_designer/debug/modify_jbrowse_track_config.py
|
"""Functions for manipulating JBrowse configs.
NOTE: User responsible for managing backups / not breaking anything.
"""
import json
TRACK_LIST_CONFIG = '/dep_data/temp_data/projects/3bc32fc9/ref_genomes/01166f51/jbrowse/trackList.json'
def main():
with open(TRACK_LIST_CONFIG) as fh:
config_json = json.loads(fh.read())
tracks = config_json['tracks']
for track in tracks:
track['chunkSizeLimit'] = 1000000000
track['maxHeight'] = 10000
with open(TRACK_LIST_CONFIG, 'w') as output_fh:
output_fh.write(json.dumps(config_json))
if __name__ == '__main__':
main()
|
Debug script to modify jbrowse track config manually.
|
Debug script to modify jbrowse track config manually.
|
Python
|
mit
|
churchlab/millstone,woodymit/millstone_accidental_source,churchlab/millstone,woodymit/millstone,woodymit/millstone,woodymit/millstone_accidental_source,woodymit/millstone_accidental_source,woodymit/millstone_accidental_source,woodymit/millstone,churchlab/millstone,churchlab/millstone,woodymit/millstone
|
Debug script to modify jbrowse track config manually.
|
"""Functions for manipulating JBrowse configs.
NOTE: User responsible for managing backups / not breaking anything.
"""
import json
TRACK_LIST_CONFIG = '/dep_data/temp_data/projects/3bc32fc9/ref_genomes/01166f51/jbrowse/trackList.json'
def main():
with open(TRACK_LIST_CONFIG) as fh:
config_json = json.loads(fh.read())
tracks = config_json['tracks']
for track in tracks:
track['chunkSizeLimit'] = 1000000000
track['maxHeight'] = 10000
with open(TRACK_LIST_CONFIG, 'w') as output_fh:
output_fh.write(json.dumps(config_json))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Debug script to modify jbrowse track config manually.<commit_after>
|
"""Functions for manipulating JBrowse configs.
NOTE: User responsible for managing backups / not breaking anything.
"""
import json
TRACK_LIST_CONFIG = '/dep_data/temp_data/projects/3bc32fc9/ref_genomes/01166f51/jbrowse/trackList.json'
def main():
with open(TRACK_LIST_CONFIG) as fh:
config_json = json.loads(fh.read())
tracks = config_json['tracks']
for track in tracks:
track['chunkSizeLimit'] = 1000000000
track['maxHeight'] = 10000
with open(TRACK_LIST_CONFIG, 'w') as output_fh:
output_fh.write(json.dumps(config_json))
if __name__ == '__main__':
main()
|
Debug script to modify jbrowse track config manually."""Functions for manipulating JBrowse configs.
NOTE: User responsible for managing backups / not breaking anything.
"""
import json
TRACK_LIST_CONFIG = '/dep_data/temp_data/projects/3bc32fc9/ref_genomes/01166f51/jbrowse/trackList.json'
def main():
with open(TRACK_LIST_CONFIG) as fh:
config_json = json.loads(fh.read())
tracks = config_json['tracks']
for track in tracks:
track['chunkSizeLimit'] = 1000000000
track['maxHeight'] = 10000
with open(TRACK_LIST_CONFIG, 'w') as output_fh:
output_fh.write(json.dumps(config_json))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Debug script to modify jbrowse track config manually.<commit_after>"""Functions for manipulating JBrowse configs.
NOTE: User responsible for managing backups / not breaking anything.
"""
import json
TRACK_LIST_CONFIG = '/dep_data/temp_data/projects/3bc32fc9/ref_genomes/01166f51/jbrowse/trackList.json'
def main():
with open(TRACK_LIST_CONFIG) as fh:
config_json = json.loads(fh.read())
tracks = config_json['tracks']
for track in tracks:
track['chunkSizeLimit'] = 1000000000
track['maxHeight'] = 10000
with open(TRACK_LIST_CONFIG, 'w') as output_fh:
output_fh.write(json.dumps(config_json))
if __name__ == '__main__':
main()
|
|
d0435bdd72576b84b1add45e6871c2647706b728
|
scrape-10k.py
|
scrape-10k.py
|
import csv
import time
import requests
import lxml.html
top10k = {}
for page_index in range(1, 201):
print('Requesting page {}'.format(page_index))
url = 'https://osu.ppy.sh/p/pp/'
payload = {
'm': 0, # osu! standard gamemode
'o': 1, # descending order
'page': page_index,
}
page = requests.get(url, params=payload)
tree = lxml.html.document_fromstring(page.text)
print('Processing page {}'.format(page_index))
rows = tree.cssselect('tr a')
for row in rows:
user_name = row.text
user_id = row.attrib['href'][3:]
top10k[user_id] = user_name
print(user_name, user_id)
time.sleep(1) # Be nice and slow down
with open('10k.csv', 'a', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
for user_id, user_name in top10k.items():
writer.writerow([user_id, user_name])
|
Add script to scrape top 10k players
|
Add script to scrape top 10k players
|
Python
|
mit
|
Cyanogenoid/osu-modspecific-rank
|
Add script to scrape top 10k players
|
import csv
import time
import requests
import lxml.html
top10k = {}
for page_index in range(1, 201):
print('Requesting page {}'.format(page_index))
url = 'https://osu.ppy.sh/p/pp/'
payload = {
'm': 0, # osu! standard gamemode
'o': 1, # descending order
'page': page_index,
}
page = requests.get(url, params=payload)
tree = lxml.html.document_fromstring(page.text)
print('Processing page {}'.format(page_index))
rows = tree.cssselect('tr a')
for row in rows:
user_name = row.text
user_id = row.attrib['href'][3:]
top10k[user_id] = user_name
print(user_name, user_id)
time.sleep(1) # Be nice and slow down
with open('10k.csv', 'a', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
for user_id, user_name in top10k.items():
writer.writerow([user_id, user_name])
|
<commit_before><commit_msg>Add script to scrape top 10k players<commit_after>
|
import csv
import time
import requests
import lxml.html
top10k = {}
for page_index in range(1, 201):
print('Requesting page {}'.format(page_index))
url = 'https://osu.ppy.sh/p/pp/'
payload = {
'm': 0, # osu! standard gamemode
'o': 1, # descending order
'page': page_index,
}
page = requests.get(url, params=payload)
tree = lxml.html.document_fromstring(page.text)
print('Processing page {}'.format(page_index))
rows = tree.cssselect('tr a')
for row in rows:
user_name = row.text
user_id = row.attrib['href'][3:]
top10k[user_id] = user_name
print(user_name, user_id)
time.sleep(1) # Be nice and slow down
with open('10k.csv', 'a', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
for user_id, user_name in top10k.items():
writer.writerow([user_id, user_name])
|
Add script to scrape top 10k playersimport csv
import time
import requests
import lxml.html
top10k = {}
for page_index in range(1, 201):
print('Requesting page {}'.format(page_index))
url = 'https://osu.ppy.sh/p/pp/'
payload = {
'm': 0, # osu! standard gamemode
'o': 1, # descending order
'page': page_index,
}
page = requests.get(url, params=payload)
tree = lxml.html.document_fromstring(page.text)
print('Processing page {}'.format(page_index))
rows = tree.cssselect('tr a')
for row in rows:
user_name = row.text
user_id = row.attrib['href'][3:]
top10k[user_id] = user_name
print(user_name, user_id)
time.sleep(1) # Be nice and slow down
with open('10k.csv', 'a', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
for user_id, user_name in top10k.items():
writer.writerow([user_id, user_name])
|
<commit_before><commit_msg>Add script to scrape top 10k players<commit_after>import csv
import time
import requests
import lxml.html
top10k = {}
for page_index in range(1, 201):
print('Requesting page {}'.format(page_index))
url = 'https://osu.ppy.sh/p/pp/'
payload = {
'm': 0, # osu! standard gamemode
'o': 1, # descending order
'page': page_index,
}
page = requests.get(url, params=payload)
tree = lxml.html.document_fromstring(page.text)
print('Processing page {}'.format(page_index))
rows = tree.cssselect('tr a')
for row in rows:
user_name = row.text
user_id = row.attrib['href'][3:]
top10k[user_id] = user_name
print(user_name, user_id)
time.sleep(1) # Be nice and slow down
with open('10k.csv', 'a', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
for user_id, user_name in top10k.items():
writer.writerow([user_id, user_name])
|
|
2bda9d0e746d4abe64f0a21803fbc07e244bc96b
|
djconnectwise/migrations/0008_auto_20170215_1430.py
|
djconnectwise/migrations/0008_auto_20170215_1430.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djconnectwise', '0007_auto_20170215_1918'),
]
operations = [
migrations.AlterModelOptions(
name='company',
options={'verbose_name_plural': 'companies', 'ordering': ('company_identifier',)},
),
migrations.AlterModelOptions(
name='member',
options={'ordering': ('first_name', 'last_name')},
),
migrations.AlterModelOptions(
name='project',
options={'ordering': ('name',)},
),
migrations.AlterModelOptions(
name='serviceticket',
options={'verbose_name_plural': 'Service Tickets', 'verbose_name': 'Service Ticket', 'ordering': ('summary',)},
),
migrations.AlterModelOptions(
name='ticketpriority',
options={'ordering': ('name',)},
),
migrations.AlterModelOptions(
name='ticketstatus',
options={'verbose_name_plural': 'ticket statuses', 'ordering': ('ticket_status',)},
),
]
|
Add a migration for model meta options that should have been added earlier
|
Add a migration for model meta options that should have been added earlier
|
Python
|
mit
|
AparatTechnologies/django-connectwise,KerkhoffTechnologies/django-connectwise,KerkhoffTechnologies/django-connectwise
|
Add a migration for model meta options that should have been added earlier
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djconnectwise', '0007_auto_20170215_1918'),
]
operations = [
migrations.AlterModelOptions(
name='company',
options={'verbose_name_plural': 'companies', 'ordering': ('company_identifier',)},
),
migrations.AlterModelOptions(
name='member',
options={'ordering': ('first_name', 'last_name')},
),
migrations.AlterModelOptions(
name='project',
options={'ordering': ('name',)},
),
migrations.AlterModelOptions(
name='serviceticket',
options={'verbose_name_plural': 'Service Tickets', 'verbose_name': 'Service Ticket', 'ordering': ('summary',)},
),
migrations.AlterModelOptions(
name='ticketpriority',
options={'ordering': ('name',)},
),
migrations.AlterModelOptions(
name='ticketstatus',
options={'verbose_name_plural': 'ticket statuses', 'ordering': ('ticket_status',)},
),
]
|
<commit_before><commit_msg>Add a migration for model meta options that should have been added earlier<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djconnectwise', '0007_auto_20170215_1918'),
]
operations = [
migrations.AlterModelOptions(
name='company',
options={'verbose_name_plural': 'companies', 'ordering': ('company_identifier',)},
),
migrations.AlterModelOptions(
name='member',
options={'ordering': ('first_name', 'last_name')},
),
migrations.AlterModelOptions(
name='project',
options={'ordering': ('name',)},
),
migrations.AlterModelOptions(
name='serviceticket',
options={'verbose_name_plural': 'Service Tickets', 'verbose_name': 'Service Ticket', 'ordering': ('summary',)},
),
migrations.AlterModelOptions(
name='ticketpriority',
options={'ordering': ('name',)},
),
migrations.AlterModelOptions(
name='ticketstatus',
options={'verbose_name_plural': 'ticket statuses', 'ordering': ('ticket_status',)},
),
]
|
Add a migration for model meta options that should have been added earlier# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djconnectwise', '0007_auto_20170215_1918'),
]
operations = [
migrations.AlterModelOptions(
name='company',
options={'verbose_name_plural': 'companies', 'ordering': ('company_identifier',)},
),
migrations.AlterModelOptions(
name='member',
options={'ordering': ('first_name', 'last_name')},
),
migrations.AlterModelOptions(
name='project',
options={'ordering': ('name',)},
),
migrations.AlterModelOptions(
name='serviceticket',
options={'verbose_name_plural': 'Service Tickets', 'verbose_name': 'Service Ticket', 'ordering': ('summary',)},
),
migrations.AlterModelOptions(
name='ticketpriority',
options={'ordering': ('name',)},
),
migrations.AlterModelOptions(
name='ticketstatus',
options={'verbose_name_plural': 'ticket statuses', 'ordering': ('ticket_status',)},
),
]
|
<commit_before><commit_msg>Add a migration for model meta options that should have been added earlier<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djconnectwise', '0007_auto_20170215_1918'),
]
operations = [
migrations.AlterModelOptions(
name='company',
options={'verbose_name_plural': 'companies', 'ordering': ('company_identifier',)},
),
migrations.AlterModelOptions(
name='member',
options={'ordering': ('first_name', 'last_name')},
),
migrations.AlterModelOptions(
name='project',
options={'ordering': ('name',)},
),
migrations.AlterModelOptions(
name='serviceticket',
options={'verbose_name_plural': 'Service Tickets', 'verbose_name': 'Service Ticket', 'ordering': ('summary',)},
),
migrations.AlterModelOptions(
name='ticketpriority',
options={'ordering': ('name',)},
),
migrations.AlterModelOptions(
name='ticketstatus',
options={'verbose_name_plural': 'ticket statuses', 'ordering': ('ticket_status',)},
),
]
|
|
505671b698490918fe0ea6c6dfdab8c0b25339be
|
tests/basics/subclass_native_containment.py
|
tests/basics/subclass_native_containment.py
|
# test containment operator on subclass of a native type
class mylist(list):
pass
class mydict(dict):
pass
class mybytes(bytes):
pass
l = mylist([1, 2, 3])
print(0 in l)
print(1 in l)
d = mydict({1:1, 2:2})
print(0 in l)
print(1 in l)
b = mybytes(b'1234')
print(0 in b)
print(b'1' in b)
|
Add test for containment of a subclass of a native type.
|
tests/basics: Add test for containment of a subclass of a native type.
|
Python
|
mit
|
MrSurly/micropython,dmazzella/micropython,infinnovation/micropython,bvernoux/micropython,selste/micropython,pfalcon/micropython,bvernoux/micropython,pozetroninc/micropython,kerneltask/micropython,torwag/micropython,infinnovation/micropython,ryannathans/micropython,pfalcon/micropython,swegener/micropython,dmazzella/micropython,tobbad/micropython,lowRISC/micropython,adafruit/circuitpython,selste/micropython,tobbad/micropython,adafruit/circuitpython,pramasoul/micropython,swegener/micropython,kerneltask/micropython,torwag/micropython,bvernoux/micropython,blazewicz/micropython,pramasoul/micropython,pozetroninc/micropython,lowRISC/micropython,henriknelson/micropython,adafruit/micropython,tobbad/micropython,adafruit/circuitpython,swegener/micropython,adafruit/circuitpython,ryannathans/micropython,selste/micropython,infinnovation/micropython,tralamazza/micropython,blazewicz/micropython,lowRISC/micropython,ryannathans/micropython,kerneltask/micropython,torwag/micropython,trezor/micropython,adafruit/circuitpython,henriknelson/micropython,pozetroninc/micropython,tobbad/micropython,pramasoul/micropython,selste/micropython,pozetroninc/micropython,trezor/micropython,lowRISC/micropython,bvernoux/micropython,MrSurly/micropython,pramasoul/micropython,trezor/micropython,kerneltask/micropython,infinnovation/micropython,pozetroninc/micropython,tralamazza/micropython,adafruit/micropython,henriknelson/micropython,pfalcon/micropython,adafruit/micropython,torwag/micropython,pramasoul/micropython,tralamazza/micropython,tralamazza/micropython,swegener/micropython,henriknelson/micropython,dmazzella/micropython,lowRISC/micropython,pfalcon/micropython,ryannathans/micropython,torwag/micropython,swegener/micropython,bvernoux/micropython,trezor/micropython,tobbad/micropython,MrSurly/micropython,blazewicz/micropython,trezor/micropython,adafruit/micropython,blazewicz/micropython,MrSurly/micropython,selste/micropython,adafruit/micropython,pfalcon/micropython,ryannathans/micropython,adafruit/circuitpython,blazewicz/micropython,henriknelson/micropython,dmazzella/micropython,kerneltask/micropython,infinnovation/micropython,MrSurly/micropython
|
tests/basics: Add test for containment of a subclass of a native type.
|
# test containment operator on subclass of a native type
class mylist(list):
pass
class mydict(dict):
pass
class mybytes(bytes):
pass
l = mylist([1, 2, 3])
print(0 in l)
print(1 in l)
d = mydict({1:1, 2:2})
print(0 in l)
print(1 in l)
b = mybytes(b'1234')
print(0 in b)
print(b'1' in b)
|
<commit_before><commit_msg>tests/basics: Add test for containment of a subclass of a native type.<commit_after>
|
# test containment operator on subclass of a native type
class mylist(list):
pass
class mydict(dict):
pass
class mybytes(bytes):
pass
l = mylist([1, 2, 3])
print(0 in l)
print(1 in l)
d = mydict({1:1, 2:2})
print(0 in l)
print(1 in l)
b = mybytes(b'1234')
print(0 in b)
print(b'1' in b)
|
tests/basics: Add test for containment of a subclass of a native type.# test containment operator on subclass of a native type
class mylist(list):
pass
class mydict(dict):
pass
class mybytes(bytes):
pass
l = mylist([1, 2, 3])
print(0 in l)
print(1 in l)
d = mydict({1:1, 2:2})
print(0 in d)
print(1 in d)
b = mybytes(b'1234')
print(0 in b)
print(b'1' in b)
|
<commit_before><commit_msg>tests/basics: Add test for containment of a subclass of a native type.<commit_after># test containment operator on subclass of a native type
class mylist(list):
pass
class mydict(dict):
pass
class mybytes(bytes):
pass
l = mylist([1, 2, 3])
print(0 in l)
print(1 in l)
d = mydict({1:1, 2:2})
print(0 in d)
print(1 in d)
b = mybytes(b'1234')
print(0 in b)
print(b'1' in b)
|
|
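For reference, the expected CPython 3 output of the containment test above (my annotation, not part of the stored commit) is:

# 0 in mylist([1, 2, 3])     -> False
# 1 in mylist([1, 2, 3])     -> True
# 0 in mydict({1: 1, 2: 2})  -> False
# 1 in mydict({1: 1, 2: 2})  -> True
# 0 in mybytes(b'1234')      -> False   (no byte with value 0 is present)
# b'1' in mybytes(b'1234')   -> True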
7d998deb5cfc5dafe24f0ba21e120020ba695447
|
project/scripts/data_generator.py
|
project/scripts/data_generator.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
import math
from random import gauss
from scipy import special
from dates import get_previous_day, get_next_day
def get_missing_data_point(required_dates, daily_data, date):
"""
Calculate a suitable data point with relevant probability from known info
"""
if get_previous_day(date) in daily_data and get_next_day(date) in daily_data:
# if we have neighbouring data points, generate point with gaussian smoothing
return get_smoothed_value(get_previous_day(date), get_next_day(date))
else:
# if we have no neighbouring data, take a probabilistic guess
return get_gaussian_random(len(required_dates))
def get_gaussian_random(time_range):
"""
Return a random value for the data point between 0 and 100, over a gaussian distribution
determined by the range of data required
"""
# The actual data will be 100 and 0 once each over the respective time range
# so over a larger time range, the probability of this specific entry being high or low
# decreases. We adjust the standard deviation accordingly to generate reasonable values.
lower = 0
upper = 100
mean = 50
chance_of_extremity = 1 / time_range
f = 1 - chance_of_extremity
num_standard_devs = special.erfinv(f) * math.sqrt(2)
standard_dev = 50 / num_standard_devs
value = gauss(mean, standard_dev)
while value < lower or value > upper:
#check if value outside range. This will basically never happen
value = gauss(mean, standard_dev)
return round(value)
def get_smoothed_value(prev, next):
"""
Given the data points for the next and previous days, generate the data point
using modified Gaussian smoothing
"""
# unsophisticated average of neighbouring 2 points
straight_average = (prev + next) / 2
# add some noise with gaussian distribution centered on this point
mean = straight_average
std_dev = abs(straight_average - next) / 5
value = gauss(mean, std_dev) # less than 0.1% chance of not falling between next and prev
return round(value)
|
Add functions for generating missing data in worst case scenario absence of previous data
|
Add functions for generating missing data in worst case scenario absence of previous data
|
Python
|
apache-2.0
|
googleinterns/sgonks,googleinterns/sgonks,googleinterns/sgonks,googleinterns/sgonks
|
Add functions for generating missing data in worst case scenario absence of previous data
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
import math
from random import gauss
from scipy import special
from dates import get_previous_day, get_next_day
def get_missing_data_point(required_dates, daily_data, date):
"""
Calculate a suitable data point with relevant probability from known info
"""
if get_previous_day(date) in daily_data and get_next_day(date) in daily_data:
# if we have neighbouring data points, generate point with gaussian smoothing
return get_smoothed_value(get_previous_day(date), get_next_day(date))
else:
# if we have no neighbouring data, take a probabilistic guess
return get_gaussian_random(len(required_dates))
def get_gaussian_random(time_range):
"""
Return a random value for the data point between 0 and 100, over a gaussian distribution
determined by the range of data required
"""
# The actual data will be 100 and 0 once each over the respective time range
# so over a larger time range, the probability of this specific entry being high or low
# decreases. We adjust the standard deviation accordingly to generate reasonable values.
lower = 0
upper = 100
mean = 50
chance_of_extremity = 1 / time_range
f = 1 - chance_of_extremity
num_standard_devs = special.erfinv(f) * math.sqrt(2)
standard_dev = 50 / num_standard_devs
value = gauss(mean, standard_dev)
while value < lower or value > upper:
#check if value outside range. This will basically never happen
value = gauss(mean, standard_dev)
return round(value)
def get_smoothed_value(prev, next):
"""
Given the data points for the next and previous days, generate the data point
using modified Gaussian smoothing
"""
# unsophisticated average of neighbouring 2 points
straight_average = (prev + next) / 2
# add some noise with gaussian distribution centered on this point
mean = straight_average
std_dev = abs(straight_average - next) / 5
value = gauss(mean, std_dev) # less than 0.1% chance of not falling between next and prev
return round(value)
|
<commit_before><commit_msg>Add functions for generating missing data in worst case scenario absence of previous data<commit_after>
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
import math
from random import gauss
from scipy import special
from dates import get_previous_day, get_next_day
def get_missing_data_point(required_dates, daily_data, date):
"""
Calculate a suitable data point with relevant probability from known info
"""
if get_previous_day(date) in daily_data and get_next_day(date) in daily_data:
# if we have neighbouring data points, generate point with gaussian smoothing
return get_smoothed_value(get_previous_day(date), get_next_day(date))
else:
# if we have no neighbouring data, take a probabilistic guess
return get_gaussian_random(len(required_dates))
def get_gaussian_random(time_range):
"""
Return a random value for the data point between 0 and 100, over a gaussian distribution
determined by the range of data required
"""
# The actual data will be 100 and 0 once each over the respective time range
# so over a larger time range, the probability of this specific entry being high or low
# decreases. We adjust the standard deviation accordingly to generate reasonable values.
lower = 0
upper = 100
mean = 50
chance_of_extremity = 1 / time_range
f = 1 - chance_of_extremity
num_standard_devs = special.erfinv(f) * math.sqrt(2)
standard_dev = 50 / num_standard_devs
value = gauss(mean, standard_dev)
while value < lower or value > upper:
#check if value outside range. This will basically never happen
value = gauss(mean, standard_dev)
return round(value)
def get_smoothed_value(prev, next):
"""
Given the data points for the next and previous days, generate the data point
using modified Gaussian smoothing
"""
# unsophisticated average of neighbouring 2 points
straight_average = (prev + next) / 2
# add some noise with gaussian distribution centered on this point
mean = straight_average
std_dev = abs(straight_average - next) / 5
value = gauss(mean, std_dev) # less than 0.1% chance of not falling between next and prev
return round(value)
|
Add functions for generating missing data in worst case scenario absence of previous data# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
import math
from random import gauss
from scipy import special
from dates import get_previous_day, get_next_day
def get_missing_data_point(required_dates, daily_data, date):
"""
Calculate a suitable data point with relevant probability from known info
"""
if get_previous_day(date) in daily_data and get_next_day(date) in daily_data:
# if we have neighbouring data points, generate point with gaussian smoothing
return get_smoothed_value(get_previous_day(date), get_next_day(date))
else:
# if we have no neighbouring data, take a probabilistic guess
return get_gaussian_random(len(required_dates))
def get_gaussian_random(time_range):
"""
Return a random value for the data point between 0 and 100, over a gaussian distribution
determined by the range of data required
"""
# The actual data will be 100 and 0 once each over the respective time range
# so over a larger time range, the probability of this specific entry being high or low
# decreases. We adjust the standard deviation accordingly to generate reasonable values.
lower = 0
upper = 100
mean = 50
chance_of_extremity = 1 / time_range
f = 1 - chance_of_extremity
num_standard_devs = special.erfinv(f) * math.sqrt(2)
standard_dev = 50 / num_standard_devs
value = gauss(mean, standard_dev)
while value < lower or value > upper:
#check if value outside range. This will basically never happen
value = gauss(mean, standard_dev)
return round(value)
def get_smoothed_value(prev, next):
"""
Given the data points for the next and previous days, generate the data point
using modified Gaussian smoothing
"""
# unsophisticated average of neighbouring 2 points
straight_average = (prev + next) / 2
# add some noise with gaussian distribution centered on this point
mean = straight_average
std_dev = abs(straight_average - next) / 5
value = gauss(mean, std_dev) # less than 0.1% chance of not falling between next and prev
return round(value)
|
<commit_before><commit_msg>Add functions for generating missing data in worst case scenario absence of previous data<commit_after># Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
import math
from random import gauss
from scipy import special
from dates import get_previous_day, get_next_day
def get_missing_data_point(required_dates, daily_data, date):
"""
Calculate a suitable data point with relevant probability from known info
"""
if get_previous_day(date) in daily_data and get_next_day(date) in daily_data:
# if we have neighbouring data points, generate point with gaussian smoothing
return get_smoothed_value(get_previous_day(date), get_next_day(date))
else:
# if we have no neighbouring data, take a probabilistic guess
return get_gaussian_random(len(required_dates))
def get_gaussian_random(time_range):
"""
Return a random value for the data point between 0 and 100, over a gaussian distribution
determined by the range of data required
"""
# The actual data will be 100 and 0 once each over the respective time range
# so over a larger time range, the probability of this specific entry being high or low
# decreases. We adjust the standard deviation accordingly to generate reasonable values.
lower = 0
upper = 100
mean = 50
chance_of_extremity = 1 / time_range
f = 1 - chance_of_extremity
num_standard_devs = special.erfinv(f) * math.sqrt(2)
standard_dev = 50 / num_standard_devs
value = gauss(mean, standard_dev)
while value < lower or value > upper:
#check if value outside range. This will basically never happen
value = gauss(mean, standard_dev)
return round(value)
def get_smoothed_value(prev, next):
"""
Given the data points for the next and previous days, generate the data point
using modified Gaussian smoothing
"""
# unsophisticated average of neighbouring 2 points
straight_average = (prev + next) / 2
# add some noise with gaussian distribution centered on this point
mean = straight_average
std_dev = abs(straight_average - next) / 5
value = gauss(mean, std_dev) # less than 0.1% chance of not falling between next and prev
return round(value)
|
|
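The standard-deviation arithmetic in get_gaussian_random above is the least obvious part of the snippet, so here is a small sanity check (my own illustration, not part of the stored commit) showing that the chosen deviation makes a value outside [0, 100] occur roughly once per time range:

import math
from scipy import special

time_range = 365  # e.g. one data point per day for a year
num_standard_devs = special.erfinv(1 - 1 / time_range) * math.sqrt(2)
standard_dev = 50 / num_standard_devs
# For a normal distribution centred on 50, P(|X - 50| > 50) = erfc(50 / (std * sqrt(2))),
# which by construction equals 1 / time_range here.
p_outside = special.erfc(50 / (standard_dev * math.sqrt(2)))
print(round(p_outside * time_range, 6))  # ~1.0: about one extreme value per range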
26d5b3964bbe2a42702dd90cb9274287b402d944
|
labmanager/tests/integration/util.py
|
labmanager/tests/integration/util.py
|
import unittest
import time
import re
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
class IntegrationTestCase(unittest.TestCase):
"""
This class wraps Selenium. So the setUp method
will rebuild the database, create a client and the
selenium driver. Subclasses can focus on creating
the proper tests. It also provides two utility
methods: is_element_present, is_alert_present and
close_alert_and_get_its_text.
Creating new tests is as simple as running Firefox
with selenium and running the whole process.
"""
def setUp(self):
# setup gateway4labs environment
# setup selenium environment
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://localhost:5000/"
self.verificationErrors = []
self.accept_next_alert = True
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException, e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
|
Add first skeleton of selenium-based test
|
Add first skeleton of selenium-based test
|
Python
|
bsd-2-clause
|
gateway4labs/labmanager,go-lab/labmanager,morelab/labmanager,morelab/labmanager,labsland/labmanager,gateway4labs/labmanager,morelab/labmanager,labsland/labmanager,labsland/labmanager,labsland/labmanager,go-lab/labmanager,porduna/labmanager,morelab/labmanager,go-lab/labmanager,go-lab/labmanager,porduna/labmanager,porduna/labmanager,gateway4labs/labmanager,porduna/labmanager,gateway4labs/labmanager
|
Add first skeleton of selenium-based test
|
import unittest
import time
import re
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
class IntegrationTestCase(unittest.TestCase):
"""
This class wraps Selenium. So the setUp method
will rebuild the database, create a client and the
selenium driver. Subclasses can focus on creating
the proper tests. It also provides two utility
methods: is_element_present, is_alert_present and
close_alert_and_get_its_text.
Creating new tests is as simple as running Firefox
with selenium and running the whole process.
"""
def setUp(self):
# setup gateway4labs environment
# setup selenium environment
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://localhost:5000/"
self.verificationErrors = []
self.accept_next_alert = True
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException, e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add first skeleton of selenium-based test<commit_after>
|
import unittest
import time
import re
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
class IntegrationTestCase(unittest.TestCase):
"""
This class wraps Selenium. So the setUp method
will rebuild the database, create a client and the
selenium driver. Subclasses can focus on creating
the proper tests. It also provides two utility
methods: is_element_present, is_alert_present and
close_alert_and_get_its_text.
Creating new tests is as simple as running Firefox
with selenium and running the whole process.
"""
def setUp(self):
# setup gateway4labs environment
# setup selenium environment
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://localhost:5000/"
self.verificationErrors = []
self.accept_next_alert = True
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException, e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
|
Add first skeleton of selenium-based testimport unittest
import time
import re
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
class IntegrationTestCase(unittest.TestCase):
"""
This class wraps Selenium. So the setUp method
will rebuild the database, create a client and the
selenium driver. Subclasses can focus on creating
the proper tests. It also provides two utility
methods: is_element_present, is_alert_present and
close_alert_and_get_its_text.
Creating new tests is as simple as running Firefox
with selenium and running the whole process.
"""
def setUp(self):
# setup gateway4labs environment
# setup selenium environment
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://localhost:5000/"
self.verificationErrors = []
self.accept_next_alert = True
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException, e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add first skeleton of selenium-based test<commit_after>import unittest
import time
import re
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
class IntegrationTestCase(unittest.TestCase):
"""
This class wraps Selenium. So the setUp method
will rebuild the database, create a client and the
selenium driver. Subclasses can focus on creating
the proper tests. It also provides two utility
methods: is_element_present, is_alert_present and
close_alert_and_get_its_text.
Creating new tests is as simple as running Firefox
with selenium and running the whole process.
"""
def setUp(self):
# setup gateway4labs environment
# setup selenium environment
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://localhost:5000/"
self.verificationErrors = []
self.accept_next_alert = True
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException, e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
|
|
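One note on the IntegrationTestCase snippet above: it uses Python 2 "except X, e" syntax, and is_alert_present refers to NoAlertPresentException without importing it, so that path would raise a NameError. A minimal Python 3 sketch of the same helpers (my illustration, not part of the stored commit) could look like this:

from selenium.common.exceptions import NoSuchElementException, NoAlertPresentException

# Drop-in replacements for the helper methods above, written as Python 3 methods.
def is_element_present(self, how, what):
    try:
        self.driver.find_element(by=how, value=what)
    except NoSuchElementException:
        return False
    return True

def is_alert_present(self):
    try:
        # Older bindings used driver.switch_to_alert(); newer ones expose switch_to.alert.
        self.driver.switch_to_alert()
    except NoAlertPresentException:
        return False
    return True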
773003b45d472807b17b41db3a96ea1795571ddf
|
python/ember/examples/example_cylindrical_outward.py
|
python/ember/examples/example_cylindrical_outward.py
|
#!/usr/bin/env python
"""
Outwardly-propagating cylindrical geometry for a strained lean methane flame.
The converged axial velocity profile is plotted. The stagnation point is
located at r=0.
"""
from ember import *
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
output = 'run/ex_cylindrical_outward'
conf = Config(
Paths(outputDir=output,
# logFile='ex_cylindrical_outward.log'
),
Chemistry(mechanismFile='gri30.xml'),
General(flameGeometry='cylindrical',
unburnedLeft=False,
fixedLeftLocation=True,
nThreads=4),
InitialCondition(fuel='CH4:0.5, H2:0.5',
equivalenceRatio=0.60,
xLeft=0.0,
xRight=0.005),
StrainParameters(initial=500,
final=500),
TerminationCondition(tEnd=10,
measurement='dTdt'),
Times(profileStepInterval=10,
regridStepInterval=10),
)
if __name__ == '__main__':
conf.run()
struct = utils.load(output + '/profNow.h5')
plt.figure()
plt.plot(struct.x, struct.V / struct.rho)
plt.xlabel('Position [m]')
plt.ylabel('Axial Velocity [m/s]')
plt.savefig(output + '/FinalAxialVelocity.png')
plt.close()
|
Add example for outwardly-propagating cylindrical flame
|
Add example for outwardly-propagating cylindrical flame
|
Python
|
mit
|
speth/ember,speth/ember,speth/ember
|
Add example for outwardly-propagating cylindrical flame
|
#!/usr/bin/env python
"""
Outwardly-propagating cylindrical geometry for a strained lean methane flame.
The converged axial velocity profile is plotted. The stagnation point is
located at r=0.
"""
from ember import *
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
output = 'run/ex_cylindrical_outward'
conf = Config(
Paths(outputDir=output,
# logFile='ex_cylindrical_outward.log'
),
Chemistry(mechanismFile='gri30.xml'),
General(flameGeometry='cylindrical',
unburnedLeft=False,
fixedLeftLocation=True,
nThreads=4),
InitialCondition(fuel='CH4:0.5, H2:0.5',
equivalenceRatio=0.60,
xLeft=0.0,
xRight=0.005),
StrainParameters(initial=500,
final=500),
TerminationCondition(tEnd=10,
measurement='dTdt'),
Times(profileStepInterval=10,
regridStepInterval=10),
)
if __name__ == '__main__':
conf.run()
struct = utils.load(output + '/profNow.h5')
plt.figure()
plt.plot(struct.x, struct.V / struct.rho)
plt.xlabel('Position [m]')
plt.ylabel('Axial Velocity [m/s]')
plt.savefig(output + '/FinalAxialVelocity.png')
plt.close()
|
<commit_before><commit_msg>Add example for outwardly-propagating cylindrical flame<commit_after>
|
#!/usr/bin/env python
"""
Outwardly-propagating cylindrical geometry for a strained lean methane flame.
The converged axial velocity profile is plotted. The stagnation point is
located at r=0.
"""
from ember import *
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
output = 'run/ex_cylindrical_outward'
conf = Config(
Paths(outputDir=output,
# logFile='ex_cylindrical_outward.log'
),
Chemistry(mechanismFile='gri30.xml'),
General(flameGeometry='cylindrical',
unburnedLeft=False,
fixedLeftLocation=True,
nThreads=4),
InitialCondition(fuel='CH4:0.5, H2:0.5',
equivalenceRatio=0.60,
xLeft=0.0,
xRight=0.005),
StrainParameters(initial=500,
final=500),
TerminationCondition(tEnd=10,
measurement='dTdt'),
Times(profileStepInterval=10,
regridStepInterval=10),
)
if __name__ == '__main__':
conf.run()
struct = utils.load(output + '/profNow.h5')
plt.figure()
plt.plot(struct.x, struct.V / struct.rho)
plt.xlabel('Position [m]')
plt.ylabel('Axial Velocity [m/s]')
plt.savefig(output + '/FinalAxialVelocity.png')
plt.close()
|
Add example for outwardly-propagating cylindrical flame#!/usr/bin/env python
"""
Outwardly-propagating cylindrical geometry for a strained lean methane flame.
The converged axial velocity profile is plotted. The stagnation point is
located at r=0.
"""
from ember import *
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
output = 'run/ex_cylindrical_outward'
conf = Config(
Paths(outputDir=output,
# logFile='ex_cylindrical_outward.log'
),
Chemistry(mechanismFile='gri30.xml'),
General(flameGeometry='cylindrical',
unburnedLeft=False,
fixedLeftLocation=True,
nThreads=4),
InitialCondition(fuel='CH4:0.5, H2:0.5',
equivalenceRatio=0.60,
xLeft=0.0,
xRight=0.005),
StrainParameters(initial=500,
final=500),
TerminationCondition(tEnd=10,
measurement='dTdt'),
Times(profileStepInterval=10,
regridStepInterval=10),
)
if __name__ == '__main__':
conf.run()
struct = utils.load(output + '/profNow.h5')
plt.figure()
plt.plot(struct.x, struct.V / struct.rho)
plt.xlabel('Position [m]')
plt.ylabel('Axial Velocity [m/s]')
plt.savefig(output + '/FinalAxialVelocity.png')
plt.close()
|
<commit_before><commit_msg>Add example for outwardly-propagating cylindrical flame<commit_after>#!/usr/bin/env python
"""
Outwardly-propagating cylindrical geometry for a strained lean methane flame.
The converged axial velocity profile is plotted. The stagnation point is
located at r=0.
"""
from ember import *
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
output = 'run/ex_cylindrical_outward'
conf = Config(
Paths(outputDir=output,
# logFile='ex_cylindrical_outward.log'
),
Chemistry(mechanismFile='gri30.xml'),
General(flameGeometry='cylindrical',
unburnedLeft=False,
fixedLeftLocation=True,
nThreads=4),
InitialCondition(fuel='CH4:0.5, H2:0.5',
equivalenceRatio=0.60,
xLeft=0.0,
xRight=0.005),
StrainParameters(initial=500,
final=500),
TerminationCondition(tEnd=10,
measurement='dTdt'),
Times(profileStepInterval=10,
regridStepInterval=10),
)
if __name__ == '__main__':
conf.run()
struct = utils.load(output + '/profNow.h5')
plt.figure()
plt.plot(struct.x, struct.V / struct.rho)
plt.xlabel('Position [m]')
plt.ylabel('Axial Velocity [m/s]')
plt.savefig(output + '/FinalAxialVelocity.png')
plt.close()
|
|
4b5650b57c28e33003795075c439632f4b2dd1e8
|
user_management/models/tests/test_admin.py
|
user_management/models/tests/test_admin.py
|
from django.contrib.admin.sites import AdminSite
from django.test import TestCase
from ..admin import VerifyUserAdmin
from .factories import UserFactory
from .models import User
class VerifyUserAdminTest(TestCase):
def setUp(self):
self.site = AdminSite()
def test_create_fieldsets(self):
expected_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2'),
}),
)
verify_user_admin = VerifyUserAdmin(User, self.site)
self.assertEqual(
verify_user_admin.get_fieldsets(request=None),
expected_fieldsets,
)
def test_fieldsets(self):
expected_fieldsets = (
(None, {'fields': ('email', 'password')}),
('Personal info', {'fields': ('name',)}),
('Permissions', {
'fields': (
('is_active', 'verified_email'),
'is_staff',
'is_superuser',
'groups',
'user_permissions',
)
}),
('Important dates', {
'fields': ('last_login', 'date_joined'),
}),
)
user = UserFactory.build()
verify_user_admin = VerifyUserAdmin(User, self.site)
self.assertEqual(
verify_user_admin.get_fieldsets(request=None, obj=user),
expected_fieldsets,
)
|
Add tests for VerifyUserAdmin fieldsets
|
Add tests for VerifyUserAdmin fieldsets
|
Python
|
bsd-2-clause
|
incuna/django-user-management,incuna/django-user-management
|
Add tests for VerifyUserAdmin fieldsets
|
from django.contrib.admin.sites import AdminSite
from django.test import TestCase
from ..admin import VerifyUserAdmin
from .factories import UserFactory
from .models import User
class VerifyUserAdminTest(TestCase):
def setUp(self):
self.site = AdminSite()
def test_create_fieldsets(self):
expected_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2'),
}),
)
verify_user_admin = VerifyUserAdmin(User, self.site)
self.assertEqual(
verify_user_admin.get_fieldsets(request=None),
expected_fieldsets,
)
def test_fieldsets(self):
expected_fieldsets = (
(None, {'fields': ('email', 'password')}),
('Personal info', {'fields': ('name',)}),
('Permissions', {
'fields': (
('is_active', 'verified_email'),
'is_staff',
'is_superuser',
'groups',
'user_permissions',
)
}),
('Important dates', {
'fields': ('last_login', 'date_joined'),
}),
)
user = UserFactory.build()
verify_user_admin = VerifyUserAdmin(User, self.site)
self.assertEqual(
verify_user_admin.get_fieldsets(request=None, obj=user),
expected_fieldsets,
)
|
<commit_before><commit_msg>Add tests for VerifyUserAdmin fieldsets<commit_after>
|
from django.contrib.admin.sites import AdminSite
from django.test import TestCase
from ..admin import VerifyUserAdmin
from .factories import UserFactory
from .models import User
class VerifyUserAdminTest(TestCase):
def setUp(self):
self.site = AdminSite()
def test_create_fieldsets(self):
expected_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2'),
}),
)
verify_user_admin = VerifyUserAdmin(User, self.site)
self.assertEqual(
verify_user_admin.get_fieldsets(request=None),
expected_fieldsets,
)
def test_fieldsets(self):
expected_fieldsets = (
(None, {'fields': ('email', 'password')}),
('Personal info', {'fields': ('name',)}),
('Permissions', {
'fields': (
('is_active', 'verified_email'),
'is_staff',
'is_superuser',
'groups',
'user_permissions',
)
}),
('Important dates', {
'fields': ('last_login', 'date_joined'),
}),
)
user = UserFactory.build()
verify_user_admin = VerifyUserAdmin(User, self.site)
self.assertEqual(
verify_user_admin.get_fieldsets(request=None, obj=user),
expected_fieldsets,
)
|
Add tests for VerifyUserAdmin fieldsetsfrom django.contrib.admin.sites import AdminSite
from django.test import TestCase
from ..admin import VerifyUserAdmin
from .factories import UserFactory
from .models import User
class VerifyUserAdminTest(TestCase):
def setUp(self):
self.site = AdminSite()
def test_create_fieldsets(self):
expected_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2'),
}),
)
verify_user_admin = VerifyUserAdmin(User, self.site)
self.assertEqual(
verify_user_admin.get_fieldsets(request=None),
expected_fieldsets,
)
def test_fieldsets(self):
expected_fieldsets = (
(None, {'fields': ('email', 'password')}),
('Personal info', {'fields': ('name',)}),
('Permissions', {
'fields': (
('is_active', 'verified_email'),
'is_staff',
'is_superuser',
'groups',
'user_permissions',
)
}),
('Important dates', {
'fields': ('last_login', 'date_joined'),
}),
)
user = UserFactory.build()
verify_user_admin = VerifyUserAdmin(User, self.site)
self.assertEqual(
verify_user_admin.get_fieldsets(request=None, obj=user),
expected_fieldsets,
)
|
<commit_before><commit_msg>Add tests for VerifyUserAdmin fieldsets<commit_after>from django.contrib.admin.sites import AdminSite
from django.test import TestCase
from ..admin import VerifyUserAdmin
from .factories import UserFactory
from .models import User
class VerifyUserAdminTest(TestCase):
def setUp(self):
self.site = AdminSite()
def test_create_fieldsets(self):
expected_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2'),
}),
)
verify_user_admin = VerifyUserAdmin(User, self.site)
self.assertEqual(
verify_user_admin.get_fieldsets(request=None),
expected_fieldsets,
)
def test_fieldsets(self):
expected_fieldsets = (
(None, {'fields': ('email', 'password')}),
('Personal info', {'fields': ('name',)}),
('Permissions', {
'fields': (
('is_active', 'verified_email'),
'is_staff',
'is_superuser',
'groups',
'user_permissions',
)
}),
('Important dates', {
'fields': ('last_login', 'date_joined'),
}),
)
user = UserFactory.build()
verify_user_admin = VerifyUserAdmin(User, self.site)
self.assertEqual(
verify_user_admin.get_fieldsets(request=None, obj=user),
expected_fieldsets,
)
|
|
5d394aa1f2df7a36a90b3dee2436ae3b5742d60e
|
vumi/transports/httprpc/tests/test_auth.py
|
vumi/transports/httprpc/tests/test_auth.py
|
# -*- coding: utf-8 -*-
"""Tests for vumi.transports.httprpc.auth."""
from twisted.web.resource import IResource
from twisted.cred.credentials import UsernamePassword
from twisted.cred.error import UnauthorizedLogin
from vumi.tests.helpers import VumiTestCase
from vumi.transports.httprpc.auth import HttpRpcRealm, StaticAuthChecker
class TestHttpRpcRealm(VumiTestCase):
def mk_realm(self):
resource = object()
return resource, HttpRpcRealm(resource)
def test_resource_interface(self):
user, mind = object(), object()
expected_resource, realm = self.mk_realm()
interface, resource, cleanup = realm.requestAvatar(
user, mind, IResource)
self.assertEqual(interface, IResource)
self.assertEqual(resource, expected_resource)
self.assertEqual(cleanup(), None)
def test_unknown_interface(self):
user, mind = object(), object()
expected_resource, realm = self.mk_realm()
self.assertRaises(NotImplementedError,
realm.requestAvatar, user, mind, *[])
class TestStaticAuthChecker(VumiTestCase):
def test_valid_credentials(self):
checker = StaticAuthChecker("user", "pass")
creds = UsernamePassword("user", "pass")
self.assertEqual(checker.requestAvatarId(creds),
"user")
def test_invalid_credentials(self):
checker = StaticAuthChecker("user", "pass")
creds = UsernamePassword("user", "bad-pass")
self.assertRaises(UnauthorizedLogin, checker.requestAvatarId, creds)
|
Add tests for authentication helpers.
|
Add tests for authentication helpers.
|
Python
|
bsd-3-clause
|
harrissoerja/vumi,vishwaprakashmishra/xmatrix,harrissoerja/vumi,harrissoerja/vumi,vishwaprakashmishra/xmatrix,vishwaprakashmishra/xmatrix,TouK/vumi,TouK/vumi,TouK/vumi
|
Add tests for authentication helpers.
|
# -*- coding: utf-8 -*-
"""Tests for vumi.transports.httprpc.auth."""
from twisted.web.resource import IResource
from twisted.cred.credentials import UsernamePassword
from twisted.cred.error import UnauthorizedLogin
from vumi.tests.helpers import VumiTestCase
from vumi.transports.httprpc.auth import HttpRpcRealm, StaticAuthChecker
class TestHttpRpcRealm(VumiTestCase):
def mk_realm(self):
resource = object()
return resource, HttpRpcRealm(resource)
def test_resource_interface(self):
user, mind = object(), object()
expected_resource, realm = self.mk_realm()
interface, resource, cleanup = realm.requestAvatar(
user, mind, IResource)
self.assertEqual(interface, IResource)
self.assertEqual(resource, expected_resource)
self.assertEqual(cleanup(), None)
def test_unknown_interface(self):
user, mind = object(), object()
expected_resource, realm = self.mk_realm()
self.assertRaises(NotImplementedError,
realm.requestAvatar, user, mind, *[])
class TestStaticAuthChecker(VumiTestCase):
def test_valid_credentials(self):
checker = StaticAuthChecker("user", "pass")
creds = UsernamePassword("user", "pass")
self.assertEqual(checker.requestAvatarId(creds),
"user")
def test_invalid_credentials(self):
checker = StaticAuthChecker("user", "pass")
creds = UsernamePassword("user", "bad-pass")
self.assertRaises(UnauthorizedLogin, checker.requestAvatarId, creds)
|
<commit_before><commit_msg>Add tests for authentication helpers.<commit_after>
|
# -*- coding: utf-8 -*-
"""Tests for vumi.transports.httprpc.auth."""
from twisted.web.resource import IResource
from twisted.cred.credentials import UsernamePassword
from twisted.cred.error import UnauthorizedLogin
from vumi.tests.helpers import VumiTestCase
from vumi.transports.httprpc.auth import HttpRpcRealm, StaticAuthChecker
class TestHttpRpcRealm(VumiTestCase):
def mk_realm(self):
resource = object()
return resource, HttpRpcRealm(resource)
def test_resource_interface(self):
user, mind = object(), object()
expected_resource, realm = self.mk_realm()
interface, resource, cleanup = realm.requestAvatar(
user, mind, IResource)
self.assertEqual(interface, IResource)
self.assertEqual(resource, expected_resource)
self.assertEqual(cleanup(), None)
def test_unknown_interface(self):
user, mind = object(), object()
expected_resource, realm = self.mk_realm()
self.assertRaises(NotImplementedError,
realm.requestAvatar, user, mind, *[])
class TestStaticAuthChecker(VumiTestCase):
def test_valid_credentials(self):
checker = StaticAuthChecker("user", "pass")
creds = UsernamePassword("user", "pass")
self.assertEqual(checker.requestAvatarId(creds),
"user")
def test_invalid_credentials(self):
checker = StaticAuthChecker("user", "pass")
creds = UsernamePassword("user", "bad-pass")
self.assertRaises(UnauthorizedLogin, checker.requestAvatarId, creds)
|
Add tests for authentication helpers.# -*- coding: utf-8 -*-
"""Tests for vumi.transports.httprpc.auth."""
from twisted.web.resource import IResource
from twisted.cred.credentials import UsernamePassword
from twisted.cred.error import UnauthorizedLogin
from vumi.tests.helpers import VumiTestCase
from vumi.transports.httprpc.auth import HttpRpcRealm, StaticAuthChecker
class TestHttpRpcRealm(VumiTestCase):
def mk_realm(self):
resource = object()
return resource, HttpRpcRealm(resource)
def test_resource_interface(self):
user, mind = object(), object()
expected_resource, realm = self.mk_realm()
interface, resource, cleanup = realm.requestAvatar(
user, mind, IResource)
self.assertEqual(interface, IResource)
self.assertEqual(resource, expected_resource)
self.assertEqual(cleanup(), None)
def test_unknown_interface(self):
user, mind = object(), object()
expected_resource, realm = self.mk_realm()
self.assertRaises(NotImplementedError,
realm.requestAvatar, user, mind, *[])
class TestStaticAuthChecker(VumiTestCase):
def test_valid_credentials(self):
checker = StaticAuthChecker("user", "pass")
creds = UsernamePassword("user", "pass")
self.assertEqual(checker.requestAvatarId(creds),
"user")
def test_invalid_credentials(self):
checker = StaticAuthChecker("user", "pass")
creds = UsernamePassword("user", "bad-pass")
self.assertRaises(UnauthorizedLogin, checker.requestAvatarId, creds)
|
<commit_before><commit_msg>Add tests for authentication helpers.<commit_after># -*- coding: utf-8 -*-
"""Tests for vumi.transports.httprpc.auth."""
from twisted.web.resource import IResource
from twisted.cred.credentials import UsernamePassword
from twisted.cred.error import UnauthorizedLogin
from vumi.tests.helpers import VumiTestCase
from vumi.transports.httprpc.auth import HttpRpcRealm, StaticAuthChecker
class TestHttpRpcRealm(VumiTestCase):
def mk_realm(self):
resource = object()
return resource, HttpRpcRealm(resource)
def test_resource_interface(self):
user, mind = object(), object()
expected_resource, realm = self.mk_realm()
interface, resource, cleanup = realm.requestAvatar(
user, mind, IResource)
self.assertEqual(interface, IResource)
self.assertEqual(resource, expected_resource)
self.assertEqual(cleanup(), None)
def test_unknown_interface(self):
user, mind = object(), object()
expected_resource, realm = self.mk_realm()
self.assertRaises(NotImplementedError,
realm.requestAvatar, user, mind, *[])
class TestStaticAuthChecker(VumiTestCase):
def test_valid_credentials(self):
checker = StaticAuthChecker("user", "pass")
creds = UsernamePassword("user", "pass")
self.assertEqual(checker.requestAvatarId(creds),
"user")
def test_invalid_credentials(self):
checker = StaticAuthChecker("user", "pass")
creds = UsernamePassword("user", "bad-pass")
self.assertRaises(UnauthorizedLogin, checker.requestAvatarId, creds)
|
|
92bbe67fe2e5528e8d87c3f9897b8791f022f6a5
|
tests/test_fileparse_api.py
|
tests/test_fileparse_api.py
|
#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvnamer
#repository:http://github.com/dbr/tvnamer
#license:Creative Commons GNU GPL v2
# http://creativecommons.org/licenses/GPL/2.0/
"""Tests the FileParser API
"""
from tvnamer.utils import FileParser, EpisodeInfo, DatedEpisodeInfo, NoSeasonEpisodeInfo
def test_episodeinfo():
"""Parsing a s01e01 episode should return EpisodeInfo class
"""
p = FileParser("scrubs.s01e01.avi").parse()
assert isinstance(p, EpisodeInfo)
def test_datedepisodeinfo():
"""Parsing a 2009.06.05 episode should return DatedEpisodeInfo class
"""
p = FileParser("scrubs.2009.06.05.avi").parse()
assert isinstance(p, DatedEpisodeInfo)
def test_noseasonepisodeinfo():
"""Parsing a e23 episode should return NoSeasonEpisodeInfo class
"""
p = FileParser("scrubs - e23.avi").parse()
assert isinstance(p, NoSeasonEpisodeInfo)
|
Test FileParser returns correct object type
|
Test FileParser returns correct object type
|
Python
|
unlicense
|
m42e/tvnamer,dbr/tvnamer,lahwaacz/tvnamer
|
Test FileParser returns correct object type
|
#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvnamer
#repository:http://github.com/dbr/tvnamer
#license:Creative Commons GNU GPL v2
# http://creativecommons.org/licenses/GPL/2.0/
"""Tests the FileParser API
"""
from tvnamer.utils import FileParser, EpisodeInfo, DatedEpisodeInfo, NoSeasonEpisodeInfo
def test_episodeinfo():
"""Parsing a s01e01 episode should return EpisodeInfo class
"""
p = FileParser("scrubs.s01e01.avi").parse()
assert isinstance(p, EpisodeInfo)
def test_datedepisodeinfo():
"""Parsing a 2009.06.05 episode should return DatedEpisodeInfo class
"""
p = FileParser("scrubs.2009.06.05.avi").parse()
assert isinstance(p, DatedEpisodeInfo)
def test_noseasonepisodeinfo():
"""Parsing a e23 episode should return NoSeasonEpisodeInfo class
"""
p = FileParser("scrubs - e23.avi").parse()
assert isinstance(p, NoSeasonEpisodeInfo)
|
<commit_before><commit_msg>Test FileParser returns correct object type<commit_after>
|
#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvnamer
#repository:http://github.com/dbr/tvnamer
#license:Creative Commons GNU GPL v2
# http://creativecommons.org/licenses/GPL/2.0/
"""Tests the FileParser API
"""
from tvnamer.utils import FileParser, EpisodeInfo, DatedEpisodeInfo, NoSeasonEpisodeInfo
def test_episodeinfo():
"""Parsing a s01e01 episode should return EpisodeInfo class
"""
p = FileParser("scrubs.s01e01.avi").parse()
assert isinstance(p, EpisodeInfo)
def test_datedepisodeinfo():
"""Parsing a 2009.06.05 episode should return DatedEpisodeInfo class
"""
p = FileParser("scrubs.2009.06.05.avi").parse()
assert isinstance(p, DatedEpisodeInfo)
def test_noseasonepisodeinfo():
"""Parsing a e23 episode should return NoSeasonEpisodeInfo class
"""
p = FileParser("scrubs - e23.avi").parse()
assert isinstance(p, NoSeasonEpisodeInfo)
|
Test FileParser returns correct object type#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvnamer
#repository:http://github.com/dbr/tvnamer
#license:Creative Commons GNU GPL v2
# http://creativecommons.org/licenses/GPL/2.0/
"""Tests the FileParser API
"""
from tvnamer.utils import FileParser, EpisodeInfo, DatedEpisodeInfo, NoSeasonEpisodeInfo
def test_episodeinfo():
"""Parsing a s01e01 episode should return EpisodeInfo class
"""
p = FileParser("scrubs.s01e01.avi").parse()
assert isinstance(p, EpisodeInfo)
def test_datedepisodeinfo():
"""Parsing a 2009.06.05 episode should return DatedEpisodeInfo class
"""
p = FileParser("scrubs.2009.06.05.avi").parse()
assert isinstance(p, DatedEpisodeInfo)
def test_noseasonepisodeinfo():
"""Parsing a e23 episode should return NoSeasonEpisodeInfo class
"""
p = FileParser("scrubs - e23.avi").parse()
assert isinstance(p, NoSeasonEpisodeInfo)
|
<commit_before><commit_msg>Test FileParser returns correct object type<commit_after>#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvnamer
#repository:http://github.com/dbr/tvnamer
#license:Creative Commons GNU GPL v2
# http://creativecommons.org/licenses/GPL/2.0/
"""Tests the FileParser API
"""
from tvnamer.utils import FileParser, EpisodeInfo, DatedEpisodeInfo, NoSeasonEpisodeInfo
def test_episodeinfo():
"""Parsing a s01e01 episode should return EpisodeInfo class
"""
p = FileParser("scrubs.s01e01.avi").parse()
assert isinstance(p, EpisodeInfo)
def test_datedepisodeinfo():
"""Parsing a 2009.06.05 episode should return DatedEpisodeInfo class
"""
p = FileParser("scrubs.2009.06.05.avi").parse()
assert isinstance(p, DatedEpisodeInfo)
def test_noseasonepisodeinfo():
"""Parsing a e23 episode should return NoSeasonEpisodeInfo class
"""
p = FileParser("scrubs - e23.avi").parse()
assert isinstance(p, NoSeasonEpisodeInfo)
|
|
ab88061c78cd17913faf6249f4d70a48779b4e56
|
tests/unit_test_xmile2py.py
|
tests/unit_test_xmile2py.py
|
import os
import unittest
import tempfile
from io import StringIO
from pysd.py_backend.xmile.xmile2py import translate_xmile
class TestEquationStringParsing(unittest.TestCase):
def test_multiline_equation(self):
with open('tests/test-models/tests/game/test_game.stmx', 'r') as stmx:
contents = stmx.read()
# Insert line break in equation definition
contents = contents.replace('<eqn>(Stock+Constant)</eqn>', '<eqn>(Stock+\nConstant)</eqn>')
# Write out contents to temporary file
with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp_file:
temp_file.write(contents)
# Convert file (should not raise error)
generated_file = translate_xmile(temp_file.name)
with open(generated_file, 'r') as fp:
contents = fp.read()
idx = contents.find('stock() + constant()')
assert idx > 0, 'Correct, generated, equation not found'
os.remove(temp_file.name)
os.remove(generated_file+'.py')
|
Test to ensure equations with line breaks are parsed correctly
|
Test to ensure equations with line breaks are parsed correctly
|
Python
|
mit
|
JamesPHoughton/pysd
|
Test to ensure equations with line breaks are parsed correctly
|
import os
import unittest
import tempfile
from io import StringIO
from pysd.py_backend.xmile.xmile2py import translate_xmile
class TestEquationStringParsing(unittest.TestCase):
def test_multiline_equation(self):
with open('tests/test-models/tests/game/test_game.stmx', 'r') as stmx:
contents = stmx.read()
# Insert line break in equation definition
contents = contents.replace('<eqn>(Stock+Constant)</eqn>', '<eqn>(Stock+\nConstant)</eqn>')
# Write out contents to temporary file
with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp_file:
temp_file.write(contents)
# Convert file (should not raise error)
generated_file = translate_xmile(temp_file.name)
with open(generated_file, 'r') as fp:
contents = fp.read()
idx = contents.find('stock() + constant()')
assert idx > 0, 'Correct, generated, equation not found'
os.remove(temp_file.name)
os.remove(generated_file+'.py')
|
<commit_before><commit_msg>Test to ensure equations with line breaks are parsed correctly<commit_after>
|
import os
import unittest
import tempfile
from io import StringIO
from pysd.py_backend.xmile.xmile2py import translate_xmile
class TestEquationStringParsing(unittest.TestCase):
def test_multiline_equation(self):
with open('tests/test-models/tests/game/test_game.stmx', 'r') as stmx:
contents = stmx.read()
# Insert line break in equation definition
contents = contents.replace('<eqn>(Stock+Constant)</eqn>', '<eqn>(Stock+\nConstant)</eqn>')
# Write out contents to temporary file
with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp_file:
temp_file.write(contents)
# Convert file (should not raise error)
generated_file = translate_xmile(temp_file.name)
with open(generated_file, 'r') as fp:
contents = fp.read()
idx = contents.find('stock() + constant()')
assert idx > 0, 'Correct, generated, equation not found'
os.remove(temp_file.name)
os.remove(generated_file+'.py')
|
Test to ensure equations with line breaks are parsed correctlyimport os
import unittest
import tempfile
from io import StringIO
from pysd.py_backend.xmile.xmile2py import translate_xmile
class TestEquationStringParsing(unittest.TestCase):
def test_multiline_equation(self):
with open('tests/test-models/tests/game/test_game.stmx', 'r') as stmx:
contents = stmx.read()
# Insert line break in equation definition
contents = contents.replace('<eqn>(Stock+Constant)</eqn>', '<eqn>(Stock+\nConstant)</eqn>')
# Write out contents to temporary file
with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp_file:
temp_file.write(contents)
# Convert file (should not raise error)
generated_file = translate_xmile(temp_file.name)
with open(generated_file, 'r') as fp:
contents = fp.read()
idx = contents.find('stock() + constant()')
assert idx > 0, 'Correct, generated, equation not found'
os.remove(temp_file.name)
os.remove(generated_file+'.py')
|
<commit_before><commit_msg>Test to ensure equations with line breaks are parsed correctly<commit_after>import os
import unittest
import tempfile
from io import StringIO
from pysd.py_backend.xmile.xmile2py import translate_xmile
class TestEquationStringParsing(unittest.TestCase):
def test_multiline_equation(self):
with open('tests/test-models/tests/game/test_game.stmx', 'r') as stmx:
contents = stmx.read()
# Insert line break in equation definition
contents = contents.replace('<eqn>(Stock+Constant)</eqn>', '<eqn>(Stock+\nConstant)</eqn>')
# Write out contents to temporary file
with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp_file:
temp_file.write(contents)
# Convert file (should not raise error)
generated_file = translate_xmile(temp_file.name)
with open(generated_file, 'r') as fp:
contents = fp.read()
idx = contents.find('stock() + constant()')
assert idx > 0, 'Correct, generated, equation not found'
os.remove(temp_file.name)
os.remove(generated_file+'.py')
|
|
debcc4d639945e676fb4579f71bfa711e29e343f
|
src/project/word_cloud.py
|
src/project/word_cloud.py
|
import sys
from os.path import isdir, isfile
from corpus import Corpus
from lda_corpus import LDACorpus
class WordCloud(object):
def __init__(self, lda_corpus):
self.corpus = lda_corpus
def draw_topics(self):
print self.corpus
topics = self.corpus.print_topics()
print topics
def main():
if len(sys.argv) > 2 and isdir(sys.argv[1]):
if not isfile(sys.argv[2]) and not isfile(sys.argv[3]):
corpus = Corpus(sys.argv[1])
corpus.save(sys.argv[2], sys.argv[3])
corpus = LDACorpus(sys.argv[2], sys.argv[3], no_topics=25)
wc = WordCloud(corpus)
print wc.draw_topics()
else:
print "Corpus requires directory as an argument."
if __name__ == "__main__": main()
|
Add framework for wordcloud visualisation
|
Add framework for wordcloud visualisation
|
Python
|
mit
|
PinPinIre/Final-Year-Project,PinPinIre/Final-Year-Project,PinPinIre/Final-Year-Project
|
Add framework for wordcloud visualisation
|
import sys
from os.path import isdir, isfile
from corpus import Corpus
from lda_corpus import LDACorpus
class WordCloud(object):
def __init__(self, lda_corpus):
self.corpus = lda_corpus
def draw_topics(self):
print self.corpus
topics = self.corpus.print_topics()
print topics
def main():
if len(sys.argv) > 2 and isdir(sys.argv[1]):
if not isfile(sys.argv[2]) and not isfile(sys.argv[3]):
corpus = Corpus(sys.argv[1])
corpus.save(sys.argv[2], sys.argv[3])
corpus = LDACorpus(sys.argv[2], sys.argv[3], no_topics=25)
wc = WordCloud(corpus)
print wc.draw_topics()
else:
print "Corpus requires directory as an argument."
if __name__ == "__main__": main()
|
<commit_before><commit_msg>Add framework for wordcloud visualisation<commit_after>
|
import sys
from os.path import isdir, isfile
from corpus import Corpus
from lda_corpus import LDACorpus
class WordCloud(object):
def __init__(self, lda_corpus):
self.corpus = lda_corpus
def draw_topics(self):
print self.corpus
topics = self.corpus.print_topics()
print topics
def main():
if len(sys.argv) > 2 and isdir(sys.argv[1]):
if not isfile(sys.argv[2]) and not isfile(sys.argv[3]):
corpus = Corpus(sys.argv[1])
corpus.save(sys.argv[2], sys.argv[3])
corpus = LDACorpus(sys.argv[2], sys.argv[3], no_topics=25)
wc = WordCloud(corpus)
print wc.draw_topics()
else:
print "Corpus requires directory as an argument."
if __name__ == "__main__": main()
|
Add framework for wordcloud visualisationimport sys
from os.path import isdir, isfile
from corpus import Corpus
from lda_corpus import LDACorpus
class WordCloud(object):
def __init__(self, lda_corpus):
self.corpus = lda_corpus
def draw_topics(self):
print self.corpus
topics = self.corpus.print_topics()
print topics
def main():
if len(sys.argv) > 2 and isdir(sys.argv[1]):
if not isfile(sys.argv[2]) and not isfile(sys.argv[3]):
corpus = Corpus(sys.argv[1])
corpus.save(sys.argv[2], sys.argv[3])
corpus = LDACorpus(sys.argv[2], sys.argv[3], no_topics=25)
wc = WordCloud(corpus)
print wc.draw_topics()
else:
print "Corpus requires directory as an argument."
if __name__ == "__main__": main()
|
<commit_before><commit_msg>Add framework for wordcloud visualisation<commit_after>import sys
from os.path import isdir, isfile
from corpus import Corpus
from lda_corpus import LDACorpus
class WordCloud(object):
def __init__(self, lda_corpus):
self.corpus = lda_corpus
def draw_topics(self):
print self.corpus
topics = self.corpus.print_topics()
print topics
def main():
if len(sys.argv) > 2 and isdir(sys.argv[1]):
if not isfile(sys.argv[2]) and not isfile(sys.argv[3]):
corpus = Corpus(sys.argv[1])
corpus.save(sys.argv[2], sys.argv[3])
corpus = LDACorpus(sys.argv[2], sys.argv[3], no_topics=25)
wc = WordCloud(corpus)
print wc.draw_topics()
else:
print "Corpus requires directory as an argument."
if __name__ == "__main__": main()
|
|
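A small aside on the WordCloud snippet above: it is Python 2 code (print statements), and draw_topics prints its results but returns None, so the final "print wc.draw_topics()" ends by printing None. A Python 3 sketch of the same class (my illustration; LDACorpus.print_topics is assumed to behave as in the snippet) might read:

class WordCloud(object):
    def __init__(self, lda_corpus):
        self.corpus = lda_corpus

    def draw_topics(self):
        # Print the corpus summary and topic descriptions, then return the
        # topics so the caller's print shows something other than None.
        print(self.corpus)
        topics = self.corpus.print_topics()
        print(topics)
        return topics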
8a4f814630ffd106f9711a2fb339c12c4df3efb0
|
328-Odd_Even_Linked_List.py
|
328-Odd_Even_Linked_List.py
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def oddEvenList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head is None:
return head
odd_tail = even_tail = None
even_head = None
cur = head
counter = 1
while cur is not None:
next = cur.next
if counter % 2 == 1:
if odd_tail is None:
odd_tail = cur
else:
odd_tail.next = cur
odd_tail = cur
else:
if even_tail is None:
even_tail = cur
even_head = cur
else:
even_tail.next = cur
even_tail = cur
cur.next = None
counter += 1
cur = next
odd_tail.next = even_head
return head
|
Solve 328 wtf so easy
|
Solve 328 wtf so easy
|
Python
|
apache-2.0
|
HappyCompanions/LeetCode,HappyCompanions/LeetCode,HappyCompanions/LeetCode,HappyCompanions/LeetCode,HappyCompanions/LeetCode,HappyCompanions/LeetCode,HappyCompanions/LeetCode
|
Solve 328 wtf so easy
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def oddEvenList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head is None:
return head
odd_tail = even_tail = None
even_head = None
cur = head
counter = 1
while cur is not None:
next = cur.next
if counter % 2 == 1:
if odd_tail is None:
odd_tail = cur
else:
odd_tail.next = cur
odd_tail = cur
else:
if even_tail is None:
even_tail = cur
even_head = cur
else:
even_tail.next = cur
even_tail = cur
cur.next = None
counter += 1
cur = next
odd_tail.next = even_head
return head
|
<commit_before><commit_msg>Solve 328 wtf so easy<commit_after>
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def oddEvenList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head is None:
return head
odd_tail = even_tail = None
even_head = None
cur = head
counter = 1
while cur is not None:
next = cur.next
if counter % 2 == 1:
if odd_tail is None:
odd_tail = cur
else:
odd_tail.next = cur
odd_tail = cur
else:
if even_tail is None:
even_tail = cur
even_head = cur
else:
even_tail.next = cur
even_tail = cur
cur.next = None
counter += 1
cur = next
odd_tail.next = even_head
return head
|
Solve 328 wtf so easy# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def oddEvenList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head is None:
return head
odd_tail = even_tail = None
even_head = None
cur = head
counter = 1
while cur is not None:
next = cur.next
if counter % 2 == 1:
if odd_tail is None:
odd_tail = cur
else:
odd_tail.next = cur
odd_tail = cur
else:
if even_tail is None:
even_tail = cur
even_head = cur
else:
even_tail.next = cur
even_tail = cur
cur.next = None
counter += 1
cur = next
odd_tail.next = even_head
return head
|
<commit_before><commit_msg>Solve 328 wtf so easy<commit_after># Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def oddEvenList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head is None:
return head
odd_tail = even_tail = None
even_head = None
cur = head
counter = 1
while cur is not None:
next = cur.next
if counter % 2 == 1:
if odd_tail is None:
odd_tail = cur
else:
odd_tail.next = cur
odd_tail = cur
else:
if even_tail is None:
even_tail = cur
even_head = cur
else:
even_tail.next = cur
even_tail = cur
cur.next = None
counter += 1
cur = next
odd_tail.next = even_head
return head
|
|
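A list-level analogue of the reordering above, useful as an expected-output oracle when testing the pointer-based solution; slicing with step 2 mirrors the 1-based odd/even positions the linked-list code preserves:
def odd_even_order(values):
    # Odd 1-based positions first, then even positions, relative order kept.
    return values[0::2] + values[1::2]
assert odd_even_order([1, 2, 3, 4, 5]) == [1, 3, 5, 2, 4]
assert odd_even_order([]) == []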
1b57a7263b4a3a5f0f124cac27d9b1324ccfe4a6
|
tunacell/tests/test_simu.py
|
tunacell/tests/test_simu.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import print_function
import pytest
import numpy as np
import itertools
from tunacell.simu.main import SimuParams, DivisionParams, SampleInitialSize
#from tunacell.simu.ou import OUSimulation, run_ou_simulation
@pytest.fixture
def simu_params():
sp = SimuParams(nbr_container=5, nbr_colony_per_container=1,
start=0., stop=400., period=5., seed=42)
return sp
def test_division_params():
"""Test DivisionParams"""
lambdas = np.linspace(0, 1, 5) # 5 values
sizes = np.linspace(1, 5, 5)
modes = ['none', 'gamma']
flucts = np.linspace(0.1, 1, 5)
growth_rates = np.log(2.)/ (60. * np.array([20., 33.3, 47.7, 60., 1001.]))
for item in itertools.product(lambdas, sizes, modes, flucts):
div_lambda, div_size, div_mode, div_sd_to_mean = item
dp = DivisionParams(div_lambda, div_size, div_mode, div_sd_to_mean)
# avoid problem by reaching division size cutoff
birth_sizes = np.linspace(div_size/10., div_size, 5, endpoint=False)
for bs, mu in itertools.product(birth_sizes, growth_rates):
assert dp.rv(birth_size=bs, growth_rate=mu) > 0.
def test_sample_initial_size():
"""Test SampleInitialSize"""
sizes = np.linspace(1, 10, 7)
# mode: fixed
for size in sizes:
bs = SampleInitialSize(birth_size_mean=size)
assert bs.rv() == size
# mode: lognormal
flucts = np.linspace(0.1, 3, 7)
for size, fluct in itertools.product(sizes, flucts):
bs = SampleInitialSize(birth_size_mean=size,
birth_size_mode='lognormal',
birth_size_sd_to_mean=fluct)
assert bs.rv() > 0.
|
Add test (work in progress)
|
Add test (work in progress)
|
Python
|
mit
|
LeBarbouze/tunacell
|
Add test (work in progress)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import print_function
import pytest
import numpy as np
import itertools
from tunacell.simu.main import SimuParams, DivisionParams, SampleInitialSize
#from tunacell.simu.ou import OUSimulation, run_ou_simulation
@pytest.fixture
def simu_params():
sp = SimuParams(nbr_container=5, nbr_colony_per_container=1,
start=0., stop=400., period=5., seed=42)
return sp
def test_division_params():
"""Test DivisionParams"""
lambdas = np.linspace(0, 1, 5) # 5 values
sizes = np.linspace(1, 5, 5)
modes = ['none', 'gamma']
flucts = np.linspace(0.1, 1, 5)
growth_rates = np.log(2.)/ (60. * np.array([20., 33.3, 47.7, 60., 1001.]))
for item in itertools.product(lambdas, sizes, modes, flucts):
div_lambda, div_size, div_mode, div_sd_to_mean = item
dp = DivisionParams(div_lambda, div_size, div_mode, div_sd_to_mean)
# avoid problem by reaching division size cutoff
birth_sizes = np.linspace(div_size/10., div_size, 5, endpoint=False)
for bs, mu in itertools.product(birth_sizes, growth_rates):
assert dp.rv(birth_size=bs, growth_rate=mu) > 0.
def test_sample_initial_size():
"""Test SampleInitialSize"""
sizes = np.linspace(1, 10, 7)
# mode: fixed
for size in sizes:
bs = SampleInitialSize(birth_size_mean=size)
assert bs.rv() == size
# mode: lognormal
flucts = np.linspace(0.1, 3, 7)
for size, fluct in itertools.product(sizes, flucts):
bs = SampleInitialSize(birth_size_mean=size,
birth_size_mode='lognormal',
birth_size_sd_to_mean=fluct)
assert bs.rv() > 0.
|
<commit_before><commit_msg>Add test (work in progress)<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import print_function
import pytest
import numpy as np
import itertools
from tunacell.simu.main import SimuParams, DivisionParams, SampleInitialSize
#from tunacell.simu.ou import OUSimulation, run_ou_simulation
@pytest.fixture
def simu_params():
sp = SimuParams(nbr_container=5, nbr_colony_per_container=1,
start=0., stop=400., period=5., seed=42)
return sp
def test_division_params():
"""Test DivisionParams"""
lambdas = np.linspace(0, 1, 5) # 5 values
sizes = np.linspace(1, 5, 5)
modes = ['none', 'gamma']
flucts = np.linspace(0.1, 1, 5)
growth_rates = np.log(2.)/ (60. * np.array([20., 33.3, 47.7, 60., 1001.]))
for item in itertools.product(lambdas, sizes, modes, flucts):
div_lambda, div_size, div_mode, div_sd_to_mean = item
dp = DivisionParams(div_lambda, div_size, div_mode, div_sd_to_mean)
# avoid problem by reaching division size cutoff
birth_sizes = np.linspace(div_size/10., div_size, 5, endpoint=False)
for bs, mu in itertools.product(birth_sizes, growth_rates):
assert dp.rv(birth_size=bs, growth_rate=mu) > 0.
def test_sample_initial_size():
"""Test SampleInitialSize"""
sizes = np.linspace(1, 10, 7)
# mode: fixed
for size in sizes:
bs = SampleInitialSize(birth_size_mean=size)
assert bs.rv() == size
# mode: lognormal
flucts = np.linspace(0.1, 3, 7)
for size, fluct in itertools.product(sizes, flucts):
bs = SampleInitialSize(birth_size_mean=size,
birth_size_mode='lognormal',
birth_size_sd_to_mean=fluct)
assert bs.rv() > 0.
|
Add test (work in progress)#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import print_function
import pytest
import numpy as np
import itertools
from tunacell.simu.main import SimuParams, DivisionParams, SampleInitialSize
#from tunacell.simu.ou import OUSimulation, run_ou_simulation
@pytest.fixture
def simu_params():
sp = SimuParams(nbr_container=5, nbr_colony_per_container=1,
start=0., stop=400., period=5., seed=42)
return sp
def test_division_params():
"""Test DivisionParams"""
lambdas = np.linspace(0, 1, 5) # 5 values
sizes = np.linspace(1, 5, 5)
modes = ['none', 'gamma']
flucts = np.linspace(0.1, 1, 5)
growth_rates = np.log(2.)/ (60. * np.array([20., 33.3, 47.7, 60., 1001.]))
for item in itertools.product(lambdas, sizes, modes, flucts):
div_lambda, div_size, div_mode, div_sd_to_mean = item
dp = DivisionParams(div_lambda, div_size, div_mode, div_sd_to_mean)
# avoid problem by reaching division size cutoff
birth_sizes = np.linspace(div_size/10., div_size, 5, endpoint=False)
for bs, mu in itertools.product(birth_sizes, growth_rates):
assert dp.rv(birth_size=bs, growth_rate=mu) > 0.
def test_sample_initial_size():
"""Test SampleInitialSize"""
sizes = np.linspace(1, 10, 7)
# mode: fixed
for size in sizes:
bs = SampleInitialSize(birth_size_mean=size)
assert bs.rv() == size
# mode: lognormal
flucts = np.linspace(0.1, 3, 7)
for size, fluct in itertools.product(sizes, flucts):
bs = SampleInitialSize(birth_size_mean=size,
birth_size_mode='lognormal',
birth_size_sd_to_mean=fluct)
assert bs.rv() > 0.
|
<commit_before><commit_msg>Add test (work in progress)<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import print_function
import pytest
import numpy as np
import itertools
from tunacell.simu.main import SimuParams, DivisionParams, SampleInitialSize
#from tunacell.simu.ou import OUSimulation, run_ou_simulation
@pytest.fixture
def simu_params():
sp = SimuParams(nbr_container=5, nbr_colony_per_container=1,
start=0., stop=400., period=5., seed=42)
return sp
def test_division_params():
"""Test DivisionParams"""
lambdas = np.linspace(0, 1, 5) # 5 values
sizes = np.linspace(1, 5, 5)
modes = ['none', 'gamma']
flucts = np.linspace(0.1, 1, 5)
growth_rates = np.log(2.)/ (60. * np.array([20., 33.3, 47.7, 60., 1001.]))
for item in itertools.product(lambdas, sizes, modes, flucts):
div_lambda, div_size, div_mode, div_sd_to_mean = item
dp = DivisionParams(div_lambda, div_size, div_mode, div_sd_to_mean)
# avoid problem by reaching division size cutoff
birth_sizes = np.linspace(div_size/10., div_size, 5, endpoint=False)
for bs, mu in itertools.product(birth_sizes, growth_rates):
assert dp.rv(birth_size=bs, growth_rate=mu) > 0.
def test_sample_initial_size():
"""Test SampleInitialSize"""
sizes = np.linspace(1, 10, 7)
# mode: fixed
for size in sizes:
bs = SampleInitialSize(birth_size_mean=size)
assert bs.rv() == size
# mode: lognormal
flucts = np.linspace(0.1, 3, 7)
for size, fluct in itertools.product(sizes, flucts):
bs = SampleInitialSize(birth_size_mean=size,
birth_size_mode='lognormal',
birth_size_sd_to_mean=fluct)
assert bs.rv() > 0.
|
|
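A self-contained sketch of what the lognormal branch of SampleInitialSize is expected to guarantee in the tests above (positive draws with a chosen mean and sd-to-mean ratio); the parameterisation is an assumption for illustration, not tunacell's actual implementation:
import numpy as np
def sample_birth_size(mean, sd_to_mean, size=1, rng=None):
    # Lognormal draws with the requested mean and coefficient of variation.
    rng = np.random.default_rng() if rng is None else rng
    sigma2 = np.log(1.0 + sd_to_mean ** 2)
    mu = np.log(mean) - 0.5 * sigma2
    return rng.lognormal(mean=mu, sigma=np.sqrt(sigma2), size=size)
draws = sample_birth_size(2.0, 0.3, size=1000, rng=np.random.default_rng(42))
assert (draws > 0).all()
assert abs(draws.mean() - 2.0) < 0.1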
79724ffb33766eb2b71ab6571564cc7dd77c401f
|
test_async.py
|
test_async.py
|
import unittest
import chainer
import chainer.links as L
import numpy as np
import async
class TestAsync(unittest.TestCase):
def setUp(self):
pass
def test_shared_link(self):
"""Check interprocess parameter sharing works if models share links
"""
head = L.Linear(2, 2)
model_a = chainer.ChainList(head.copy(), L.Linear(2, 3))
model_b = chainer.ChainList(head.copy(), L.Linear(2, 4))
a_arrays = async.extract_params_as_shared_arrays(
chainer.ChainList(model_a))
b_arrays = async.extract_params_as_shared_arrays(
chainer.ChainList(model_b))
print 'model_a shared_arrays', a_arrays
print 'model_b shared_arrays', b_arrays
head = L.Linear(2, 2)
model_a = chainer.ChainList(head.copy(), L.Linear(2, 3))
model_b = chainer.ChainList(head.copy(), L.Linear(2, 4))
async.set_shared_params(model_a, a_arrays)
async.set_shared_params(model_b, b_arrays)
print 'model_a replaced'
a_params = dict(model_a.namedparams())
for param_name, param in a_params.iteritems():
print param_name, param.data.ctypes.data
print 'model_b replaced'
b_params = dict(model_b.namedparams())
for param_name, param in b_params.iteritems():
print param_name, param.data.ctypes.data
# Pointers to head parameters must be the same
self.assertEquals(a_params['/0/W'].data.ctypes.data,
b_params['/0/W'].data.ctypes.data)
self.assertEquals(a_params['/0/b'].data.ctypes.data,
b_params['/0/b'].data.ctypes.data)
# Pointers to tail parameters must be different
self.assertNotEquals(a_params['/1/W'].data.ctypes.data,
b_params['/1/W'].data.ctypes.data)
self.assertNotEquals(a_params['/1/b'].data.ctypes.data,
b_params['/1/b'].data.ctypes.data)
|
Add a test for parameter sharing
|
Add a test for parameter sharing
|
Python
|
mit
|
toslunar/chainerrl,toslunar/chainerrl
|
Add a test for parameter sharing
|
import unittest
import chainer
import chainer.links as L
import numpy as np
import async
class TestAsync(unittest.TestCase):
def setUp(self):
pass
def test_shared_link(self):
"""Check interprocess parameter sharing works if models share links
"""
head = L.Linear(2, 2)
model_a = chainer.ChainList(head.copy(), L.Linear(2, 3))
model_b = chainer.ChainList(head.copy(), L.Linear(2, 4))
a_arrays = async.extract_params_as_shared_arrays(
chainer.ChainList(model_a))
b_arrays = async.extract_params_as_shared_arrays(
chainer.ChainList(model_b))
print 'model_a shared_arrays', a_arrays
print 'model_b shared_arrays', b_arrays
head = L.Linear(2, 2)
model_a = chainer.ChainList(head.copy(), L.Linear(2, 3))
model_b = chainer.ChainList(head.copy(), L.Linear(2, 4))
async.set_shared_params(model_a, a_arrays)
async.set_shared_params(model_b, b_arrays)
print 'model_a replaced'
a_params = dict(model_a.namedparams())
for param_name, param in a_params.iteritems():
print param_name, param.data.ctypes.data
print 'model_b replaced'
b_params = dict(model_b.namedparams())
for param_name, param in b_params.iteritems():
print param_name, param.data.ctypes.data
# Pointers to head parameters must be the same
self.assertEquals(a_params['/0/W'].data.ctypes.data,
b_params['/0/W'].data.ctypes.data)
self.assertEquals(a_params['/0/b'].data.ctypes.data,
b_params['/0/b'].data.ctypes.data)
# Pointers to tail parameters must be different
self.assertNotEquals(a_params['/1/W'].data.ctypes.data,
b_params['/1/W'].data.ctypes.data)
self.assertNotEquals(a_params['/1/b'].data.ctypes.data,
b_params['/1/b'].data.ctypes.data)
|
<commit_before><commit_msg>Add a test for parameter sharing<commit_after>
|
import unittest
import chainer
import chainer.links as L
import numpy as np
import async
class TestAsync(unittest.TestCase):
def setUp(self):
pass
def test_shared_link(self):
"""Check interprocess parameter sharing works if models share links
"""
head = L.Linear(2, 2)
model_a = chainer.ChainList(head.copy(), L.Linear(2, 3))
model_b = chainer.ChainList(head.copy(), L.Linear(2, 4))
a_arrays = async.extract_params_as_shared_arrays(
chainer.ChainList(model_a))
b_arrays = async.extract_params_as_shared_arrays(
chainer.ChainList(model_b))
print 'model_a shared_arrays', a_arrays
print 'model_b shared_arrays', b_arrays
head = L.Linear(2, 2)
model_a = chainer.ChainList(head.copy(), L.Linear(2, 3))
model_b = chainer.ChainList(head.copy(), L.Linear(2, 4))
async.set_shared_params(model_a, a_arrays)
async.set_shared_params(model_b, b_arrays)
print 'model_a replaced'
a_params = dict(model_a.namedparams())
for param_name, param in a_params.iteritems():
print param_name, param.data.ctypes.data
print 'model_b replaced'
b_params = dict(model_b.namedparams())
for param_name, param in b_params.iteritems():
print param_name, param.data.ctypes.data
# Pointers to head parameters must be the same
self.assertEquals(a_params['/0/W'].data.ctypes.data,
b_params['/0/W'].data.ctypes.data)
self.assertEquals(a_params['/0/b'].data.ctypes.data,
b_params['/0/b'].data.ctypes.data)
# Pointers to tail parameters must be different
self.assertNotEquals(a_params['/1/W'].data.ctypes.data,
b_params['/1/W'].data.ctypes.data)
self.assertNotEquals(a_params['/1/b'].data.ctypes.data,
b_params['/1/b'].data.ctypes.data)
|
Add a test for parameter sharingimport unittest
import chainer
import chainer.links as L
import numpy as np
import async
class TestAsync(unittest.TestCase):
def setUp(self):
pass
def test_shared_link(self):
"""Check interprocess parameter sharing works if models share links
"""
head = L.Linear(2, 2)
model_a = chainer.ChainList(head.copy(), L.Linear(2, 3))
model_b = chainer.ChainList(head.copy(), L.Linear(2, 4))
a_arrays = async.extract_params_as_shared_arrays(
chainer.ChainList(model_a))
b_arrays = async.extract_params_as_shared_arrays(
chainer.ChainList(model_b))
print 'model_a shared_arrays', a_arrays
print 'model_b shared_arrays', b_arrays
head = L.Linear(2, 2)
model_a = chainer.ChainList(head.copy(), L.Linear(2, 3))
model_b = chainer.ChainList(head.copy(), L.Linear(2, 4))
async.set_shared_params(model_a, a_arrays)
async.set_shared_params(model_b, b_arrays)
print 'model_a replaced'
a_params = dict(model_a.namedparams())
for param_name, param in a_params.iteritems():
print param_name, param.data.ctypes.data
print 'model_b replaced'
b_params = dict(model_b.namedparams())
for param_name, param in b_params.iteritems():
print param_name, param.data.ctypes.data
# Pointers to head parameters must be the same
self.assertEquals(a_params['/0/W'].data.ctypes.data,
b_params['/0/W'].data.ctypes.data)
self.assertEquals(a_params['/0/b'].data.ctypes.data,
b_params['/0/b'].data.ctypes.data)
# Pointers to tail parameters must be different
self.assertNotEquals(a_params['/1/W'].data.ctypes.data,
b_params['/1/W'].data.ctypes.data)
self.assertNotEquals(a_params['/1/b'].data.ctypes.data,
b_params['/1/b'].data.ctypes.data)
|
<commit_before><commit_msg>Add a test for parameter sharing<commit_after>import unittest
import chainer
import chainer.links as L
import numpy as np
import async
class TestAsync(unittest.TestCase):
def setUp(self):
pass
def test_shared_link(self):
"""Check interprocess parameter sharing works if models share links
"""
head = L.Linear(2, 2)
model_a = chainer.ChainList(head.copy(), L.Linear(2, 3))
model_b = chainer.ChainList(head.copy(), L.Linear(2, 4))
a_arrays = async.extract_params_as_shared_arrays(
chainer.ChainList(model_a))
b_arrays = async.extract_params_as_shared_arrays(
chainer.ChainList(model_b))
print 'model_a shared_arrays', a_arrays
print 'model_b shared_arrays', b_arrays
head = L.Linear(2, 2)
model_a = chainer.ChainList(head.copy(), L.Linear(2, 3))
model_b = chainer.ChainList(head.copy(), L.Linear(2, 4))
async.set_shared_params(model_a, a_arrays)
async.set_shared_params(model_b, b_arrays)
print 'model_a replaced'
a_params = dict(model_a.namedparams())
for param_name, param in a_params.iteritems():
print param_name, param.data.ctypes.data
print 'model_b replaced'
b_params = dict(model_b.namedparams())
for param_name, param in b_params.iteritems():
print param_name, param.data.ctypes.data
# Pointers to head parameters must be the same
self.assertEquals(a_params['/0/W'].data.ctypes.data,
b_params['/0/W'].data.ctypes.data)
self.assertEquals(a_params['/0/b'].data.ctypes.data,
b_params['/0/b'].data.ctypes.data)
# Pointers to tail parameters must be different
self.assertNotEquals(a_params['/1/W'].data.ctypes.data,
b_params['/1/W'].data.ctypes.data)
self.assertNotEquals(a_params['/1/b'].data.ctypes.data,
b_params['/1/b'].data.ctypes.data)
|
|
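The assertions above boil down to "two links wrap one underlying buffer". A minimal, framework-free sketch of that property using a multiprocessing RawArray and two numpy views; this illustrates the mechanism only and is not chainerrl's async.set_shared_params:
import multiprocessing as mp
import numpy as np
shared = mp.RawArray('f', 4)
view_a = np.frombuffer(shared, dtype=np.float32).reshape(2, 2)
view_b = np.frombuffer(shared, dtype=np.float32).reshape(2, 2)
view_a[0, 0] = 1.5
# Writes through one view are visible through the other...
assert view_b[0, 0] == 1.5
# ...because both wrap the same memory, as the pointer comparison shows.
assert view_a.ctypes.data == view_b.ctypes.data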
8c400038b02bb6d57ef27617962823023710c40a
|
migrations/versions/1e529bad1e38_.py
|
migrations/versions/1e529bad1e38_.py
|
"""empty message
Revision ID: 1e529bad1e38
Revises: 3364d1466fac
Create Date: 2015-05-20 17:42:07.580213
"""
# revision identifiers, used by Alembic.
revision = '1e529bad1e38'
down_revision = '3364d1466fac'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_foreign_key(None, 'distributions', 'channels', ['channel_id'], ['id'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'distributions', type_='foreignkey')
### end Alembic commands ###
|
Fix FK issue in distributions table for alembic migrations
|
Fix FK issue in distributions table for alembic migrations
Squashed commit of the following:
commit b8cf7592d73981236df528f964c20d80f8498cea
Author: Olivier Yiptong <olivier@olivieryiptong.com>
Date: Wed May 20 17:43:05 2015 -0400
fix FK issue in distributions table
Closes #67
|
Python
|
mpl-2.0
|
tkiethanom/splice,mostlygeek/splice,oyiptong/splice,ncloudioj/splice,mostlygeek/splice,rlr/splice,mozilla/splice,oyiptong/splice,mostlygeek/splice,mozilla/splice,tkiethanom/splice,ncloudioj/splice,mozilla/splice,tkiethanom/splice,ncloudioj/splice,tkiethanom/splice,mostlygeek/splice,rlr/splice,rlr/splice,ncloudioj/splice,oyiptong/splice,mozilla/splice,rlr/splice,oyiptong/splice
|
Fix FK issue in distributions table for alembic migrations
Squashed commit of the following:
commit b8cf7592d73981236df528f964c20d80f8498cea
Author: Olivier Yiptong <olivier@olivieryiptong.com>
Date: Wed May 20 17:43:05 2015 -0400
fix FK issue in distributions table
Closes #67
|
"""empty message
Revision ID: 1e529bad1e38
Revises: 3364d1466fac
Create Date: 2015-05-20 17:42:07.580213
"""
# revision identifiers, used by Alembic.
revision = '1e529bad1e38'
down_revision = '3364d1466fac'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_foreign_key(None, 'distributions', 'channels', ['channel_id'], ['id'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'distributions', type_='foreignkey')
### end Alembic commands ###
|
<commit_before><commit_msg>Fix FK issue in distributions table for alembic migrations
Squashed commit of the following:
commit b8cf7592d73981236df528f964c20d80f8498cea
Author: Olivier Yiptong <olivier@olivieryiptong.com>
Date: Wed May 20 17:43:05 2015 -0400
fix FK issue in distributions table
Closes #67<commit_after>
|
"""empty message
Revision ID: 1e529bad1e38
Revises: 3364d1466fac
Create Date: 2015-05-20 17:42:07.580213
"""
# revision identifiers, used by Alembic.
revision = '1e529bad1e38'
down_revision = '3364d1466fac'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_foreign_key(None, 'distributions', 'channels', ['channel_id'], ['id'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'distributions', type_='foreignkey')
### end Alembic commands ###
|
Fix FK issue in distributions table for alembic migrations
Squashed commit of the following:
commit b8cf7592d73981236df528f964c20d80f8498cea
Author: Olivier Yiptong <olivier@olivieryiptong.com>
Date: Wed May 20 17:43:05 2015 -0400
fix FK issue in distributions table
Closes #67"""empty message
Revision ID: 1e529bad1e38
Revises: 3364d1466fac
Create Date: 2015-05-20 17:42:07.580213
"""
# revision identifiers, used by Alembic.
revision = '1e529bad1e38'
down_revision = '3364d1466fac'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_foreign_key(None, 'distributions', 'channels', ['channel_id'], ['id'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'distributions', type_='foreignkey')
### end Alembic commands ###
|
<commit_before><commit_msg>Fix FK issue in distributions table for alembic migrations
Squashed commit of the following:
commit b8cf7592d73981236df528f964c20d80f8498cea
Author: Olivier Yiptong <olivier@olivieryiptong.com>
Date: Wed May 20 17:43:05 2015 -0400
fix FK issue in distributions table
Closes #67<commit_after>"""empty message
Revision ID: 1e529bad1e38
Revises: 3364d1466fac
Create Date: 2015-05-20 17:42:07.580213
"""
# revision identifiers, used by Alembic.
revision = '1e529bad1e38'
down_revision = '3364d1466fac'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_foreign_key(None, 'distributions', 'channels', ['channel_id'], ['id'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'distributions', type_='foreignkey')
### end Alembic commands ###
|
|
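Autogenerated migrations like the one above pass None as the constraint name, which leaves the downgrade dependent on the backend's naming convention and can fail when no convention is configured. A sketch of the same migration with an explicit constraint name so both directions are deterministic; the name is hypothetical, not the project's:
from alembic import op
FK_NAME = "fk_distributions_channel_id_channels"  # hypothetical constraint name
def upgrade():
    op.create_foreign_key(
        FK_NAME, 'distributions', 'channels', ['channel_id'], ['id'])
def downgrade():
    op.drop_constraint(FK_NAME, 'distributions', type_='foreignkey')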
9e7aa9ca8b4301be8c219f4ce37edb271e6cb1ba
|
p1tr/helpers.py
|
p1tr/helpers.py
|
"""Generally useful helper functions, possibly for use in plugins."""
def humanize_time(delta):
"""
Converts a timespan provided as a datetime object into a human-readable
format.
"Inspired" by the time_ago_in_words function in P1tr Legacy.
"""
days = delta.days
minutes = delta.seconds // 60
seconds = delta.seconds - minutes * 60
hours = minutes // 60
minutes = minutes - hours * 60
def add_unit(value, singular_unit, plural_unit):
if value:
unit = singular_unit if value == 1 else plural_unit
return '%s %s' % (value, unit)
else:
return ''
return ('%s %s %s %s' % (add_unit(days, 'day', 'days'),
add_unit(hours, 'hour', 'hours'),
add_unit(minutes, 'minute', 'minutes'),
add_unit(seconds, 'second', 'seconds'))).strip()
|
Add helper module with humanize_time function
|
Add helper module with humanize_time function
|
Python
|
mit
|
howard/p1tr-tng,howard/p1tr-tng
|
Add helper module with humanize_time function
|
"""Generally useful helper functions, possibly for use in plugins."""
def humanize_time(delta):
"""
Converts a timespan provided as a datetime object into a human-readable
format.
"Inspired" by the time_ago_in_words function in P1tr Legacy.
"""
days = delta.days
minutes = delta.seconds // 60
seconds = delta.seconds - minutes * 60
hours = minutes // 60
minutes = minutes - hours * 60
def add_unit(value, singular_unit, plural_unit):
if value:
unit = singular_unit if value == 1 else plural_unit
return '%s %s' % (value, unit)
else:
return ''
return ('%s %s %s %s' % (add_unit(days, 'day', 'days'),
add_unit(hours, 'hour', 'hours'),
add_unit(minutes, 'minute', 'minutes'),
add_unit(seconds, 'second', 'seconds'))).strip()
|
<commit_before><commit_msg>Add helper module with humanize_time function<commit_after>
|
"""Generally useful helper functions, possibly for use in plugins."""
def humanize_time(delta):
"""
Converts a timespan provided as a datetime object into a human-readable
format.
"Inspired" by the time_ago_in_words function in P1tr Legacy.
"""
days = delta.days
minutes = delta.seconds // 60
seconds = delta.seconds - minutes * 60
hours = minutes // 60
minutes = minutes - hours * 60
def add_unit(value, singular_unit, plural_unit):
if value:
unit = singular_unit if value == 1 else plural_unit
return '%s %s' % (value, unit)
else:
return ''
return ('%s %s %s %s' % (add_unit(days, 'day', 'days'),
add_unit(hours, 'hour', 'hours'),
add_unit(minutes, 'minute', 'minutes'),
add_unit(seconds, 'second', 'seconds'))).strip()
|
Add helper module with humanize_time function"""Generally useful helper functions, possibly for use in plugins."""
def humanize_time(delta):
"""
Converts a timespan provided as a datetime object into a human-readable
format.
"Inspired" by the time_ago_in_words function in P1tr Legacy.
"""
days = delta.days
minutes = delta.seconds // 60
seconds = delta.seconds - minutes * 60
hours = minutes // 60
minutes = minutes - hours * 60
def add_unit(value, singular_unit, plural_unit):
if value:
unit = singular_unit if value == 1 else plural_unit
return '%s %s' % (value, unit)
else:
return ''
return ('%s %s %s %s' % (add_unit(days, 'day', 'days'),
add_unit(hours, 'hour', 'hours'),
add_unit(minutes, 'minute', 'minutes'),
add_unit(seconds, 'second', 'seconds'))).strip()
|
<commit_before><commit_msg>Add helper module with humanize_time function<commit_after>"""Generally useful helper functions, possibly for use in plugins."""
def humanize_time(delta):
"""
Converts a timespan provided as a datetime object into a human-readable
format.
"Inspired" by the time_ago_in_words function in P1tr Legacy.
"""
days = delta.days
minutes = delta.seconds // 60
seconds = delta.seconds - minutes * 60
hours = minutes // 60
minutes = minutes - hours * 60
def add_unit(value, singular_unit, plural_unit):
if value:
unit = singular_unit if value == 1 else plural_unit
return '%s %s' % (value, unit)
else:
return ''
return ('%s %s %s %s' % (add_unit(days, 'day', 'days'),
add_unit(hours, 'hour', 'hours'),
add_unit(minutes, 'minute', 'minutes'),
add_unit(seconds, 'second', 'seconds'))).strip()
|
|
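A short usage sketch for the helper above, assuming it is importable as p1tr.helpers.humanize_time; the expected string follows from the unit arithmetic in the record:
from datetime import timedelta
from p1tr.helpers import humanize_time  # assumed module path
delta = timedelta(days=1, hours=2, minutes=3, seconds=4)
print(humanize_time(delta))  # -> '1 day 2 hours 3 minutes 4 seconds'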
fa08499661cbc92a87ff7c91c76d6227fd882617
|
catalog/migrations/0017_remove_empty_orphan_courses.py
|
catalog/migrations/0017_remove_empty_orphan_courses.py
|
from django.db import migrations, models
from django.db.models import Count
def forwards(apps, schema_editor):
Course = apps.get_model("catalog", "Course")
empty_orphans = Course.objects.annotate(
docs=Count("document", distinct=True), cats=Count("categories", distinct=True)
).filter(docs=0, cats=0)
empty_orphans.delete()
class Migration(migrations.Migration):
dependencies = [
("catalog", "0016_alter_category_type_courseuserview"),
]
operations = [
migrations.RunSQL(sql="DROP TABLE IF EXISTS telepathy_message"),
migrations.RunSQL(sql="DROP TABLE IF EXISTS telepathy_thread"),
migrations.RunPython(forwards, migrations.RunPython.noop),
]
|
Remove empty orphans while migrating
|
Remove empty orphans while migrating
|
Python
|
agpl-3.0
|
UrLab/DocHub,UrLab/DocHub,UrLab/beta402,UrLab/beta402,UrLab/DocHub,UrLab/DocHub,UrLab/beta402
|
Remove empty orphans while migrating
|
from django.db import migrations, models
from django.db.models import Count
def forwards(apps, schema_editor):
Course = apps.get_model("catalog", "Course")
empty_orphans = Course.objects.annotate(
docs=Count("document", distinct=True), cats=Count("categories", distinct=True)
).filter(docs=0, cats=0)
empty_orphans.delete()
class Migration(migrations.Migration):
dependencies = [
("catalog", "0016_alter_category_type_courseuserview"),
]
operations = [
migrations.RunSQL(sql="DROP TABLE IF EXISTS telepathy_message"),
migrations.RunSQL(sql="DROP TABLE IF EXISTS telepathy_thread"),
migrations.RunPython(forwards, migrations.RunPython.noop),
]
|
<commit_before><commit_msg>Remove empty orphans while migrating<commit_after>
|
from django.db import migrations, models
from django.db.models import Count
def forwards(apps, schema_editor):
Course = apps.get_model("catalog", "Course")
empty_orphans = Course.objects.annotate(
docs=Count("document", distinct=True), cats=Count("categories", distinct=True)
).filter(docs=0, cats=0)
empty_orphans.delete()
class Migration(migrations.Migration):
dependencies = [
("catalog", "0016_alter_category_type_courseuserview"),
]
operations = [
migrations.RunSQL(sql="DROP TABLE IF EXISTS telepathy_message"),
migrations.RunSQL(sql="DROP TABLE IF EXISTS telepathy_thread"),
migrations.RunPython(forwards, migrations.RunPython.noop),
]
|
Remove empty orphans while migratingfrom django.db import migrations, models
from django.db.models import Count
def forwards(apps, schema_editor):
Course = apps.get_model("catalog", "Course")
empty_orphans = Course.objects.annotate(
docs=Count("document", distinct=True), cats=Count("categories", distinct=True)
).filter(docs=0, cats=0)
empty_orphans.delete()
class Migration(migrations.Migration):
dependencies = [
("catalog", "0016_alter_category_type_courseuserview"),
]
operations = [
migrations.RunSQL(sql="DROP TABLE IF EXISTS telepathy_message"),
migrations.RunSQL(sql="DROP TABLE IF EXISTS telepathy_thread"),
migrations.RunPython(forwards, migrations.RunPython.noop),
]
|
<commit_before><commit_msg>Remove empty orphans while migrating<commit_after>from django.db import migrations, models
from django.db.models import Count
def forwards(apps, schema_editor):
Course = apps.get_model("catalog", "Course")
empty_orphans = Course.objects.annotate(
docs=Count("document", distinct=True), cats=Count("categories", distinct=True)
).filter(docs=0, cats=0)
empty_orphans.delete()
class Migration(migrations.Migration):
dependencies = [
("catalog", "0016_alter_category_type_courseuserview"),
]
operations = [
migrations.RunSQL(sql="DROP TABLE IF EXISTS telepathy_message"),
migrations.RunSQL(sql="DROP TABLE IF EXISTS telepathy_thread"),
migrations.RunPython(forwards, migrations.RunPython.noop),
]
|
|
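The same annotate/filter query, factored into a helper that takes the model class explicitly so it can be reused both in regular code and with apps.get_model() inside a data migration; a sketch under that assumption, not DocHub's code:
from django.db.models import Count
def empty_orphan_courses(course_model):
    # Courses with neither documents nor categories attached.
    return (course_model.objects
            .annotate(docs=Count("document", distinct=True),
                      cats=Count("categories", distinct=True))
            .filter(docs=0, cats=0))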
cebb99ca378bd9048351bc8f1aa25b282b21186f
|
client/python/samples/basic/LoggingSample.py
|
client/python/samples/basic/LoggingSample.py
|
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from modeldb.thrift.modeldb import ModelDBService
# create connection to thrift client
host = "localhost"
port = 6543
transport = TSocket.TSocket(host, port)
# Buffering is critical. Raw sockets are very slow
transport = TTransport.TFramedTransport(transport)
# Wrap in a protocol
protocol = TBinaryProtocol.TBinaryProtocol(transport)
# Create a client to use the protocol encoder
client = ModelDBService.Client(protocol)
transport.open()
# get relevant project ids with case insensitive keys and case sensitive values
projectIds = client.getProjectIds({'author':'test_user'})
# update projects
for projectId in projectIds:
client.updateProject(projectId, 'name', "Sample Logging Project")
# get all model ids
allModelIds = client.getModelIds({})
# get relevant model ids with case sensitive key-value pairs
modelIds = client.getModelIds({'TAG':'train', 'TYPE':'Normal distributions'})
# create and update fields of models
for modelId in modelIds:
# update scalar fields with string values
client.updateField(modelId, 'PATH', 'new/path/to/model')
# create vector fields in nested locations using mongodb's dot notation
# e.g. model[CONFIG][values] = []
vectorConfig = {} # specify configurations for the vector (this is non-functional for now)
client.createVector(modelId, 'CONFIG.values', vectorConfig)
# append to vector fields
values = ['value1', 'value2', 'value3']
for i in xrange(len(values)):
client.addToVectorField(modelId, 'CONFIG.values', values[i])
# update vector fields at a specific index
client.updateField(modelId, 'CONFIG.values.0', 'new value')
# update fields nested within vectors
client.updateField(modelId, 'METRICS.0.TYPE', 'accuracy')
# close thrift client
transport.close()
|
Create sample for new functions
|
Create sample for new functions
|
Python
|
mit
|
mitdbg/modeldb,mitdbg/modeldb,mitdbg/modeldb,mitdbg/modeldb,mitdbg/modeldb
|
Create sample for new functions
|
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from modeldb.thrift.modeldb import ModelDBService
# create connection to thrift client
host = "localhost"
port = 6543
transport = TSocket.TSocket(host, port)
# Buffering is critical. Raw sockets are very slow
transport = TTransport.TFramedTransport(transport)
# Wrap in a protocol
protocol = TBinaryProtocol.TBinaryProtocol(transport)
# Create a client to use the protocol encoder
client = ModelDBService.Client(protocol)
transport.open()
# get relevant project ids with case insensitive keys and case sensitive values
projectIds = client.getProjectIds({'author':'test_user'})
# update projects
for projectId in projectIds:
client.updateProject(projectId, 'name', "Sample Logging Project")
# get all model ids
allModelIds = client.getModelIds({})
# get relevant model ids with case sensitive key-value pairs
modelIds = client.getModelIds({'TAG':'train', 'TYPE':'Normal distributions'})
# create and update fields of models
for modelId in modelIds:
# update scalar fields with string values
client.updateField(modelId, 'PATH', 'new/path/to/model')
# create vector fields in nested locations using mongodb's dot notation
# e.g. model[CONFIG][values] = []
vectorConfig = {} # specify configurations for the vector (this is non-functional for now)
client.createVector(modelId, 'CONFIG.values', vectorConfig)
# append to vector fields
values = ['value1', 'value2', 'value3']
for i in xrange(len(values)):
client.addToVectorField(modelId, 'CONFIG.values', values[i])
# update vector fields at a specific index
client.updateField(modelId, 'CONFIG.values.0', 'new value')
# update fields nested within vectors
client.updateField(modelId, 'METRICS.0.TYPE', 'accuracy')
# close thrift client
transport.close()
|
<commit_before><commit_msg>Create sample for new functions<commit_after>
|
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from modeldb.thrift.modeldb import ModelDBService
# create connection to thrift client
host = "localhost"
port = 6543
transport = TSocket.TSocket(host, port)
# Buffering is critical. Raw sockets are very slow
transport = TTransport.TFramedTransport(transport)
# Wrap in a protocol
protocol = TBinaryProtocol.TBinaryProtocol(transport)
# Create a client to use the protocol encoder
client = ModelDBService.Client(protocol)
transport.open()
# get relevant project ids with case insensitive keys and case sensitive values
projectIds = client.getProjectIds({'author':'test_user'})
# update projects
for projectId in projectIds:
client.updateProject(projectId, 'name', "Sample Logging Project")
# get all model ids
allModelIds = client.getModelIds({})
# get relevant model ids with case sensitive key-value pairs
modelIds = client.getModelIds({'TAG':'train', 'TYPE':'Normal distributions'})
# create and update fields of models
for modelId in modelIds:
# update scalar fields with string values
client.updateField(modelId, 'PATH', 'new/path/to/model')
# create vector fields in nested locations using mongodb's dot notation
# e.g. model[CONFIG][values] = []
vectorConfig = {} # specify configurations for the vector (this is non-functional for now)
client.createVector(modelId, 'CONFIG.values', vectorConfig)
# append to vector fields
values = ['value1', 'value2', 'value3']
for i in xrange(len(values)):
client.addToVectorField(modelId, 'CONFIG.values', values[i])
# update vector fields at a specific index
client.updateField(modelId, 'CONFIG.values.0', 'new value')
# update fields nested within vectors
client.updateField(modelId, 'METRICS.0.TYPE', 'accuracy')
# close thrift client
transport.close()
|
Create sample for new functionsfrom thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from modeldb.thrift.modeldb import ModelDBService
# create connection to thrift client
host = "localhost"
port = 6543
transport = TSocket.TSocket(host, port)
# Buffering is critical. Raw sockets are very slow
transport = TTransport.TFramedTransport(transport)
# Wrap in a protocol
protocol = TBinaryProtocol.TBinaryProtocol(transport)
# Create a client to use the protocol encoder
client = ModelDBService.Client(protocol)
transport.open()
# get relevant project ids with case insensitive keys and case sensitive values
projectIds = client.getProjectIds({'author':'test_user'})
# update projects
for projectId in projectIds:
client.updateProject(projectId, 'name', "Sample Logging Project")
# get all model ids
allModelIds = client.getModelIds({})
# get relevant model ids with case sensitive key-value pairs
modelIds = client.getModelIds({'TAG':'train', 'TYPE':'Normal distributions'})
# create and update fields of models
for modelId in modelIds:
# update scalar fields with string values
client.updateField(modelId, 'PATH', 'new/path/to/model')
# create vector fields in nested locations using mongodb's dot notation
# e.g. model[CONFIG][values] = []
vectorConfig = {} # specify configurations for the vector (this is non-functional for now)
client.createVector(modelId, 'CONFIG.values', vectorConfig)
# append to vector fields
values = ['value1', 'value2', 'value3']
for i in xrange(len(values)):
client.addToVectorField(modelId, 'CONFIG.values', values[i])
# update vector fields at a specific index
client.updateField(modelId, 'CONFIG.values.0', 'new value')
# update fields nested within vectors
client.updateField(modelId, 'METRICS.0.TYPE', 'accuracy')
# close thrift client
transport.close()
|
<commit_before><commit_msg>Create sample for new functions<commit_after>from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from modeldb.thrift.modeldb import ModelDBService
# create connection to thrift client
host = "localhost"
port = 6543
transport = TSocket.TSocket(host, port)
# Buffering is critical. Raw sockets are very slow
transport = TTransport.TFramedTransport(transport)
# Wrap in a protocol
protocol = TBinaryProtocol.TBinaryProtocol(transport)
# Create a client to use the protocol encoder
client = ModelDBService.Client(protocol)
transport.open()
# get relevant project ids with case insensitive keys and case sensitive values
projectIds = client.getProjectIds({'author':'test_user'})
# update projects
for projectId in projectIds:
client.updateProject(projectId, 'name', "Sample Logging Project")
# get all model ids
allModelIds = client.getModelIds({})
# get relevant model ids with case sensitive key-value pairs
modelIds = client.getModelIds({'TAG':'train', 'TYPE':'Normal distributions'})
# create and update fields of models
for modelId in modelIds:
# update scalar fields with string values
client.updateField(modelId, 'PATH', 'new/path/to/model')
# create vector fields in nested locations using mongodb's dot notation
# e.g. model[CONFIG][values] = []
vectorConfig = {} # specify configurations for the vector (this is non-functional for now)
client.createVector(modelId, 'CONFIG.values', vectorConfig)
# append to vector fields
values = ['value1', 'value2', 'value3']
for i in xrange(len(values)):
client.addToVectorField(modelId, 'CONFIG.values', values[i])
# update vector fields at a specific index
client.updateField(modelId, 'CONFIG.values.0', 'new value')
# update fields nested within vectors
client.updateField(modelId, 'METRICS.0.TYPE', 'accuracy')
# close thrift client
transport.close()
|
|
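The connection boilerplate in the sample above can be wrapped in a context manager so the transport is always closed; a sketch built from the same thrift calls the sample already uses:
from contextlib import contextmanager
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from modeldb.thrift.modeldb import ModelDBService
@contextmanager
def modeldb_client(host="localhost", port=6543):
    # Framed transport, binary protocol, closed on exit.
    transport = TTransport.TFramedTransport(TSocket.TSocket(host, port))
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    transport.open()
    try:
        yield ModelDBService.Client(protocol)
    finally:
        transport.close()
# with modeldb_client() as client:
#     project_ids = client.getProjectIds({'author': 'test_user'})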
7c8dac37d4e9cfced65e1c7ec5655c3e78c8de2a
|
accelerator/migrations/0074_update_url_to_community.py
|
accelerator/migrations/0074_update_url_to_community.py
|
# Generated by Django 2.2.10 on 2021-11-05 12:29
from django.db import migrations
def update_url_to_community(apps, schema_editor):
people_url = "/people"
mentor_url = "/directory"
community_url = "/community"
SiteRedirectPage = apps.get_model('accelerator', 'SiteRedirectPage')
for siteredirectpage in SiteRedirectPage.objects.all():
has_old_url = siteredirectpage.objects.filter(new_url=people_url).filter(new_url=mentor_url)
if has_old_url.exists():
has_old_url.update(new_url=community_url)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0073_auto_20210909_1706'),
]
operations = [
migrations.RunPython(update_url_to_community,
migrations.RunPython.noop)
]
|
Remove migrations that are not relevant to the work
|
[AC-9046] Remove migrations that are not relevant to the work
|
Python
|
mit
|
masschallenge/django-accelerator,masschallenge/django-accelerator
|
[AC-9046] Remove migrations that are not relevant to the work
|
# Generated by Django 2.2.10 on 2021-11-05 12:29
from django.db import migrations
def update_url_to_community(apps, schema_editor):
people_url = "/people"
mentor_url = "/directory"
community_url = "/community"
SiteRedirectPage = apps.get_model('accelerator', 'SiteRedirectPage')
for siteredirectpage in SiteRedirectPage.objects.all():
has_old_url = siteredirectpage.objects.filter(new_url=people_url).filter(new_url=mentor_url)
if has_old_url.exists():
has_old_url.update(new_url=community_url)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0073_auto_20210909_1706'),
]
operations = [
migrations.RunPython(update_url_to_community,
migrations.RunPython.noop)
]
|
<commit_before><commit_msg>[AC-9046] Remove migrations that are not relevant to the work<commit_after>
|
# Generated by Django 2.2.10 on 2021-11-05 12:29
from django.db import migrations
def update_url_to_community(apps, schema_editor):
people_url = "/people"
mentor_url = "/directory"
community_url = "/community"
SiteRedirectPage = apps.get_model('accelerator', 'SiteRedirectPage')
for siteredirectpage in SiteRedirectPage.objects.all():
has_old_url = siteredirectpage.objects.filter(new_url=people_url).filter(new_url=mentor_url)
if has_old_url.exists():
has_old_url.update(new_url=community_url)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0073_auto_20210909_1706'),
]
operations = [
migrations.RunPython(update_url_to_community,
migrations.RunPython.noop)
]
|
[AC-9046] Remove migrations that are not relevant to the work# Generated by Django 2.2.10 on 2021-11-05 12:29
from django.db import migrations
def update_url_to_community(apps, schema_editor):
people_url = "/people"
mentor_url = "/directory"
community_url = "/community"
SiteRedirectPage = apps.get_model('accelerator', 'SiteRedirectPage')
for siteredirectpage in SiteRedirectPage.objects.all():
has_old_url = siteredirectpage.objects.filter(new_url=people_url).filter(new_url=mentor_url)
if has_old_url.exists():
has_old_url.update(new_url=community_url)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0073_auto_20210909_1706'),
]
operations = [
migrations.RunPython(update_url_to_community,
migrations.RunPython.noop)
]
|
<commit_before><commit_msg>[AC-9046] Remove migrations that are not relevant to the work<commit_after># Generated by Django 2.2.10 on 2021-11-05 12:29
from django.db import migrations
def update_url_to_community(apps, schema_editor):
people_url = "/people"
mentor_url = "/directory"
community_url = "/community"
SiteRedirectPage = apps.get_model('accelerator', 'SiteRedirectPage')
for siteredirectpage in SiteRedirectPage.objects.all():
has_old_url = siteredirectpage.objects.filter(new_url=people_url).filter(new_url=mentor_url)
if has_old_url.exists():
has_old_url.update(new_url=community_url)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0073_auto_20210909_1706'),
]
operations = [
migrations.RunPython(update_url_to_community,
migrations.RunPython.noop)
]
|
|
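As written, the loop above would not run cleanly: the manager is reached through a model instance (Django raises "Manager isn't accessible via ... instances"), and chaining two equality filters on new_url can never match both values at once. A sketch of what the update presumably intended, offered as an illustration rather than the project's actual code:
def update_url_to_community(apps, schema_editor):
    SiteRedirectPage = apps.get_model('accelerator', 'SiteRedirectPage')
    SiteRedirectPage.objects.filter(
        new_url__in=["/people", "/directory"]
    ).update(new_url="/community")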
67f82f569dbf75ee5555862e21bd1bafbaa5464b
|
corehq/apps/userreports/management/commands/find_datasource_mismatches.py
|
corehq/apps/userreports/management/commands/find_datasource_mismatches.py
|
from __future__ import absolute_import
from __future__ import print_function
import csv
from datetime import datetime
from django.core.management.base import BaseCommand
from corehq.apps.userreports.document_stores import get_document_store
from corehq.apps.userreports.models import get_datasource_config
from corehq.apps.userreports.util import get_indicator_adapter
from corehq.util.log import with_progress_bar
class Command(BaseCommand):
help = "Find rows in a datasource that aren't what they should be"
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('data_source_id')
def handle(self, domain, data_source_id, *args, **kwargs):
config, _ = get_datasource_config(data_source_id, domain)
adapter = get_indicator_adapter(config)
q = adapter.get_query_object()
document_store = get_document_store(domain, config.referenced_doc_type)
bad_rows = []
for row in with_progress_bar(q, length=q.count()):
doc_id = row.doc_id
doc = document_store.get_document(doc_id)
current_rows = config.get_all_values(doc)
if len(current_rows) > 1:
raise ValueError("this command doesn't work for datasources returning multiple rows per doc")
try:
current_row = current_rows[0]
except KeyError:
continue
# don't compare the 'inserted_at' columns
current_row = [val for val in current_row if val.column.database_column_name != 'inserted_at']
for val in current_row:
try:
if getattr(row, val.column.database_column_name) != val.value:
bad_rows.append({
'doc_id': row.doc_id,
'column_name': val.column.database_column_name,
'inserted_at': row.inserted_at.isoformat(),
'stored_value': getattr(row, val.column.database_column_name),
'desired_value': val.value,
})
except AttributeError:
bad_rows.append({
'doc_id': row.doc_id,
'column_name': val.column.database_column_name,
'inserted_at': 'missing',
'stored_value': 'missing',
'desired_value': val.value,
})
filename = 'datasource_mismatches_{}_{}.csv'.format(
data_source_id[-8:],
datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S")
)
with open(filename, 'w') as f:
writer = csv.DictWriter(f, ['doc_id', 'column_name', 'inserted_at', 'stored_value', 'desired_value'])
writer.writeheader()
writer.writerows(bad_rows)
print("Found {} mismatches. Check {} for more details".format(len(bad_rows), filename))
|
Add command to find datasource mismatches
|
Add command to find datasource mismatches
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add command to find datasource mismatches
|
from __future__ import absolute_import
from __future__ import print_function
import csv
from datetime import datetime
from django.core.management.base import BaseCommand
from corehq.apps.userreports.document_stores import get_document_store
from corehq.apps.userreports.models import get_datasource_config
from corehq.apps.userreports.util import get_indicator_adapter
from corehq.util.log import with_progress_bar
class Command(BaseCommand):
help = "Find rows in a datasource that aren't what they should be"
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('data_source_id')
def handle(self, domain, data_source_id, *args, **kwargs):
config, _ = get_datasource_config(data_source_id, domain)
adapter = get_indicator_adapter(config)
q = adapter.get_query_object()
document_store = get_document_store(domain, config.referenced_doc_type)
bad_rows = []
for row in with_progress_bar(q, length=q.count()):
doc_id = row.doc_id
doc = document_store.get_document(doc_id)
current_rows = config.get_all_values(doc)
if len(current_rows) > 1:
raise ValueError("this command doesn't work for datasources returning multiple rows per doc")
try:
current_row = current_rows[0]
except KeyError:
continue
# don't compare the 'inserted_at' columns
current_row = [val for val in current_row if val.column.database_column_name != 'inserted_at']
for val in current_row:
try:
if getattr(row, val.column.database_column_name) != val.value:
bad_rows.append({
'doc_id': row.doc_id,
'column_name': val.column.database_column_name,
'inserted_at': row.inserted_at.isoformat(),
'stored_value': getattr(row, val.column.database_column_name),
'desired_value': val.value,
})
except AttributeError:
bad_rows.append({
'doc_id': row.doc_id,
'column_name': val.column.database_column_name,
'inserted_at': 'missing',
'stored_value': 'missing',
'desired_value': val.value,
})
filename = 'datasource_mismatches_{}_{}.csv'.format(
data_source_id[-8:],
datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S")
)
with open(filename, 'w') as f:
writer = csv.DictWriter(f, ['doc_id', 'column_name', 'inserted_at', 'stored_value', 'desired_value'])
writer.writeheader()
writer.writerows(bad_rows)
print("Found {} mismatches. Check {} for more details".format(len(bad_rows), filename))
|
<commit_before><commit_msg>Add command to find datasource mismatches<commit_after>
|
from __future__ import absolute_import
from __future__ import print_function
import csv
from datetime import datetime
from django.core.management.base import BaseCommand
from corehq.apps.userreports.document_stores import get_document_store
from corehq.apps.userreports.models import get_datasource_config
from corehq.apps.userreports.util import get_indicator_adapter
from corehq.util.log import with_progress_bar
class Command(BaseCommand):
help = "Find rows in a datasource that aren't what they should be"
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('data_source_id')
def handle(self, domain, data_source_id, *args, **kwargs):
config, _ = get_datasource_config(data_source_id, domain)
adapter = get_indicator_adapter(config)
q = adapter.get_query_object()
document_store = get_document_store(domain, config.referenced_doc_type)
bad_rows = []
for row in with_progress_bar(q, length=q.count()):
doc_id = row.doc_id
doc = document_store.get_document(doc_id)
current_rows = config.get_all_values(doc)
if len(current_rows) > 1:
raise ValueError("this command doesn't work for datasources returning multiple rows per doc")
try:
current_row = current_rows[0]
except KeyError:
continue
# don't compare the 'inserted_at' columns
current_row = [val for val in current_row if val.column.database_column_name != 'inserted_at']
for val in current_row:
try:
if getattr(row, val.column.database_column_name) != val.value:
bad_rows.append({
'doc_id': row.doc_id,
'column_name': val.column.database_column_name,
'inserted_at': row.inserted_at.isoformat(),
'stored_value': getattr(row, val.column.database_column_name),
'desired_value': val.value,
})
except AttributeError:
bad_rows.append({
'doc_id': row.doc_id,
'column_name': val.column.database_column_name,
'inserted_at': 'missing',
'stored_value': 'missing',
'desired_value': val.value,
})
filename = 'datasource_mismatches_{}_{}.csv'.format(
data_source_id[-8:],
datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S")
)
with open(filename, 'w') as f:
writer = csv.DictWriter(f, ['doc_id', 'column_name', 'inserted_at', 'stored_value', 'desired_value'])
writer.writeheader()
writer.writerows(bad_rows)
print("Found {} mismatches. Check {} for more details".format(len(bad_rows), filename))
|
Add command to find datasource mismatchesfrom __future__ import absolute_import
from __future__ import print_function
import csv
from datetime import datetime
from django.core.management.base import BaseCommand
from corehq.apps.userreports.document_stores import get_document_store
from corehq.apps.userreports.models import get_datasource_config
from corehq.apps.userreports.util import get_indicator_adapter
from corehq.util.log import with_progress_bar
class Command(BaseCommand):
help = "Find rows in a datasource that aren't what they should be"
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('data_source_id')
def handle(self, domain, data_source_id, *args, **kwargs):
config, _ = get_datasource_config(data_source_id, domain)
adapter = get_indicator_adapter(config)
q = adapter.get_query_object()
document_store = get_document_store(domain, config.referenced_doc_type)
bad_rows = []
for row in with_progress_bar(q, length=q.count()):
doc_id = row.doc_id
doc = document_store.get_document(doc_id)
current_rows = config.get_all_values(doc)
if len(current_rows) > 1:
raise ValueError("this command doesn't work for datasources returning multiple rows per doc")
try:
current_row = current_rows[0]
except KeyError:
continue
# don't compare the 'inserted_at' columns
current_row = [val for val in current_row if val.column.database_column_name != 'inserted_at']
for val in current_row:
try:
if getattr(row, val.column.database_column_name) != val.value:
bad_rows.append({
'doc_id': row.doc_id,
'column_name': val.column.database_column_name,
'inserted_at': row.inserted_at.isoformat(),
'stored_value': getattr(row, val.column.database_column_name),
'desired_value': val.value,
})
except AttributeError:
bad_rows.append({
'doc_id': row.doc_id,
'column_name': val.column.database_column_name,
'inserted_at': 'missing',
'stored_value': 'missing',
'desired_value': val.value,
})
filename = 'datasource_mismatches_{}_{}.csv'.format(
data_source_id[-8:],
datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S")
)
with open(filename, 'w') as f:
writer = csv.DictWriter(f, ['doc_id', 'column_name', 'inserted_at', 'stored_value', 'desired_value'])
writer.writeheader()
writer.writerows(bad_rows)
print("Found {} mismatches. Check {} for more details".format(len(bad_rows), filename))
|
<commit_before><commit_msg>Add command to find datasource mismatches<commit_after>from __future__ import absolute_import
from __future__ import print_function
import csv
from datetime import datetime
from django.core.management.base import BaseCommand
from corehq.apps.userreports.document_stores import get_document_store
from corehq.apps.userreports.models import get_datasource_config
from corehq.apps.userreports.util import get_indicator_adapter
from corehq.util.log import with_progress_bar
class Command(BaseCommand):
help = "Find rows in a datasource that aren't what they should be"
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('data_source_id')
def handle(self, domain, data_source_id, *args, **kwargs):
config, _ = get_datasource_config(data_source_id, domain)
adapter = get_indicator_adapter(config)
q = adapter.get_query_object()
document_store = get_document_store(domain, config.referenced_doc_type)
bad_rows = []
for row in with_progress_bar(q, length=q.count()):
doc_id = row.doc_id
doc = document_store.get_document(doc_id)
current_rows = config.get_all_values(doc)
if len(current_rows) > 1:
raise ValueError("this command doesn't work for datasources returning multiple rows per doc")
try:
current_row = current_rows[0]
except KeyError:
continue
# don't compare the 'inserted_at' columns
current_row = [val for val in current_row if val.column.database_column_name != 'inserted_at']
for val in current_row:
try:
if getattr(row, val.column.database_column_name) != val.value:
bad_rows.append({
'doc_id': row.doc_id,
'column_name': val.column.database_column_name,
'inserted_at': row.inserted_at.isoformat(),
'stored_value': getattr(row, val.column.database_column_name),
'desired_value': val.value,
})
except AttributeError:
bad_rows.append({
'doc_id': row.doc_id,
'column_name': val.column.database_column_name,
'inserted_at': 'missing',
'stored_value': 'missing',
'desired_value': val.value,
})
filename = 'datasource_mismatches_{}_{}.csv'.format(
data_source_id[-8:],
datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S")
)
with open(filename, 'w') as f:
writer = csv.DictWriter(f, ['doc_id', 'column_name', 'inserted_at', 'stored_value', 'desired_value'])
writer.writeheader()
writer.writerows(bad_rows)
print("Found {} mismatches. Check {} for more details".format(len(bad_rows), filename))
|
|
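A usage sketch for the management command in this record; the command name below is an assumption (it would come from the file name under management/commands/), and the domain and data source id are placeholders:
from django.core.management import call_command
# equivalent to: python manage.py find_datasource_mismatches <domain> <data_source_id>
call_command('find_datasource_mismatches', 'example-domain', 'abc123def456')
# afterwards, inspect the generated datasource_mismatches_<id suffix>_<timestamp>.csv in the working directory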
84f824495793eaa780b6681be691fe4d5a10fc28
|
py/fizz-buzz.py
|
py/fizz-buzz.py
|
import sys
class Solution(object):
def fizzBuzz(self, n):
"""
:type n: int
:rtype: List[str]
"""
ret = []
for i in xrange(1, n + 1):
s = ''
if i % 3 == 0:
s += 'Fizz'
if i % 5 == 0:
s += 'Buzz'
if i % 3 > 0 and i % 5 > 0:
s += str(i)
ret.append(s)
return ret
|
Add py solution for 412. Fizz Buzz
|
Add py solution for 412. Fizz Buzz
412. Fizz Buzz: https://leetcode.com/problems/fizz-buzz/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 412. Fizz Buzz
412. Fizz Buzz: https://leetcode.com/problems/fizz-buzz/
|
import sys
class Solution(object):
def fizzBuzz(self, n):
"""
:type n: int
:rtype: List[str]
"""
ret = []
for i in xrange(1, n + 1):
s = ''
if i % 3 == 0:
s += 'Fizz'
if i % 5 == 0:
s += 'Buzz'
if i % 3 > 0 and i % 5 > 0:
s += str(i)
ret.append(s)
return ret
|
<commit_before><commit_msg>Add py solution for 412. Fizz Buzz
412. Fizz Buzz: https://leetcode.com/problems/fizz-buzz/<commit_after>
|
import sys
class Solution(object):
def fizzBuzz(self, n):
"""
:type n: int
:rtype: List[str]
"""
ret = []
for i in xrange(1, n + 1):
s = ''
if i % 3 == 0:
s += 'Fizz'
if i % 5 == 0:
s += 'Buzz'
if i % 3 > 0 and i % 5 > 0:
s += str(i)
ret.append(s)
return ret
|
Add py solution for 412. Fizz Buzz
412. Fizz Buzz: https://leetcode.com/problems/fizz-buzz/import sys
class Solution(object):
def fizzBuzz(self, n):
"""
:type n: int
:rtype: List[str]
"""
ret = []
for i in xrange(1, n + 1):
s = ''
if i % 3 == 0:
s += 'Fizz'
if i % 5 == 0:
s += 'Buzz'
if i % 3 > 0 and i % 5 > 0:
s += str(i)
ret.append(s)
return ret
|
<commit_before><commit_msg>Add py solution for 412. Fizz Buzz
412. Fizz Buzz: https://leetcode.com/problems/fizz-buzz/<commit_after>import sys
class Solution(object):
def fizzBuzz(self, n):
"""
:type n: int
:rtype: List[str]
"""
ret = []
for i in xrange(1, n + 1):
s = ''
if i % 3 == 0:
s += 'Fizz'
if i % 5 == 0:
s += 'Buzz'
if i % 3 > 0 and i % 5 > 0:
s += str(i)
ret.append(s)
return ret
|
|
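A quick sanity check of the solution above (Python 2, matching the xrange usage in the snippet):
if __name__ == '__main__':
    print Solution().fizzBuzz(15)
    # ['1', '2', 'Fizz', '4', 'Buzz', 'Fizz', '7', '8', 'Fizz', 'Buzz',
    #  '11', 'Fizz', '13', '14', 'FizzBuzz']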
d9330450854e5fd7b7e9d038283c8fb80058cc2e
|
scripts/ensure_tilesize.py
|
scripts/ensure_tilesize.py
|
#!/usr/bin/python
#
# This is a helper script to ensure an image has the correct tile size.
# It uses pgmagick[1] to read and (if needed) correct the image. To use
# it on a number of files one could use e.g. the find command:
#
# find <data-folder> -name *.jpg -exec scripts/ensure_tilesize.py {} 256 \;
#
# [1] http://pypi.python.org/pypi/pgmagick/
import sys
import os
from pgmagick import Image, Geometry, Color, CompositeOperator as co
# Make sure we got the arguments we expect
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: ensure_tilesize.py <FILENAME> <TILESIZE>"
sys.exit(1)
image_path = sys.argv[1]
tile_size = int(sys.argv[2])
# Make sure the file actually exists
if not os.path.exists(image_path):
print >> sys.stderr, "Could not find file!"
sys.exit(1)
# Get properties of image
image = Image(image_path)
image_width = image.size().width()
image_height = image.size().height()
image_name = image.fileName()
# If the image has the correct size, just exit
if image_width == tile_size and image_height == tile_size:
sys.exit(0)
# A new image with the correct size is needed, create it
geometry = Geometry(tile_size, tile_size)
color = Color("black")
new_image = Image(geometry, color)
# Copy original image to position 0,0 of new image
new_image.composite(image, 0, 0, co.OverCompositeOp)
# Override original image
new_image.write(image_name)
print >> sys.stdout, "Corrected " + image_name + " from " + str(image_width) + "x" + str(image_height) + " to " + str(tile_size) + "x" + str(tile_size)
|
Add script to ensure the correct tile size of a file
|
Add script to ensure the correct tile size of a file
|
Python
|
agpl-3.0
|
htem/CATMAID,fzadow/CATMAID,fzadow/CATMAID,htem/CATMAID,htem/CATMAID,htem/CATMAID,fzadow/CATMAID,fzadow/CATMAID
|
Add script to ensure the correct tile size of a file
|
#!/usr/bin/python
#
# This is a helper script to ensure an image has the correct tile size.
# It uses pgmagick[1] to read and (if needed) correct the image. To use
# it on a number of files one could use e.g. the find command:
#
# find <data-folder> -name *.jpg -exec scripts/ensure_tilesize.py {} 256 \;
#
# [1] http://pypi.python.org/pypi/pgmagick/
import sys
import os
from pgmagick import Image, Geometry, Color, CompositeOperator as co
# Make sure we got the arguments we expect
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: ensure_tilesize.py <FILENAME> <TILESIZE>"
sys.exit(1)
image_path = sys.argv[1]
tile_size = int(sys.argv[2])
# Make sure the file actually exists
if not os.path.exists(image_path):
print >> sys.stderr, "Could not find file!"
sys.exit(1)
# Get properties of image
image = Image(image_path)
image_width = image.size().width()
image_height = image.size().height()
image_name = image.fileName()
# If the image has the correct size, just exit
if image_width == tile_size and image_height == tile_size:
sys.exit(0)
# A new image with the correct size is needed, create it
geometry = Geometry(tile_size, tile_size)
color = Color("black")
new_image = Image(geometry, color)
# Copy original image to position 0,0 of new image
new_image.composite(image, 0, 0, co.OverCompositeOp)
# Override original image
new_image.write(image_name)
print >> sys.stdout, "Corrected " + image_name + " from " + str(image_width) + "x" + str(image_height) + " to " + str(tile_size) + "x" + str(tile_size)
|
<commit_before><commit_msg>Add script to ensure the correct tile size of a file<commit_after>
|
#!/usr/bin/python
#
# This is a helper script to ensure an image has the correct tile size.
# It uses pgmagick[1] to read and (if needed) correct the image. To use
# it on a number of files one could use e.g. the find command:
#
# find <data-folder> -name *.jpg -exec scripts/ensure_tilesize.py {} 256 \;
#
# [1] http://pypi.python.org/pypi/pgmagick/
import sys
import os
from pgmagick import Image, Geometry, Color, CompositeOperator as co
# Make sure we got the arguments we expect
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: ensure_tilesize.py <FILENAME> <TILESIZE>"
sys.exit(1)
image_path = sys.argv[1]
tile_size = int(sys.argv[2])
# Make sure the file actually exists
if not os.path.exists(image_path):
print >> sys.stderr, "Could not find file!"
sys.exit(1)
# Get properties of image
image = Image(image_path)
image_width = image.size().width()
image_height = image.size().height()
image_name = image.fileName()
# If the image has the correct size, just exit
if image_width == tile_size and image_height == tile_size:
sys.exit(0)
# A new image with the correct size is needed, create it
geometry = Geometry(tile_size, tile_size)
color = Color("black")
new_image = Image(geometry, color)
# Copy original image to position 0,0 of new image
new_image.composite(image, 0, 0, co.OverCompositeOp)
# Override original image
new_image.write(image_name)
print >> sys.stdout, "Corrected " + image_name + " from " + str(image_width) + "x" + str(image_height) + " to " + str(tile_size) + "x" + str(tile_size)
|
Add script to ensure the correct tile size of a file#!/usr/bin/python
#
# This is a helper script to ensure an image has the correct tile size.
# It uses pgmagick[1] to read and (if needed) correct the image. To use
# it on a number of files one could use e.g. the find command:
#
# find <data-folder> -name *.jpg -exec scripts/ensure_tilesize.py {} 256 \;
#
# [1] http://pypi.python.org/pypi/pgmagick/
import sys
import os
from pgmagick import Image, Geometry, Color, CompositeOperator as co
# Make sure we got the arguments we expect
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: ensure_tilesize.py <FILENAME> <TILESIZE>"
sys.exit(1)
image_path = sys.argv[1]
tile_size = int(sys.argv[2])
# Make sure the file actually exists
if not os.path.exists(image_path):
print >> sys.stderr, "Could not find file!"
sys.exit(1)
# Get properties of image
image = Image(image_path)
image_width = image.size().width()
image_height = image.size().height()
image_name = image.fileName()
# If the image has the correct size, just exit
if image_width == tile_size and image_height == tile_size:
sys.exit(0)
# A new image with the correct size is needed, create it
geometry = Geometry(tile_size, tile_size)
color = Color("black")
new_image = Image(geometry, color)
# Copy original image to position 0,0 of new image
new_image.composite(image, 0, 0, co.OverCompositeOp)
# Override original image
new_image.write(image_name)
print >> sys.stdout, "Corrected " + image_name + " from " + str(image_width) + "x" + str(image_height) + " to " + str(tile_size) + "x" + str(tile_size)
|
<commit_before><commit_msg>Add script to ensure the correct tile size of a file<commit_after>#!/usr/bin/python
#
# This is a helper script to ensure an image has the correct tile size.
# It uses pgmagick[1] to read and (if needed) correct the image. To use
# it on a number of files one could use e.g. the find command:
#
# find <data-folder> -name *.jpg -exec scripts/ensure_tilesize.py {} 256 \;
#
# [1] http://pypi.python.org/pypi/pgmagick/
import sys
import os
from pgmagick import Image, Geometry, Color, CompositeOperator as co
# Make sure we got the arguments we expect
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: ensure_tilesize.py <FILENAME> <TILESIZE>"
sys.exit(1)
image_path = sys.argv[1]
tile_size = int(sys.argv[2])
# Make sure the file actually exists
if not os.path.exists(image_path):
print >> sys.stderr, "Could not find file!"
sys.exit(1)
# Get properties of image
image = Image(image_path)
image_width = image.size().width()
image_height = image.size().height()
image_name = image.fileName()
# If the image has the correct size, just exit
if image_width == tile_size and image_height == tile_size:
sys.exit(0)
# A new image with the correct size is needed, create it
geometry = Geometry(tile_size, tile_size)
color = Color("black")
new_image = Image(geometry, color)
# Copy original image to position 0,0 of new image
new_image.composite(image, 0, 0, co.OverCompositeOp)
# Override original image
new_image.write(image_name)
print >> sys.stdout, "Corrected " + image_name + " from " + str(image_width) + "x" + str(image_height) + " to " + str(tile_size) + "x" + str(tile_size)
|
|
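Besides the find invocation shown in the script's header comment, a small Python driver can do the same sweep; the data/ folder and the 256-pixel tile size below are placeholders:
import os
import subprocess
for root, _, files in os.walk('data'):
    for name in files:
        if name.endswith('.jpg'):
            # pads any undersized tile to 256x256, leaving correctly sized tiles untouched
            subprocess.call(['python', 'scripts/ensure_tilesize.py', os.path.join(root, name), '256'])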
16c9303ecc06c282eb15082b99f8eefa76fd4f18
|
bindings/py/tests/temporal_memory_test.py
|
bindings/py/tests/temporal_memory_test.py
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
class TemporalMemoryBindingsTest(unittest.TestCase):
@staticmethod
def testIssue807():
# The following should silently pass. Previous versions segfaulted.
# See https://github.com/numenta/nupic.core/issues/807 for context
from nupic.bindings.algorithms import TemporalMemory
tm = TemporalMemory()
tm.compute(set(), True)
|
Add test to exercise code snippet known to cause segfault
|
Add test to exercise code snippet known to cause segfault
|
Python
|
agpl-3.0
|
neuroidss/nupic.core,scottpurdy/nupic.core,breznak/nupic.core,rhyolight/nupic.core,subutai/nupic.core,breznak/nupic.core,subutai/nupic.core,neuroidss/nupic.core,breznak/nupic.core,numenta/nupic.core,numenta/htmresearch-core,ywcui1990/nupic.core,utensil/nupic.core,neuroidss/nupic.core,subutai/nupic.core,numenta/htmresearch-core,numenta/nupic.core,EricSB/nupic.core,numenta/htmresearch-core,rcrowder/nupic.core,pettitda/nupic.core,numenta/nupic.core,numenta/htmresearch-core,EricSB/nupic.core,rcrowder/nupic.core,rhyolight/nupic.core,scottpurdy/nupic.core,pettitda/nupic.core,ywcui1990/nupic.core,EricSB/nupic.core,lscheinkman/nupic.core,rcrowder/nupic.core,lscheinkman/nupic.core,ywcui1990/nupic.core,scottpurdy/nupic.core,numenta/nupic.core,ywcui1990/nupic.core,utensil/nupic.core,pettitda/nupic.core,subutai/nupic.core,utensil/nupic.core,rcrowder/nupic.core,lscheinkman/nupic.core,utensil/nupic.core,pettitda/nupic.core,scottpurdy/nupic.core,lscheinkman/nupic.core,neuroidss/nupic.core,rhyolight/nupic.core,EricSB/nupic.core,breznak/nupic.core,rhyolight/nupic.core
|
Add test to exercise code snippet known to cause segfault
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
class TemporalMemoryBindingsTest(unittest.TestCase):
@staticmethod
def testIssue807():
# The following should silently pass. Previous versions segfaulted.
# See https://github.com/numenta/nupic.core/issues/807 for context
from nupic.bindings.algorithms import TemporalMemory
tm = TemporalMemory()
tm.compute(set(), True)
|
<commit_before><commit_msg>Add test to exercise code snippet known to cause segfault<commit_after>
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
class TemporalMemoryBindingsTest(unittest.TestCase):
@staticmethod
def testIssue807():
# The following should silently pass. Previous versions segfaulted.
# See https://github.com/numenta/nupic.core/issues/807 for context
from nupic.bindings.algorithms import TemporalMemory
tm = TemporalMemory()
tm.compute(set(), True)
|
Add test to exercise code snippet known to cause segfault# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
class TemporalMemoryBindingsTest(unittest.TestCase):
@staticmethod
def testIssue807():
# The following should silently pass. Previous versions segfaulted.
# See https://github.com/numenta/nupic.core/issues/807 for context
from nupic.bindings.algorithms import TemporalMemory
tm = TemporalMemory()
tm.compute(set(), True)
|
<commit_before><commit_msg>Add test to exercise code snippet known to cause segfault<commit_after># ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
class TemporalMemoryBindingsTest(unittest.TestCase):
@staticmethod
def testIssue807():
# The following should silently pass. Previous versions segfaulted.
# See https://github.com/numenta/nupic.core/issues/807 for context
from nupic.bindings.algorithms import TemporalMemory
tm = TemporalMemory()
tm.compute(set(), True)
|
|
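One way to run just this regression test with the standard unittest runner; the import path is an assumption and depends on how bindings/py/tests is placed on sys.path:
import unittest
from temporal_memory_test import TemporalMemoryBindingsTest  # assumes bindings/py/tests is on sys.path
suite = unittest.TestLoader().loadTestsFromTestCase(TemporalMemoryBindingsTest)
unittest.TextTestRunner(verbosity=2).run(suite)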
e7d60f59e5bedc3a0893185303746c1cbb0d15af
|
app/api/cruds/weekday_crud.py
|
app/api/cruds/weekday_crud.py
|
from django.core.exceptions import ValidationError
import graphene
from graphene_django import DjangoObjectType
from app.timetables.models import Weekday
from .utils import get_errors
class WeekdayNode(DjangoObjectType):
original_id = graphene.Int()
class Meta:
model = Weekday
filter_fields = {
'name': ['icontains']
}
filter_order_by = ['name', '-name']
interfaces = (graphene.relay.Node,)
def resolve_original_id(self, args, context, info):
return self.id
class CreateWeekday(graphene.relay.ClientIDMutation):
class Input:
name = graphene.String(required=True)
weekday = graphene.Field(WeekdayNode)
errors = graphene.List(graphene.String)
@classmethod
def mutate_and_get_payload(cls, input, context, info):
try:
weekday = Weekday()
weekday.name = input.get('name')
weekday.full_clean()
weekday.save()
return Weekday(weekday=weekday)
except ValidationError as e:
return Weekday(weekday=None, errors=get_errors(e))
|
Create weekday node and createweekday class
|
Create weekday node and createweekday class
|
Python
|
mit
|
teamtaverna/core
|
Create weekday node and createweekday class
|
from django.core.exceptions import ValidationError
import graphene
from graphene_django import DjangoObjectType
from app.timetables.models import Weekday
from .utils import get_errors
class WeekdayNode(DjangoObjectType):
original_id = graphene.Int()
class Meta:
model = Weekday
filter_fields = {
'name': ['icontains']
}
filter_order_by = ['name', '-name']
interfaces = (graphene.relay.Node,)
def resolve_original_id(self, args, context, info):
return self.id
class CreateWeekday(graphene.relay.ClientIDMutation):
class Input:
name = graphene.String(required=True)
weekday = graphene.Field(WeekdayNode)
errors = graphene.List(graphene.String)
@classmethod
def mutate_and_get_payload(cls, input, context, info):
try:
weekday = Weekday()
weekday.name = input.get('name')
weekday.full_clean()
weekday.save()
return Weekday(weekday=weekday)
except ValidationError as e:
return Weekday(weekday=None, errors=get_errors(e))
|
<commit_before><commit_msg>Create weekday node and createweekday class<commit_after>
|
from django.core.exceptions import ValidationError
import graphene
from graphene_django import DjangoObjectType
from app.timetables.models import Weekday
from .utils import get_errors
class WeekdayNode(DjangoObjectType):
original_id = graphene.Int()
class Meta:
model = Weekday
filter_fields = {
'name': ['icontains']
}
filter_order_by = ['name', '-name']
interfaces = (graphene.relay.Node,)
def resolve_original_id(self, args, context, info):
return self.id
class CreateWeekday(graphene.relay.ClientIDMutation):
class Input:
name = graphene.String(required=True)
weekday = graphene.Field(WeekdayNode)
errors = graphene.List(graphene.String)
@classmethod
def mutate_and_get_payload(cls, input, context, info):
try:
weekday = Weekday()
weekday.name = input.get('name')
weekday.full_clean()
weekday.save()
return Weekday(weekday=weekday)
except ValidationError as e:
return Weekday(weekday=None, errors=get_errors(e))
|
Create weekday node and createweekday classfrom django.core.exceptions import ValidationError
import graphene
from graphene_django import DjangoObjectType
from app.timetables.models import Weekday
from .utils import get_errors
class WeekdayNode(DjangoObjectType):
original_id = graphene.Int()
class Meta:
model = Weekday
filter_fields = {
'name': ['icontains']
}
filter_order_by = ['name', '-name']
interfaces = (graphene.relay.Node,)
def resolve_original_id(self, args, context, info):
return self.id
class CreateWeekday(graphene.relay.ClientIDMutation):
class Input:
name = graphene.String(required=True)
weekday = graphene.Field(WeekdayNode)
errors = graphene.List(graphene.String)
@classmethod
def mutate_and_get_payload(cls, input, context, info):
try:
weekday = Weekday()
weekday.name = input.get('name')
weekday.full_clean()
weekday.save()
return Weekday(weekday=weekday)
except ValidationError as e:
return Weekday(weekday=None, errors=get_errors(e))
|
<commit_before><commit_msg>Create weekday node and createweekday class<commit_after>from django.core.exceptions import ValidationError
import graphene
from graphene_django import DjangoObjectType
from app.timetables.models import Weekday
from .utils import get_errors
class WeekdayNode(DjangoObjectType):
original_id = graphene.Int()
class Meta:
model = Weekday
filter_fields = {
'name': ['icontains']
}
filter_order_by = ['name', '-name']
interfaces = (graphene.relay.Node,)
def resolve_original_id(self, args, context, info):
return self.id
class CreateWeekday(graphene.relay.ClientIDMutation):
class Input:
name = graphene.String(required=True)
weekday = graphene.Field(WeekdayNode)
errors = graphene.List(graphene.String)
@classmethod
def mutate_and_get_payload(cls, input, context, info):
try:
weekday = Weekday()
weekday.name = input.get('name')
weekday.full_clean()
weekday.save()
return Weekday(weekday=weekday)
except ValidationError as e:
return Weekday(weekday=None, errors=get_errors(e))
|
|
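A sketch of how the mutation above could be wired into a schema and called; the Query/Mutation classes and the createWeekday field name are assumptions, not part of the commit (graphene's relay convention is for mutate_and_get_payload to return an instance of the mutation class itself):
import graphene
from app.api.cruds.weekday_crud import CreateWeekday
class Query(graphene.ObjectType):
    node = graphene.relay.Node.Field()
class Mutation(graphene.ObjectType):
    create_weekday = CreateWeekday.Field()
schema = graphene.Schema(query=Query, mutation=Mutation)
# Example document a client could send:
# mutation { createWeekday(input: {name: "Monday"}) { weekday { name originalId } errors } }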
4e29832d5a6ace0bc0adc44a2525872f47dc9b03
|
scripts/plot_config.py
|
scripts/plot_config.py
|
#!/usr/bin/env python
import logging
import sys
import os
import inspect
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))
cmd_folder = os.path.realpath(os.path.join(cmd_folder, ".."))
if cmd_folder not in sys.path:
sys.path.insert(0,cmd_folder)
import pickle
import argparse
from graphviz import Digraph
def load_model(model_fn: str):
'''
loads saved model
Arguments
--------
model_fn: str
file name of saved model
Returns
-------
scenario, feature_pre_pipeline, pre_solver, selector, config
'''
with open(model_fn, "br") as fp:
scenario, feature_pre_pipeline, pre_solver, selector, config = pickle.load(
fp)
for fpp in feature_pre_pipeline:
fpp.logger = logging.getLogger("Feature Preprocessing")
if pre_solver:
pre_solver.logger = logging.getLogger("Aspeed PreSolving")
selector.logger = logging.getLogger("Selector")
return scenario, feature_pre_pipeline, pre_solver, selector, config
def visualize(feature_pre_pipeline, pre_solver, selector):
'''
visualize all loaded components
Arguments
---------
feature_pre_pipeline: list
list of fitted feature preprocessors
pre_solver: Aspeed
pre solver object with a saved static schedule
selector: autofolio.selector.*
fitted selector object
'''
dot = Digraph(comment='AutoFolio')
for idx,fpp in enumerate(feature_pre_pipeline):
dot.node('fpp_%d' %(idx), fpp.__class__.__name__)
if idx > 0:
dot.edge('fpp_%d' %(idx-1),'fpp_%d' %(idx))
for idx,presolver in enumerate(pre_solver.schedule):
dot.node('pre_%d' %(idx), "%s for %d sec" %(presolver[0], presolver[1]))
if idx > 0:
dot.edge('pre_%d' %(idx-1),'pre_%d' %(idx))
elif feature_pre_pipeline:
dot.edge('fpp_%d' %(len(feature_pre_pipeline)-1),'pre_%d' %(idx))
if pre_solver:
dot.edge('pre_%d' %(len(pre_solver.schedule)-1), selector.__class__.__name__)
dot.render('test-output/autofolio', view=True)
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--load", type=str, default=None,
help="loads model (from --save); other modes are disabled with this options")
args = parser.parse_args()
scenario, feature_pre_pipeline, pre_solver, selector, config = load_model(args.load)
print(config)
visualize(feature_pre_pipeline, pre_solver, selector)
|
ADD a prototype for a plotting script of the learned model
|
ADD a prototype for a plotting script of the learned model
|
Python
|
bsd-2-clause
|
mlindauer/AutoFolio
|
ADD a prototype for a plotting script of the learned model
|
#!/usr/bin/env python
import logging
import sys
import os
import inspect
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))
cmd_folder = os.path.realpath(os.path.join(cmd_folder, ".."))
if cmd_folder not in sys.path:
sys.path.insert(0,cmd_folder)
import pickle
import argparse
from graphviz import Digraph
def load_model(model_fn: str):
'''
loads saved model
Arguments
--------
model_fn: str
file name of saved model
Returns
-------
scenario, feature_pre_pipeline, pre_solver, selector, config
'''
with open(model_fn, "br") as fp:
scenario, feature_pre_pipeline, pre_solver, selector, config = pickle.load(
fp)
for fpp in feature_pre_pipeline:
fpp.logger = logging.getLogger("Feature Preprocessing")
if pre_solver:
pre_solver.logger = logging.getLogger("Aspeed PreSolving")
selector.logger = logging.getLogger("Selector")
return scenario, feature_pre_pipeline, pre_solver, selector, config
def visualize(feature_pre_pipeline, pre_solver, selector):
'''
visualize all loaded components
Arguments
---------
feature_pre_pipeline: list
list of fitted feature preprocessors
pre_solver: Aspeed
pre solver object with a saved static schedule
selector: autofolio.selector.*
fitted selector object
'''
dot = Digraph(comment='AutoFolio')
for idx,fpp in enumerate(feature_pre_pipeline):
dot.node('fpp_%d' %(idx), fpp.__class__.__name__)
if idx > 0:
dot.edge('fpp_%d' %(idx-1),'fpp_%d' %(idx))
for idx,presolver in enumerate(pre_solver.schedule):
dot.node('pre_%d' %(idx), "%s for %d sec" %(presolver[0], presolver[1]))
if idx > 0:
dot.edge('pre_%d' %(idx-1),'pre_%d' %(idx))
elif feature_pre_pipeline:
dot.edge('fpp_%d' %(len(feature_pre_pipeline)-1),'pre_%d' %(idx))
if pre_solver:
dot.edge('pre_%d' %(len(pre_solver.schedule)-1), selector.__class__.__name__)
dot.render('test-output/autofolio', view=True)
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--load", type=str, default=None,
help="loads model (from --save); other modes are disabled with this options")
args = parser.parse_args()
scenario, feature_pre_pipeline, pre_solver, selector, config = load_model(args.load)
print(config)
visualize(feature_pre_pipeline, pre_solver, selector)
|
<commit_before><commit_msg>ADD a prototype for a plotting script of the learned model<commit_after>
|
#!/usr/bin/env python
import logging
import sys
import os
import inspect
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))
cmd_folder = os.path.realpath(os.path.join(cmd_folder, ".."))
if cmd_folder not in sys.path:
sys.path.insert(0,cmd_folder)
import pickle
import argparse
from graphviz import Digraph
def load_model(model_fn: str):
'''
loads saved model
Arguments
--------
model_fn: str
file name of saved model
Returns
-------
scenario, feature_pre_pipeline, pre_solver, selector, config
'''
with open(model_fn, "br") as fp:
scenario, feature_pre_pipeline, pre_solver, selector, config = pickle.load(
fp)
for fpp in feature_pre_pipeline:
fpp.logger = logging.getLogger("Feature Preprocessing")
if pre_solver:
pre_solver.logger = logging.getLogger("Aspeed PreSolving")
selector.logger = logging.getLogger("Selector")
return scenario, feature_pre_pipeline, pre_solver, selector, config
def visualize(feature_pre_pipeline, pre_solver, selector):
'''
visualize all loaded components
Arguments
---------
feature_pre_pipeline: list
list of fitted feature preprocessors
pre_solver: Aspeed
pre solver object with a saved static schedule
selector: autofolio.selector.*
fitted selector object
'''
dot = Digraph(comment='AutoFolio')
for idx,fpp in enumerate(feature_pre_pipeline):
dot.node('fpp_%d' %(idx), fpp.__class__.__name__)
if idx > 0:
dot.edge('fpp_%d' %(idx-1),'fpp_%d' %(idx))
for idx,presolver in enumerate(pre_solver.schedule):
dot.node('pre_%d' %(idx), "%s for %d sec" %(presolver[0], presolver[1]))
if idx > 0:
dot.edge('pre_%d' %(idx-1),'pre_%d' %(idx))
elif feature_pre_pipeline:
dot.edge('fpp_%d' %(len(feature_pre_pipeline)-1),'pre_%d' %(idx))
if pre_solver:
dot.edge('pre_%d' %(len(pre_solver.schedule)-1), selector.__class__.__name__)
dot.render('test-output/autofolio', view=True)
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--load", type=str, default=None,
help="loads model (from --save); other modes are disabled with this options")
args = parser.parse_args()
scenario, feature_pre_pipeline, pre_solver, selector, config = load_model(args.load)
print(config)
visualize(feature_pre_pipeline, pre_solver, selector)
|
ADD a prototype for a plotting script of the learned model#!/usr/bin/env python
import logging
import sys
import os
import inspect
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))
cmd_folder = os.path.realpath(os.path.join(cmd_folder, ".."))
if cmd_folder not in sys.path:
sys.path.insert(0,cmd_folder)
import pickle
import argparse
from graphviz import Digraph
def load_model(model_fn: str):
'''
loads saved model
Arguments
--------
model_fn: str
file name of saved model
Returns
-------
scenario, feature_pre_pipeline, pre_solver, selector, config
'''
with open(model_fn, "br") as fp:
scenario, feature_pre_pipeline, pre_solver, selector, config = pickle.load(
fp)
for fpp in feature_pre_pipeline:
fpp.logger = logging.getLogger("Feature Preprocessing")
if pre_solver:
pre_solver.logger = logging.getLogger("Aspeed PreSolving")
selector.logger = logging.getLogger("Selector")
return scenario, feature_pre_pipeline, pre_solver, selector, config
def visualize(feature_pre_pipeline, pre_solver, selector):
'''
visualize all loaded components
Arguments
---------
feature_pre_pipeline: list
list of fitted feature preprocessors
pre_solver: Aspeed
pre solver object with a saved static schedule
selector: autofolio.selector.*
fitted selector object
'''
dot = Digraph(comment='AutoFolio')
for idx,fpp in enumerate(feature_pre_pipeline):
dot.node('fpp_%d' %(idx), fpp.__class__.__name__)
if idx > 0:
dot.edge('fpp_%d' %(idx-1),'fpp_%d' %(idx))
for idx,presolver in enumerate(pre_solver.schedule):
dot.node('pre_%d' %(idx), "%s for %d sec" %(presolver[0], presolver[1]))
if idx > 0:
dot.edge('pre_%d' %(idx-1),'pre_%d' %(idx))
elif feature_pre_pipeline:
dot.edge('fpp_%d' %(len(feature_pre_pipeline)-1),'pre_%d' %(idx))
if pre_solver:
dot.edge('pre_%d' %(len(pre_solver.schedule)-1), selector.__class__.__name__)
dot.render('test-output/autofolio', view=True)
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--load", type=str, default=None,
help="loads model (from --save); other modes are disabled with this options")
args = parser.parse_args()
scenario, feature_pre_pipeline, pre_solver, selector, config = load_model(args.load)
print(config)
visualize(feature_pre_pipeline, pre_solver, selector)
|
<commit_before><commit_msg>ADD a prototype for a plotting script of the learned model<commit_after>#!/usr/bin/env python
import logging
import sys
import os
import inspect
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))
cmd_folder = os.path.realpath(os.path.join(cmd_folder, ".."))
if cmd_folder not in sys.path:
sys.path.insert(0,cmd_folder)
import pickle
import argparse
from graphviz import Digraph
def load_model(model_fn: str):
'''
loads saved model
Arguments
--------
model_fn: str
file name of saved model
Returns
-------
scenario, feature_pre_pipeline, pre_solver, selector, config
'''
with open(model_fn, "br") as fp:
scenario, feature_pre_pipeline, pre_solver, selector, config = pickle.load(
fp)
for fpp in feature_pre_pipeline:
fpp.logger = logging.getLogger("Feature Preprocessing")
if pre_solver:
pre_solver.logger = logging.getLogger("Aspeed PreSolving")
selector.logger = logging.getLogger("Selector")
return scenario, feature_pre_pipeline, pre_solver, selector, config
def visualize(feature_pre_pipeline, pre_solver, selector):
'''
visualize all loaded components
Arguments
---------
feature_pre_pipeline: list
list of fitted feature preprocessors
pre_solver: Aspeed
pre solver object with a saved static schedule
selector: autofolio.selector.*
fitted selector object
'''
dot = Digraph(comment='AutoFolio')
for idx,fpp in enumerate(feature_pre_pipeline):
dot.node('fpp_%d' %(idx), fpp.__class__.__name__)
if idx > 0:
dot.edge('fpp_%d' %(idx-1),'fpp_%d' %(idx))
for idx,presolver in enumerate(pre_solver.schedule):
dot.node('pre_%d' %(idx), "%s for %d sec" %(presolver[0], presolver[1]))
if idx > 0:
dot.edge('pre_%d' %(idx-1),'pre_%d' %(idx))
elif feature_pre_pipeline:
dot.edge('fpp_%d' %(len(feature_pre_pipeline)-1),'pre_%d' %(idx))
if pre_solver:
dot.edge('pre_%d' %(len(pre_solver.schedule)-1), selector.__class__.__name__)
dot.render('test-output/autofolio', view=True)
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--load", type=str, default=None,
help="loads model (from --save); other modes are disabled with this options")
args = parser.parse_args()
scenario, feature_pre_pipeline, pre_solver, selector, config = load_model(args.load)
print(config)
visualize(feature_pre_pipeline, pre_solver, selector)
|
|
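Intended usage, as far as it can be inferred from the argument parser and the render call above; the model file name is a placeholder:
# python scripts/plot_config.py --load autofolio_model.pkl
# or, programmatically, reusing the helpers defined in the script:
scenario, pipeline, pre_solver, selector, config = load_model('autofolio_model.pkl')
print(config)
visualize(pipeline, pre_solver, selector)  # writes and opens test-output/autofolio (PDF by default); needs the graphviz binaries installed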
1b0e6966d6bd73598e3f9ec49a73a78b0478da10
|
discover_road_runner/famishius/tests/test_vulgaris.py
|
discover_road_runner/famishius/tests/test_vulgaris.py
|
from django.test import SimpleTestCase
class OtherTest(SimpleTestCase):
"""
In reality, apps often can have so many tests this is a more practical
organising pattern.
"""
def test_success(self):
self.assertEqual(1 + 1, 2)
|
Document from the reality that while breaking apps down is a good idea, even then you often have a sufficient number of topic areas to cover that can't be easily split into more apps, i.e. TestCase classes are insufficient to group the relevant tests together, and a 5k tests.py file is almost certainly a bad idea.
|
Document from the reality that while breaking apps down is a good idea, even then you often have a sufficient number of topic areas to cover that can't be easily split into more apps, i.e. TestCase classes are insufficient to group the relevant tests together, and a 5k tests.py file is almost certainly a bad idea.
|
Python
|
mit
|
pzrq/discover-road-runner
|
Document from the reality that while breaking apps down is a good idea, even then you often have a sufficient number of topic areas to cover that can't be easily split into more apps, i.e. TestCase classes are insufficient to group the relevant tests together, and a 5k tests.py file is almost certainly a bad idea.
|
from django.test import SimpleTestCase
class OtherTest(SimpleTestCase):
"""
In reality, apps often can have so many tests this is a more practical
organising pattern.
"""
def test_success(self):
self.assertEqual(1 + 1, 2)
|
<commit_before><commit_msg>Document from the reality that while breaking apps down is a good idea, even then you often have a sufficient number of topic areas to cover that can't be easily split into more apps, i.e. TestCase classes are insufficient to group the relevant tests together, and a 5k tests.py file is almost certainly a bad idea.<commit_after>
|
from django.test import SimpleTestCase
class OtherTest(SimpleTestCase):
"""
In reality, apps often can have so many tests this is a more practical
organising pattern.
"""
def test_success(self):
self.assertEqual(1 + 1, 2)
|
Document from the reality that while breaking apps down is a good idea, even then you often have a sufficient number of topic areas to cover that can't be easily split into more apps, i.e. TestCase classes are insufficient to group the relevant tests together, and a 5k tests.py file is almost certainly a bad idea.from django.test import SimpleTestCase
class OtherTest(SimpleTestCase):
"""
In reality, apps often can have so many tests this is a more practical
organising pattern.
"""
def test_success(self):
self.assertEqual(1 + 1, 2)
|
<commit_before><commit_msg>Document from the reality that while breaking apps down is a good idea, even then you often have a sufficient number of topic areas to cover that can't be easily split into more apps, i.e. TestCase classes are insufficient to group the relevant tests together, and a 5k tests.py file is almost certainly a bad idea.<commit_after>from django.test import SimpleTestCase
class OtherTest(SimpleTestCase):
"""
In reality, apps often can have so many tests this is a more practical
organising pattern.
"""
def test_success(self):
self.assertEqual(1 + 1, 2)
|
|
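To make the organisational point concrete: with tests split into modules like this, a single topic area can be run on its own; the dotted path below is inferred from the file location and assumes Django settings are configured:
# python manage.py test famishius.tests.test_vulgaris
# or, programmatically:
from django.core.management import call_command
call_command('test', 'famishius.tests.test_vulgaris.OtherTest')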
aedf1819edd0bf1668e530cbdb7f6998410e5ada
|
fellowms/tests_urls.py
|
fellowms/tests_urls.py
|
import unittest
from django.test import Client
from django.contrib.auth.models import User
ADMIN_PASSWORD = '123456'
class SimpleTest(unittest.TestCase):
@classmethod
def setUpClass(self):
User.objects.create_superuser('admin',
'admin@fake.software.ac.uk',
ADMIN_PASSWORD)
def setUp(self):
# Every test needs a client.
self.client = Client()
self.admin = Client()
self.admin.login(username='admin',
password=ADMIN_PASSWORD)
def test_index(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_sign_in(self):
response = self.client.get('/sign_in/')
self.assertEqual(response.status_code, 200)
def test_fellow(self):
response = self.client.get('/fellow/')
self.assertEqual(response.status_code, 302) # Need admin permission
response = self.admin.get('/fellow/')
self.assertEqual(response.status_code, 200)
def test_event(self):
response = self.client.get('/event/')
self.assertEqual(response.status_code, 200)
def test_expense(self):
response = self.client.get('/expense/')
self.assertEqual(response.status_code, 200)
|
Add some tests for url.
|
Add some tests for url.
|
Python
|
bsd-3-clause
|
softwaresaved/fat,softwaresaved/fat,softwaresaved/fat,softwaresaved/fat
|
Add some tests for url.
|
import unittest
from django.test import Client
from django.contrib.auth.models import User
ADMIN_PASSWORD = '123456'
class SimpleTest(unittest.TestCase):
@classmethod
def setUpClass(self):
User.objects.create_superuser('admin',
'admin@fake.software.ac.uk',
ADMIN_PASSWORD)
def setUp(self):
# Every test needs a client.
self.client = Client()
self.admin = Client()
self.admin.login(username='admin',
password=ADMIN_PASSWORD)
def test_index(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_sign_in(self):
response = self.client.get('/sign_in/')
self.assertEqual(response.status_code, 200)
def test_fellow(self):
response = self.client.get('/fellow/')
self.assertEqual(response.status_code, 302) # Need admin permission
response = self.admin.get('/fellow/')
self.assertEqual(response.status_code, 200)
def test_event(self):
response = self.client.get('/event/')
self.assertEqual(response.status_code, 200)
def test_expense(self):
response = self.client.get('/expense/')
self.assertEqual(response.status_code, 200)
|
<commit_before><commit_msg>Add some tests for url.<commit_after>
|
import unittest
from django.test import Client
from django.contrib.auth.models import User
ADMIN_PASSWORD = '123456'
class SimpleTest(unittest.TestCase):
@classmethod
def setUpClass(self):
User.objects.create_superuser('admin',
'admin@fake.software.ac.uk',
ADMIN_PASSWORD)
def setUp(self):
# Every test needs a client.
self.client = Client()
self.admin = Client()
self.admin.login(username='admin',
password=ADMIN_PASSWORD)
def test_index(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_sign_in(self):
response = self.client.get('/sign_in/')
self.assertEqual(response.status_code, 200)
def test_fellow(self):
response = self.client.get('/fellow/')
self.assertEqual(response.status_code, 302) # Need admin permission
response = self.admin.get('/fellow/')
self.assertEqual(response.status_code, 200)
def test_event(self):
response = self.client.get('/event/')
self.assertEqual(response.status_code, 200)
def test_expense(self):
response = self.client.get('/expense/')
self.assertEqual(response.status_code, 200)
|
Add some tests for url.import unittest
from django.test import Client
from django.contrib.auth.models import User
ADMIN_PASSWORD = '123456'
class SimpleTest(unittest.TestCase):
@classmethod
def setUpClass(self):
User.objects.create_superuser('admin',
'admin@fake.software.ac.uk',
ADMIN_PASSWORD)
def setUp(self):
# Every test needs a client.
self.client = Client()
self.admin = Client()
self.admin.login(username='admin',
password=ADMIN_PASSWORD)
def test_index(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_sign_in(self):
response = self.client.get('/sign_in/')
self.assertEqual(response.status_code, 200)
def test_fellow(self):
response = self.client.get('/fellow/')
self.assertEqual(response.status_code, 302) # Need admin permission
response = self.admin.get('/fellow/')
self.assertEqual(response.status_code, 200)
def test_event(self):
response = self.client.get('/event/')
self.assertEqual(response.status_code, 200)
def test_expense(self):
response = self.client.get('/expense/')
self.assertEqual(response.status_code, 200)
|
<commit_before><commit_msg>Add some tests for url.<commit_after>import unittest
from django.test import Client
from django.contrib.auth.models import User
ADMIN_PASSWORD = '123456'
class SimpleTest(unittest.TestCase):
@classmethod
def setUpClass(self):
User.objects.create_superuser('admin',
'admin@fake.software.ac.uk',
ADMIN_PASSWORD)
def setUp(self):
# Every test needs a client.
self.client = Client()
self.admin = Client()
self.admin.login(username='admin',
password=ADMIN_PASSWORD)
def test_index(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_sign_in(self):
response = self.client.get('/sign_in/')
self.assertEqual(response.status_code, 200)
def test_fellow(self):
response = self.client.get('/fellow/')
self.assertEqual(response.status_code, 302) # Need admin permission
response = self.admin.get('/fellow/')
self.assertEqual(response.status_code, 200)
def test_event(self):
response = self.client.get('/event/')
self.assertEqual(response.status_code, 200)
def test_expense(self):
response = self.client.get('/expense/')
self.assertEqual(response.status_code, 200)
|
|
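The per-URL assertions above can also be collapsed into a small data-driven helper; this is only a sketch of an alternative, with the expected status codes copied from the tests:
def assert_statuses(client, expectations):
    for url, expected in expectations:
        response = client.get(url)
        assert response.status_code == expected, (url, response.status_code)
# e.g. inside a test method:
# assert_statuses(self.client, [('/', 200), ('/sign_in/', 200), ('/event/', 200), ('/expense/', 200), ('/fellow/', 302)])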
7561e95b6fc12852c82ce4d16c38bcb223514358
|
parallel.py
|
parallel.py
|
from IPython import parallel
from sklearn.datasets import fetch_20newsgroups_vectorized
def get_results():
# get data
data = fetch_20newsgroups_vectorized(remove=('headers',
'footers',
'quotes'))
alphas = [1E-4, 1E-3, 1E-2, 1E-1]
# set up dview for imports
clients = parallel.Client()
dview = clients[:]
with dview.sync_imports():
# doesn't seem to like import numpy as np, using numpy instead
import numpy
from sklearn.naive_bayes import MultinomialNB
from sklearn.cross_validation import cross_val_score
dview.block = True
# send data to clients
dview['data'] = data
# set up load balanced view for parallel processing
lview = clients.load_balanced_view()
# set blocking to True to get all results once processing is done
lview.block = True
results = lview.map(results_helper, alphas)
# results = dview.apply(results_helper, alphas)
return results
def results_helper(alpha):
clf = MultinomialNB(alpha)
result = (alpha, numpy.mean(cross_val_score(clf, data.data, data.target)))
return result
if __name__ == '__main__':
results = get_results()
best_result = (0, 0)
for result in results:
if result[1] > best_result[1]:
best_result = result
print "\nThe best result is:"
print "alpha = {}".format(best_result[0])
print "score = {}%".format(round(best_result[1] * 100, 2))
|
Create load balanced function to perform cross validation. Use main block to display best result
|
Create load balanced function to perform cross validation. Use main block to display best result
|
Python
|
mit
|
MikeDelaney/sentiment
|
Create load balanced function to perform cross validation. Use main block to display best result
|
from IPython import parallel
from sklearn.datasets import fetch_20newsgroups_vectorized
def get_results():
# get data
data = fetch_20newsgroups_vectorized(remove=('headers',
'footers',
'quotes'))
alphas = [1E-4, 1E-3, 1E-2, 1E-1]
# set up dview for imports
clients = parallel.Client()
dview = clients[:]
with dview.sync_imports():
# doesn't seem to like import numpy as np, using numpy instead
import numpy
from sklearn.naive_bayes import MultinomialNB
from sklearn.cross_validation import cross_val_score
dview.block = True
# send data to clients
dview['data'] = data
# set up load balanced view for parallel processing
lview = clients.load_balanced_view()
# set blocking to True to get all results once processing is done
lview.block = True
results = lview.map(results_helper, alphas)
# results = dview.apply(results_helper, alphas)
return results
def results_helper(alpha):
clf = MultinomialNB(alpha)
result = (alpha, numpy.mean(cross_val_score(clf, data.data, data.target)))
return result
if __name__ == '__main__':
results = get_results()
best_result = (0, 0)
for result in results:
if result[1] > best_result[1]:
best_result = result
print "\nThe best result is:"
print "alpha = {}".format(best_result[0])
print "score = {}%".format(round(best_result[1] * 100, 2))
|
<commit_before><commit_msg>Create load balanced function to perform cross validation. Use main block to display best result<commit_after>
|
from IPython import parallel
from sklearn.datasets import fetch_20newsgroups_vectorized
def get_results():
# get data
data = fetch_20newsgroups_vectorized(remove=('headers',
'footers',
'quotes'))
alphas = [1E-4, 1E-3, 1E-2, 1E-1]
# set up dview for imports
clients = parallel.Client()
dview = clients[:]
with dview.sync_imports():
# doesn't seem to like import numpy as np, using numpy instead
import numpy
from sklearn.naive_bayes import MultinomialNB
from sklearn.cross_validation import cross_val_score
dview.block = True
# send data to clients
dview['data'] = data
# set up load balanced view for parallel processing
lview = clients.load_balanced_view()
# set blocking to True to get all results once processing is done
lview.block = True
results = lview.map(results_helper, alphas)
# results = dview.apply(results_helper, alphas)
return results
def results_helper(alpha):
clf = MultinomialNB(alpha)
result = (alpha, numpy.mean(cross_val_score(clf, data.data, data.target)))
return result
if __name__ == '__main__':
results = get_results()
best_result = (0, 0)
for result in results:
if result[1] > best_result[1]:
best_result = result
print "\nThe best result is:"
print "alpha = {}".format(best_result[0])
print "score = {}%".format(round(best_result[1] * 100, 2))
|
Create load balanced function to perform cross validation. Use main block to display best resultfrom IPython import parallel
from sklearn.datasets import fetch_20newsgroups_vectorized
def get_results():
# get data
data = fetch_20newsgroups_vectorized(remove=('headers',
'footers',
'quotes'))
alphas = [1E-4, 1E-3, 1E-2, 1E-1]
# set up dview for imports
clients = parallel.Client()
dview = clients[:]
with dview.sync_imports():
# doesn't seem to like import numpy as np, using numpy instead
import numpy
from sklearn.naive_bayes import MultinomialNB
from sklearn.cross_validation import cross_val_score
dview.block = True
# send data to clients
dview['data'] = data
# set up load balanced view for parallel processing
lview = clients.load_balanced_view()
# set blocking to True to get all results once processing is done
lview.block = True
results = lview.map(results_helper, alphas)
# results = dview.apply(results_helper, alphas)
return results
def results_helper(alpha):
clf = MultinomialNB(alpha)
result = (alpha, numpy.mean(cross_val_score(clf, data.data, data.target)))
return result
if __name__ == '__main__':
results = get_results()
best_result = (0, 0)
for result in results:
if result[1] > best_result[1]:
best_result = result
print "\nThe best result is:"
print "alpha = {}".format(best_result[0])
print "score = {}%".format(round(best_result[1] * 100, 2))
|
<commit_before><commit_msg>Create load balanced function to perform cross validation. Use main block to display best result<commit_after>from IPython import parallel
from sklearn.datasets import fetch_20newsgroups_vectorized
def get_results():
# get data
data = fetch_20newsgroups_vectorized(remove=('headers',
'footers',
'quotes'))
alphas = [1E-4, 1E-3, 1E-2, 1E-1]
# set up dview for imports
clients = parallel.Client()
dview = clients[:]
with dview.sync_imports():
# doesn't seem to like import numpy as np, using numpy instead
import numpy
from sklearn.naive_bayes import MultinomialNB
from sklearn.cross_validation import cross_val_score
dview.block = True
# send data to clients
dview['data'] = data
# set up load balanced view for parallel processing
lview = clients.load_balanced_view()
# set blocking to True to get all results once processing is done
lview.block = True
results = lview.map(results_helper, alphas)
# results = dview.apply(results_helper, alphas)
return results
def results_helper(alpha):
clf = MultinomialNB(alpha)
result = (alpha, numpy.mean(cross_val_score(clf, data.data, data.target)))
return result
if __name__ == '__main__':
results = get_results()
best_result = (0, 0)
for result in results:
if result[1] > best_result[1]:
best_result = result
print "\nThe best result is:"
print "alpha = {}".format(best_result[0])
print "score = {}%".format(round(best_result[1] * 100, 2))
|
|
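The script assumes an IPython parallel cluster is already running; a typical session (engine count arbitrary) looks like this:
#   ipcluster start -n 4        # start local engines first
#   python parallel.py          # then run the grid search over alphas
from IPython import parallel
print parallel.Client().ids     # quick check that the engines are up, e.g. [0, 1, 2, 3]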
00997a5416447ecfb00565bface9ee109a187b61
|
tests/test_experiment_groups/test_search_managers.py
|
tests/test_experiment_groups/test_search_managers.py
|
from django.test import override_settings
from polyaxon_schemas.settings import SettingsConfig
from experiment_groups.iteration_managers import (
HyperbandIterationManager,
get_search_iteration_manager
)
from experiment_groups.models import ExperimentGroupIteration
from experiment_groups.search_managers import GridSearchManager, get_search_algorithm_manager, \
RandomSearchManager, HyperbandSearchManager
from factories.factory_experiment_groups import ExperimentGroupFactory
from factories.factory_experiments import ExperimentFactory
from factories.fixtures import (
experiment_group_spec_content_early_stopping,
experiment_group_spec_content_hyperband
)
from tests.utils import BaseTest
@override_settings(DEPLOY_RUNNER=False)
class TestIterationManagers(BaseTest):
def test_get_search_iteration_manager(self):
# Grid search
experiment_group = ExperimentGroupFactory()
assert isinstance(get_search_algorithm_manager(experiment_group.params_config),
GridSearchManager)
# Random search
experiment_group = ExperimentGroupFactory(
content=experiment_group_spec_content_early_stopping)
assert isinstance(get_search_algorithm_manager(experiment_group.params_config),
RandomSearchManager)
# Hyperband
experiment_group = ExperimentGroupFactory(
content=experiment_group_spec_content_hyperband)
assert isinstance(get_search_algorithm_manager(experiment_group.params_config),
HyperbandSearchManager)
@override_settings(DEPLOY_RUNNER=False)
class TestGridSearchManager(BaseTest):
def test_get_suggestions(self):
params_config = SettingsConfig.from_dict({
'concurrent_experiments': 2,
'grid_search': {'n_experiments': 10},
'matrix': {'feature': {'values': [1, 2, 3]}}
})
manager = GridSearchManager(params_config=params_config)
assert len(manager.get_suggestions()) == 3
params_config = SettingsConfig.from_dict({
'concurrent_experiments': 2,
'grid_search': {'n_experiments': 10},
'matrix': {
'feature1': {'values': [1, 2, 3]},
'feature2': {'linspace': [1, 2, 5]},
'feature3': {'range': [1, 5, 1]}
}
})
manager = GridSearchManager(params_config=params_config)
assert len(manager.get_suggestions()) == 10
|
Add experiment groups search managers tests
|
Add experiment groups search managers tests
|
Python
|
apache-2.0
|
polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon
|
Add experiment groups search managers tests
|
from django.test import override_settings
from polyaxon_schemas.settings import SettingsConfig
from experiment_groups.iteration_managers import (
HyperbandIterationManager,
get_search_iteration_manager
)
from experiment_groups.models import ExperimentGroupIteration
from experiment_groups.search_managers import GridSearchManager, get_search_algorithm_manager, \
RandomSearchManager, HyperbandSearchManager
from factories.factory_experiment_groups import ExperimentGroupFactory
from factories.factory_experiments import ExperimentFactory
from factories.fixtures import (
experiment_group_spec_content_early_stopping,
experiment_group_spec_content_hyperband
)
from tests.utils import BaseTest
@override_settings(DEPLOY_RUNNER=False)
class TestIterationManagers(BaseTest):
def test_get_search_iteration_manager(self):
# Grid search
experiment_group = ExperimentGroupFactory()
assert isinstance(get_search_algorithm_manager(experiment_group.params_config),
GridSearchManager)
# Random search
experiment_group = ExperimentGroupFactory(
content=experiment_group_spec_content_early_stopping)
assert isinstance(get_search_algorithm_manager(experiment_group.params_config),
RandomSearchManager)
# Hyperband
experiment_group = ExperimentGroupFactory(
content=experiment_group_spec_content_hyperband)
assert isinstance(get_search_algorithm_manager(experiment_group.params_config),
HyperbandSearchManager)
@override_settings(DEPLOY_RUNNER=False)
class TestGridSearchManager(BaseTest):
def test_get_suggestions(self):
params_config = SettingsConfig.from_dict({
'concurrent_experiments': 2,
'grid_search': {'n_experiments': 10},
'matrix': {'feature': {'values': [1, 2, 3]}}
})
manager = GridSearchManager(params_config=params_config)
assert len(manager.get_suggestions()) == 3
params_config = SettingsConfig.from_dict({
'concurrent_experiments': 2,
'grid_search': {'n_experiments': 10},
'matrix': {
'feature1': {'values': [1, 2, 3]},
'feature2': {'linspace': [1, 2, 5]},
'feature3': {'range': [1, 5, 1]}
}
})
manager = GridSearchManager(params_config=params_config)
assert len(manager.get_suggestions()) == 10
|
<commit_before><commit_msg>Add experiment groups search managers tests<commit_after>
|
from django.test import override_settings
from polyaxon_schemas.settings import SettingsConfig
from experiment_groups.iteration_managers import (
HyperbandIterationManager,
get_search_iteration_manager
)
from experiment_groups.models import ExperimentGroupIteration
from experiment_groups.search_managers import GridSearchManager, get_search_algorithm_manager, \
RandomSearchManager, HyperbandSearchManager
from factories.factory_experiment_groups import ExperimentGroupFactory
from factories.factory_experiments import ExperimentFactory
from factories.fixtures import (
experiment_group_spec_content_early_stopping,
experiment_group_spec_content_hyperband
)
from tests.utils import BaseTest
@override_settings(DEPLOY_RUNNER=False)
class TestIterationManagers(BaseTest):
def test_get_search_iteration_manager(self):
# Grid search
experiment_group = ExperimentGroupFactory()
assert isinstance(get_search_algorithm_manager(experiment_group.params_config),
GridSearchManager)
# Random search
experiment_group = ExperimentGroupFactory(
content=experiment_group_spec_content_early_stopping)
assert isinstance(get_search_algorithm_manager(experiment_group.params_config),
RandomSearchManager)
# Hyperband
experiment_group = ExperimentGroupFactory(
content=experiment_group_spec_content_hyperband)
assert isinstance(get_search_algorithm_manager(experiment_group.params_config),
HyperbandSearchManager)
@override_settings(DEPLOY_RUNNER=False)
class TestGridSearchManager(BaseTest):
def test_get_suggestions(self):
params_config = SettingsConfig.from_dict({
'concurrent_experiments': 2,
'grid_search': {'n_experiments': 10},
'matrix': {'feature': {'values': [1, 2, 3]}}
})
manager = GridSearchManager(params_config=params_config)
assert len(manager.get_suggestions()) == 3
params_config = SettingsConfig.from_dict({
'concurrent_experiments': 2,
'grid_search': {'n_experiments': 10},
'matrix': {
'feature1': {'values': [1, 2, 3]},
'feature2': {'linspace': [1, 2, 5]},
'feature3': {'range': [1, 5, 1]}
}
})
manager = GridSearchManager(params_config=params_config)
assert len(manager.get_suggestions()) == 10
|
Add experiment groups search managers testsfrom django.test import override_settings
from polyaxon_schemas.settings import SettingsConfig
from experiment_groups.iteration_managers import (
HyperbandIterationManager,
get_search_iteration_manager
)
from experiment_groups.models import ExperimentGroupIteration
from experiment_groups.search_managers import GridSearchManager, get_search_algorithm_manager, \
RandomSearchManager, HyperbandSearchManager
from factories.factory_experiment_groups import ExperimentGroupFactory
from factories.factory_experiments import ExperimentFactory
from factories.fixtures import (
experiment_group_spec_content_early_stopping,
experiment_group_spec_content_hyperband
)
from tests.utils import BaseTest
@override_settings(DEPLOY_RUNNER=False)
class TestIterationManagers(BaseTest):
def test_get_search_iteration_manager(self):
# Grid search
experiment_group = ExperimentGroupFactory()
assert isinstance(get_search_algorithm_manager(experiment_group.params_config),
GridSearchManager)
# Random search
experiment_group = ExperimentGroupFactory(
content=experiment_group_spec_content_early_stopping)
assert isinstance(get_search_algorithm_manager(experiment_group.params_config),
RandomSearchManager)
# Hyperband
experiment_group = ExperimentGroupFactory(
content=experiment_group_spec_content_hyperband)
assert isinstance(get_search_algorithm_manager(experiment_group.params_config),
HyperbandSearchManager)
@override_settings(DEPLOY_RUNNER=False)
class TestGridSearchManager(BaseTest):
def test_get_suggestions(self):
params_config = SettingsConfig.from_dict({
'concurrent_experiments': 2,
'grid_search': {'n_experiments': 10},
'matrix': {'feature': {'values': [1, 2, 3]}}
})
manager = GridSearchManager(params_config=params_config)
assert len(manager.get_suggestions()) == 3
params_config = SettingsConfig.from_dict({
'concurrent_experiments': 2,
'grid_search': {'n_experiments': 10},
'matrix': {
'feature1': {'values': [1, 2, 3]},
'feature2': {'linspace': [1, 2, 5]},
'feature3': {'range': [1, 5, 1]}
}
})
manager = GridSearchManager(params_config=params_config)
assert len(manager.get_suggestions()) == 10
|
<commit_before><commit_msg>Add experiment groups search managers tests<commit_after>from django.test import override_settings
from polyaxon_schemas.settings import SettingsConfig
from experiment_groups.iteration_managers import (
HyperbandIterationManager,
get_search_iteration_manager
)
from experiment_groups.models import ExperimentGroupIteration
from experiment_groups.search_managers import GridSearchManager, get_search_algorithm_manager, \
RandomSearchManager, HyperbandSearchManager
from factories.factory_experiment_groups import ExperimentGroupFactory
from factories.factory_experiments import ExperimentFactory
from factories.fixtures import (
experiment_group_spec_content_early_stopping,
experiment_group_spec_content_hyperband
)
from tests.utils import BaseTest
@override_settings(DEPLOY_RUNNER=False)
class TestIterationManagers(BaseTest):
def test_get_search_iteration_manager(self):
# Grid search
experiment_group = ExperimentGroupFactory()
assert isinstance(get_search_algorithm_manager(experiment_group.params_config),
GridSearchManager)
# Random search
experiment_group = ExperimentGroupFactory(
content=experiment_group_spec_content_early_stopping)
assert isinstance(get_search_algorithm_manager(experiment_group.params_config),
RandomSearchManager)
# Hyperband
experiment_group = ExperimentGroupFactory(
content=experiment_group_spec_content_hyperband)
assert isinstance(get_search_algorithm_manager(experiment_group.params_config),
HyperbandSearchManager)
@override_settings(DEPLOY_RUNNER=False)
class TestGridSearchManager(BaseTest):
def test_get_suggestions(self):
params_config = SettingsConfig.from_dict({
'concurrent_experiments': 2,
'grid_search': {'n_experiments': 10},
'matrix': {'feature': {'values': [1, 2, 3]}}
})
manager = GridSearchManager(params_config=params_config)
assert len(manager.get_suggestions()) == 3
params_config = SettingsConfig.from_dict({
'concurrent_experiments': 2,
'grid_search': {'n_experiments': 10},
'matrix': {
'feature1': {'values': [1, 2, 3]},
'feature2': {'linspace': [1, 2, 5]},
'feature3': {'range': [1, 5, 1]}
}
})
manager = GridSearchManager(params_config=params_config)
assert len(manager.get_suggestions()) == 10
|
|
6a7f25bd6303fd932632b2bd9dfe9ca010522c00
|
cache_analysis/cache_model.py
|
cache_analysis/cache_model.py
|
# Copyright 2015, Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import random
# This class models a single cache set of a cache that uses "Bit-PLRU", as
# described in https://en.wikipedia.org/wiki/Pseudo-LRU.
class CacheBitPLRU(object):
def __init__(self, num_ways):
self.mru_bits = [False] * num_ways
self.addr_to_way = {}
self.way_to_addr = [None] * num_ways
for way in xrange(num_ways):
self.mru_bits[way] = bool(random.randrange(2))
def _evict(self):
for way in xrange(len(self.mru_bits)):
if not self.mru_bits[way]:
return way
# All MRU bits were set, so reset them all to zero.
for way in xrange(len(self.mru_bits)):
self.mru_bits[way] = False
return 0
def lookup(self, addr):
way = self.addr_to_way.get(addr)
is_miss = way is None
if is_miss:
way = self._evict()
# Evict old address.
old_addr = self.way_to_addr[way]
if old_addr is not None:
del self.addr_to_way[old_addr]
self.addr_to_way[addr] = way
self.way_to_addr[way] = addr
# Mark as recently used.
self.mru_bits[way] = True
return is_miss
def mru_state(self):
return ''.join(str(int(x)) for x in self.mru_bits)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--show-state', '-s', action='store_true')
args = parser.parse_args()
ways = 12
cache = CacheBitPLRU(ways)
# Try a "rowhammer optimal" ordering of addresses to access. This should
# generate cache misses on just two specific addresses on each iteration.
addr_order = ([100] + range(ways - 1) +
[101] + range(ways - 1))
print 'ordering of addresses to access:', addr_order
for run in xrange(30):
results = []
for addr in addr_order:
results.append(cache.lookup(addr))
if args.show_state:
print 'state:', cache.mru_state()
print 'misses:', ''.join(str(int(x)) for x in results)
if __name__ == '__main__':
main()
|
Add model/simulation of cache eviction with a Bit-Pseudo-LRU cache
|
Add model/simulation of cache eviction with a Bit-Pseudo-LRU cache
|
Python
|
apache-2.0
|
kevinmel2000/rowhammer-test,shekkbuilder/rowhammer-test,kevinmel2000/rowhammer-test,kevinmel2000/rowhammer-test,kevinmel2000/rowhammer-test,shekkbuilder/rowhammer-test,shekkbuilder/rowhammer-test,shekkbuilder/rowhammer-test
|
Add model/simulation of cache eviction with a Bit-Pseudo-LRU cache
|
# Copyright 2015, Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import random
# This class models a single cache set of a cache that uses "Bit-PLRU", as
# described in https://en.wikipedia.org/wiki/Pseudo-LRU.
class CacheBitPLRU(object):
def __init__(self, num_ways):
self.mru_bits = [False] * num_ways
self.addr_to_way = {}
self.way_to_addr = [None] * num_ways
for way in xrange(num_ways):
self.mru_bits[way] = bool(random.randrange(2))
def _evict(self):
for way in xrange(len(self.mru_bits)):
if not self.mru_bits[way]:
return way
# All MRU bits were set, so reset them all to zero.
for way in xrange(len(self.mru_bits)):
self.mru_bits[way] = False
return 0
def lookup(self, addr):
way = self.addr_to_way.get(addr)
is_miss = way is None
if is_miss:
way = self._evict()
# Evict old address.
old_addr = self.way_to_addr[way]
if old_addr is not None:
del self.addr_to_way[old_addr]
self.addr_to_way[addr] = way
self.way_to_addr[way] = addr
# Mark as recently used.
self.mru_bits[way] = True
return is_miss
def mru_state(self):
return ''.join(str(int(x)) for x in self.mru_bits)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--show-state', '-s', action='store_true')
args = parser.parse_args()
ways = 12
cache = CacheBitPLRU(ways)
# Try a "rowhammer optimal" ordering of addresses to access. This should
# generate cache misses on just two specific addresses on each iteration.
addr_order = ([100] + range(ways - 1) +
[101] + range(ways - 1))
print 'ordering of addresses to access:', addr_order
for run in xrange(30):
results = []
for addr in addr_order:
results.append(cache.lookup(addr))
if args.show_state:
print 'state:', cache.mru_state()
print 'misses:', ''.join(str(int(x)) for x in results)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add model/simulation of cache eviction with a Bit-Pseudo-LRU cache<commit_after>
|
# Copyright 2015, Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import random
# This class models a single cache set of a cache that uses "Bit-PLRU", as
# described in https://en.wikipedia.org/wiki/Pseudo-LRU.
class CacheBitPLRU(object):
def __init__(self, num_ways):
self.mru_bits = [False] * num_ways
self.addr_to_way = {}
self.way_to_addr = [None] * num_ways
for way in xrange(num_ways):
self.mru_bits[way] = bool(random.randrange(2))
def _evict(self):
for way in xrange(len(self.mru_bits)):
if not self.mru_bits[way]:
return way
# All MRU bits were set, so reset them all to zero.
for way in xrange(len(self.mru_bits)):
self.mru_bits[way] = False
return 0
def lookup(self, addr):
way = self.addr_to_way.get(addr)
is_miss = way is None
if is_miss:
way = self._evict()
# Evict old address.
old_addr = self.way_to_addr[way]
if old_addr is not None:
del self.addr_to_way[old_addr]
self.addr_to_way[addr] = way
self.way_to_addr[way] = addr
# Mark as recently used.
self.mru_bits[way] = True
return is_miss
def mru_state(self):
return ''.join(str(int(x)) for x in self.mru_bits)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--show-state', '-s', action='store_true')
args = parser.parse_args()
ways = 12
cache = CacheBitPLRU(ways)
# Try a "rowhammer optimal" ordering of addresses to access. This should
# generate cache misses on just two specific addresses on each iteration.
addr_order = ([100] + range(ways - 1) +
[101] + range(ways - 1))
print 'ordering of addresses to access:', addr_order
for run in xrange(30):
results = []
for addr in addr_order:
results.append(cache.lookup(addr))
if args.show_state:
print 'state:', cache.mru_state()
print 'misses:', ''.join(str(int(x)) for x in results)
if __name__ == '__main__':
main()
|
Add model/simulation of cache eviction with a Bit-Pseudo-LRU cache# Copyright 2015, Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import random
# This class models a single cache set of a cache that uses "Bit-PLRU", as
# described in https://en.wikipedia.org/wiki/Pseudo-LRU.
class CacheBitPLRU(object):
def __init__(self, num_ways):
self.mru_bits = [False] * num_ways
self.addr_to_way = {}
self.way_to_addr = [None] * num_ways
for way in xrange(num_ways):
self.mru_bits[way] = bool(random.randrange(2))
def _evict(self):
for way in xrange(len(self.mru_bits)):
if not self.mru_bits[way]:
return way
# All MRU bits were set, so reset them all to zero.
for way in xrange(len(self.mru_bits)):
self.mru_bits[way] = False
return 0
def lookup(self, addr):
way = self.addr_to_way.get(addr)
is_miss = way is None
if is_miss:
way = self._evict()
# Evict old address.
old_addr = self.way_to_addr[way]
if old_addr is not None:
del self.addr_to_way[old_addr]
self.addr_to_way[addr] = way
self.way_to_addr[way] = addr
# Mark as recently used.
self.mru_bits[way] = True
return is_miss
def mru_state(self):
return ''.join(str(int(x)) for x in self.mru_bits)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--show-state', '-s', action='store_true')
args = parser.parse_args()
ways = 12
cache = CacheBitPLRU(ways)
# Try a "rowhammer optimal" ordering of addresses to access. This should
# generate cache misses on just two specific addresses on each iteration.
addr_order = ([100] + range(ways - 1) +
[101] + range(ways - 1))
print 'ordering of addresses to access:', addr_order
for run in xrange(30):
results = []
for addr in addr_order:
results.append(cache.lookup(addr))
if args.show_state:
print 'state:', cache.mru_state()
print 'misses:', ''.join(str(int(x)) for x in results)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add model/simulation of cache eviction with a Bit-Pseudo-LRU cache<commit_after># Copyright 2015, Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import random
# This class models a single cache set of a cache that uses "Bit-PLRU", as
# described in https://en.wikipedia.org/wiki/Pseudo-LRU.
class CacheBitPLRU(object):
def __init__(self, num_ways):
self.mru_bits = [False] * num_ways
self.addr_to_way = {}
self.way_to_addr = [None] * num_ways
for way in xrange(num_ways):
self.mru_bits[way] = bool(random.randrange(2))
def _evict(self):
for way in xrange(len(self.mru_bits)):
if not self.mru_bits[way]:
return way
# All MRU bits were set, so reset them all to zero.
for way in xrange(len(self.mru_bits)):
self.mru_bits[way] = False
return 0
def lookup(self, addr):
way = self.addr_to_way.get(addr)
is_miss = way is None
if is_miss:
way = self._evict()
# Evict old address.
old_addr = self.way_to_addr[way]
if old_addr is not None:
del self.addr_to_way[old_addr]
self.addr_to_way[addr] = way
self.way_to_addr[way] = addr
# Mark as recently used.
self.mru_bits[way] = True
return is_miss
def mru_state(self):
return ''.join(str(int(x)) for x in self.mru_bits)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--show-state', '-s', action='store_true')
args = parser.parse_args()
ways = 12
cache = CacheBitPLRU(ways)
# Try a "rowhammer optimal" ordering of addresses to access. This should
# generate cache misses on just two specific addresses on each iteration.
addr_order = ([100] + range(ways - 1) +
[101] + range(ways - 1))
print 'ordering of addresses to access:', addr_order
for run in xrange(30):
results = []
for addr in addr_order:
results.append(cache.lookup(addr))
if args.show_state:
print 'state:', cache.mru_state()
print 'misses:', ''.join(str(int(x)) for x in results)
if __name__ == '__main__':
main()
|
|
5fdbb45c38c2bf542184ed8e7d750b3cfaa6fbb4
|
plugins/raw_data_filter.py
|
plugins/raw_data_filter.py
|
"""Show how to add a custom raw data filter for the TraceView and Waveform View
Use Alt+R in the GUI to toggle the filter.
"""
import numpy as np
from scipy.signal import butter, lfilter
from phy import IPlugin
class RawDataFilterPlugin(IPlugin):
def attach_to_controller(self, controller):
b, a = butter(3, 150.0 / controller.model.sample_rate * 2.0, 'high')
@controller.raw_data_filter.add_filter
def high_pass(arr, axis=0):
arr = lfilter(b, a, arr, axis=axis)
arr = np.flip(arr, axis=axis)
arr = lfilter(b, a, arr, axis=axis)
arr = np.flip(arr, axis=axis)
return arr
|
Add raw data filter plugin
|
Add raw data filter plugin
|
Python
|
bsd-3-clause
|
kwikteam/phy,kwikteam/phy,kwikteam/phy
|
Add raw data filter plugin
|
"""Show how to add a custom raw data filter for the TraceView and Waveform View
Use Alt+R in the GUI to toggle the filter.
"""
import numpy as np
from scipy.signal import butter, lfilter
from phy import IPlugin
class RawDataFilterPlugin(IPlugin):
def attach_to_controller(self, controller):
b, a = butter(3, 150.0 / controller.model.sample_rate * 2.0, 'high')
@controller.raw_data_filter.add_filter
def high_pass(arr, axis=0):
arr = lfilter(b, a, arr, axis=axis)
arr = np.flip(arr, axis=axis)
arr = lfilter(b, a, arr, axis=axis)
arr = np.flip(arr, axis=axis)
return arr
|
<commit_before><commit_msg>Add raw data filter plugin<commit_after>
|
"""Show how to add a custom raw data filter for the TraceView and Waveform View
Use Alt+R in the GUI to toggle the filter.
"""
import numpy as np
from scipy.signal import butter, lfilter
from phy import IPlugin
class RawDataFilterPlugin(IPlugin):
def attach_to_controller(self, controller):
b, a = butter(3, 150.0 / controller.model.sample_rate * 2.0, 'high')
@controller.raw_data_filter.add_filter
def high_pass(arr, axis=0):
arr = lfilter(b, a, arr, axis=axis)
arr = np.flip(arr, axis=axis)
arr = lfilter(b, a, arr, axis=axis)
arr = np.flip(arr, axis=axis)
return arr
|
Add raw data filter plugin"""Show how to add a custom raw data filter for the TraceView and Waveform View
Use Alt+R in the GUI to toggle the filter.
"""
import numpy as np
from scipy.signal import butter, lfilter
from phy import IPlugin
class RawDataFilterPlugin(IPlugin):
def attach_to_controller(self, controller):
b, a = butter(3, 150.0 / controller.model.sample_rate * 2.0, 'high')
@controller.raw_data_filter.add_filter
def high_pass(arr, axis=0):
arr = lfilter(b, a, arr, axis=axis)
arr = np.flip(arr, axis=axis)
arr = lfilter(b, a, arr, axis=axis)
arr = np.flip(arr, axis=axis)
return arr
|
<commit_before><commit_msg>Add raw data filter plugin<commit_after>"""Show how to add a custom raw data filter for the TraceView and Waveform View
Use Alt+R in the GUI to toggle the filter.
"""
import numpy as np
from scipy.signal import butter, lfilter
from phy import IPlugin
class RawDataFilterPlugin(IPlugin):
def attach_to_controller(self, controller):
b, a = butter(3, 150.0 / controller.model.sample_rate * 2.0, 'high')
@controller.raw_data_filter.add_filter
def high_pass(arr, axis=0):
arr = lfilter(b, a, arr, axis=axis)
arr = np.flip(arr, axis=axis)
arr = lfilter(b, a, arr, axis=axis)
arr = np.flip(arr, axis=axis)
return arr
|
|
e22173b8492fefe3d562f7efe684aa560772d757
|
insertion_sort_list.py
|
insertion_sort_list.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @param head, a ListNode
# @return a ListNode
def insertionSortList(self, head):
if None == head:
return None
        # keep track of the tail node
tail_node = head
next_node = head.next
while None != next_node:
if head.val >= next_node.val:
                ## update the head pointer and next_node
tmp = next_node.next
next_node.next = head
head = next_node
next_node = tmp
elif tail_node.val <= next_node.val:
                # the tail node is less than or equal to the new node
tail_node.next = next_node
tail_node = next_node
next_node = next_node.next
else:
                # scan from the head to find the right position to insert
tmp = head.next
prev = head
while None != tmp:
if tmp.val > next_node.val:
break;
prev = tmp
tmp = tmp.next
prev.next = next_node
next_node = next_node.next
prev.next.next = tmp
tail_next = next_node #None
return head
if __name__ == "__main__":
s = Solution()
head = ListNode(4)
node_1 = ListNode(1)
head.next = node_1
node_2 = ListNode(3)
node_1.next = node_2
node_3 = ListNode(5)
node_2.next = node_3
node_4 = ListNode(9)
node_3.next = node_4
node_5 = ListNode(2)
node_4.next = node_5
node_6 = ListNode(8)
node_5.next = node_6
node_7 = ListNode(13)
node_6.next = node_7
node = s.insertionSortList(head)
while None != node:
print node.val
node = node.next
|
Sort a linked list using insertion sort
|
Sort a linked list using insertion sort
|
Python
|
apache-2.0
|
don7hao/leetcode_oj,don7hao/leetcode_oj
|
Sort a linked list using insertion sort
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @param head, a ListNode
# @return a ListNode
def insertionSortList(self, head):
if None == head:
return None
        # keep track of the tail node
tail_node = head
next_node = head.next
while None != next_node:
if head.val >= next_node.val:
                ## update the head pointer and next_node
tmp = next_node.next
next_node.next = head
head = next_node
next_node = tmp
elif tail_node.val <= next_node.val:
                # the tail node is less than or equal to the new node
tail_node.next = next_node
tail_node = next_node
next_node = next_node.next
else:
                # scan from the head to find the right position to insert
tmp = head.next
prev = head
while None != tmp:
if tmp.val > next_node.val:
break;
prev = tmp
tmp = tmp.next
prev.next = next_node
next_node = next_node.next
prev.next.next = tmp
tail_next = next_node #None
return head
if __name__ == "__main__":
s = Solution()
head = ListNode(4)
node_1 = ListNode(1)
head.next = node_1
node_2 = ListNode(3)
node_1.next = node_2
node_3 = ListNode(5)
node_2.next = node_3
node_4 = ListNode(9)
node_3.next = node_4
node_5 = ListNode(2)
node_4.next = node_5
node_6 = ListNode(8)
node_5.next = node_6
node_7 = ListNode(13)
node_6.next = node_7
node = s.insertionSortList(head)
while None != node:
print node.val
node = node.next
|
<commit_before><commit_msg>Sort a linked list using insertion sort<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @param head, a ListNode
# @return a ListNode
def insertionSortList(self, head):
if None == head:
return None
        # keep track of the tail node
tail_node = head
next_node = head.next
while None != next_node:
if head.val >= next_node.val:
                ## update the head pointer and next_node
tmp = next_node.next
next_node.next = head
head = next_node
next_node = tmp
elif tail_node.val <= next_node.val:
                # the tail node is less than or equal to the new node
tail_node.next = next_node
tail_node = next_node
next_node = next_node.next
else:
                # scan from the head to find the right position to insert
tmp = head.next
prev = head
while None != tmp:
if tmp.val > next_node.val:
break;
prev = tmp
tmp = tmp.next
prev.next = next_node
next_node = next_node.next
prev.next.next = tmp
tail_next = next_node #None
return head
if __name__ == "__main__":
s = Solution()
head = ListNode(4)
node_1 = ListNode(1)
head.next = node_1
node_2 = ListNode(3)
node_1.next = node_2
node_3 = ListNode(5)
node_2.next = node_3
node_4 = ListNode(9)
node_3.next = node_4
node_5 = ListNode(2)
node_4.next = node_5
node_6 = ListNode(8)
node_5.next = node_6
node_7 = ListNode(13)
node_6.next = node_7
node = s.insertionSortList(head)
while None != node:
print node.val
node = node.next
|
Sort a linked list using insertion sort#!/usr/bin/python
# -*- coding: utf-8 -*-
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @param head, a ListNode
# @return a ListNode
def insertionSortList(self, head):
if None == head:
return None
        # keep track of the tail node
tail_node = head
next_node = head.next
while None != next_node:
if head.val >= next_node.val:
                ## update the head pointer and next_node
tmp = next_node.next
next_node.next = head
head = next_node
next_node = tmp
elif tail_node.val <= next_node.val:
                # the tail node is less than or equal to the new node
tail_node.next = next_node
tail_node = next_node
next_node = next_node.next
else:
                # scan from the head to find the right position to insert
tmp = head.next
prev = head
while None != tmp:
if tmp.val > next_node.val:
break;
prev = tmp
tmp = tmp.next
prev.next = next_node
next_node = next_node.next
prev.next.next = tmp
tail_next = next_node #None
return head
if __name__ == "__main__":
s = Solution()
head = ListNode(4)
node_1 = ListNode(1)
head.next = node_1
node_2 = ListNode(3)
node_1.next = node_2
node_3 = ListNode(5)
node_2.next = node_3
node_4 = ListNode(9)
node_3.next = node_4
node_5 = ListNode(2)
node_4.next = node_5
node_6 = ListNode(8)
node_5.next = node_6
node_7 = ListNode(13)
node_6.next = node_7
node = s.insertionSortList(head)
while None != node:
print node.val
node = node.next
|
<commit_before><commit_msg>Sort a linked list using insertion sort<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @param head, a ListNode
# @return a ListNode
def insertionSortList(self, head):
if None == head:
return None
        # keep track of the tail node
tail_node = head
next_node = head.next
while None != next_node:
if head.val >= next_node.val:
                ## update the head pointer and next_node
tmp = next_node.next
next_node.next = head
head = next_node
next_node = tmp
elif tail_node.val <= next_node.val:
                # the tail node is less than or equal to the new node
tail_node.next = next_node
tail_node = next_node
next_node = next_node.next
else:
                # scan from the head to find the right position to insert
tmp = head.next
prev = head
while None != tmp:
if tmp.val > next_node.val:
break;
prev = tmp
tmp = tmp.next
prev.next = next_node
next_node = next_node.next
prev.next.next = tmp
tail_next = next_node #None
return head
if __name__ == "__main__":
s = Solution()
head = ListNode(4)
node_1 = ListNode(1)
head.next = node_1
node_2 = ListNode(3)
node_1.next = node_2
node_3 = ListNode(5)
node_2.next = node_3
node_4 = ListNode(9)
node_3.next = node_4
node_5 = ListNode(2)
node_4.next = node_5
node_6 = ListNode(8)
node_5.next = node_6
node_7 = ListNode(13)
node_6.next = node_7
node = s.insertionSortList(head)
while None != node:
print node.val
node = node.next
|
|
6df2413f0207bf800697c430c4e6f1b1c46bbac9
|
python/ellipsoid3DShape.py
|
python/ellipsoid3DShape.py
|
'''
Copyright (c) 2016 Abhishek Agrawal (abhishek.agrawal@protonmail.com)
Distributed under the MIT License.
See accompanying file LICENSE.md or copy at http://opensource.org/licenses/MIT
'''
# Set up modules and packages
# I/O
import csv
from pprint import pprint
# Numerical
import numpy as np
import pandas as pd
# System
import sys
import time
from tqdm import tqdm
print ""
print "---------------------------------------------------------------------------------"
print " NAOS "
print " "
print " Copyright (c) 2016, A. Agrawal (abhishek.agrawal@protonmail.com) "
print "---------------------------------------------------------------------------------"
print ""
# Start timer.
start_time = time.time( )
# Get plotting packages
import matplotlib
import matplotlib.colors
import matplotlib.axes
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from matplotlib import rcParams
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import axes3d
# Operations
# Stop timer
end_time = time.time( )
# Print elapsed time
print "Script time: " + str("{:,g}".format(end_time - start_time)) + "s"
print ""
print "------------------------------------------------------------------"
print " Exited successfully! "
print "------------------------------------------------------------------"
print ""
|
Add directory to store python plotting scripts. Add script to plot a 3D ellipsoid.
|
Add directory to store python plotting scripts. Add script to plot a 3D ellipsoid.
|
Python
|
mit
|
agrawalabhishek/NAOS,agrawalabhishek/NAOS,agrawalabhishek/NAOS
|
Add directory to store python plotting scripts. Add script to plot a 3D ellipsoid.
|
'''
Copyright (c) 2016 Abhishek Agrawal (abhishek.agrawal@protonmail.com)
Distributed under the MIT License.
See accompanying file LICENSE.md or copy at http://opensource.org/licenses/MIT
'''
# Set up modules and packages
# I/O
import csv
from pprint import pprint
# Numerical
import numpy as np
import pandas as pd
# System
import sys
import time
from tqdm import tqdm
print ""
print "---------------------------------------------------------------------------------"
print " NAOS "
print " "
print " Copyright (c) 2016, A. Agrawal (abhishek.agrawal@protonmail.com) "
print "---------------------------------------------------------------------------------"
print ""
# Start timer.
start_time = time.time( )
# Get plotting packages
import matplotlib
import matplotlib.colors
import matplotlib.axes
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from matplotlib import rcParams
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import axes3d
# Operations
# Stop timer
end_time = time.time( )
# Print elapsed time
print "Script time: " + str("{:,g}".format(end_time - start_time)) + "s"
print ""
print "------------------------------------------------------------------"
print " Exited successfully! "
print "------------------------------------------------------------------"
print ""
|
<commit_before><commit_msg>Add directory to store python plotting scripts. Add script to plot a 3D ellipsoid.<commit_after>
|
'''
Copyright (c) 2016 Abhishek Agrawal (abhishek.agrawal@protonmail.com)
Distributed under the MIT License.
See accompanying file LICENSE.md or copy at http://opensource.org/licenses/MIT
'''
# Set up modules and packages
# I/O
import csv
from pprint import pprint
# Numerical
import numpy as np
import pandas as pd
# System
import sys
import time
from tqdm import tqdm
print ""
print "---------------------------------------------------------------------------------"
print " NAOS "
print " "
print " Copyright (c) 2016, A. Agrawal (abhishek.agrawal@protonmail.com) "
print "---------------------------------------------------------------------------------"
print ""
# Start timer.
start_time = time.time( )
# Get plotting packages
import matplotlib
import matplotlib.colors
import matplotlib.axes
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from matplotlib import rcParams
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import axes3d
# Operations
# Stop timer
end_time = time.time( )
# Print elapsed time
print "Script time: " + str("{:,g}".format(end_time - start_time)) + "s"
print ""
print "------------------------------------------------------------------"
print " Exited successfully! "
print "------------------------------------------------------------------"
print ""
|
Add directory to store python plotting scripts. Add script to plot a 3D ellipsoid.'''
Copyright (c) 2016 Abhishek Agrawal (abhishek.agrawal@protonmail.com)
Distributed under the MIT License.
See accompanying file LICENSE.md or copy at http://opensource.org/licenses/MIT
'''
# Set up modules and packages
# I/O
import csv
from pprint import pprint
# Numerical
import numpy as np
import pandas as pd
# System
import sys
import time
from tqdm import tqdm
print ""
print "---------------------------------------------------------------------------------"
print " NAOS "
print " "
print " Copyright (c) 2016, A. Agrawal (abhishek.agrawal@protonmail.com) "
print "---------------------------------------------------------------------------------"
print ""
# Start timer.
start_time = time.time( )
# Get plotting packages
import matplotlib
import matplotlib.colors
import matplotlib.axes
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from matplotlib import rcParams
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import axes3d
# Operations
# Stop timer
end_time = time.time( )
# Print elapsed time
print "Script time: " + str("{:,g}".format(end_time - start_time)) + "s"
print ""
print "------------------------------------------------------------------"
print " Exited successfully! "
print "------------------------------------------------------------------"
print ""
|
<commit_before><commit_msg>Add directory to store python plotting scripts. Add script to plot a 3D ellipsoid.<commit_after>'''
Copyright (c) 2016 Abhishek Agrawal (abhishek.agrawal@protonmail.com)
Distributed under the MIT License.
See accompanying file LICENSE.md or copy at http://opensource.org/licenses/MIT
'''
# Set up modules and packages
# I/O
import csv
from pprint import pprint
# Numerical
import numpy as np
import pandas as pd
# System
import sys
import time
from tqdm import tqdm
print ""
print "---------------------------------------------------------------------------------"
print " NAOS "
print " "
print " Copyright (c) 2016, A. Agrawal (abhishek.agrawal@protonmail.com) "
print "---------------------------------------------------------------------------------"
print ""
# Start timer.
start_time = time.time( )
# Get plotting packages
import matplotlib
import matplotlib.colors
import matplotlib.axes
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from matplotlib import rcParams
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import axes3d
# Operations
# Stop timer
end_time = time.time( )
# Print elapsed time
print "Script time: " + str("{:,g}".format(end_time - start_time)) + "s"
print ""
print "------------------------------------------------------------------"
print " Exited successfully! "
print "------------------------------------------------------------------"
print ""
|
|
df48f6b57dd2fff8e22c0df95247afb0a630dd76
|
infrastructure/utils/utils.py
|
infrastructure/utils/utils.py
|
import contextlib
import psycopg2
from openerp import exceptions
@contextlib.contextmanager
def synchronize_on_config_parameter(env, parameter):
param_model = env['ir.config_parameter']
param = param_model.search([('key', '=', parameter)])
if param:
try:
env.cr.execute(
'''select *
from ir_config_parameter
where id = %s
for update nowait''',
param.id,
log_exceptions=False
)
except psycopg2.OperationalError, e:
raise exceptions.UserError(
'Cannot synchronize access. Another process lock the parameter'
)
yield
|
Create context manager to lock on ir.config_parameter record
|
Create context manager to lock on ir.config_parameter record
|
Python
|
agpl-3.0
|
ingadhoc/infrastructure,ingadhoc/odoo-infrastructure,online-sanaullah/odoo-infrastructure
|
Create context manager to lock on ir.config_parameter record
|
import contextlib
import psycopg2
from openerp import exceptions
@contextlib.contextmanager
def synchronize_on_config_parameter(env, parameter):
param_model = env['ir.config_parameter']
param = param_model.search([('key', '=', parameter)])
if param:
try:
env.cr.execute(
'''select *
from ir_config_parameter
where id = %s
for update nowait''',
param.id,
log_exceptions=False
)
except psycopg2.OperationalError, e:
raise exceptions.UserError(
'Cannot synchronize access. Another process lock the parameter'
)
yield
|
<commit_before><commit_msg>Create context manager to lock on ir.config_parameter record<commit_after>
|
import contextlib
import psycopg2
from openerp import exceptions
@contextlib.contextmanager
def synchronize_on_config_parameter(env, parameter):
param_model = env['ir.config_parameter']
param = param_model.search([('key', '=', parameter)])
if param:
try:
env.cr.execute(
'''select *
from ir_config_parameter
where id = %s
for update nowait''',
param.id,
log_exceptions=False
)
except psycopg2.OperationalError, e:
raise exceptions.UserError(
'Cannot synchronize access. Another process lock the parameter'
)
yield
|
Create context manager to lock on ir.config_parameter recordimport contextlib
import psycopg2
from openerp import exceptions
@contextlib.contextmanager
def synchronize_on_config_parameter(env, parameter):
param_model = env['ir.config_parameter']
param = param_model.search([('key', '=', parameter)])
if param:
try:
env.cr.execute(
'''select *
from ir_config_parameter
where id = %s
for update nowait''',
param.id,
log_exceptions=False
)
except psycopg2.OperationalError, e:
raise exceptions.UserError(
'Cannot synchronize access. Another process lock the parameter'
)
yield
|
<commit_before><commit_msg>Create context manager to lock on ir.config_parameter record<commit_after>import contextlib
import psycopg2
from openerp import exceptions
@contextlib.contextmanager
def synchronize_on_config_parameter(env, parameter):
param_model = env['ir.config_parameter']
param = param_model.search([('key', '=', parameter)])
if param:
try:
env.cr.execute(
'''select *
from ir_config_parameter
where id = %s
for update nowait''',
param.id,
log_exceptions=False
)
except psycopg2.OperationalError, e:
raise exceptions.UserError(
'Cannot synchronize access. Another process lock the parameter'
)
yield
|
|
1818959519207eb1bd888d8abed096c32bb85b96
|
pybtex/tests/bibtex_parser_test.py
|
pybtex/tests/bibtex_parser_test.py
|
from pybtex.database import BibliographyData
from pybtex.core import Entry
from pybtex.database.input.bibtex import Parser
from cStringIO import StringIO
test_data = [
(
'''
''',
BibliographyData(),
),
(
'''@ARTICLE{
test,
title={Polluted
with {DDT}.
},
}''',
BibliographyData({u'test': Entry('article', {u'title': 'Polluted with {DDT}.'})}),
),
]
def _test(bibtex_input, correct_result):
parser = Parser(encoding='UTF-8')
parser.parse_stream(StringIO(bibtex_input))
result = parser.data
assert result == correct_result
def test_bibtex_parser():
for bibtex_input, correct_result in test_data:
_test(bibtex_input, correct_result)
|
Add a regression test for whitespace normalization in the BibTeX parser.
|
Add a regression test for whitespace normalization in the BibTeX parser.
|
Python
|
mit
|
live-clones/pybtex
|
Add a regression test for whitespace normalization in the BibTeX parser.
|
from pybtex.database import BibliographyData
from pybtex.core import Entry
from pybtex.database.input.bibtex import Parser
from cStringIO import StringIO
test_data = [
(
'''
''',
BibliographyData(),
),
(
'''@ARTICLE{
test,
title={Polluted
with {DDT}.
},
}''',
BibliographyData({u'test': Entry('article', {u'title': 'Polluted with {DDT}.'})}),
),
]
def _test(bibtex_input, correct_result):
parser = Parser(encoding='UTF-8')
parser.parse_stream(StringIO(bibtex_input))
result = parser.data
assert result == correct_result
def test_bibtex_parser():
for bibtex_input, correct_result in test_data:
_test(bibtex_input, correct_result)
|
<commit_before><commit_msg>Add a regression test for whitespace normalization in the BibTeX parser.<commit_after>
|
from pybtex.database import BibliographyData
from pybtex.core import Entry
from pybtex.database.input.bibtex import Parser
from cStringIO import StringIO
test_data = [
(
'''
''',
BibliographyData(),
),
(
'''@ARTICLE{
test,
title={Polluted
with {DDT}.
},
}''',
BibliographyData({u'test': Entry('article', {u'title': 'Polluted with {DDT}.'})}),
),
]
def _test(bibtex_input, correct_result):
parser = Parser(encoding='UTF-8')
parser.parse_stream(StringIO(bibtex_input))
result = parser.data
assert result == correct_result
def test_bibtex_parser():
for bibtex_input, correct_result in test_data:
_test(bibtex_input, correct_result)
|
Add a regression test for whitespace normalization in the BibTeX parser.from pybtex.database import BibliographyData
from pybtex.core import Entry
from pybtex.database.input.bibtex import Parser
from cStringIO import StringIO
test_data = [
(
'''
''',
BibliographyData(),
),
(
'''@ARTICLE{
test,
title={Polluted
with {DDT}.
},
}''',
BibliographyData({u'test': Entry('article', {u'title': 'Polluted with {DDT}.'})}),
),
]
def _test(bibtex_input, correct_result):
parser = Parser(encoding='UTF-8')
parser.parse_stream(StringIO(bibtex_input))
result = parser.data
assert result == correct_result
def test_bibtex_parser():
for bibtex_input, correct_result in test_data:
_test(bibtex_input, correct_result)
|
<commit_before><commit_msg>Add a regression test for whitespace normalization in the BibTeX parser.<commit_after>from pybtex.database import BibliographyData
from pybtex.core import Entry
from pybtex.database.input.bibtex import Parser
from cStringIO import StringIO
test_data = [
(
'''
''',
BibliographyData(),
),
(
'''@ARTICLE{
test,
title={Polluted
with {DDT}.
},
}''',
BibliographyData({u'test': Entry('article', {u'title': 'Polluted with {DDT}.'})}),
),
]
def _test(bibtex_input, correct_result):
parser = Parser(encoding='UTF-8')
parser.parse_stream(StringIO(bibtex_input))
result = parser.data
assert result == correct_result
def test_bibtex_parser():
for bibtex_input, correct_result in test_data:
_test(bibtex_input, correct_result)
|
|
f3c546afd159d9a4ba006f448faec1653d974342
|
mozillians/users/migrations/0038_auto_20180815_0108.py
|
mozillians/users/migrations/0038_auto_20180815_0108.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-08-15 08:08
from __future__ import unicode_literals
from django.db import migrations
from django.conf import settings
def add_missing_employee_vouches(apps, schema_editor):
UserProfile = apps.get_model('users', 'UserProfile')
IdpProfile = apps.get_model('users', 'IdpProfile')
for profile in UserProfile.objects.all():
emails = [idp.email for idp in IdpProfile.objects.filter(profile=profile)]
email_exists = any([email for email in set(emails)
if email.split('@')[1] in settings.AUTO_VOUCH_DOMAINS])
if email_exists and not profile.vouches_received.filter(
description=settings.AUTO_VOUCH_REASON, autovouch=True).exists():
profile.vouch(None, settings.AUTO_VOUCH_REASON, autovouch=True)
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('users', '0037_auto_20180720_0305'),
]
operations = [
migrations.RunPython(add_missing_employee_vouches, backwards),
]
|
Add missing vouches to employees.
|
Add missing vouches to employees.
|
Python
|
bsd-3-clause
|
akatsoulas/mozillians,mozilla/mozillians,mozilla/mozillians,mozilla/mozillians,akatsoulas/mozillians,akatsoulas/mozillians,mozilla/mozillians,akatsoulas/mozillians
|
Add missing vouches to employees.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-08-15 08:08
from __future__ import unicode_literals
from django.db import migrations
from django.conf import settings
def add_missing_employee_vouches(apps, schema_editor):
UserProfile = apps.get_model('users', 'UserProfile')
IdpProfile = apps.get_model('users', 'IdpProfile')
for profile in UserProfile.objects.all():
emails = [idp.email for idp in IdpProfile.objects.filter(profile=profile)]
email_exists = any([email for email in set(emails)
if email.split('@')[1] in settings.AUTO_VOUCH_DOMAINS])
if email_exists and not profile.vouches_received.filter(
description=settings.AUTO_VOUCH_REASON, autovouch=True).exists():
profile.vouch(None, settings.AUTO_VOUCH_REASON, autovouch=True)
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('users', '0037_auto_20180720_0305'),
]
operations = [
migrations.RunPython(add_missing_employee_vouches, backwards),
]
|
<commit_before><commit_msg>Add missing vouches to employees.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-08-15 08:08
from __future__ import unicode_literals
from django.db import migrations
from django.conf import settings
def add_missing_employee_vouches(apps, schema_editor):
UserProfile = apps.get_model('users', 'UserProfile')
IdpProfile = apps.get_model('users', 'IdpProfile')
for profile in UserProfile.objects.all():
emails = [idp.email for idp in IdpProfile.objects.filter(profile=profile)]
email_exists = any([email for email in set(emails)
if email.split('@')[1] in settings.AUTO_VOUCH_DOMAINS])
if email_exists and not profile.vouches_received.filter(
description=settings.AUTO_VOUCH_REASON, autovouch=True).exists():
profile.vouch(None, settings.AUTO_VOUCH_REASON, autovouch=True)
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('users', '0037_auto_20180720_0305'),
]
operations = [
migrations.RunPython(add_missing_employee_vouches, backwards),
]
|
Add missing vouches to employees.# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-08-15 08:08
from __future__ import unicode_literals
from django.db import migrations
from django.conf import settings
def add_missing_employee_vouches(apps, schema_editor):
UserProfile = apps.get_model('users', 'UserProfile')
IdpProfile = apps.get_model('users', 'IdpProfile')
for profile in UserProfile.objects.all():
emails = [idp.email for idp in IdpProfile.objects.filter(profile=profile)]
email_exists = any([email for email in set(emails)
if email.split('@')[1] in settings.AUTO_VOUCH_DOMAINS])
if email_exists and not profile.vouches_received.filter(
description=settings.AUTO_VOUCH_REASON, autovouch=True).exists():
profile.vouch(None, settings.AUTO_VOUCH_REASON, autovouch=True)
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('users', '0037_auto_20180720_0305'),
]
operations = [
migrations.RunPython(add_missing_employee_vouches, backwards),
]
|
<commit_before><commit_msg>Add missing vouches to employees.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-08-15 08:08
from __future__ import unicode_literals
from django.db import migrations
from django.conf import settings
def add_missing_employee_vouches(apps, schema_editor):
UserProfile = apps.get_model('users', 'UserProfile')
IdpProfile = apps.get_model('users', 'IdpProfile')
for profile in UserProfile.objects.all():
emails = [idp.email for idp in IdpProfile.objects.filter(profile=profile)]
email_exists = any([email for email in set(emails)
if email.split('@')[1] in settings.AUTO_VOUCH_DOMAINS])
if email_exists and not profile.vouches_received.filter(
description=settings.AUTO_VOUCH_REASON, autovouch=True).exists():
profile.vouch(None, settings.AUTO_VOUCH_REASON, autovouch=True)
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('users', '0037_auto_20180720_0305'),
]
operations = [
migrations.RunPython(add_missing_employee_vouches, backwards),
]
|
|
3a75f01d6ece9eec332dff1ca7518af4f7c7f462
|
test/hoomd_script/test_barrier.py
|
test/hoomd_script/test_barrier.py
|
# -*- coding: iso-8859-1 -*-
# Maintainer: jglaser
from hoomd_script import *
init.setup_exec_conf();
import unittest
import os
# unit test to run a simple polymer system with pair and bond potentials
class replicate(unittest.TestCase):
def test_barrier(self):
comm.barrier();
def test_barrier_all(self):
comm.barrier_all();
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
|
Add unit test for barrier
|
Add unit test for barrier
|
Python
|
bsd-3-clause
|
joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue
|
Add unit test for barrier
|
# -*- coding: iso-8859-1 -*-
# Maintainer: jglaser
from hoomd_script import *
init.setup_exec_conf();
import unittest
import os
# unit test to run a simple polymer system with pair and bond potentials
class replicate(unittest.TestCase):
def test_barrier(self):
comm.barrier();
def test_barrier_all(self):
comm.barrier_all();
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
|
<commit_before><commit_msg>Add unit test for barrier<commit_after>
|
# -*- coding: iso-8859-1 -*-
# Maintainer: jglaser
from hoomd_script import *
init.setup_exec_conf();
import unittest
import os
# unit test to run a simple polymer system with pair and bond potentials
class replicate(unittest.TestCase):
def test_barrier(self):
comm.barrier();
def test_barrier_all(self):
comm.barrier_all();
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
|
Add unit test for barrier# -*- coding: iso-8859-1 -*-
# Maintainer: jglaser
from hoomd_script import *
init.setup_exec_conf();
import unittest
import os
# unit test to run a simple polymer system with pair and bond potentials
class replicate(unittest.TestCase):
def test_barrier(self):
comm.barrier();
def test_barrier_all(self):
comm.barrier_all();
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
|
<commit_before><commit_msg>Add unit test for barrier<commit_after># -*- coding: iso-8859-1 -*-
# Maintainer: jglaser
from hoomd_script import *
init.setup_exec_conf();
import unittest
import os
# unit test to run a simple polymer system with pair and bond potentials
class replicate(unittest.TestCase):
def test_barrier(self):
comm.barrier();
def test_barrier_all(self):
comm.barrier_all();
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
|
|
c8bff113f85738f783fd00dcb1b9dacfa24cf589
|
auth0/v2/test/test_emails.py
|
auth0/v2/test/test_emails.py
|
import unittest
import mock
from ..emails import Emails
class TestEmails(unittest.TestCase):
@mock.patch('auth0.v2.emails.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
e = Emails(domain='domain', jwt_token='jwttoken')
e.get()
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/emails/provider', args[0])
self.assertEqual(kwargs['params'], {'fields': None,
'include_fields': 'true'})
e.get(fields=['a', 'b'], include_fields=False)
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/emails/provider', args[0])
self.assertEqual(kwargs['params'], {'fields': 'a,b',
'include_fields': 'false'})
@mock.patch('auth0.v2.emails.RestClient')
def test_config(self, mock_rc):
mock_instance = mock_rc.return_value
e = Emails(domain='domain', jwt_token='jwttoken')
e.config({'a': 'b', 'c': 'd'})
args, kwargs = mock_instance.post.call_args
self.assertEqual('https://domain/api/v2/emails/provider', args[0])
self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'})
@mock.patch('auth0.v2.emails.RestClient')
def test_update(self, mock_rc):
mock_instance = mock_rc.return_value
e = Emails(domain='domain', jwt_token='jwttoken')
e.update({'a': 'b', 'c': 'd'})
args, kwargs = mock_instance.patch.call_args
self.assertEqual('https://domain/api/v2/emails/provider', args[0])
self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'})
@mock.patch('auth0.v2.emails.RestClient')
def test_delete(self, mock_rc):
mock_instance = mock_rc.return_value
e = Emails(domain='domain', jwt_token='jwttoken')
e.delete()
mock_instance.delete.assert_called_with(
'https://domain/api/v2/emails/provider'
)
|
Add unit tests for Emails()
|
Add unit tests for Emails()
|
Python
|
mit
|
auth0/auth0-python,auth0/auth0-python
|
Add unit tests for Emails()
|
import unittest
import mock
from ..emails import Emails
class TestEmails(unittest.TestCase):
@mock.patch('auth0.v2.emails.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
e = Emails(domain='domain', jwt_token='jwttoken')
e.get()
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/emails/provider', args[0])
self.assertEqual(kwargs['params'], {'fields': None,
'include_fields': 'true'})
e.get(fields=['a', 'b'], include_fields=False)
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/emails/provider', args[0])
self.assertEqual(kwargs['params'], {'fields': 'a,b',
'include_fields': 'false'})
@mock.patch('auth0.v2.emails.RestClient')
def test_config(self, mock_rc):
mock_instance = mock_rc.return_value
e = Emails(domain='domain', jwt_token='jwttoken')
e.config({'a': 'b', 'c': 'd'})
args, kwargs = mock_instance.post.call_args
self.assertEqual('https://domain/api/v2/emails/provider', args[0])
self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'})
@mock.patch('auth0.v2.emails.RestClient')
def test_update(self, mock_rc):
mock_instance = mock_rc.return_value
e = Emails(domain='domain', jwt_token='jwttoken')
e.update({'a': 'b', 'c': 'd'})
args, kwargs = mock_instance.patch.call_args
self.assertEqual('https://domain/api/v2/emails/provider', args[0])
self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'})
@mock.patch('auth0.v2.emails.RestClient')
def test_delete(self, mock_rc):
mock_instance = mock_rc.return_value
e = Emails(domain='domain', jwt_token='jwttoken')
e.delete()
mock_instance.delete.assert_called_with(
'https://domain/api/v2/emails/provider'
)
|
<commit_before><commit_msg>Add unit tests for Emails()<commit_after>
|
import unittest
import mock
from ..emails import Emails
class TestEmails(unittest.TestCase):
@mock.patch('auth0.v2.emails.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
e = Emails(domain='domain', jwt_token='jwttoken')
e.get()
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/emails/provider', args[0])
self.assertEqual(kwargs['params'], {'fields': None,
'include_fields': 'true'})
e.get(fields=['a', 'b'], include_fields=False)
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/emails/provider', args[0])
self.assertEqual(kwargs['params'], {'fields': 'a,b',
'include_fields': 'false'})
@mock.patch('auth0.v2.emails.RestClient')
def test_config(self, mock_rc):
mock_instance = mock_rc.return_value
e = Emails(domain='domain', jwt_token='jwttoken')
e.config({'a': 'b', 'c': 'd'})
args, kwargs = mock_instance.post.call_args
self.assertEqual('https://domain/api/v2/emails/provider', args[0])
self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'})
@mock.patch('auth0.v2.emails.RestClient')
def test_update(self, mock_rc):
mock_instance = mock_rc.return_value
e = Emails(domain='domain', jwt_token='jwttoken')
e.update({'a': 'b', 'c': 'd'})
args, kwargs = mock_instance.patch.call_args
self.assertEqual('https://domain/api/v2/emails/provider', args[0])
self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'})
@mock.patch('auth0.v2.emails.RestClient')
def test_delete(self, mock_rc):
mock_instance = mock_rc.return_value
e = Emails(domain='domain', jwt_token='jwttoken')
e.delete()
mock_instance.delete.assert_called_with(
'https://domain/api/v2/emails/provider'
)
|
Add unit tests for Emails()import unittest
import mock
from ..emails import Emails
class TestEmails(unittest.TestCase):
@mock.patch('auth0.v2.emails.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
e = Emails(domain='domain', jwt_token='jwttoken')
e.get()
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/emails/provider', args[0])
self.assertEqual(kwargs['params'], {'fields': None,
'include_fields': 'true'})
e.get(fields=['a', 'b'], include_fields=False)
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/emails/provider', args[0])
self.assertEqual(kwargs['params'], {'fields': 'a,b',
'include_fields': 'false'})
@mock.patch('auth0.v2.emails.RestClient')
def test_config(self, mock_rc):
mock_instance = mock_rc.return_value
e = Emails(domain='domain', jwt_token='jwttoken')
e.config({'a': 'b', 'c': 'd'})
args, kwargs = mock_instance.post.call_args
self.assertEqual('https://domain/api/v2/emails/provider', args[0])
self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'})
@mock.patch('auth0.v2.emails.RestClient')
def test_update(self, mock_rc):
mock_instance = mock_rc.return_value
e = Emails(domain='domain', jwt_token='jwttoken')
e.update({'a': 'b', 'c': 'd'})
args, kwargs = mock_instance.patch.call_args
self.assertEqual('https://domain/api/v2/emails/provider', args[0])
self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'})
@mock.patch('auth0.v2.emails.RestClient')
def test_delete(self, mock_rc):
mock_instance = mock_rc.return_value
e = Emails(domain='domain', jwt_token='jwttoken')
e.delete()
mock_instance.delete.assert_called_with(
'https://domain/api/v2/emails/provider'
)
|
<commit_before><commit_msg>Add unit tests for Emails()<commit_after>import unittest
import mock
from ..emails import Emails
class TestEmails(unittest.TestCase):
@mock.patch('auth0.v2.emails.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
e = Emails(domain='domain', jwt_token='jwttoken')
e.get()
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/emails/provider', args[0])
self.assertEqual(kwargs['params'], {'fields': None,
'include_fields': 'true'})
e.get(fields=['a', 'b'], include_fields=False)
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/emails/provider', args[0])
self.assertEqual(kwargs['params'], {'fields': 'a,b',
'include_fields': 'false'})
@mock.patch('auth0.v2.emails.RestClient')
def test_config(self, mock_rc):
mock_instance = mock_rc.return_value
e = Emails(domain='domain', jwt_token='jwttoken')
e.config({'a': 'b', 'c': 'd'})
args, kwargs = mock_instance.post.call_args
self.assertEqual('https://domain/api/v2/emails/provider', args[0])
self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'})
@mock.patch('auth0.v2.emails.RestClient')
def test_update(self, mock_rc):
mock_instance = mock_rc.return_value
e = Emails(domain='domain', jwt_token='jwttoken')
e.update({'a': 'b', 'c': 'd'})
args, kwargs = mock_instance.patch.call_args
self.assertEqual('https://domain/api/v2/emails/provider', args[0])
self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'})
@mock.patch('auth0.v2.emails.RestClient')
def test_delete(self, mock_rc):
mock_instance = mock_rc.return_value
e = Emails(domain='domain', jwt_token='jwttoken')
e.delete()
mock_instance.delete.assert_called_with(
'https://domain/api/v2/emails/provider'
)
|
|
db11120077783fc3cc539fa919f822004d3ff355
|
pari/user/management/commands/bulk_mailer.py
|
pari/user/management/commands/bulk_mailer.py
|
from django.core.management.base import BaseCommand
from django.core.mail import EmailMessage, get_connection
from django.template import Template, Context
from mezzanine.pages.models import Page
import csv
class Command(BaseCommand):
args = "<csv_file_path> [<from>]"
help = "Send mails to contacts on a CSV file"
def handle(self, *args, **options):
csv_file = args[0]
dr = csv.DictReader(open(csv_file, "r"))
conn = get_connection()
if len(args) > 1:
from_email = args[1]
else:
contact_page = Page.objects.get(slug="contact-us")
from_email = contact_page.form.email_from
subject_tmpl = Template("")
body_tmpl = Template("")
for row in dr:
if row["message"]:
body_tmpl = Template(row["message"])
if row["subject"]:
subject_tmpl = Template(row["subject"])
kwargs = {
"subject": subject_tmpl.render(Context(row)),
"body": body_tmpl.render(Context(row)),
"from_email": from_email,
"to": [row["to"]],
"connection": conn
}
msg = EmailMessage(**kwargs)
msg.send()
|
Send mails to contacts referenced in a csv file.
|
Send mails to contacts referenced in a csv file.
|
Python
|
bsd-3-clause
|
RuralIndia/pari,RuralIndia/pari,RuralIndia/pari,RuralIndia/pari
|
Send mails to contacts referenced in a csv file.
|
from django.core.management.base import BaseCommand
from django.core.mail import EmailMessage, get_connection
from django.template import Template, Context
from mezzanine.pages.models import Page
import csv
class Command(BaseCommand):
args = "<csv_file_path> [<from>]"
help = "Send mails to contacts on a CSV file"
def handle(self, *args, **options):
csv_file = args[0]
dr = csv.DictReader(open(csv_file, "r"))
conn = get_connection()
if len(args) > 1:
from_email = args[1]
else:
contact_page = Page.objects.get(slug="contact-us")
from_email = contact_page.form.email_from
subject_tmpl = Template("")
body_tmpl = Template("")
for row in dr:
if row["message"]:
body_tmpl = Template(row["message"])
if row["subject"]:
subject_tmpl = Template(row["subject"])
kwargs = {
"subject": subject_tmpl.render(Context(row)),
"body": body_tmpl.render(Context(row)),
"from_email": from_email,
"to": [row["to"]],
"connection": conn
}
msg = EmailMessage(**kwargs)
msg.send()
|
<commit_before><commit_msg>Send mails to contacts referenced in a csv file.<commit_after>
|
from django.core.management.base import BaseCommand
from django.core.mail import EmailMessage, get_connection
from django.template import Template, Context
from mezzanine.pages.models import Page
import csv
class Command(BaseCommand):
args = "<csv_file_path> [<from>]"
help = "Send mails to contacts on a CSV file"
def handle(self, *args, **options):
csv_file = args[0]
dr = csv.DictReader(open(csv_file, "r"))
conn = get_connection()
if len(args) > 1:
from_email = args[1]
else:
contact_page = Page.objects.get(slug="contact-us")
from_email = contact_page.form.email_from
subject_tmpl = Template("")
body_tmpl = Template("")
for row in dr:
if row["message"]:
body_tmpl = Template(row["message"])
if row["subject"]:
subject_tmpl = Template(row["subject"])
kwargs = {
"subject": subject_tmpl.render(Context(row)),
"body": body_tmpl.render(Context(row)),
"from_email": from_email,
"to": [row["to"]],
"connection": conn
}
msg = EmailMessage(**kwargs)
msg.send()
|
Send mails to contacts referenced in a csv file.
from django.core.management.base import BaseCommand
from django.core.mail import EmailMessage, get_connection
from django.template import Template, Context
from mezzanine.pages.models import Page
import csv
class Command(BaseCommand):
args = "<csv_file_path> [<from>]"
help = "Send mails to contacts on a CSV file"
def handle(self, *args, **options):
csv_file = args[0]
dr = csv.DictReader(open(csv_file, "r"))
conn = get_connection()
if len(args) > 1:
from_email = args[1]
else:
contact_page = Page.objects.get(slug="contact-us")
from_email = contact_page.form.email_from
subject_tmpl = Template("")
body_tmpl = Template("")
for row in dr:
if row["message"]:
body_tmpl = Template(row["message"])
if row["subject"]:
subject_tmpl = Template(row["subject"])
kwargs = {
"subject": subject_tmpl.render(Context(row)),
"body": body_tmpl.render(Context(row)),
"from_email": from_email,
"to": [row["to"]],
"connection": conn
}
msg = EmailMessage(**kwargs)
msg.send()
|
<commit_before><commit_msg>Send mails to contacts referenced in a csv file.<commit_after>from django.core.management.base import BaseCommand
from django.core.mail import EmailMessage, get_connection
from django.template import Template, Context
from mezzanine.pages.models import Page
import csv
class Command(BaseCommand):
args = "<csv_file_path> [<from>]"
help = "Send mails to contacts on a CSV file"
def handle(self, *args, **options):
csv_file = args[0]
dr = csv.DictReader(open(csv_file, "r"))
conn = get_connection()
if len(args) > 1:
from_email = args[1]
else:
contact_page = Page.objects.get(slug="contact-us")
from_email = contact_page.form.email_from
subject_tmpl = Template("")
body_tmpl = Template("")
for row in dr:
if row["message"]:
body_tmpl = Template(row["message"])
if row["subject"]:
subject_tmpl = Template(row["subject"])
kwargs = {
"subject": subject_tmpl.render(Context(row)),
"body": body_tmpl.render(Context(row)),
"from_email": from_email,
"to": [row["to"]],
"connection": conn
}
msg = EmailMessage(**kwargs)
msg.send()
|
|
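A note on the bulk_mailer command in the record above: Command.handle() only reads the "to", "subject" and "message" columns directly, but because the whole row is passed as the template Context, any additional column becomes a variable usable inside the subject/message templates, and an empty subject or message cell reuses the previously seen template. The commit does not include a sample file, so the following sketch of a compatible input is purely illustrative (the "first_name" column is an assumption, not from the commit):

import csv
import io

# Hypothetical input for the bulk_mailer command above; only "to", "subject" and
# "message" are required by the code, extra columns (here "first_name") are just
# extra template context. Empty subject/message cells fall back to the previous
# row's templates, exactly as the loop in handle() does.
sample_csv = io.StringIO(
    "to,first_name,subject,message\n"
    "alice@example.com,Alice,Hello {{ first_name }},Thanks for signing up.\n"
    "bob@example.com,Bob,,\n"
)
for row in csv.DictReader(sample_csv):
    # Mirrors the per-row data the management command would hand to Django's
    # Template.render() before building the EmailMessage.
    print(row["to"], row.get("first_name"))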
ebcc9aef51d6244e69b5241b49359f8e8d2c0f85
|
estmator_project/estmator_project/test_functional.py
|
estmator_project/estmator_project/test_functional.py
|
from django.contrib.staticfiles.testing import LiveServerTestCase
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import Client, TestCase
from splinter import Browser
from time import sleep
from .factories import (
UserFactory, ClientFactory, CompanyFactory, CategoryFactory,
ProductFactory, QuoteFactory, QuoteModsFactory)
class LiveServerSplinterTest(LiveServerTestCase):
@classmethod
def setUpClass(cls):
super(LiveServerSplinterTest, cls).setUpClass()
cls.browser = Browser()
cls.user1 = UserFactory()
cls.user1.set_password('secret')
cls.user1.save()
cls.client1 = ClientFactory()
cls.category1 = CategoryFactory(name='Chairs')
cls.category2 = CategoryFactory(name='Tables')
cls.product1 = ProductFactory(category=cls.category1)
cls.product2 = ProductFactory(category=cls.category1)
cls.product3 = ProductFactory(category=cls.category2)
cls.login_helper(cls.user1.username, 'secret')
@classmethod
def tearDownClass(cls):
cls.browser.quit()
super(LiveServerSplinterTest, cls).tearDownClass()
sleep(3)
@classmethod
def login_helper(cls, username, password):
cls.browser.visit('{}{}'.format(
cls.live_server_url, '/accounts/login/')
)
cls.browser.fill('username', username)
cls.browser.fill('password', password)
cls.browser.find_by_value('Log in').first.click()
def setUp(self):
pass
def test_auth_redirect_to_menu_page(self):
self.browser.visit('{}{}'.format(
self.live_server_url, '/menu')
)
|
Add beginning of splinter tests
|
Add beginning of splinter tests
|
Python
|
mit
|
Estmator/EstmatorApp,Estmator/EstmatorApp,Estmator/EstmatorApp
|
Add beginning of splinter tests
|
from django.contrib.staticfiles.testing import LiveServerTestCase
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import Client, TestCase
from splinter import Browser
from time import sleep
from .factories import (
UserFactory, ClientFactory, CompanyFactory, CategoryFactory,
ProductFactory, QuoteFactory, QuoteModsFactory)
class LiveServerSplinterTest(LiveServerTestCase):
@classmethod
def setUpClass(cls):
super(LiveServerSplinterTest, cls).setUpClass()
cls.browser = Browser()
cls.user1 = UserFactory()
cls.user1.set_password('secret')
cls.user1.save()
cls.client1 = ClientFactory()
cls.category1 = CategoryFactory(name='Chairs')
cls.category2 = CategoryFactory(name='Tables')
cls.product1 = ProductFactory(category=cls.category1)
cls.product2 = ProductFactory(category=cls.category1)
cls.product3 = ProductFactory(category=cls.category2)
cls.login_helper(cls.user1.username, 'secret')
@classmethod
def tearDownClass(cls):
cls.browser.quit()
super(LiveServerSplinterTest, cls).tearDownClass()
sleep(3)
@classmethod
def login_helper(cls, username, password):
cls.browser.visit('{}{}'.format(
cls.live_server_url, '/accounts/login/')
)
cls.browser.fill('username', username)
cls.browser.fill('password', password)
cls.browser.find_by_value('Log in').first.click()
def setUp(self):
pass
def test_auth_redirect_to_menu_page(self):
self.browser.visit('{}{}'.format(
self.live_server_url, '/menu')
)
|
<commit_before><commit_msg>Add beginning of splinter tests<commit_after>
|
from django.contrib.staticfiles.testing import LiveServerTestCase
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import Client, TestCase
from splinter import Browser
from time import sleep
from .factories import (
UserFactory, ClientFactory, CompanyFactory, CategoryFactory,
ProductFactory, QuoteFactory, QuoteModsFactory)
class LiveServerSplinterTest(LiveServerTestCase):
@classmethod
def setUpClass(cls):
super(LiveServerSplinterTest, cls).setUpClass()
cls.browser = Browser()
cls.user1 = UserFactory()
cls.user1.set_password('secret')
cls.user1.save()
cls.client1 = ClientFactory()
cls.category1 = CategoryFactory(name='Chairs')
cls.category2 = CategoryFactory(name='Tables')
cls.product1 = ProductFactory(category=cls.category1)
cls.product2 = ProductFactory(category=cls.category1)
cls.product3 = ProductFactory(category=cls.category2)
cls.login_helper(cls.user1.username, 'secret')
@classmethod
def tearDownClass(cls):
cls.browser.quit()
super(LiveServerSplinterTest, cls).tearDownClass()
sleep(3)
@classmethod
def login_helper(cls, username, password):
cls.browser.visit('{}{}'.format(
cls.live_server_url, '/accounts/login/')
)
cls.browser.fill('username', username)
cls.browser.fill('password', password)
cls.browser.find_by_value('Log in').first.click()
def setUp(self):
pass
def test_auth_redirect_to_menu_page(self):
self.browser.visit('{}{}'.format(
self.live_server_url, '/menu')
)
|
Add beginning of splinter tests
from django.contrib.staticfiles.testing import LiveServerTestCase
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import Client, TestCase
from splinter import Browser
from time import sleep
from .factories import (
UserFactory, ClientFactory, CompanyFactory, CategoryFactory,
ProductFactory, QuoteFactory, QuoteModsFactory)
class LiveServerSplinterTest(LiveServerTestCase):
@classmethod
def setUpClass(cls):
super(LiveServerSplinterTest, cls).setUpClass()
cls.browser = Browser()
cls.user1 = UserFactory()
cls.user1.set_password('secret')
cls.user1.save()
cls.client1 = ClientFactory()
cls.category1 = CategoryFactory(name='Chairs')
cls.category2 = CategoryFactory(name='Tables')
cls.product1 = ProductFactory(category=cls.category1)
cls.product2 = ProductFactory(category=cls.category1)
cls.product3 = ProductFactory(category=cls.category2)
cls.login_helper(cls.user1.username, 'secret')
@classmethod
def tearDownClass(cls):
cls.browser.quit()
super(LiveServerSplinterTest, cls).tearDownClass()
sleep(3)
@classmethod
def login_helper(cls, username, password):
cls.browser.visit('{}{}'.format(
cls.live_server_url, '/accounts/login/')
)
cls.browser.fill('username', username)
cls.browser.fill('password', password)
cls.browser.find_by_value('Log in').first.click()
def setUp(self):
pass
def test_auth_redirect_to_menu_page(self):
self.browser.visit('{}{}'.format(
self.live_server_url, '/menu')
)
|
<commit_before><commit_msg>Add beginning of splinter tests<commit_after>from django.contrib.staticfiles.testing import LiveServerTestCase
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import Client, TestCase
from splinter import Browser
from time import sleep
from .factories import (
UserFactory, ClientFactory, CompanyFactory, CategoryFactory,
ProductFactory, QuoteFactory, QuoteModsFactory)
class LiveServerSplinterTest(LiveServerTestCase):
@classmethod
def setUpClass(cls):
super(LiveServerSplinterTest, cls).setUpClass()
cls.browser = Browser()
cls.user1 = UserFactory()
cls.user1.set_password('secret')
cls.user1.save()
cls.client1 = ClientFactory()
cls.category1 = CategoryFactory(name='Chairs')
cls.category2 = CategoryFactory(name='Tables')
cls.product1 = ProductFactory(category=cls.category1)
cls.product2 = ProductFactory(category=cls.category1)
cls.product3 = ProductFactory(category=cls.category2)
cls.login_helper(cls.user1.username, 'secret')
@classmethod
def tearDownClass(cls):
cls.browser.quit()
super(LiveServerSplinterTest, cls).tearDownClass()
sleep(3)
@classmethod
def login_helper(cls, username, password):
cls.browser.visit('{}{}'.format(
cls.live_server_url, '/accounts/login/')
)
cls.browser.fill('username', username)
cls.browser.fill('password', password)
cls.browser.find_by_value('Log in').first.click()
def setUp(self):
pass
def test_auth_redirect_to_menu_page(self):
self.browser.visit('{}{}'.format(
self.live_server_url, '/menu')
)
|
|
5a88660c98511f8cc0a8c210490a60b4805bdc58
|
tests/unit/anchore_engine/services/policy_engine/engine/policy/test_gate.py
|
tests/unit/anchore_engine/services/policy_engine/engine/policy/test_gate.py
|
import pytest
from anchore_engine.services.policy_engine.engine.policy.gates import PackageCheckGate
from anchore_engine.services.policy_engine.engine.policy.gates.dockerfile import (
EffectiveUserTrigger,
)
from anchore_engine.services.policy_engine.engine.policy.gates.npms import (
PkgMatchTrigger,
)
from anchore_engine.services.policy_engine.engine.policy.params import (
CommaDelimitedStringListParameter,
EnumStringParameter,
TriggerParameter,
)
class TestBaseTrigger:
"""
For the purposes of this test, we are using a few random trigger and gate (instead of testing every gate/trigger class combo)
To verify the parameters method works well.
This is specific to the random trigger that was selected, essentially verifying that
the parameters method does what it's supposed to, which is retrieving a dict of
data attributes and their values.
Note: for the gate parameter it is crucial to use a gate that has __lifecycle_state__ == LifecycleStates.eol for this test.
Otherwise, the BaseTrigger constructor won't be able to execute because the parameter validation will fail
"""
@pytest.mark.parametrize(
"param",
[
pytest.param(
{
"trigger": EffectiveUserTrigger,
"gate": PackageCheckGate,
"expected_params": {
"user": CommaDelimitedStringListParameter,
"allowed_type": EnumStringParameter,
},
},
id="effective-user-trigger",
),
pytest.param(
{
"trigger": PkgMatchTrigger,
"gate": PackageCheckGate,
"expected_params": {
"name": TriggerParameter,
"version": TriggerParameter,
},
},
id="npm-pkg-match-trigger",
),
],
)
def test_parameters(self, param):
parameters = param["trigger"](param["gate"]).parameters()
for key, value in param["expected_params"].items():
assert parameters.get(key).__class__ == value
|
Add unit test for the BaseTrigger.parameters() method
|
Add unit test for the BaseTrigger.parameters() method
Signed-off-by: Samuel Dacanay <600ff55f21d6ff32f29d8ef60f49bc6b12380523@anchore.com>
|
Python
|
apache-2.0
|
anchore/anchore-engine,anchore/anchore-engine,anchore/anchore-engine
|
Add unit test for the BaseTrigger.parameters() method
Signed-off-by: Samuel Dacanay <600ff55f21d6ff32f29d8ef60f49bc6b12380523@anchore.com>
|
import pytest
from anchore_engine.services.policy_engine.engine.policy.gates import PackageCheckGate
from anchore_engine.services.policy_engine.engine.policy.gates.dockerfile import (
EffectiveUserTrigger,
)
from anchore_engine.services.policy_engine.engine.policy.gates.npms import (
PkgMatchTrigger,
)
from anchore_engine.services.policy_engine.engine.policy.params import (
CommaDelimitedStringListParameter,
EnumStringParameter,
TriggerParameter,
)
class TestBaseTrigger:
"""
For the purposes of this test, we are using a few random trigger and gate (instead of testing every gate/trigger class combo)
To verify the parameters method works well.
This is specific to the random trigger that was selected, essentially verifying that
the parameters method does what it's supposed to, which is retrieving a dict of
data attributes and their values.
Note: for the gate parameter it is crucial to use a gate that has __lifecycle_state__ == LifecycleStates.eol for this test.
Otherwise, the BaseTrigger constructor won't be able to execute because the parameter validation will fail
"""
@pytest.mark.parametrize(
"param",
[
pytest.param(
{
"trigger": EffectiveUserTrigger,
"gate": PackageCheckGate,
"expected_params": {
"user": CommaDelimitedStringListParameter,
"allowed_type": EnumStringParameter,
},
},
id="effective-user-trigger",
),
pytest.param(
{
"trigger": PkgMatchTrigger,
"gate": PackageCheckGate,
"expected_params": {
"name": TriggerParameter,
"version": TriggerParameter,
},
},
id="npm-pkg-match-trigger",
),
],
)
def test_parameters(self, param):
parameters = param["trigger"](param["gate"]).parameters()
for key, value in param["expected_params"].items():
assert parameters.get(key).__class__ == value
|
<commit_before><commit_msg>Add unit test for the BaseTrigger.parameters() method
Signed-off-by: Samuel Dacanay <600ff55f21d6ff32f29d8ef60f49bc6b12380523@anchore.com><commit_after>
|
import pytest
from anchore_engine.services.policy_engine.engine.policy.gates import PackageCheckGate
from anchore_engine.services.policy_engine.engine.policy.gates.dockerfile import (
EffectiveUserTrigger,
)
from anchore_engine.services.policy_engine.engine.policy.gates.npms import (
PkgMatchTrigger,
)
from anchore_engine.services.policy_engine.engine.policy.params import (
CommaDelimitedStringListParameter,
EnumStringParameter,
TriggerParameter,
)
class TestBaseTrigger:
"""
For the purposes of this test, we are using a few random trigger and gate (instead of testing every gate/trigger class combo)
To verify the parameters method works well.
This is specific to the random trigger that was selected, essentially verifying that
the parameters method does what it's supposed to, which is retrieving a dict of
data attributes and their values.
Note: for the gate parameter it is crucial to use a gate that has __lifecycle_state__ == LifecycleStates.eol for this test.
Otherwise, the BaseTrigger constructor won't be able to execute because the parameter validation will fail
"""
@pytest.mark.parametrize(
"param",
[
pytest.param(
{
"trigger": EffectiveUserTrigger,
"gate": PackageCheckGate,
"expected_params": {
"user": CommaDelimitedStringListParameter,
"allowed_type": EnumStringParameter,
},
},
id="effective-user-trigger",
),
pytest.param(
{
"trigger": PkgMatchTrigger,
"gate": PackageCheckGate,
"expected_params": {
"name": TriggerParameter,
"version": TriggerParameter,
},
},
id="npm-pkg-match-trigger",
),
],
)
def test_parameters(self, param):
parameters = param["trigger"](param["gate"]).parameters()
for key, value in param["expected_params"].items():
assert parameters.get(key).__class__ == value
|
Add unit test for the BaseTrigger.parameters() method
Signed-off-by: Samuel Dacanay <600ff55f21d6ff32f29d8ef60f49bc6b12380523@anchore.com>
import pytest
from anchore_engine.services.policy_engine.engine.policy.gates import PackageCheckGate
from anchore_engine.services.policy_engine.engine.policy.gates.dockerfile import (
EffectiveUserTrigger,
)
from anchore_engine.services.policy_engine.engine.policy.gates.npms import (
PkgMatchTrigger,
)
from anchore_engine.services.policy_engine.engine.policy.params import (
CommaDelimitedStringListParameter,
EnumStringParameter,
TriggerParameter,
)
class TestBaseTrigger:
"""
For the purposes of this test, we are using a few random trigger and gate (instead of testing every gate/trigger class combo)
To verify the parameters method works well.
This is specific to the random trigger that was selected, essentially verifying that
the parameters method does what it's supposed to, which is retrieving a dict of
data attributes and their values.
Note: for the gate parameter it is crucial to use a gate that has __lifecycle_state__ == LifecycleStates.eol for this test.
Otherwise, the BaseTrigger constructor won't be able to execute because the parameter validation will fail
"""
@pytest.mark.parametrize(
"param",
[
pytest.param(
{
"trigger": EffectiveUserTrigger,
"gate": PackageCheckGate,
"expected_params": {
"user": CommaDelimitedStringListParameter,
"allowed_type": EnumStringParameter,
},
},
id="effective-user-trigger",
),
pytest.param(
{
"trigger": PkgMatchTrigger,
"gate": PackageCheckGate,
"expected_params": {
"name": TriggerParameter,
"version": TriggerParameter,
},
},
id="npm-pkg-match-trigger",
),
],
)
def test_parameters(self, param):
parameters = param["trigger"](param["gate"]).parameters()
for key, value in param["expected_params"].items():
assert parameters.get(key).__class__ == value
|
<commit_before><commit_msg>Add unit test for the BaseTrigger.parameters() method
Signed-off-by: Samuel Dacanay <600ff55f21d6ff32f29d8ef60f49bc6b12380523@anchore.com><commit_after>import pytest
from anchore_engine.services.policy_engine.engine.policy.gates import PackageCheckGate
from anchore_engine.services.policy_engine.engine.policy.gates.dockerfile import (
EffectiveUserTrigger,
)
from anchore_engine.services.policy_engine.engine.policy.gates.npms import (
PkgMatchTrigger,
)
from anchore_engine.services.policy_engine.engine.policy.params import (
CommaDelimitedStringListParameter,
EnumStringParameter,
TriggerParameter,
)
class TestBaseTrigger:
"""
For the purposes of this test, we are using a few random trigger and gate (instead of testing every gate/trigger class combo)
To verify the parameters method works well.
This is specific to the random trigger that was selected, essentially verifying that
the parameters method does what it's supposed to, which is retrieving a dict of
data attributes and their values.
Note: for the gate parameter it is crucial to use a gate that has __lifecycle_state__ == LifecycleStates.eol for this test.
Otherwise, the BaseTrigger constructor won't be able to execute because the parameter validation will fail
"""
@pytest.mark.parametrize(
"param",
[
pytest.param(
{
"trigger": EffectiveUserTrigger,
"gate": PackageCheckGate,
"expected_params": {
"user": CommaDelimitedStringListParameter,
"allowed_type": EnumStringParameter,
},
},
id="effective-user-trigger",
),
pytest.param(
{
"trigger": PkgMatchTrigger,
"gate": PackageCheckGate,
"expected_params": {
"name": TriggerParameter,
"version": TriggerParameter,
},
},
id="npm-pkg-match-trigger",
),
],
)
def test_parameters(self, param):
parameters = param["trigger"](param["gate"]).parameters()
for key, value in param["expected_params"].items():
assert parameters.get(key).__class__ == value
|
|
3f2ae3d0efe05389ef5f269f7e3d926d64da8e3e
|
sqlobject/tests/test_NoneValuedResultItem.py
|
sqlobject/tests/test_NoneValuedResultItem.py
|
'''Test that selectResults handle NULL values
from, for example, outer joins.'''
from sqlobject import *
from sqlobject.tests.dbtest import *
class TestComposer(SQLObject):
name = StringCol()
class TestWork(SQLObject):
class sqlmeta:
idName = "work_id"
composer = ForeignKey('TestComposer')
title = StringCol()
def test1():
setupClass(TestComposer)
setupClass(TestWork)
c = TestComposer(name='Mahler, Gustav')
w = TestWork(composer=c, title='Symphony No. 9')
c2 = TestComposer(name='Bruckner, Anton')
# but don't add any works for Bruckner
# do a left join, a common use case that often involves NULL results
s = TestWork.select(join=sqlbuilder.LEFTJOINOn(TestComposer, TestWork,
TestComposer.q.id==TestWork.q.composerID))
assert tuple(s)==(w, None)
|
Test that query results with None IDs (e.g. some outer join cases) are handled correctly, i.e. return None for that object.
|
Test that query results with None IDs (e.g. some outer join cases)
are handled correctly, i.e. return None for that object.
git-svn-id: fe2f45b2405132b4a9af5caedfc153c2e6f542f4@777 95a46c32-92d2-0310-94a5-8d71aeb3d4b3
|
Python
|
lgpl-2.1
|
drnlm/sqlobject,drnlm/sqlobject,sqlobject/sqlobject,sqlobject/sqlobject
|
Test that query results with None IDs (e.g. some outer join cases)
are handled correctly, i.e. return None for that object.
git-svn-id: fe2f45b2405132b4a9af5caedfc153c2e6f542f4@777 95a46c32-92d2-0310-94a5-8d71aeb3d4b3
|
'''Test that selectResults handle NULL values
from, for example, outer joins.'''
from sqlobject import *
from sqlobject.tests.dbtest import *
class TestComposer(SQLObject):
name = StringCol()
class TestWork(SQLObject):
class sqlmeta:
idName = "work_id"
composer = ForeignKey('TestComposer')
title = StringCol()
def test1():
setupClass(TestComposer)
setupClass(TestWork)
c = TestComposer(name='Mahler, Gustav')
w = TestWork(composer=c, title='Symphony No. 9')
c2 = TestComposer(name='Bruckner, Anton')
# but don't add any works for Bruckner
# do a left join, a common use case that often involves NULL results
s = TestWork.select(join=sqlbuilder.LEFTJOINOn(TestComposer, TestWork,
TestComposer.q.id==TestWork.q.composerID))
assert tuple(s)==(w, None)
|
<commit_before><commit_msg>Test that query results with None IDs (e.g. some outer join cases)
are handled correctly, i.e. return None for that object.
git-svn-id: fe2f45b2405132b4a9af5caedfc153c2e6f542f4@777 95a46c32-92d2-0310-94a5-8d71aeb3d4b3<commit_after>
|
'''Test that selectResults handle NULL values
from, for example, outer joins.'''
from sqlobject import *
from sqlobject.tests.dbtest import *
class TestComposer(SQLObject):
name = StringCol()
class TestWork(SQLObject):
class sqlmeta:
idName = "work_id"
composer = ForeignKey('TestComposer')
title = StringCol()
def test1():
setupClass(TestComposer)
setupClass(TestWork)
c = TestComposer(name='Mahler, Gustav')
w = TestWork(composer=c, title='Symphony No. 9')
c2 = TestComposer(name='Bruckner, Anton')
# but don't add any works for Bruckner
# do a left join, a common use case that often involves NULL results
s = TestWork.select(join=sqlbuilder.LEFTJOINOn(TestComposer, TestWork,
TestComposer.q.id==TestWork.q.composerID))
assert tuple(s)==(w, None)
|
Test that query results with None IDs (e.g. some outer join cases)
are handled correctly, i.e. return None for that object.
git-svn-id: fe2f45b2405132b4a9af5caedfc153c2e6f542f4@777 95a46c32-92d2-0310-94a5-8d71aeb3d4b3
'''Test that selectResults handle NULL values
from, for example, outer joins.'''
from sqlobject import *
from sqlobject.tests.dbtest import *
class TestComposer(SQLObject):
name = StringCol()
class TestWork(SQLObject):
class sqlmeta:
idName = "work_id"
composer = ForeignKey('TestComposer')
title = StringCol()
def test1():
setupClass(TestComposer)
setupClass(TestWork)
c = TestComposer(name='Mahler, Gustav')
w = TestWork(composer=c, title='Symphony No. 9')
c2 = TestComposer(name='Bruckner, Anton')
# but don't add any works for Bruckner
# do a left join, a common use case that often involves NULL results
s = TestWork.select(join=sqlbuilder.LEFTJOINOn(TestComposer, TestWork,
TestComposer.q.id==TestWork.q.composerID))
assert tuple(s)==(w, None)
|
<commit_before><commit_msg>Test that query results with None IDs (e.g. some outer join cases)
are handled correctly, i.e. return None for that object.
git-svn-id: fe2f45b2405132b4a9af5caedfc153c2e6f542f4@777 95a46c32-92d2-0310-94a5-8d71aeb3d4b3<commit_after>'''Test that selectResults handle NULL values
from, for example, outer joins.'''
from sqlobject import *
from sqlobject.tests.dbtest import *
class TestComposer(SQLObject):
name = StringCol()
class TestWork(SQLObject):
class sqlmeta:
idName = "work_id"
composer = ForeignKey('TestComposer')
title = StringCol()
def test1():
setupClass(TestComposer)
setupClass(TestWork)
c = TestComposer(name='Mahler, Gustav')
w = TestWork(composer=c, title='Symphony No. 9')
c2 = TestComposer(name='Bruckner, Anton')
# but don't add any works for Bruckner
# do a left join, a common use case that often involves NULL results
s = TestWork.select(join=sqlbuilder.LEFTJOINOn(TestComposer, TestWork,
TestComposer.q.id==TestWork.q.composerID))
assert tuple(s)==(w, None)
|
|
e1132fb8642a572eb674b69160db6bbd83b52cab
|
resolwe_bio/migrations/0007_sample_descriptor_dirty.py
|
resolwe_bio/migrations/0007_sample_descriptor_dirty.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-03 10:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resolwe_bio', '0006_alter_versionfield'),
]
operations = [
migrations.AddField(
model_name='sample',
name='descriptor_dirty',
field=models.BooleanField(default=False),
),
]
|
Add migration for descriptor_dirty field in Sample
|
Add migration for descriptor_dirty field in Sample
|
Python
|
apache-2.0
|
genialis/resolwe-bio,genialis/resolwe-bio,genialis/resolwe-bio,genialis/resolwe-bio
|
Add migration for descriptor_dirty field in Sample
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-03 10:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resolwe_bio', '0006_alter_versionfield'),
]
operations = [
migrations.AddField(
model_name='sample',
name='descriptor_dirty',
field=models.BooleanField(default=False),
),
]
|
<commit_before><commit_msg>Add migration for descriptor_dirty field in Sample<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-03 10:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resolwe_bio', '0006_alter_versionfield'),
]
operations = [
migrations.AddField(
model_name='sample',
name='descriptor_dirty',
field=models.BooleanField(default=False),
),
]
|
Add migration for descriptor_dirty field in Sample
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-03 10:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resolwe_bio', '0006_alter_versionfield'),
]
operations = [
migrations.AddField(
model_name='sample',
name='descriptor_dirty',
field=models.BooleanField(default=False),
),
]
|
<commit_before><commit_msg>Add migration for descriptor_dirty field in Sample<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-03 10:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resolwe_bio', '0006_alter_versionfield'),
]
operations = [
migrations.AddField(
model_name='sample',
name='descriptor_dirty',
field=models.BooleanField(default=False),
),
]
|
|
e3fdf6c03420cd95da33f4e36a0721308107687e
|
suppress_classifier.py
|
suppress_classifier.py
|
#!/usr/bin/env python3
import argparse
import subprocess
from itertools import combinations
from sys import executable
parser = argparse.ArgumentParser()
parser.add_argument('classifier', help='path to the classifier module')
parser.add_argument('feature_count', help='the number of features', type=int)
parser.add_argument('mwe', help='path to the MWE module')
parser.add_argument('training_data', help='the datafile to train with')
parser.add_argument('testing_data', help='the datafile to test with')
parser.add_argument('training_output', help='the output file for training data classification')
parser.add_argument('testing_output', help='the output file for testing data classification')
args = parser.parse_args()
for count in range(args.feature_count + 1):
for combination in combinations(range(args.feature_count), count):
classify = [
executable,
args.classifier, args.training_data, args.testing_data, args.training_output, args.testing_output
]
for suppressed_feature_number in combination:
classify.append('-s')
classify.append(str(suppressed_feature_number))
print(f"Classifying while suppressing features: {combination}")
print()
subprocess.run(classify)
mwe = [
executable,
args.mwe, args.training_output, args.testing_output
]
process = subprocess.run(mwe, stdout=subprocess.PIPE)
print(process.stdout.decode('utf-8'))
print('-' * 80)
|
Add automated script for determining which features to suppress
|
Add automated script for determining which features to suppress
|
Python
|
mit
|
pdarragh/MinSem
|
Add automated script for determining which features to suppress
|
#!/usr/bin/env python3
import argparse
import subprocess
from itertools import combinations
from sys import executable
parser = argparse.ArgumentParser()
parser.add_argument('classifier', help='path to the classifier module')
parser.add_argument('feature_count', help='the number of features', type=int)
parser.add_argument('mwe', help='path to the MWE module')
parser.add_argument('training_data', help='the datafile to train with')
parser.add_argument('testing_data', help='the datafile to test with')
parser.add_argument('training_output', help='the output file for training data classification')
parser.add_argument('testing_output', help='the output file for testing data classification')
args = parser.parse_args()
for count in range(args.feature_count + 1):
for combination in combinations(range(args.feature_count), count):
classify = [
executable,
args.classifier, args.training_data, args.testing_data, args.training_output, args.testing_output
]
for suppressed_feature_number in combination:
classify.append('-s')
classify.append(str(suppressed_feature_number))
print(f"Classifying while suppressing features: {combination}")
print()
subprocess.run(classify)
mwe = [
executable,
args.mwe, args.training_output, args.testing_output
]
process = subprocess.run(mwe, stdout=subprocess.PIPE)
print(process.stdout.decode('utf-8'))
print('-' * 80)
|
<commit_before><commit_msg>Add automated script for determining which features to suppress<commit_after>
|
#!/usr/bin/env python3
import argparse
import subprocess
from itertools import combinations
from sys import executable
parser = argparse.ArgumentParser()
parser.add_argument('classifier', help='path to the classifier module')
parser.add_argument('feature_count', help='the number of features', type=int)
parser.add_argument('mwe', help='path to the MWE module')
parser.add_argument('training_data', help='the datafile to train with')
parser.add_argument('testing_data', help='the datafile to test with')
parser.add_argument('training_output', help='the output file for training data classification')
parser.add_argument('testing_output', help='the output file for testing data classification')
args = parser.parse_args()
for count in range(args.feature_count + 1):
for combination in combinations(range(args.feature_count), count):
classify = [
executable,
args.classifier, args.training_data, args.testing_data, args.training_output, args.testing_output
]
for suppressed_feature_number in combination:
classify.append('-s')
classify.append(str(suppressed_feature_number))
print(f"Classifying while suppressing features: {combination}")
print()
subprocess.run(classify)
mwe = [
executable,
args.mwe, args.training_output, args.testing_output
]
process = subprocess.run(mwe, stdout=subprocess.PIPE)
print(process.stdout.decode('utf-8'))
print('-' * 80)
|
Add automated script for determining which features to suppress
#!/usr/bin/env python3
import argparse
import subprocess
from itertools import combinations
from sys import executable
parser = argparse.ArgumentParser()
parser.add_argument('classifier', help='path to the classifier module')
parser.add_argument('feature_count', help='the number of features', type=int)
parser.add_argument('mwe', help='path to the MWE module')
parser.add_argument('training_data', help='the datafile to train with')
parser.add_argument('testing_data', help='the datafile to test with')
parser.add_argument('training_output', help='the output file for training data classification')
parser.add_argument('testing_output', help='the output file for testing data classification')
args = parser.parse_args()
for count in range(args.feature_count + 1):
for combination in combinations(range(args.feature_count), count):
classify = [
executable,
args.classifier, args.training_data, args.testing_data, args.training_output, args.testing_output
]
for suppressed_feature_number in combination:
classify.append('-s')
classify.append(str(suppressed_feature_number))
print(f"Classifying while suppressing features: {combination}")
print()
subprocess.run(classify)
mwe = [
executable,
args.mwe, args.training_output, args.testing_output
]
process = subprocess.run(mwe, stdout=subprocess.PIPE)
print(process.stdout.decode('utf-8'))
print('-' * 80)
|
<commit_before><commit_msg>Add automated script for determining which features to suppress<commit_after>#!/usr/bin/env python3
import argparse
import subprocess
from itertools import combinations
from sys import executable
parser = argparse.ArgumentParser()
parser.add_argument('classifier', help='path to the classifier module')
parser.add_argument('feature_count', help='the number of features', type=int)
parser.add_argument('mwe', help='path to the MWE module')
parser.add_argument('training_data', help='the datafile to train with')
parser.add_argument('testing_data', help='the datafile to test with')
parser.add_argument('training_output', help='the output file for training data classification')
parser.add_argument('testing_output', help='the output file for testing data classification')
args = parser.parse_args()
for count in range(args.feature_count + 1):
for combination in combinations(range(args.feature_count), count):
classify = [
executable,
args.classifier, args.training_data, args.testing_data, args.training_output, args.testing_output
]
for suppressed_feature_number in combination:
classify.append('-s')
classify.append(str(suppressed_feature_number))
print(f"Classifying while suppressing features: {combination}")
print()
subprocess.run(classify)
mwe = [
executable,
args.mwe, args.training_output, args.testing_output
]
process = subprocess.run(mwe, stdout=subprocess.PIPE)
print(process.stdout.decode('utf-8'))
print('-' * 80)
|
|
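One practical point about the suppression script in the record above: the nested loops enumerate every subset of feature indices, so the classifier and the MWE evaluator are each invoked 2**feature_count times. A quick sanity check of that count, written as a minimal sketch (the feature_count value here is only an example; the real one is a command-line argument):

from itertools import combinations

feature_count = 4  # illustrative; the script takes this as a CLI argument
subsets = [c for n in range(feature_count + 1)
           for c in combinations(range(feature_count), n)]
# Matches the number of classify + MWE runs the script would perform: 2**4 == 16.
print(len(subsets))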
80f9ed82cfedcca494aa7e960d546f992f2d6c60
|
supernova_report_parser.py
|
supernova_report_parser.py
|
import sys
import re
import json
import fileinput
import argparse
def print_json(files):
samples = {}
for line in fileinput.input(files=files):
m = re.search(r"P\d{4}_\d{3}", line)
if m:
s = m.group()
else:
l = line.split("=")
if len(l) > 2:
d = [x.strip(" -") for x in l]
samples.setdefault(s, {})[d[1]] = d[0]
print(json.dumps(samples))
def print_csv(files):
samples = {}
for line in fileinput.input(files=files):
m = re.search(r"P\d{4}_\d{3}", line)
if m:
samples.setdefault("SAMPLE", []).append(m.group())
else:
l = line.split("=")
if len(l) > 2:
d = [x.strip(" -") for x in l]
samples.setdefault(d[1], []).append(d[0])
data = []
for k,v in samples.items():
data.append([k] + v)
data = list(map(list, zip(*data))) # Transpose
for row in data:
print(",".join(row))
def main(args):
if args.csv:
print_csv(args.files)
else:
print_json(args.files)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Parse Supernova reports and print numbers in JSON or CSV")
parser.add_argument('--csv', action="store_true", default=False,
help='Output in CSV format, default is JSON')
parser.add_argument('files', metavar='FILE', nargs='*',
help='Files to parse, if empty or "-" stdin is used')
args = parser.parse_args()
main(args)
|
Add Supernova report parser script
|
Add Supernova report parser script
|
Python
|
mit
|
jgruselius/standalone_scripts,jgruselius/standalone_scripts
|
Add Supernova report parser script
|
import sys
import re
import json
import fileinput
import argparse
def print_json(files):
samples = {}
for line in fileinput.input(files=files):
m = re.search(r"P\d{4}_\d{3}", line)
if m:
s = m.group()
else:
l = line.split("=")
if len(l) > 2:
d = [x.strip(" -") for x in l]
samples.setdefault(s, {})[d[1]] = d[0]
print(json.dumps(samples))
def print_csv(files):
samples = {}
for line in fileinput.input(files=files):
m = re.search(r"P\d{4}_\d{3}", line)
if m:
samples.setdefault("SAMPLE", []).append(m.group())
else:
l = line.split("=")
if len(l) > 2:
d = [x.strip(" -") for x in l]
samples.setdefault(d[1], []).append(d[0])
data = []
for k,v in samples.items():
data.append([k] + v)
data = list(map(list, zip(*data))) # Transpose
for row in data:
print(",".join(row))
def main(args):
if args.csv:
print_csv(args.files)
else:
print_json(args.files)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Parse Supernova reports and print numbers in JSON or CSV")
parser.add_argument('--csv', action="store_true", default=False,
help='Output in CSV format, default is JSON')
parser.add_argument('files', metavar='FILE', nargs='*',
help='Files to parse, if empty or "-" stdin is used')
args = parser.parse_args()
main(args)
|
<commit_before><commit_msg>Add Supernova report parser script<commit_after>
|
import sys
import re
import json
import fileinput
import argparse
def print_json(files):
samples = {}
for line in fileinput.input(files=files):
m = re.search(r"P\d{4}_\d{3}", line)
if m:
s = m.group()
else:
l = line.split("=")
if len(l) > 2:
d = [x.strip(" -") for x in l]
samples.setdefault(s, {})[d[1]] = d[0]
print(json.dumps(samples))
def print_csv(files):
samples = {}
for line in fileinput.input(files=files):
m = re.search(r"P\d{4}_\d{3}", line)
if m:
samples.setdefault("SAMPLE", []).append(m.group())
else:
l = line.split("=")
if len(l) > 2:
d = [x.strip(" -") for x in l]
samples.setdefault(d[1], []).append(d[0])
data = []
for k,v in samples.items():
data.append([k] + v)
data = list(map(list, zip(*data))) # Transpose
for row in data:
print(",".join(row))
def main(args):
if args.csv:
print_csv(args.files)
else:
print_json(args.files)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Parse Supernova reports and print numbers in JSON or CSV")
parser.add_argument('--csv', action="store_true", default=False,
help='Output in CSV format, default is JSON')
parser.add_argument('files', metavar='FILE', nargs='*',
help='Files to parse, if empty or "-" stdin is used')
args = parser.parse_args()
main(args)
|
Add Supernova report parser script
import sys
import re
import json
import fileinput
import argparse
def print_json(files):
samples = {}
for line in fileinput.input(files=files):
m = re.search(r"P\d{4}_\d{3}", line)
if m:
s = m.group()
else:
l = line.split("=")
if len(l) > 2:
d = [x.strip(" -") for x in l]
samples.setdefault(s, {})[d[1]] = d[0]
print(json.dumps(samples))
def print_csv(files):
samples = {}
for line in fileinput.input(files=files):
m = re.search(r"P\d{4}_\d{3}", line)
if m:
samples.setdefault("SAMPLE", []).append(m.group())
else:
l = line.split("=")
if len(l) > 2:
d = [x.strip(" -") for x in l]
samples.setdefault(d[1], []).append(d[0])
data = []
for k,v in samples.items():
data.append([k] + v)
data = list(map(list, zip(*data))) # Transpose
for row in data:
print(",".join(row))
def main(args):
if args.csv:
print_csv(args.files)
else:
print_json(args.files)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Parse Supernova reports and print numbers in JSON or CSV")
parser.add_argument('--csv', action="store_true", default=False,
help='Output in CSV format, default is JSON')
parser.add_argument('files', metavar='FILE', nargs='*',
help='Files to parse, if empty or "-" stdin is used')
args = parser.parse_args()
main(args)
|
<commit_before><commit_msg>Add Supernova report parser script<commit_after>import sys
import re
import json
import fileinput
import argparse
def print_json(files):
samples = {}
for line in fileinput.input(files=files):
m = re.search(r"P\d{4}_\d{3}", line)
if m:
s = m.group()
else:
l = line.split("=")
if len(l) > 2:
d = [x.strip(" -") for x in l]
samples.setdefault(s, {})[d[1]] = d[0]
print(json.dumps(samples))
def print_csv(files):
samples = {}
for line in fileinput.input(files=files):
m = re.search(r"P\d{4}_\d{3}", line)
if m:
samples.setdefault("SAMPLE", []).append(m.group())
else:
l = line.split("=")
if len(l) > 2:
d = [x.strip(" -") for x in l]
samples.setdefault(d[1], []).append(d[0])
data = []
for k,v in samples.items():
data.append([k] + v)
data = list(map(list, zip(*data))) # Transpose
for row in data:
print(",".join(row))
def main(args):
if args.csv:
print_csv(args.files)
else:
print_json(args.files)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Parse Supernova reports and print numbers in JSON or CSV")
parser.add_argument('--csv', action="store_true", default=False,
help='Output in CSV format, default is JSON')
parser.add_argument('files', metavar='FILE', nargs='*',
help='Files to parse, if empty or "-" stdin is used')
args = parser.parse_args()
main(args)
|
|
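For readers unfamiliar with Supernova summary files, the parser in the record above keys each metric on the middle field of lines containing two "=" separators and groups those metrics under the PXXXX_XXX sample IDs it encounters. A minimal, self-contained illustration of that rule follows; the report lines themselves are invented for the example, not taken from the commit:

import re
import json

# Invented miniature "report": one sample ID line followed by one metric line
# in the "value = NAME = description" shape the parser expects.
lines = [
    "P1234_101",
    "- 1,234,567 = READS = number of reads; ideal 800M-1200M for human",
]
samples, current = {}, None
for line in lines:
    m = re.search(r"P\d{4}_\d{3}", line)
    if m:
        current = m.group()
    else:
        parts = [x.strip(" -") for x in line.split("=")]
        if len(parts) > 2:  # i.e. at least two "=" on the line, as in print_json()
            samples.setdefault(current, {})[parts[1]] = parts[0]
print(json.dumps(samples))  # {"P1234_101": {"READS": "1,234,567"}}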
8accd764b72299f6ed7be35104f9a2b958b0fce6
|
tests/commands_test.py
|
tests/commands_test.py
|
from unittest import TestCase
from mock import Mock, patch
from nyuki.commands import (_update_config, _merge_config, parse_init,
exhaustive_config)
class TestUpdateConfig(TestCase):
def test_001_call(self):
source = {'a': 1, 'b': {'c': 2}}
# Update
_update_config(source, '1', 'a')
self.assertEqual(source['a'], '1')
# Nested update
_update_config(source, 3, 'b.c')
self.assertEqual(source['b']['c'], 3)
# Create
_update_config(source, 4, 'b.d')
self.assertEqual(source['b']['d'], 4)
class TestMergeConfig(TestCase):
def test_001_call(self):
dict1 = {'a': 1, 'b': {'c': 2}}
dict2 = {'b': {'d': 3}}
result = _merge_config(dict1, dict2)
self.assertEqual(result, {'a': 1, 'b': {'c': 2, 'd': 3}})
class TestParseInit(TestCase):
@patch('nyuki.commands._read_file')
@patch('nyuki.commands._build_args')
def test_001_call(self, _build_args, _read_file):
# Arguments parsed
args = Mock()
args.cfg = 'config.json'
args.jid = 'test@localhost'
args.pwd = 'test'
args.srv = '127.0.0.1:5555'
args.api = 'localhost:8082'
args.debug = True
_build_args.return_value = args
# Config file
_read_file.return_value = {
'bus': {
'jid': 'iamrobert',
'password': 'mysuperstrongpassword',
}
}
# Result
configs = parse_init()
self.assertEqual(configs, {
'bus': {
'jid': 'test@localhost',
'password': 'test',
'host': '127.0.0.1',
'port': 5555
},
'api': {
'port': 8082,
'host': 'localhost'
},
'log': {
'root': {
'level': 'DEBUG'}
}
})
class TestExhaustiveConfig(TestCase):
def test_001_call(self):
parsed_configs = {
'bus': {
'jid': 'test@localhost',
'password': 'test',
'host': '127.0.0.1',
'port': 5555
},
'api': {
'port': 8082,
'host': 'localhost'
},
'log': {
'root': {
'level': 'DEBUG'}
}
}
self.assertIsInstance(exhaustive_config(parsed_configs), dict)
wrong_config = {
'bus': {
'jid': 'test@localhost'
}
}
with self.assertRaises(SystemExit) as call:
exhaustive_config(wrong_config)
self.assertEqual(call.exception.code, 1)
|
Add unit tests on commands.
|
Add unit tests on commands.
|
Python
|
apache-2.0
|
gdraynz/nyuki,optiflows/nyuki,optiflows/nyuki,gdraynz/nyuki
|
Add unit tests on commands.
|
from unittest import TestCase
from mock import Mock, patch
from nyuki.commands import (_update_config, _merge_config, parse_init,
exhaustive_config)
class TestUpdateConfig(TestCase):
def test_001_call(self):
source = {'a': 1, 'b': {'c': 2}}
# Update
_update_config(source, '1', 'a')
self.assertEqual(source['a'], '1')
# Nested update
_update_config(source, 3, 'b.c')
self.assertEqual(source['b']['c'], 3)
# Create
_update_config(source, 4, 'b.d')
self.assertEqual(source['b']['d'], 4)
class TestMergeConfig(TestCase):
def test_001_call(self):
dict1 = {'a': 1, 'b': {'c': 2}}
dict2 = {'b': {'d': 3}}
result = _merge_config(dict1, dict2)
self.assertEqual(result, {'a': 1, 'b': {'c': 2, 'd': 3}})
class TestParseInit(TestCase):
@patch('nyuki.commands._read_file')
@patch('nyuki.commands._build_args')
def test_001_call(self, _build_args, _read_file):
# Arguments parsed
args = Mock()
args.cfg = 'config.json'
args.jid = 'test@localhost'
args.pwd = 'test'
args.srv = '127.0.0.1:5555'
args.api = 'localhost:8082'
args.debug = True
_build_args.return_value = args
# Config file
_read_file.return_value = {
'bus': {
'jid': 'iamrobert',
'password': 'mysuperstrongpassword',
}
}
# Result
configs = parse_init()
self.assertEqual(configs, {
'bus': {
'jid': 'test@localhost',
'password': 'test',
'host': '127.0.0.1',
'port': 5555
},
'api': {
'port': 8082,
'host': 'localhost'
},
'log': {
'root': {
'level': 'DEBUG'}
}
})
class TestExhaustiveConfig(TestCase):
def test_001_call(self):
parsed_configs = {
'bus': {
'jid': 'test@localhost',
'password': 'test',
'host': '127.0.0.1',
'port': 5555
},
'api': {
'port': 8082,
'host': 'localhost'
},
'log': {
'root': {
'level': 'DEBUG'}
}
}
self.assertIsInstance(exhaustive_config(parsed_configs), dict)
wrong_config = {
'bus': {
'jid': 'test@localhost'
}
}
with self.assertRaises(SystemExit) as call:
exhaustive_config(wrong_config)
self.assertEqual(call.exception.code, 1)
|
<commit_before><commit_msg>Add unit tests on commands.<commit_after>
|
from unittest import TestCase
from mock import Mock, patch
from nyuki.commands import (_update_config, _merge_config, parse_init,
exhaustive_config)
class TestUpdateConfig(TestCase):
def test_001_call(self):
source = {'a': 1, 'b': {'c': 2}}
# Update
_update_config(source, '1', 'a')
self.assertEqual(source['a'], '1')
# Nested update
_update_config(source, 3, 'b.c')
self.assertEqual(source['b']['c'], 3)
# Create
_update_config(source, 4, 'b.d')
self.assertEqual(source['b']['d'], 4)
class TestMergeConfig(TestCase):
def test_001_call(self):
dict1 = {'a': 1, 'b': {'c': 2}}
dict2 = {'b': {'d': 3}}
result = _merge_config(dict1, dict2)
self.assertEqual(result, {'a': 1, 'b': {'c': 2, 'd': 3}})
class TestParseInit(TestCase):
@patch('nyuki.commands._read_file')
@patch('nyuki.commands._build_args')
def test_001_call(self, _build_args, _read_file):
# Arguments parsed
args = Mock()
args.cfg = 'config.json'
args.jid = 'test@localhost'
args.pwd = 'test'
args.srv = '127.0.0.1:5555'
args.api = 'localhost:8082'
args.debug = True
_build_args.return_value = args
# Config file
_read_file.return_value = {
'bus': {
'jid': 'iamrobert',
'password': 'mysuperstrongpassword',
}
}
# Result
configs = parse_init()
self.assertEqual(configs, {
'bus': {
'jid': 'test@localhost',
'password': 'test',
'host': '127.0.0.1',
'port': 5555
},
'api': {
'port': 8082,
'host': 'localhost'
},
'log': {
'root': {
'level': 'DEBUG'}
}
})
class TestExhaustiveConfig(TestCase):
def test_001_call(self):
parsed_configs = {
'bus': {
'jid': 'test@localhost',
'password': 'test',
'host': '127.0.0.1',
'port': 5555
},
'api': {
'port': 8082,
'host': 'localhost'
},
'log': {
'root': {
'level': 'DEBUG'}
}
}
self.assertIsInstance(exhaustive_config(parsed_configs), dict)
wrong_config = {
'bus': {
'jid': 'test@localhost'
}
}
with self.assertRaises(SystemExit) as call:
exhaustive_config(wrong_config)
self.assertEqual(call.exception.code, 1)
|
Add unit tests on commands.
from unittest import TestCase
from mock import Mock, patch
from nyuki.commands import (_update_config, _merge_config, parse_init,
exhaustive_config)
class TestUpdateConfig(TestCase):
def test_001_call(self):
source = {'a': 1, 'b': {'c': 2}}
# Update
_update_config(source, '1', 'a')
self.assertEqual(source['a'], '1')
# Nested update
_update_config(source, 3, 'b.c')
self.assertEqual(source['b']['c'], 3)
# Create
_update_config(source, 4, 'b.d')
self.assertEqual(source['b']['d'], 4)
class TestMergeConfig(TestCase):
def test_001_call(self):
dict1 = {'a': 1, 'b': {'c': 2}}
dict2 = {'b': {'d': 3}}
result = _merge_config(dict1, dict2)
self.assertEqual(result, {'a': 1, 'b': {'c': 2, 'd': 3}})
class TestParseInit(TestCase):
@patch('nyuki.commands._read_file')
@patch('nyuki.commands._build_args')
def test_001_call(self, _build_args, _read_file):
# Arguments parsed
args = Mock()
args.cfg = 'config.json'
args.jid = 'test@localhost'
args.pwd = 'test'
args.srv = '127.0.0.1:5555'
args.api = 'localhost:8082'
args.debug = True
_build_args.return_value = args
# Config file
_read_file.return_value = {
'bus': {
'jid': 'iamrobert',
'password': 'mysuperstrongpassword',
}
}
# Result
configs = parse_init()
self.assertEqual(configs, {
'bus': {
'jid': 'test@localhost',
'password': 'test',
'host': '127.0.0.1',
'port': 5555
},
'api': {
'port': 8082,
'host': 'localhost'
},
'log': {
'root': {
'level': 'DEBUG'}
}
})
class TestExhaustiveConfig(TestCase):
def test_001_call(self):
parsed_configs = {
'bus': {
'jid': 'test@localhost',
'password': 'test',
'host': '127.0.0.1',
'port': 5555
},
'api': {
'port': 8082,
'host': 'localhost'
},
'log': {
'root': {
'level': 'DEBUG'}
}
}
self.assertIsInstance(exhaustive_config(parsed_configs), dict)
wrong_config = {
'bus': {
'jid': 'test@localhost'
}
}
with self.assertRaises(SystemExit) as call:
exhaustive_config(wrong_config)
self.assertEqual(call.exception.code, 1)
|
<commit_before><commit_msg>Add unit tests on commands.<commit_after>from unittest import TestCase
from mock import Mock, patch
from nyuki.commands import (_update_config, _merge_config, parse_init,
exhaustive_config)
class TestUpdateConfig(TestCase):
def test_001_call(self):
source = {'a': 1, 'b': {'c': 2}}
# Update
_update_config(source, '1', 'a')
self.assertEqual(source['a'], '1')
# Nested update
_update_config(source, 3, 'b.c')
self.assertEqual(source['b']['c'], 3)
# Create
_update_config(source, 4, 'b.d')
self.assertEqual(source['b']['d'], 4)
class TestMergeConfig(TestCase):
def test_001_call(self):
dict1 = {'a': 1, 'b': {'c': 2}}
dict2 = {'b': {'d': 3}}
result = _merge_config(dict1, dict2)
self.assertEqual(result, {'a': 1, 'b': {'c': 2, 'd': 3}})
class TestParseInit(TestCase):
@patch('nyuki.commands._read_file')
@patch('nyuki.commands._build_args')
def test_001_call(self, _build_args, _read_file):
# Arguments parsed
args = Mock()
args.cfg = 'config.json'
args.jid = 'test@localhost'
args.pwd = 'test'
args.srv = '127.0.0.1:5555'
args.api = 'localhost:8082'
args.debug = True
_build_args.return_value = args
# Config file
_read_file.return_value = {
'bus': {
'jid': 'iamrobert',
'password': 'mysuperstrongpassword',
}
}
# Result
configs = parse_init()
self.assertEqual(configs, {
'bus': {
'jid': 'test@localhost',
'password': 'test',
'host': '127.0.0.1',
'port': 5555
},
'api': {
'port': 8082,
'host': 'localhost'
},
'log': {
'root': {
'level': 'DEBUG'}
}
})
class TestExhaustiveConfig(TestCase):
def test_001_call(self):
parsed_configs = {
'bus': {
'jid': 'test@localhost',
'password': 'test',
'host': '127.0.0.1',
'port': 5555
},
'api': {
'port': 8082,
'host': 'localhost'
},
'log': {
'root': {
'level': 'DEBUG'}
}
}
self.assertIsInstance(exhaustive_config(parsed_configs), dict)
wrong_config = {
'bus': {
'jid': 'test@localhost'
}
}
with self.assertRaises(SystemExit) as call:
exhaustive_config(wrong_config)
self.assertEqual(call.exception.code, 1)
|
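Note: the tests above fully pin down the expected behaviour of _update_config (set a value at a dotted path, creating intermediate keys) and _merge_config (recursive dict merge). The nyuki source itself is not reproduced in this record, so the sketch below only shows implementations consistent with those tests; it is not the project's actual code.
def _update_config(config, value, path):
    # Walk the dotted path, creating intermediate dicts as needed, then set the final key.
    keys = path.split('.')
    for key in keys[:-1]:
        config = config.setdefault(key, {})
    config[keys[-1]] = value
def _merge_config(defaults, overrides):
    # Return a new dict in which nested dicts are merged recursively and other values are overridden.
    result = dict(defaults)
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(result.get(key), dict):
            result[key] = _merge_config(result[key], value)
        else:
            result[key] = value
    return result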
|
64be453e86c8e1a9461c901028078a12deaa4c32
|
spyder_unittesting/widgets/tests/test_unittestinggui.py
|
spyder_unittesting/widgets/tests/test_unittestinggui.py
|
# -*- coding: utf-8 -*-
#
# Copyright © 2016- The Spyder Development Team
# Licensed under the terms of the MIT License
# (see ../../LICENSE for details)
"""Tests for unittestinggui.py"""
import os
from qtpy.QtCore import Qt
from spyder.utils.qthelpers import qapplication
MAIN_APP = qapplication() # without this line, the import below segfaults
from spyder_unittesting.widgets.unittestinggui import UnitTestingWidget
def test_run_tests_and_display_results(qtbot, tmpdir):
os.chdir(tmpdir.strpath)
testfilename = tmpdir.join('test_foo.py').strpath
with open(testfilename, 'w') as f:
f.write("def test_ok(): assert 1+1 == 2\n"
"def test_fail(): assert 1+1 == 3\n")
widget = UnitTestingWidget(None)
qtbot.addWidget(widget)
widget.analyze(testfilename)
qtbot.wait(1000) # wait for tests to run
datatree = widget.datatree
assert datatree.topLevelItemCount() == 2
assert datatree.topLevelItem(0).data(0, Qt.DisplayRole) == 'ok'
assert datatree.topLevelItem(0).data(1, Qt.DisplayRole) == 'test_foo.test_ok'
assert datatree.topLevelItem(0).data(2, Qt.DisplayRole) is None
assert datatree.topLevelItem(1).data(0, Qt.DisplayRole) == 'failure'
assert datatree.topLevelItem(1).data(1, Qt.DisplayRole) == 'test_foo.test_fail'
assert datatree.topLevelItem(1).data(2, Qt.DisplayRole) == 'assert (1 + 1) == 3'
|
Add integration test covering basic functionality
|
Add integration test covering basic functionality
|
Python
|
mit
|
jitseniesen/spyder-unittest
|
Add integration test covering basic functionality
|
# -*- coding: utf-8 -*-
#
# Copyright © 2016- The Spyder Development Team
# Licensed under the terms of the MIT License
# (see ../../LICENSE for details)
"""Tests for unittestinggui.py"""
import os
from qtpy.QtCore import Qt
from spyder.utils.qthelpers import qapplication
MAIN_APP = qapplication() # without this line, the import below segfaults
from spyder_unittesting.widgets.unittestinggui import UnitTestingWidget
def test_run_tests_and_display_results(qtbot, tmpdir):
os.chdir(tmpdir.strpath)
testfilename = tmpdir.join('test_foo.py').strpath
with open(testfilename, 'w') as f:
f.write("def test_ok(): assert 1+1 == 2\n"
"def test_fail(): assert 1+1 == 3\n")
widget = UnitTestingWidget(None)
qtbot.addWidget(widget)
widget.analyze(testfilename)
qtbot.wait(1000) # wait for tests to run
datatree = widget.datatree
assert datatree.topLevelItemCount() == 2
assert datatree.topLevelItem(0).data(0, Qt.DisplayRole) == 'ok'
assert datatree.topLevelItem(0).data(1, Qt.DisplayRole) == 'test_foo.test_ok'
assert datatree.topLevelItem(0).data(2, Qt.DisplayRole) is None
assert datatree.topLevelItem(1).data(0, Qt.DisplayRole) == 'failure'
assert datatree.topLevelItem(1).data(1, Qt.DisplayRole) == 'test_foo.test_fail'
assert datatree.topLevelItem(1).data(2, Qt.DisplayRole) == 'assert (1 + 1) == 3'
|
<commit_before><commit_msg>Add integration test covering basic functionality<commit_after>
|
# -*- coding: utf-8 -*-
#
# Copyright © 2016- The Spyder Development Team
# Licensed under the terms of the MIT License
# (see ../../LICENSE for details)
"""Tests for unittestinggui.py"""
import os
from qtpy.QtCore import Qt
from spyder.utils.qthelpers import qapplication
MAIN_APP = qapplication() # without this line, the import below segfaults
from spyder_unittesting.widgets.unittestinggui import UnitTestingWidget
def test_run_tests_and_display_results(qtbot, tmpdir):
os.chdir(tmpdir.strpath)
testfilename = tmpdir.join('test_foo.py').strpath
with open(testfilename, 'w') as f:
f.write("def test_ok(): assert 1+1 == 2\n"
"def test_fail(): assert 1+1 == 3\n")
widget = UnitTestingWidget(None)
qtbot.addWidget(widget)
widget.analyze(testfilename)
qtbot.wait(1000) # wait for tests to run
datatree = widget.datatree
assert datatree.topLevelItemCount() == 2
assert datatree.topLevelItem(0).data(0, Qt.DisplayRole) == 'ok'
assert datatree.topLevelItem(0).data(1, Qt.DisplayRole) == 'test_foo.test_ok'
assert datatree.topLevelItem(0).data(2, Qt.DisplayRole) is None
assert datatree.topLevelItem(1).data(0, Qt.DisplayRole) == 'failure'
assert datatree.topLevelItem(1).data(1, Qt.DisplayRole) == 'test_foo.test_fail'
assert datatree.topLevelItem(1).data(2, Qt.DisplayRole) == 'assert (1 + 1) == 3'
|
Add integration test covering basic functionality
# -*- coding: utf-8 -*-
#
# Copyright © 2016- The Spyder Development Team
# Licensed under the terms of the MIT License
# (see ../../LICENSE for details)
"""Tests for unittestinggui.py"""
import os
from qtpy.QtCore import Qt
from spyder.utils.qthelpers import qapplication
MAIN_APP = qapplication() # without this line, the import below segfaults
from spyder_unittesting.widgets.unittestinggui import UnitTestingWidget
def test_run_tests_and_display_results(qtbot, tmpdir):
os.chdir(tmpdir.strpath)
testfilename = tmpdir.join('test_foo.py').strpath
with open(testfilename, 'w') as f:
f.write("def test_ok(): assert 1+1 == 2\n"
"def test_fail(): assert 1+1 == 3\n")
widget = UnitTestingWidget(None)
qtbot.addWidget(widget)
widget.analyze(testfilename)
qtbot.wait(1000) # wait for tests to run
datatree = widget.datatree
assert datatree.topLevelItemCount() == 2
assert datatree.topLevelItem(0).data(0, Qt.DisplayRole) == 'ok'
assert datatree.topLevelItem(0).data(1, Qt.DisplayRole) == 'test_foo.test_ok'
assert datatree.topLevelItem(0).data(2, Qt.DisplayRole) is None
assert datatree.topLevelItem(1).data(0, Qt.DisplayRole) == 'failure'
assert datatree.topLevelItem(1).data(1, Qt.DisplayRole) == 'test_foo.test_fail'
assert datatree.topLevelItem(1).data(2, Qt.DisplayRole) == 'assert (1 + 1) == 3'
|
<commit_before><commit_msg>Add integration test covering basic functionality<commit_after>
# -*- coding: utf-8 -*-
#
# Copyright © 2016- The Spyder Development Team
# Licensed under the terms of the MIT License
# (see ../../LICENSE for details)
"""Tests for unittestinggui.py"""
import os
from qtpy.QtCore import Qt
from spyder.utils.qthelpers import qapplication
MAIN_APP = qapplication() # without this line, the import below segfaults
from spyder_unittesting.widgets.unittestinggui import UnitTestingWidget
def test_run_tests_and_display_results(qtbot, tmpdir):
os.chdir(tmpdir.strpath)
testfilename = tmpdir.join('test_foo.py').strpath
with open(testfilename, 'w') as f:
f.write("def test_ok(): assert 1+1 == 2\n"
"def test_fail(): assert 1+1 == 3\n")
widget = UnitTestingWidget(None)
qtbot.addWidget(widget)
widget.analyze(testfilename)
qtbot.wait(1000) # wait for tests to run
datatree = widget.datatree
assert datatree.topLevelItemCount() == 2
assert datatree.topLevelItem(0).data(0, Qt.DisplayRole) == 'ok'
assert datatree.topLevelItem(0).data(1, Qt.DisplayRole) == 'test_foo.test_ok'
assert datatree.topLevelItem(0).data(2, Qt.DisplayRole) is None
assert datatree.topLevelItem(1).data(0, Qt.DisplayRole) == 'failure'
assert datatree.topLevelItem(1).data(1, Qt.DisplayRole) == 'test_foo.test_fail'
assert datatree.topLevelItem(1).data(2, Qt.DisplayRole) == 'assert (1 + 1) == 3'
|
|
332a1d777a5205886a7bd941155ba178a577e601
|
py/average-of-levels-in-binary-tree.py
|
py/average-of-levels-in-binary-tree.py
|
from collections import defaultdict
from Queue import Queue
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def averageOfLevels(self, root):
"""
:type root: TreeNode
:rtype: List[float]
"""
levels = defaultdict(list)
q = Queue()
q.put((root, 0))
while not q.empty():
v, depth = q.get()
levels[depth].append(v.val)
if v.left:
q.put((v.left, depth + 1))
if v.right:
q.put((v.right, depth + 1))
d = 0
ans = []
while True:
if not levels[d]:
break
ans.append(float(sum(levels[d])) / len(levels[d]))
d += 1
return ans
|
Add py solution for 637. Average of Levels in Binary Tree
|
Add py solution for 637. Average of Levels in Binary Tree
637. Average of Levels in Binary Tree: https://leetcode.com/problems/average-of-levels-in-binary-tree/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 637. Average of Levels in Binary Tree
637. Average of Levels in Binary Tree: https://leetcode.com/problems/average-of-levels-in-binary-tree/
|
from collections import defaultdict
from Queue import Queue
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def averageOfLevels(self, root):
"""
:type root: TreeNode
:rtype: List[float]
"""
levels = defaultdict(list)
q = Queue()
q.put((root, 0))
while not q.empty():
v, depth = q.get()
levels[depth].append(v.val)
if v.left:
q.put((v.left, depth + 1))
if v.right:
q.put((v.right, depth + 1))
d = 0
ans = []
while True:
if not levels[d]:
break
ans.append(float(sum(levels[d])) / len(levels[d]))
d += 1
return ans
|
<commit_before><commit_msg>Add py solution for 637. Average of Levels in Binary Tree
637. Average of Levels in Binary Tree: https://leetcode.com/problems/average-of-levels-in-binary-tree/<commit_after>
|
from collections import defaultdict
from Queue import Queue
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def averageOfLevels(self, root):
"""
:type root: TreeNode
:rtype: List[float]
"""
levels = defaultdict(list)
q = Queue()
q.put((root, 0))
while not q.empty():
v, depth = q.get()
levels[depth].append(v.val)
if v.left:
q.put((v.left, depth + 1))
if v.right:
q.put((v.right, depth + 1))
d = 0
ans = []
while True:
if not levels[d]:
break
ans.append(float(sum(levels[d])) / len(levels[d]))
d += 1
return ans
|
Add py solution for 637. Average of Levels in Binary Tree
637. Average of Levels in Binary Tree: https://leetcode.com/problems/average-of-levels-in-binary-tree/
from collections import defaultdict
from Queue import Queue
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def averageOfLevels(self, root):
"""
:type root: TreeNode
:rtype: List[float]
"""
levels = defaultdict(list)
q = Queue()
q.put((root, 0))
while not q.empty():
v, depth = q.get()
levels[depth].append(v.val)
if v.left:
q.put((v.left, depth + 1))
if v.right:
q.put((v.right, depth + 1))
d = 0
ans = []
while True:
if not levels[d]:
break
ans.append(float(sum(levels[d])) / len(levels[d]))
d += 1
return ans
|
<commit_before><commit_msg>Add py solution for 637. Average of Levels in Binary Tree
637. Average of Levels in Binary Tree: https://leetcode.com/problems/average-of-levels-in-binary-tree/<commit_after>
from collections import defaultdict
from Queue import Queue
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def averageOfLevels(self, root):
"""
:type root: TreeNode
:rtype: List[float]
"""
levels = defaultdict(list)
q = Queue()
q.put((root, 0))
while not q.empty():
v, depth = q.get()
levels[depth].append(v.val)
if v.left:
q.put((v.left, depth + 1))
if v.right:
q.put((v.right, depth + 1))
d = 0
ans = []
while True:
if not levels[d]:
break
ans.append(float(sum(levels[d])) / len(levels[d]))
d += 1
return ans
|
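Note: the solution above is Python 2 (it imports Queue) and takes a second pass over the per-level lists. As an illustrative sketch only, not part of the original record, the same level-order averaging can be written for Python 3 with collections.deque, computing each level's average in a single pass; the TreeNode class is assumed to match the definition quoted in the record's comments.
from collections import deque
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
def average_of_levels(root):
    # Breadth-first traversal: each outer iteration consumes exactly one level of the tree.
    averages = []
    queue = deque([root])
    while queue:
        level_size = len(queue)
        level_sum = 0
        for _ in range(level_size):
            node = queue.popleft()
            level_sum += node.val
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
        averages.append(level_sum / level_size)
    return averages
# Example: the tree 3 / (9, 20) / (15, 7) yields [3.0, 14.5, 11.0].
root = TreeNode(3)
root.left = TreeNode(9)
root.right = TreeNode(20)
root.right.left = TreeNode(15)
root.right.right = TreeNode(7)
print(average_of_levels(root))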
|
9bcedcb7ce07a0436b5b27d5f76bde69b4103f3f
|
press_releases/migrations/0008_auto_20161128_1049.py
|
press_releases/migrations/0008_auto_20161128_1049.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('icekit_press_releases', '0007_auto_20161117_1201'),
]
operations = [
migrations.AlterField(
model_name='pressreleaselisting',
name='hero_image',
field=models.ForeignKey(related_name='+', help_text=b'The hero image for this content.', blank=True, to='icekit_plugins_image.Image', on_delete=django.db.models.deletion.SET_NULL, null=True),
),
]
|
Add hero to press release listing
|
Add hero to press release listing
|
Python
|
mit
|
ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/icekit-press-releases,ic-labs/icekit-press-releases,ic-labs/django-icekit,ic-labs/django-icekit
|
Add hero to press release listing
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('icekit_press_releases', '0007_auto_20161117_1201'),
]
operations = [
migrations.AlterField(
model_name='pressreleaselisting',
name='hero_image',
field=models.ForeignKey(related_name='+', help_text=b'The hero image for this content.', blank=True, to='icekit_plugins_image.Image', on_delete=django.db.models.deletion.SET_NULL, null=True),
),
]
|
<commit_before><commit_msg>Add hero to press release listing<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('icekit_press_releases', '0007_auto_20161117_1201'),
]
operations = [
migrations.AlterField(
model_name='pressreleaselisting',
name='hero_image',
field=models.ForeignKey(related_name='+', help_text=b'The hero image for this content.', blank=True, to='icekit_plugins_image.Image', on_delete=django.db.models.deletion.SET_NULL, null=True),
),
]
|
Add hero to press release listing
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('icekit_press_releases', '0007_auto_20161117_1201'),
]
operations = [
migrations.AlterField(
model_name='pressreleaselisting',
name='hero_image',
field=models.ForeignKey(related_name='+', help_text=b'The hero image for this content.', blank=True, to='icekit_plugins_image.Image', on_delete=django.db.models.deletion.SET_NULL, null=True),
),
]
|
<commit_before><commit_msg>Add hero to press release listing<commit_after>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('icekit_press_releases', '0007_auto_20161117_1201'),
]
operations = [
migrations.AlterField(
model_name='pressreleaselisting',
name='hero_image',
field=models.ForeignKey(related_name='+', help_text=b'The hero image for this content.', blank=True, to='icekit_plugins_image.Image', on_delete=django.db.models.deletion.SET_NULL, null=True),
),
]
|
|
5f38319bcc2cad3f7d8ea1985379b51e89e32f44
|
tests/test_rabbitmq.py
|
tests/test_rabbitmq.py
|
import mock
from pubsub.backend.rabbitmq import RabbitMQ, RabbitMQPublisher
class TestRabbitMQ(object):
def setup_class(self):
self.backend = RabbitMQ()
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher.start')
def test_call_start_publisher(self, mocked_function):
self.backend.start()
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQSubscriber.start')
def test_call_start_subscriber(self, mocked_function):
self.backend.start()
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher.publish')
def test_call_publish(self, mocked_function):
self.backend.publish(None)
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher.publish')
def test_publisher_publish_args(self, mocked_function):
self.backend.publish('message')
mocked_function.assert_called_with('message')
class TestRabbitMQPublisher(object):
def setup_class(self):
self.publisher = RabbitMQPublisher()
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher._connect')
def test_call_connect(self, mocked_function):
self.publisher.start()
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher._create_exchange')
def test_call_create_exchange(self, mocked_function):
self.publisher.start()
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher._create_producer')
def test_call_create_producer(self, mocked_function):
self.publisher.start()
assert self.publisher._create_producer.called
def test_call_producer_publish(self):
self.publisher._producer = mock.Mock()
self.publisher.publish(None)
assert self.publisher._producer.publish.called
|
Add tests for RabbitMQ backend
|
Add tests for RabbitMQ backend
We are testing here RabbitMQ Backend and RabbitMQ publisher class.
I'm not sure yet how to implement Subscriber class
|
Python
|
mit
|
csarcom/python-pubsub,WeLikeAlpacas/python-pubsub,WeLikeAlpacas/Qpaca
|
Add tests for RabbitMQ backend
We are testing here RabbitMQ Backend and RabbitMQ publisher class.
I'm not sure yet how to implement Subscriber class
|
import mock
from pubsub.backend.rabbitmq import RabbitMQ, RabbitMQPublisher
class TestRabbitMQ(object):
def setup_class(self):
self.backend = RabbitMQ()
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher.start')
def test_call_start_publisher(self, mocked_function):
self.backend.start()
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQSubscriber.start')
def test_call_start_subscriber(self, mocked_function):
self.backend.start()
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher.publish')
def test_call_publish(self, mocked_function):
self.backend.publish(None)
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher.publish')
def test_publisher_publish_args(self, mocked_function):
self.backend.publish('message')
mocked_function.assert_called_with('message')
class TestRabbitMQPublisher(object):
def setup_class(self):
self.publisher = RabbitMQPublisher()
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher._connect')
def test_call_connect(self, mocked_function):
self.publisher.start()
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher._create_exchange')
def test_call_create_exchange(self, mocked_function):
self.publisher.start()
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher._create_producer')
def test_call_create_producer(self, mocked_function):
self.publisher.start()
assert self.publisher._create_producer.called
def test_call_producer_publish(self):
self.publisher._producer = mock.Mock()
self.publisher.publish(None)
assert self.publisher._producer.publish.called
|
<commit_before><commit_msg>Add tests for RabbitMQ backend
We are testing here RabbitMQ Backend and RabbitMQ publisher class.
I'm not sure yet how to implement Subscriber class<commit_after>
|
import mock
from pubsub.backend.rabbitmq import RabbitMQ, RabbitMQPublisher
class TestRabbitMQ(object):
def setup_class(self):
self.backend = RabbitMQ()
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher.start')
def test_call_start_publisher(self, mocked_function):
self.backend.start()
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQSubscriber.start')
def test_call_start_subscriber(self, mocked_function):
self.backend.start()
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher.publish')
def test_call_publish(self, mocked_function):
self.backend.publish(None)
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher.publish')
def test_publisher_publish_args(self, mocked_function):
self.backend.publish('message')
mocked_function.assert_called_with('message')
class TestRabbitMQPublisher(object):
def setup_class(self):
self.publisher = RabbitMQPublisher()
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher._connect')
def test_call_connect(self, mocked_function):
self.publisher.start()
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher._create_exchange')
def test_call_create_exchange(self, mocked_function):
self.publisher.start()
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher._create_producer')
def test_call_create_producer(self, mocked_function):
self.publisher.start()
assert self.publisher._create_producer.called
def test_call_producer_publish(self):
self.publisher._producer = mock.Mock()
self.publisher.publish(None)
assert self.publisher._producer.publish.called
|
Add tests for RabbitMQ backend
We are testing here RabbitMQ Backend and RabbitMQ publisher class.
I'm not sure yet how to implement Subscriber class
import mock
from pubsub.backend.rabbitmq import RabbitMQ, RabbitMQPublisher
class TestRabbitMQ(object):
def setup_class(self):
self.backend = RabbitMQ()
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher.start')
def test_call_start_publisher(self, mocked_function):
self.backend.start()
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQSubscriber.start')
def test_call_start_subscriber(self, mocked_function):
self.backend.start()
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher.publish')
def test_call_publish(self, mocked_function):
self.backend.publish(None)
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher.publish')
def test_publisher_publish_args(self, mocked_function):
self.backend.publish('message')
mocked_function.assert_called_with('message')
class TestRabbitMQPublisher(object):
def setup_class(self):
self.publisher = RabbitMQPublisher()
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher._connect')
def test_call_connect(self, mocked_function):
self.publisher.start()
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher._create_exchange')
def test_call_create_exchange(self, mocked_function):
self.publisher.start()
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher._create_producer')
def test_call_create_producer(self, mocked_function):
self.publisher.start()
assert self.publisher._create_producer.called
def test_call_producer_publish(self):
self.publisher._producer = mock.Mock()
self.publisher.publish(None)
assert self.publisher._producer.publish.called
|
<commit_before><commit_msg>Add tests for RabbitMQ backend
We are testing here RabbitMQ Backend and RabbitMQ publisher class.
I'm not sure yet how to implement Subscriber class<commit_after>
import mock
from pubsub.backend.rabbitmq import RabbitMQ, RabbitMQPublisher
class TestRabbitMQ(object):
def setup_class(self):
self.backend = RabbitMQ()
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher.start')
def test_call_start_publisher(self, mocked_function):
self.backend.start()
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQSubscriber.start')
def test_call_start_subscriber(self, mocked_function):
self.backend.start()
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher.publish')
def test_call_publish(self, mocked_function):
self.backend.publish(None)
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher.publish')
def test_publisher_publish_args(self, mocked_function):
self.backend.publish('message')
mocked_function.assert_called_with('message')
class TestRabbitMQPublisher(object):
def setup_class(self):
self.publisher = RabbitMQPublisher()
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher._connect')
def test_call_connect(self, mocked_function):
self.publisher.start()
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher._create_exchange')
def test_call_create_exchange(self, mocked_function):
self.publisher.start()
assert mocked_function.called
@mock.patch('pubsub.backend.rabbitmq.RabbitMQPublisher._create_producer')
def test_call_create_producer(self, mocked_function):
self.publisher.start()
assert self.publisher._create_producer.called
def test_call_producer_publish(self):
self.publisher._producer = mock.Mock()
self.publisher.publish(None)
assert self.publisher._producer.publish.called
|
|
64cbcc160dd3f7a6f8e59592afda302522314404
|
Functions/Python/template/test/test_lambda_function.py
|
Functions/Python/template/test/test_lambda_function.py
|
"""Created By: Andrew Ryan DeFilippis"""
import contextlib
import re
import unittest
from io import StringIO
import context
import lambda_function
class TestLambdaFunction(unittest.TestCase):
"""Test all the Lambda Function things!
"""
def test_cwlogs_event_format(self):
"""Verify the format of a log event sent to CWLogs.
"""
log = lambda_function.CWLogs(context)
output = StringIO()
with contextlib.redirect_stdout(output):
log.event('Message')
output = output.getvalue().strip()
event = re.match((
"^LOG "
"RequestId: "
"[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}"
"\t"
"Message$"
), output)
self.assertIsNotNone(event)
def test_invocation_response(self):
"""Verify successful invocation of the Function.
"""
expected_result = {'Hello': 'World!'}
result = lambda_function.local_test()
self.assertEqual(expected_result, result)
if __name__ == '__main__':
unittest.main()
|
Add unittest for the Lambda Function
|
Add unittest for the Lambda Function
|
Python
|
apache-2.0
|
andrewdefilippis/aws-lambda
|
Add unittest for the Lambda Function
|
"""Created By: Andrew Ryan DeFilippis"""
import contextlib
import re
import unittest
from io import StringIO
import context
import lambda_function
class TestLambdaFunction(unittest.TestCase):
"""Test all the Lambda Function things!
"""
def test_cwlogs_event_format(self):
"""Verify the format of a log event sent to CWLogs.
"""
log = lambda_function.CWLogs(context)
output = StringIO()
with contextlib.redirect_stdout(output):
log.event('Message')
output = output.getvalue().strip()
event = re.match((
"^LOG "
"RequestId: "
"[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}"
"\t"
"Message$"
), output)
self.assertIsNotNone(event)
def test_invocation_response(self):
"""Verify successful invocation of the Function.
"""
expected_result = {'Hello': 'World!'}
result = lambda_function.local_test()
self.assertEqual(expected_result, result)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unittest for the Lambda Function<commit_after>
|
"""Created By: Andrew Ryan DeFilippis"""
import contextlib
import re
import unittest
from io import StringIO
import context
import lambda_function
class TestLambdaFunction(unittest.TestCase):
"""Test all the Lambda Function things!
"""
def test_cwlogs_event_format(self):
"""Verify the format of a log event sent to CWLogs.
"""
log = lambda_function.CWLogs(context)
output = StringIO()
with contextlib.redirect_stdout(output):
log.event('Message')
output = output.getvalue().strip()
event = re.match((
"^LOG "
"RequestId: "
"[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}"
"\t"
"Message$"
), output)
self.assertIsNotNone(event)
def test_invocation_response(self):
"""Verify successful invocation of the Function.
"""
expected_result = {'Hello': 'World!'}
result = lambda_function.local_test()
self.assertEqual(expected_result, result)
if __name__ == '__main__':
unittest.main()
|
Add unittest for the Lambda Function
"""Created By: Andrew Ryan DeFilippis"""
import contextlib
import re
import unittest
from io import StringIO
import context
import lambda_function
class TestLambdaFunction(unittest.TestCase):
"""Test all the Lambda Function things!
"""
def test_cwlogs_event_format(self):
"""Verify the format of a log event sent to CWLogs.
"""
log = lambda_function.CWLogs(context)
output = StringIO()
with contextlib.redirect_stdout(output):
log.event('Message')
output = output.getvalue().strip()
event = re.match((
"^LOG "
"RequestId: "
"[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}"
"\t"
"Message$"
), output)
self.assertIsNotNone(event)
def test_invocation_response(self):
"""Verify successful invocation of the Function.
"""
expected_result = {'Hello': 'World!'}
result = lambda_function.local_test()
self.assertEqual(expected_result, result)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unittest for the Lambda Function<commit_after>
"""Created By: Andrew Ryan DeFilippis"""
import contextlib
import re
import unittest
from io import StringIO
import context
import lambda_function
class TestLambdaFunction(unittest.TestCase):
"""Test all the Lambda Function things!
"""
def test_cwlogs_event_format(self):
"""Verify the format of a log event sent to CWLogs.
"""
log = lambda_function.CWLogs(context)
output = StringIO()
with contextlib.redirect_stdout(output):
log.event('Message')
output = output.getvalue().strip()
event = re.match((
"^LOG "
"RequestId: "
"[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}"
"\t"
"Message$"
), output)
self.assertIsNotNone(event)
def test_invocation_response(self):
"""Verify successful invocation of the Function.
"""
expected_result = {'Hello': 'World!'}
result = lambda_function.local_test()
self.assertEqual(expected_result, result)
if __name__ == '__main__':
unittest.main()
|
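Note: the regular expression in the test above expects the literal prefix "LOG RequestId: ", a version-4-style UUID, a tab, and then the message text. The request id in the snippet below is made up purely for illustration; the snippet only demonstrates the kind of log line the pattern is meant to match.
import re
LOG_PATTERN = (
    "^LOG "
    "RequestId: "
    "[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}"
    "\t"
    "Message$"
)
# A hypothetical event in the format the CWLogs helper is expected to emit.
sample = "LOG RequestId: 6f1f6a2d-3c4e-4a5b-8c6d-0123456789ab\tMessage"
print(re.match(LOG_PATTERN, sample) is not None)  # True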
|
cf732aaebfd4ed3f7ac2e8475a52e59934d193d6
|
python/test/annotator/audio/wav2vec2_for_ctc_test.py
|
python/test/annotator/audio/wav2vec2_for_ctc_test.py
|
# Copyright 2017-2022 John Snow Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import pytest
from sparknlp.annotator import *
from sparknlp.base import *
from pyspark.sql.functions import col
from test.util import SparkSessionForTest
@pytest.mark.fast
class Wav2Vec2ForCTCTestSpec(unittest.TestCase):
def setUp(self):
audio_path = os.getcwd() + "/../src/test/resources/audio/json/audio_floats.json"
self.data = SparkSessionForTest.spark.read.option("inferSchema", value=True).json(audio_path) \
.select(col("float_array").cast("array<float>").alias("audio_content"))
def runTest(self):
self.data.show()
audio_assembler = AudioAssembler() \
.setInputCol("audio_content") \
.setOutputCol("audio_assembler")
speech_to_text = Wav2Vec2ForCTC \
.pretrained()\
.setInputCols("audio_assembler") \
.setOutputCol("text")
pipeline = Pipeline(stages=[
audio_assembler,
speech_to_text,
])
model = pipeline.fit(self.data)
result_df = model.transform(self.data)
assert result_df.select("text").count() > 0
|
Add Wav2Vec2 unit test to Python
|
Add Wav2Vec2 unit test to Python
|
Python
|
apache-2.0
|
JohnSnowLabs/spark-nlp,JohnSnowLabs/spark-nlp,JohnSnowLabs/spark-nlp,JohnSnowLabs/spark-nlp
|
Add Wav2Vec2 unit test to Python
|
# Copyright 2017-2022 John Snow Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import pytest
from sparknlp.annotator import *
from sparknlp.base import *
from pyspark.sql.functions import col
from test.util import SparkSessionForTest
@pytest.mark.fast
class Wav2Vec2ForCTCTestSpec(unittest.TestCase):
def setUp(self):
audio_path = os.getcwd() + "/../src/test/resources/audio/json/audio_floats.json"
self.data = SparkSessionForTest.spark.read.option("inferSchema", value=True).json(audio_path) \
.select(col("float_array").cast("array<float>").alias("audio_content"))
def runTest(self):
self.data.show()
audio_assembler = AudioAssembler() \
.setInputCol("audio_content") \
.setOutputCol("audio_assembler")
speech_to_text = Wav2Vec2ForCTC \
.pretrained()\
.setInputCols("audio_assembler") \
.setOutputCol("text")
pipeline = Pipeline(stages=[
audio_assembler,
speech_to_text,
])
model = pipeline.fit(self.data)
result_df = model.transform(self.data)
assert result_df.select("text").count() > 0
|
<commit_before><commit_msg>Add Wav2Vec2 unit test to Python<commit_after>
|
# Copyright 2017-2022 John Snow Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import pytest
from sparknlp.annotator import *
from sparknlp.base import *
from pyspark.sql.functions import col
from test.util import SparkSessionForTest
@pytest.mark.fast
class Wav2Vec2ForCTCTestSpec(unittest.TestCase):
def setUp(self):
audio_path = os.getcwd() + "/../src/test/resources/audio/json/audio_floats.json"
self.data = SparkSessionForTest.spark.read.option("inferSchema", value=True).json(audio_path) \
.select(col("float_array").cast("array<float>").alias("audio_content"))
def runTest(self):
self.data.show()
audio_assembler = AudioAssembler() \
.setInputCol("audio_content") \
.setOutputCol("audio_assembler")
speech_to_text = Wav2Vec2ForCTC \
.pretrained()\
.setInputCols("audio_assembler") \
.setOutputCol("text")
pipeline = Pipeline(stages=[
audio_assembler,
speech_to_text,
])
model = pipeline.fit(self.data)
result_df = model.transform(self.data)
assert result_df.select("text").count() > 0
|
Add Wav2Vec2 unit test to Python
# Copyright 2017-2022 John Snow Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import pytest
from sparknlp.annotator import *
from sparknlp.base import *
from pyspark.sql.functions import col
from test.util import SparkSessionForTest
@pytest.mark.fast
class Wav2Vec2ForCTCTestSpec(unittest.TestCase):
def setUp(self):
audio_path = os.getcwd() + "/../src/test/resources/audio/json/audio_floats.json"
self.data = SparkSessionForTest.spark.read.option("inferSchema", value=True).json(audio_path) \
.select(col("float_array").cast("array<float>").alias("audio_content"))
def runTest(self):
self.data.show()
audio_assembler = AudioAssembler() \
.setInputCol("audio_content") \
.setOutputCol("audio_assembler")
speech_to_text = Wav2Vec2ForCTC \
.pretrained()\
.setInputCols("audio_assembler") \
.setOutputCol("text")
pipeline = Pipeline(stages=[
audio_assembler,
speech_to_text,
])
model = pipeline.fit(self.data)
result_df = model.transform(self.data)
assert result_df.select("text").count() > 0
|
<commit_before><commit_msg>Add Wav2Vec2 unit test to Python<commit_after>
# Copyright 2017-2022 John Snow Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import pytest
from sparknlp.annotator import *
from sparknlp.base import *
from pyspark.sql.functions import col
from test.util import SparkSessionForTest
@pytest.mark.fast
class Wav2Vec2ForCTCTestSpec(unittest.TestCase):
def setUp(self):
audio_path = os.getcwd() + "/../src/test/resources/audio/json/audio_floats.json"
self.data = SparkSessionForTest.spark.read.option("inferSchema", value=True).json(audio_path) \
.select(col("float_array").cast("array<float>").alias("audio_content"))
def runTest(self):
self.data.show()
audio_assembler = AudioAssembler() \
.setInputCol("audio_content") \
.setOutputCol("audio_assembler")
speech_to_text = Wav2Vec2ForCTC \
.pretrained()\
.setInputCols("audio_assembler") \
.setOutputCol("text")
pipeline = Pipeline(stages=[
audio_assembler,
speech_to_text,
])
model = pipeline.fit(self.data)
result_df = model.transform(self.data)
assert result_df.select("text").count() > 0
|
|
5bfc44f140b0d8a16c88aa98205bee71e08fc372
|
studygroups/management/commands/anonymize-opt-outs.py
|
studygroups/management/commands/anonymize-opt-outs.py
|
from django.core.management.base import BaseCommand, CommandError
from studygroups.models import Application
class Command(BaseCommand):
help = 'Anonymize applications that previously opted out'
def handle(self, *args, **options):
applications = Application.objects.filter(deleted_at__isnull=False)
print(f'About to anonymize {applications.count()} applications')
for application in applications:
application.anonymize()
|
Add tasks to anonymize past opt outs
|
Add tasks to anonymize past opt outs
|
Python
|
mit
|
p2pu/learning-circles,p2pu/learning-circles,p2pu/learning-circles,p2pu/learning-circles
|
Add tasks to anonymize past opt outs
|
from django.core.management.base import BaseCommand, CommandError
from studygroups.models import Application
class Command(BaseCommand):
help = 'Anonymize applications that previously opted out'
def handle(self, *args, **options):
applications = Application.objects.filter(deleted_at__isnull=False)
print(f'About to anonymize {applications.count()} applications')
for application in applications:
application.anonymize()
|
<commit_before><commit_msg>Add tasks to anonymize past opt outs<commit_after>
|
from django.core.management.base import BaseCommand, CommandError
from studygroups.models import Application
class Command(BaseCommand):
help = 'Anonymize applications that previously opted out'
def handle(self, *args, **options):
applications = Application.objects.filter(deleted_at__isnull=False)
print(f'About to anonymize {applications.count()} applications')
for application in applications:
application.anonymize()
|
Add tasks to anonymize past opt outs
from django.core.management.base import BaseCommand, CommandError
from studygroups.models import Application
class Command(BaseCommand):
help = 'Anonymize applications that previously opted out'
def handle(self, *args, **options):
applications = Application.objects.filter(deleted_at__isnull=False)
print(f'About to anonymize {applications.count()} applications')
for application in applications:
application.anonymize()
|
<commit_before><commit_msg>Add tasks to anonymize past opt outs<commit_after>
from django.core.management.base import BaseCommand, CommandError
from studygroups.models import Application
class Command(BaseCommand):
help = 'Anonymize applications that previously opted out'
def handle(self, *args, **options):
applications = Application.objects.filter(deleted_at__isnull=False)
print(f'About to anonymize {applications.count()} applications')
for application in applications:
application.anonymize()
|
|
4e57f963d11dc66bb2bee46324121f1f229cb685
|
url/cli-open-url.py
|
url/cli-open-url.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
response = urllib2.urlopen("https://www.python.org/")
html = response.read()
# print out the HTML response
print(html)
|
Add open url (web page) script
|
Add open url (web page) script
|
Python
|
mit
|
rawswift/python-collections
|
Add open url (web page) script
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
response = urllib2.urlopen("https://www.python.org/")
html = response.read()
# print out the HTML response
print(html)
|
<commit_before><commit_msg>Add open url (web page) script<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
response = urllib2.urlopen("https://www.python.org/")
html = response.read()
# print out the HTML response
print(html)
|
Add open url (web page) script
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
response = urllib2.urlopen("https://www.python.org/")
html = response.read()
# print out the HTML response
print(html)
|
<commit_before><commit_msg>Add open url (web page) script<commit_after>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
response = urllib2.urlopen("https://www.python.org/")
html = response.read()
# print out the HTML response
print(html)
|
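Note: the script in this record targets Python 2; the urllib2 module does not exist on Python 3. A minimal Python 3 equivalent, shown only as a sketch and not part of the original record, uses urllib.request instead.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from urllib.request import urlopen
# Fetch the page, decode the bytes returned by read(), and print out the HTML response.
with urlopen("https://www.python.org/") as response:
    html = response.read().decode("utf-8")
print(html)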
|
79e06c8e07ff0b945145a2a29e3133e0714f9415
|
tests/integration/cli/dump_test.py
|
tests/integration/cli/dump_test.py
|
from ...testcases import DustyIntegrationTestCase
class TestDumpCLI(DustyIntegrationTestCase):
def test_dump(self):
result = self.run_command('dump')
self.assertInSameLine(result, 'COMMAND', 'Dusty Version')
|
Add simplest integration test ever for dump
|
Add simplest integration test ever for dump
|
Python
|
mit
|
gamechanger/dusty,gamechanger/dusty,gamechanger/dusty,gamechanger/dusty,gamechanger/dusty
|
Add simplest integration test ever for dump
|
from ...testcases import DustyIntegrationTestCase
class TestDumpCLI(DustyIntegrationTestCase):
def test_dump(self):
result = self.run_command('dump')
self.assertInSameLine(result, 'COMMAND', 'Dusty Version')
|
<commit_before><commit_msg>Add simplest integration test ever for dump<commit_after>
|
from ...testcases import DustyIntegrationTestCase
class TestDumpCLI(DustyIntegrationTestCase):
def test_dump(self):
result = self.run_command('dump')
self.assertInSameLine(result, 'COMMAND', 'Dusty Version')
|
Add simplest integration test ever for dump
from ...testcases import DustyIntegrationTestCase
class TestDumpCLI(DustyIntegrationTestCase):
def test_dump(self):
result = self.run_command('dump')
self.assertInSameLine(result, 'COMMAND', 'Dusty Version')
|
<commit_before><commit_msg>Add simplest integration test ever for dump<commit_after>
from ...testcases import DustyIntegrationTestCase
class TestDumpCLI(DustyIntegrationTestCase):
def test_dump(self):
result = self.run_command('dump')
self.assertInSameLine(result, 'COMMAND', 'Dusty Version')
|
|
728c18bce41277781b2981c8a446100da19c0070
|
tests/test_lesson_0_hello_world.py
|
tests/test_lesson_0_hello_world.py
|
# This lesson teaches you how to define a Python method. Try running the tests for this file before doing anything!
# Here's a python script you can use to run your tests from the top level directory of this project
# `python -m unittest -vf tests.test_lesson_0_hello_world`
# That command looks like a lot at first. Let's break it down.
# `python` indicates that you want to run a python file
# -m tells python to run the module (tests.test_lesson_x) as a script
# unittest tells python that we're running unit tests on something
# -vf is the combination of two options that I'll outline separately below:
# -v encourages python to be as verbose as possible with its output
# -f tells python to stop running the tests once it hits a failure. This is entirely optional, but I
# want you to try running the tests one at a time, and fix each failure as it comes, rather than trying to
# make all the tests pass at once. That way you get a feel for how to build some python functions one idea at a time
# `tests.test_lesson_0_hello_world` is the specific module (composed of unit tests) that we're executing
# What error did you get when you first ran it?
# Remember, errors are built to be **helpful**, not scary!
# It should be an ImportError. This is Python telling you that it's unable to import the file `hello_world`
# So your first step should be to create `hello_world.py` in the `lessons/lesson_0_hello_world` directory
# Run the tests again.
import unittest
from lessons.lesson_0_hello_world import hello_world
class HelloWorldTestClass(unittest.TestCase):
def test_hello_function_exists(self):
func = hello_world.hello_world
self.assertIsNotNone(func)
def test_hello_function_output(self):
greeting = hello_world.hello_world()
self.assertEqual(greeting, "Hello World!")
|
Add test file for lesson 0.
|
Add test file for lesson 0.
|
Python
|
mit
|
thejessleigh/test_driven_python,thejessleigh/test_driven_python,thejessleigh/test_driven_python
|
Add test file for lesson 0.
|
# This lesson teaches you how to define a Python method. Try running the tests for this file before doing anything!
# Here's a python script you can use to run your tests from the top level directory of this project
# `python -m unittest -vf tests.test_lesson_0_hello_world`
# That command looks like a lot at first. Let's break it down.
# `python` indicates that you want to run a python file
# -m tells python to run the module (tests.test_lesson_x) as a script
# unittest tells python that we're running unit tests on something
# -vf is the combination of two options that I'll outline separately below:
# -v encourages python to be as verbose as possible with its output
# -f tells python to stop running the tests once it hits a failure. This is entirely optional, but I
# want you to try running the tests one at a time, and fix each failure as it comes, rather than trying to
# make all the tests pass at once. That way you get a feel for how to build some python functions one idea at a time
# `tests.test_lesson_0_hello_world` is the specific module (composed of unit tests) that we're executing
# What error did you get when you first ran it?
# Remember, errors are built to be **helpful**, not scary!
# It should be an ImportError. This is Python telling you that it's unable to import the file `hello_world`
# So your first step should be to create `hello_world.py` in the `lessons/lesson_0_hello_world` directory
# Run the tests again.
import unittest
from lessons.lesson_0_hello_world import hello_world
class HelloWorldTestClass(unittest.TestCase):
def test_hello_function_exists(self):
func = hello_world.hello_world
self.assertIsNotNone(func)
def test_hello_function_output(self):
greeting = hello_world.hello_world()
self.assertEqual(greeting, "Hello World!")
|
<commit_before><commit_msg>Add test file for lesson 0.<commit_after>
|
# This lesson teaches you how to define a Python method. Try running the tests for this file before doing anything!
# Here's a python script you can use to run your tests from the top level directory of this project
# `python -m unittest -vf tests.test_lesson_0_hello_world`
# That command looks like a lot at first. Let's break it down.
# `python` indicates that you want to run a python file
# -m tells python to run the module (tests.test_lesson_x) as a script
# unittest tells python that we're running unit tests on something
# -vf is the combination of two options that I'll outline separately below:
# -v encourages python to be as verbose as possible with its output
# -f tells python to stop running the tests once it hits a failure. This is entirely optional, but I
# want you to try running the tests one at a time, and fix each failure as it comes, rather than trying to
# make all the tests pass at once. That way you get a feel for how to build some python functions one idea at a time
# `tests.test_lesson_0_hello_world` is the specific module (composed of unit tests) that we're executing
# What error did you get when you first ran it?
# Remember, errors are built to be **helpful**, not scary!
# It should be an ImportError. This is Python telling you that it's unable to import the file `hello_world`
# So your first step should be to create `hello_world.py` in the `lessons/lesson_0_hello_world` directory
# Run the tests again.
import unittest
from lessons.lesson_0_hello_world import hello_world
class HelloWorldTestClass(unittest.TestCase):
def test_hello_function_exists(self):
func = hello_world.hello_world
self.assertIsNotNone(func)
def test_hello_function_output(self):
greeting = hello_world.hello_world()
self.assertEqual(greeting, "Hello World!")
|
Add test file for lesson 0.
# This lesson teaches you how to define a Python method. Try running the tests for this file before doing anything!
# Here's a python script you can use to run your tests from the top level directory of this project
# `python -m unittest -vf tests.test_lesson_0_hello_world`
# That command looks like a lot at first. Let's break it down.
# `python` indicates that you want to run a python file
# -m tells python to run the module (tests.test_lesson_x) as a script
# unittest tells python that we're running unit tests on something
# -vf is the combination of two options that I'll outline separately below:
# -v encourages python to be as verbose as possible with its output
# -f tells python to stop running the tests once it hits a failure. This is entirely optional, but I
# want you to try running the tests one at a time, and fix each failure as it comes, rather than trying to
# make all the tests pass at once. That way you get a feel for how to build some python functions one idea at a time
# `tests.test_lesson_0_hello_world` is the specific module (composed of unit tests) that we're executing
# What error did you get when you first ran it?
# Remember, errors are built to be **helpful**, not scary!
# It should be an ImportError. This is Python telling you that it's unable to import the file `hello_world`
# So your first step should be to create `hello_world.py` in the `lessons/lesson_0_hello_world` directory
# Run the tests again.
import unittest
from lessons.lesson_0_hello_world import hello_world
class HelloWorldTestClass(unittest.TestCase):
def test_hello_function_exists(self):
func = hello_world.hello_world
self.assertIsNotNone(func)
def test_hello_function_output(self):
greeting = hello_world.hello_world()
self.assertEqual(greeting, "Hello World!")
|
<commit_before><commit_msg>Add test file for lesson 0.<commit_after>
# This lesson teaches you how to define a Python method. Try running the tests for this file before doing anything!
# Here's a python script you can use to run your tests from the top level directory of this project
# `python -m unittest -vf tests.test_lesson_0_hello_world`
# That command looks like a lot at first. Let's break it down.
# `python` indicates that you want to run a python file
# -m tells python to run the module (tests.test_lesson_x) as a script
# unittest tells python that we're running unit tests on something
# -vf is the combination of two options that I'll outline separately below:
# -v encourages python to be as verbose as possible with its output
# -f tells python to stop running the tests once it hits a failure. This is entirely optional, but I
# want you to try running the tests one at a time, and fix each failure as it comes, rather than trying to
# make all the tests pass at once. That way you get a feel for how to build some python functions one idea at a time
# `tests.test_lesson_0_hello_world` is the specific module (composed of unit tests) that we're executing
# What error did you get when you first ran it?
# Remember, errors are built to be **helpful**, not scary!
# It should be an ImportError. This is Python telling you that it's unable to import the file `hello_world`
# So your first step should be to create `hello_world.py` in the `lessons/lesson_0_hello_world` directory
# Run the tests again.
import unittest
from lessons.lesson_0_hello_world import hello_world
class HelloWorldTestClass(unittest.TestCase):
def test_hello_function_exists(self):
func = hello_world.hello_world
self.assertIsNotNone(func)
def test_hello_function_output(self):
greeting = hello_world.hello_world()
self.assertEqual(greeting, "Hello World!")
|
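Note: the lesson's comments ask the reader to create lessons/lesson_0_hello_world/hello_world.py themselves. One possible file that would satisfy both tests above, shown only as a sketch of a valid answer rather than the canonical one, is:
# lessons/lesson_0_hello_world/hello_world.py
def hello_world():
    """Return the greeting string the lesson's tests expect."""
    return "Hello World!"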
|
06c78c6f721f79ae5d72e4da0b6b966bbcb99fbf
|
test/test_gizmo_request.py
|
test/test_gizmo_request.py
|
from unittest import TestCase
from groundstation.transfer.request import Request
class TestGizmoRequest(TestCase):
def test_loadable_after_serializing(self):
gizmo = Request("LISTALLOBJECTS")
def test_rejects_invalid_verbs(self):
with self.assertRaises(Exception):
gizmo = Request("THISWILLNEVERBEAVALIDVERB")
|
Add beginnings of some gizmo test coverage
|
Add beginnings of some gizmo test coverage
|
Python
|
mit
|
richo/groundstation,richo/groundstation,richo/groundstation,richo/groundstation,richo/groundstation
|
Add beginnings of some gizmo test coverage
|
from unittest import TestCase
from groundstation.transfer.request import Request
class TestGizmoRequest(TestCase):
def test_loadable_after_serializing(self):
gizmo = Request("LISTALLOBJECTS")
def test_rejects_invalid_verbs(self):
with self.assertRaises(Exception):
gizmo = Request("THISWILLNEVERBEAVALIDVERB")
|
<commit_before><commit_msg>Add beginnings of some gizmo test coverage<commit_after>
|
from unittest import TestCase
from groundstation.transfer.request import Request
class TestGizmoRequest(TestCase):
def test_loadable_after_serializing(self):
gizmo = Request("LISTALLOBJECTS")
def test_rejects_invalid_verbs(self):
with self.assertRaises(Exception):
gizmo = Request("THISWILLNEVERBEAVALIDVERB")
|
Add beginnings of some gizmo test coverage
from unittest import TestCase
from groundstation.transfer.request import Request
class TestGizmoRequest(TestCase):
def test_loadable_after_serializing(self):
gizmo = Request("LISTALLOBJECTS")
def test_rejects_invalid_verbs(self):
with self.assertRaises(Exception):
gizmo = Request("THISWILLNEVERBEAVALIDVERB")
|
<commit_before><commit_msg>Add beginnings of some gizmo test coverage<commit_after>
from unittest import TestCase
from groundstation.transfer.request import Request
class TestGizmoRequest(TestCase):
def test_loadable_after_serializing(self):
gizmo = Request("LISTALLOBJECTS")
def test_rejects_invalid_verbs(self):
with self.assertRaises(Exception):
gizmo = Request("THISWILLNEVERBEAVALIDVERB")
|
|
6ee1612e73efc2209c8399a54cebb7cba7965c7d
|
tests/level/level_utils.py
|
tests/level/level_utils.py
|
from hunting.level.map import LevelMap, LevelTile
def generate_5x3_long_c():
level_map = LevelMap([[LevelTile() for _ in range(0, 3)] for _ in range(0, 5)])
for x in range(1, 5):
level_map[x][1].blocks = True
return level_map
|
Add level generation test util fn
|
Add level generation test util fn
Maybe just create a general test util unless you're gonna build a
lotta these.
|
Python
|
mit
|
MoyTW/RL_Arena_Experiment
|
Add level generation test util fn
Maybe just create a general test util unless you're gonna build a
lotta these.
|
from hunting.level.map import LevelMap, LevelTile
def generate_5x3_long_c():
level_map = LevelMap([[LevelTile() for _ in range(0, 3)] for _ in range(0, 5)])
for x in range(1, 5):
level_map[x][1].blocks = True
return level_map
|
<commit_before><commit_msg>Add level generation test util fn
Maybe just create a general test util unless you're gonna build a
lotta these.<commit_after>
|
from hunting.level.map import LevelMap, LevelTile
def generate_5x3_long_c():
level_map = LevelMap([[LevelTile() for _ in range(0, 3)] for _ in range(0, 5)])
for x in range(1, 5):
level_map[x][1].blocks = True
return level_map
|
Add level generation test util fn
Maybe just create a general test util unless you're gonna build a
lotta these.
from hunting.level.map import LevelMap, LevelTile
def generate_5x3_long_c():
level_map = LevelMap([[LevelTile() for _ in range(0, 3)] for _ in range(0, 5)])
for x in range(1, 5):
level_map[x][1].blocks = True
return level_map
|
<commit_before><commit_msg>Add level generation test util fn
Maybe just create a general test util unless you're gonna build a
lotta these.<commit_after>from hunting.level.map import LevelMap, LevelTile
def generate_5x3_long_c():
level_map = LevelMap([[LevelTile() for _ in range(0, 3)] for _ in range(0, 5)])
for x in range(1, 5):
level_map[x][1].blocks = True
return level_map
|
|
a389b1172880db069e1a12ba75665e5366699b95
|
tests/test_wind_turbine.py
|
tests/test_wind_turbine.py
|
import pandas as pd
from pandas.util.testing import assert_series_equal
import pytest
from windpowerlib.wind_turbine import read_turbine_data, WindTurbine
class TestWindTurbine:
def test_error_raising(self):
self.test_turbine_data = {'hub_height': 100,
'rotor_diameter': 80,
'name': 'turbine_not_in_file',
'fetch_curve': 'power_curve',
'data_source': 'example_power_curves.csv'}
# Raise system exit
with pytest.raises(SystemExit):
test_turbine = WindTurbine(**self.test_turbine_data)
# Raise ValueError due to invalid parameter `fetch_curve`
self.test_turbine_data['fetch_curve'] = 'misspelling'
self.test_turbine_data['name'] = 'DUMMY 3'
with pytest.raises(ValueError):
test_turbine = WindTurbine(**self.test_turbine_data)
def test_read_turbine_data(self):
        # Raise FileNotFoundError due to missing file
with pytest.raises(FileNotFoundError):
read_turbine_data(filename='not_existent')
|
Add tests for wind_turbine module
|
Add tests for wind_turbine module
|
Python
|
mit
|
wind-python/windpowerlib
|
Add tests for wind_turbine module
|
import pandas as pd
from pandas.util.testing import assert_series_equal
import pytest
from windpowerlib.wind_turbine import read_turbine_data, WindTurbine
class TestWindTurbine:
def test_error_raising(self):
self.test_turbine_data = {'hub_height': 100,
'rotor_diameter': 80,
'name': 'turbine_not_in_file',
'fetch_curve': 'power_curve',
'data_source': 'example_power_curves.csv'}
# Raise system exit
with pytest.raises(SystemExit):
test_turbine = WindTurbine(**self.test_turbine_data)
# Raise ValueError due to invalid parameter `fetch_curve`
self.test_turbine_data['fetch_curve'] = 'misspelling'
self.test_turbine_data['name'] = 'DUMMY 3'
with pytest.raises(ValueError):
test_turbine = WindTurbine(**self.test_turbine_data)
def test_read_turbine_data(self):
        # Raise FileNotFoundError due to missing file
with pytest.raises(FileNotFoundError):
read_turbine_data(filename='not_existent')
|
<commit_before><commit_msg>Add tests for wind_turbine module<commit_after>
|
import pandas as pd
from pandas.util.testing import assert_series_equal
import pytest
from windpowerlib.wind_turbine import read_turbine_data, WindTurbine
class TestWindTurbine:
def test_error_raising(self):
self.test_turbine_data = {'hub_height': 100,
'rotor_diameter': 80,
'name': 'turbine_not_in_file',
'fetch_curve': 'power_curve',
'data_source': 'example_power_curves.csv'}
# Raise system exit
with pytest.raises(SystemExit):
test_turbine = WindTurbine(**self.test_turbine_data)
# Raise ValueError due to invalid parameter `fetch_curve`
self.test_turbine_data['fetch_curve'] = 'misspelling'
self.test_turbine_data['name'] = 'DUMMY 3'
with pytest.raises(ValueError):
test_turbine = WindTurbine(**self.test_turbine_data)
def test_read_turbine_data(self):
        # Raise FileNotFoundError due to missing file
with pytest.raises(FileNotFoundError):
read_turbine_data(filename='not_existent')
|
Add tests for wind_turbine moduleimport pandas as pd
from pandas.util.testing import assert_series_equal
import pytest
from windpowerlib.wind_turbine import read_turbine_data, WindTurbine
class TestWindTurbine:
def test_error_raising(self):
self.test_turbine_data = {'hub_height': 100,
'rotor_diameter': 80,
'name': 'turbine_not_in_file',
'fetch_curve': 'power_curve',
'data_source': 'example_power_curves.csv'}
# Raise system exit
with pytest.raises(SystemExit):
test_turbine = WindTurbine(**self.test_turbine_data)
# Raise ValueError due to invalid parameter `fetch_curve`
self.test_turbine_data['fetch_curve'] = 'misspelling'
self.test_turbine_data['name'] = 'DUMMY 3'
with pytest.raises(ValueError):
test_turbine = WindTurbine(**self.test_turbine_data)
def test_read_turbine_data(self):
        # Raise FileNotFoundError due to missing file
with pytest.raises(FileNotFoundError):
read_turbine_data(filename='not_existent')
|
<commit_before><commit_msg>Add tests for wind_turbine module<commit_after>import pandas as pd
from pandas.util.testing import assert_series_equal
import pytest
from windpowerlib.wind_turbine import read_turbine_data, WindTurbine
class TestWindTurbine:
def test_error_raising(self):
self.test_turbine_data = {'hub_height': 100,
'rotor_diameter': 80,
'name': 'turbine_not_in_file',
'fetch_curve': 'power_curve',
'data_source': 'example_power_curves.csv'}
# Raise system exit
with pytest.raises(SystemExit):
test_turbine = WindTurbine(**self.test_turbine_data)
# Raise ValueError due to invalid parameter `fetch_curve`
self.test_turbine_data['fetch_curve'] = 'misspelling'
self.test_turbine_data['name'] = 'DUMMY 3'
with pytest.raises(ValueError):
test_turbine = WindTurbine(**self.test_turbine_data)
def test_read_turbine_data(self):
        # Raise FileNotFoundError due to missing file
with pytest.raises(FileNotFoundError):
read_turbine_data(filename='not_existent')
|
|
3099161325dfd99f1bf32d62218e2aaed41f32cd
|
exp/sandbox/SparseSVDExp.py
|
exp/sandbox/SparseSVDExp.py
|
"""
Some code to see if there is any pattern in the SVD of a matrix with fixed
sparsity structure.
"""
import sys
import logging
import scipy.sparse
import numpy
from sparsesvd import sparsesvd
from exp.util.SparseUtils import SparseUtils
numpy.random.seed(21)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
numpy.set_printoptions(precision=3, suppress=True, linewidth=100)
m = 10
n = 10
r = 1
U0, s0, V0 = SparseUtils.generateLowRank((m, n), r)
numInds = 10
inds = numpy.unique(numpy.random.randint(0, m*n, numInds))
A = SparseUtils.reconstructLowRank(U0, s0, V0, inds)
#print(A.todense())
t0 = s0 + numpy.random.rand(s0.shape[0])*0.1
B = SparseUtils.reconstructLowRank(U0, t0, V0, inds)
#print(B.todense())
k = 9
U, s, V = sparsesvd(A, k)
U2, s2, V2 = sparsesvd(B, k)
print(A.todense())
print(U0)
print(s0)
print(V0)
print(U)
print(s)
print(V)
print(U2)
print(s2)
print(V2)
print(U2.T.dot(U))
#print(s2)
print(V2.T.dot(V))
#Now try for fixed singular vectors
|
Test if there is a pattern in sparsity structure
|
Test if there is a pattern in sparsity structure
|
Python
|
bsd-3-clause
|
charanpald/APGL
|
Test if there is a pattern in sparsity structure
|
"""
Some code to see if there is any pattern in the SVD of a matrix with fixed
sparsity structure.
"""
import sys
import logging
import scipy.sparse
import numpy
from sparsesvd import sparsesvd
from exp.util.SparseUtils import SparseUtils
numpy.random.seed(21)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
numpy.set_printoptions(precision=3, suppress=True, linewidth=100)
m = 10
n = 10
r = 1
U0, s0, V0 = SparseUtils.generateLowRank((m, n), r)
numInds = 10
inds = numpy.unique(numpy.random.randint(0, m*n, numInds))
A = SparseUtils.reconstructLowRank(U0, s0, V0, inds)
#print(A.todense())
t0 = s0 + numpy.random.rand(s0.shape[0])*0.1
B = SparseUtils.reconstructLowRank(U0, t0, V0, inds)
#print(B.todense())
k = 9
U, s, V = sparsesvd(A, k)
U2, s2, V2 = sparsesvd(B, k)
print(A.todense())
print(U0)
print(s0)
print(V0)
print(U)
print(s)
print(V)
print(U2)
print(s2)
print(V2)
print(U2.T.dot(U))
#print(s2)
print(V2.T.dot(V))
#Now try for fixed singular vectors
|
<commit_before><commit_msg>Test if there is a pattern in sparsity structure<commit_after>
|
"""
Some code to see if there is any pattern in the SVD of a matrix with fixed
sparsity structure.
"""
import sys
import logging
import scipy.sparse
import numpy
from sparsesvd import sparsesvd
from exp.util.SparseUtils import SparseUtils
numpy.random.seed(21)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
numpy.set_printoptions(precision=3, suppress=True, linewidth=100)
m = 10
n = 10
r = 1
U0, s0, V0 = SparseUtils.generateLowRank((m, n), r)
numInds = 10
inds = numpy.unique(numpy.random.randint(0, m*n, numInds))
A = SparseUtils.reconstructLowRank(U0, s0, V0, inds)
#print(A.todense())
t0 = s0 + numpy.random.rand(s0.shape[0])*0.1
B = SparseUtils.reconstructLowRank(U0, t0, V0, inds)
#print(B.todense())
k = 9
U, s, V = sparsesvd(A, k)
U2, s2, V2 = sparsesvd(B, k)
print(A.todense())
print(U0)
print(s0)
print(V0)
print(U)
print(s)
print(V)
print(U2)
print(s2)
print(V2)
print(U2.T.dot(U))
#print(s2)
print(V2.T.dot(V))
#Now try for fixed singular vectors
|
Test if there is a pattern in sparsity structure
"""
Some code to see if there is any pattern in the SVD of a matrix with fixed
sparsity structure.
"""
import sys
import logging
import scipy.sparse
import numpy
from sparsesvd import sparsesvd
from exp.util.SparseUtils import SparseUtils
numpy.random.seed(21)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
numpy.set_printoptions(precision=3, suppress=True, linewidth=100)
m = 10
n = 10
r = 1
U0, s0, V0 = SparseUtils.generateLowRank((m, n), r)
numInds = 10
inds = numpy.unique(numpy.random.randint(0, m*n, numInds))
A = SparseUtils.reconstructLowRank(U0, s0, V0, inds)
#print(A.todense())
t0 = s0 + numpy.random.rand(s0.shape[0])*0.1
B = SparseUtils.reconstructLowRank(U0, t0, V0, inds)
#print(B.todense())
k = 9
U, s, V = sparsesvd(A, k)
U2, s2, V2 = sparsesvd(B, k)
print(A.todense())
print(U0)
print(s0)
print(V0)
print(U)
print(s)
print(V)
print(U2)
print(s2)
print(V2)
print(U2.T.dot(U))
#print(s2)
print(V2.T.dot(V))
#Now try for fixed singular vectors
|
<commit_before><commit_msg>Test if there is a pattern in sparsity structure<commit_after>
"""
Some code to see if there is any pattern in the SVD of a matrix with fixed
sparsity structure.
"""
import sys
import logging
import scipy.sparse
import numpy
from sparsesvd import sparsesvd
from exp.util.SparseUtils import SparseUtils
numpy.random.seed(21)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
numpy.set_printoptions(precision=3, suppress=True, linewidth=100)
m = 10
n = 10
r = 1
U0, s0, V0 = SparseUtils.generateLowRank((m, n), r)
numInds = 10
inds = numpy.unique(numpy.random.randint(0, m*n, numInds))
A = SparseUtils.reconstructLowRank(U0, s0, V0, inds)
#print(A.todense())
t0 = s0 + numpy.random.rand(s0.shape[0])*0.1
B = SparseUtils.reconstructLowRank(U0, t0, V0, inds)
#print(B.todense())
k = 9
U, s, V = sparsesvd(A, k)
U2, s2, V2 = sparsesvd(B, k)
print(A.todense())
print(U0)
print(s0)
print(V0)
print(U)
print(s)
print(V)
print(U2)
print(s2)
print(V2)
print(U2.T.dot(U))
#print(s2)
print(V2.T.dot(V))
#Now try for fixed singular vectors
|
|
a73e2975d74a3a15a4180974fc6dd75789bc8e78
|
zephyr/lib/stats.py
|
zephyr/lib/stats.py
|
import os
import logging
STATS_DIR = os.path.join(os.path.dirname(__file__), "..", "..", "stats")
def update_stat(name, value):
try:
os.mkdir(STATS_DIR)
except OSError:
pass
base_filename = os.path.join(STATS_DIR, name)
tmp_filename = base_filename + ".new"
try:
with file(tmp_filename, "w") as stat_file:
stat_file.write("%s\n" % (str(value),))
os.rename(tmp_filename, base_filename)
except (OSError, IOError) as e:
logging.info("Could not update statistic '%s': %s" % (name, e))
|
Add simple function for outputting statistics for use with munin
|
Add simple function for outputting statistics for use with munin
Eventually we will want to replace this with statsd.
(imported from commit 64246e9f2d13d72f53d009a5e3e456bc6be6296b)
|
Python
|
apache-2.0
|
ericzhou2008/zulip,calvinleenyc/zulip,wavelets/zulip,littledogboy/zulip,Jianchun1/zulip,praveenaki/zulip,bssrdf/zulip,noroot/zulip,bluesea/zulip,jimmy54/zulip,timabbott/zulip,bluesea/zulip,SmartPeople/zulip,natanovia/zulip,karamcnair/zulip,aliceriot/zulip,nicholasbs/zulip,SmartPeople/zulip,amallia/zulip,mdavid/zulip,xuanhan863/zulip,wdaher/zulip,littledogboy/zulip,zulip/zulip,paxapy/zulip,glovebx/zulip,qq1012803704/zulip,j831/zulip,amanharitsh123/zulip,mansilladev/zulip,christi3k/zulip,AZtheAsian/zulip,stamhe/zulip,hustlzp/zulip,dattatreya303/zulip,pradiptad/zulip,Gabriel0402/zulip,guiquanz/zulip,praveenaki/zulip,RobotCaleb/zulip,ashwinirudrappa/zulip,bitemyapp/zulip,bssrdf/zulip,peguin40/zulip,aliceriot/zulip,synicalsyntax/zulip,niftynei/zulip,hengqujushi/zulip,susansls/zulip,christi3k/zulip,peguin40/zulip,amallia/zulip,eeshangarg/zulip,peguin40/zulip,andersk/zulip,zorojean/zulip,aliceriot/zulip,dhcrzf/zulip,tiansiyuan/zulip,xuxiao/zulip,ahmadassaf/zulip,fw1121/zulip,ericzhou2008/zulip,armooo/zulip,souravbadami/zulip,ufosky-server/zulip,mahim97/zulip,EasonYi/zulip,showell/zulip,dattatreya303/zulip,akuseru/zulip,sup95/zulip,hustlzp/zulip,thomasboyt/zulip,kou/zulip,akuseru/zulip,gkotian/zulip,aakash-cr7/zulip,yuvipanda/zulip,timabbott/zulip,Qgap/zulip,sup95/zulip,wangdeshui/zulip,SmartPeople/zulip,firstblade/zulip,Batterfii/zulip,yuvipanda/zulip,mohsenSy/zulip,m1ssou/zulip,grave-w-grave/zulip,Gabriel0402/zulip,armooo/zulip,MayB/zulip,atomic-labs/zulip,niftynei/zulip,JanzTam/zulip,sonali0901/zulip,stamhe/zulip,dotcool/zulip,avastu/zulip,vabs22/zulip,he15his/zulip,schatt/zulip,jeffcao/zulip,vakila/zulip,zwily/zulip,souravbadami/zulip,Frouk/zulip,dawran6/zulip,tbutter/zulip,alliejones/zulip,he15his/zulip,zachallaun/zulip,Diptanshu8/zulip,sharmaeklavya2/zulip,PhilSk/zulip,JanzTam/zulip,willingc/zulip,RobotCaleb/zulip,SmartPeople/zulip,hackerkid/zulip,Cheppers/zulip,peguin40/zulip,technicalpickles/zulip,xuxiao/zulip,suxinde2009/zulip,hafeez3000/zulip,Galexrt/zulip,cosmicAsymmetry/zulip,hj3938/zulip,dxq-git/zulip,moria/zulip,arpith/zulip,isht3/zulip,Suninus/zulip,thomasboyt/zulip,aps-sids/zulip,themass/zulip,dattatreya303/zulip,littledogboy/zulip,grave-w-grave/zulip,thomasboyt/zulip,vikas-parashar/zulip,zacps/zulip,guiquanz/zulip,nicholasbs/zulip,dhcrzf/zulip,codeKonami/zulip,dnmfarrell/zulip,jerryge/zulip,joyhchen/zulip,christi3k/zulip,zachallaun/zulip,arpitpanwar/zulip,tiansiyuan/zulip,verma-varsha/zulip,seapasulli/zulip,mdavid/zulip,RobotCaleb/zulip,ryansnowboarder/zulip,arpith/zulip,krtkmj/zulip,mohsenSy/zulip,peguin40/zulip,bitemyapp/zulip,mansilladev/zulip,eastlhu/zulip,armooo/zulip,wdaher/zulip,ahmadassaf/zulip,ericzhou2008/zulip,bowlofstew/zulip,qq1012803704/zulip,dxq-git/zulip,Gabriel0402/zulip,synicalsyntax/zulip,noroot/zulip,levixie/zulip,babbage/zulip,susansls/zulip,vabs22/zulip,bssrdf/zulip,punchagan/zulip,SmartPeople/zulip,vabs22/zulip,ashwinirudrappa/zulip,seapasulli/zulip,vakila/zulip,noroot/zulip,adnanh/zulip,ryansnowboarder/zulip,vakila/zulip,peiwei/zulip,bastianh/zulip,stamhe/zulip,alliejones/zulip,rht/zulip,natanovia/zulip,xuanhan863/zulip,vaidap/zulip,moria/zulip,voidException/zulip,mansilladev/zulip,mahim97/zulip,ashwinirudrappa/zulip,Suninus/zulip,Frouk/zulip,JanzTam/zulip,babbage/zulip,atomic-labs/zulip,stamhe/zulip,technicalpickles/zulip,shubhamdhama/zulip,natanovia/zulip,yocome/zulip,vakila/zulip,amyliu345/zulip,zhaoweigg/zulip,Frouk/zulip,vikas-parashar/zulip,Vallher/zulip,johnny9/zulip,blaze225/zulip,firstblade/zulip,atomic-labs/zulip,PhilSk/zulip,MayB/zulip,proliming/z
ulip,vikas-parashar/zulip,fw1121/zulip,xuxiao/zulip,christi3k/zulip,ikasumiwt/zulip,karamcnair/zulip,lfranchi/zulip,bssrdf/zulip,tdr130/zulip,paxapy/zulip,wdaher/zulip,hustlzp/zulip,Drooids/zulip,so0k/zulip,deer-hope/zulip,bssrdf/zulip,synicalsyntax/zulip,zacps/zulip,KJin99/zulip,zulip/zulip,hj3938/zulip,easyfmxu/zulip,sharmaeklavya2/zulip,dattatreya303/zulip,huangkebo/zulip,wweiradio/zulip,hustlzp/zulip,eeshangarg/zulip,udxxabp/zulip,eastlhu/zulip,hafeez3000/zulip,developerfm/zulip,LeeRisk/zulip,bluesea/zulip,bowlofstew/zulip,showell/zulip,technicalpickles/zulip,shubhamdhama/zulip,dotcool/zulip,jackrzhang/zulip,itnihao/zulip,so0k/zulip,johnnygaddarr/zulip,proliming/zulip,krtkmj/zulip,dxq-git/zulip,TigorC/zulip,stamhe/zulip,zacps/zulip,Juanvulcano/zulip,grave-w-grave/zulip,Cheppers/zulip,JPJPJPOPOP/zulip,MayB/zulip,alliejones/zulip,hustlzp/zulip,zorojean/zulip,kou/zulip,samatdav/zulip,shaunstanislaus/zulip,jackrzhang/zulip,dwrpayne/zulip,hafeez3000/zulip,natanovia/zulip,reyha/zulip,avastu/zulip,he15his/zulip,Suninus/zulip,bastianh/zulip,ahmadassaf/zulip,tommyip/zulip,joyhchen/zulip,technicalpickles/zulip,wavelets/zulip,aliceriot/zulip,isht3/zulip,tbutter/zulip,akuseru/zulip,Qgap/zulip,yocome/zulip,samatdav/zulip,umkay/zulip,reyha/zulip,fw1121/zulip,MariaFaBella85/zulip,themass/zulip,PhilSk/zulip,ryanbackman/zulip,blaze225/zulip,jeffcao/zulip,ahmadassaf/zulip,avastu/zulip,sharmaeklavya2/zulip,niftynei/zulip,bluesea/zulip,fw1121/zulip,KJin99/zulip,karamcnair/zulip,huangkebo/zulip,Juanvulcano/zulip,kou/zulip,susansls/zulip,paxapy/zulip,noroot/zulip,udxxabp/zulip,mdavid/zulip,zhaoweigg/zulip,xuxiao/zulip,itnihao/zulip,suxinde2009/zulip,PaulPetring/zulip,Drooids/zulip,Batterfii/zulip,joyhchen/zulip,voidException/zulip,jerryge/zulip,hj3938/zulip,KingxBanana/zulip,krtkmj/zulip,zulip/zulip,mohsenSy/zulip,swinghu/zulip,lfranchi/zulip,hj3938/zulip,LeeRisk/zulip,itnihao/zulip,amyliu345/zulip,ufosky-server/zulip,jrowan/zulip,moria/zulip,amanharitsh123/zulip,johnny9/zulip,dotcool/zulip,rishig/zulip,yuvipanda/zulip,tiansiyuan/zulip,voidException/zulip,DazWorrall/zulip,huangkebo/zulip,deer-hope/zulip,schatt/zulip,he15his/zulip,hengqujushi/zulip,tommyip/zulip,j831/zulip,babbage/zulip,LeeRisk/zulip,TigorC/zulip,adnanh/zulip,stamhe/zulip,ipernet/zulip,yocome/zulip,yocome/zulip,levixie/zulip,armooo/zulip,xuxiao/zulip,tiansiyuan/zulip,synicalsyntax/zulip,aps-sids/zulip,jackrzhang/zulip,shaunstanislaus/zulip,natanovia/zulip,glovebx/zulip,hafeez3000/zulip,lfranchi/zulip,verma-varsha/zulip,schatt/zulip,wweiradio/zulip,themass/zulip,arpitpanwar/zulip,esander91/zulip,seapasulli/zulip,ahmadassaf/zulip,swinghu/zulip,AZtheAsian/zulip,jerryge/zulip,babbage/zulip,calvinleenyc/zulip,mahim97/zulip,ryanbackman/zulip,kaiyuanheshang/zulip,johnnygaddarr/zulip,Gabriel0402/zulip,rht/zulip,developerfm/zulip,udxxabp/zulip,brainwane/zulip,jackrzhang/zulip,Qgap/zulip,tdr130/zulip,jonesgithub/zulip,JPJPJPOPOP/zulip,jonesgithub/zulip,codeKonami/zulip,m1ssou/zulip,hackerkid/zulip,hustlzp/zulip,fw1121/zulip,he15his/zulip,bowlofstew/zulip,aakash-cr7/zulip,eeshangarg/zulip,jessedhillon/zulip,LeeRisk/zulip,Vallher/zulip,bastianh/zulip,ericzhou2008/zulip,jimmy54/zulip,joshisa/zulip,gkotian/zulip,vikas-parashar/zulip,EasonYi/zulip,MariaFaBella85/zulip,seapasulli/zulip,EasonYi/zulip,peiwei/zulip,joyhchen/zulip,m1ssou/zulip,verma-varsha/zulip,brockwhittaker/zulip,ikasumiwt/zulip,vakila/zulip,peguin40/zulip,atomic-labs/zulip,willingc/zulip,jrowan/zulip,grave-w-grave/zulip,esander91/zulip,johnny9/zulip,MayB/zulip,zorojean/zulip,easyfmxu/zulip,q
q1012803704/zulip,glovebx/zulip,Diptanshu8/zulip,dnmfarrell/zulip,aakash-cr7/zulip,suxinde2009/zulip,showell/zulip,johnnygaddarr/zulip,zacps/zulip,jainayush975/zulip,luyifan/zulip,thomasboyt/zulip,zhaoweigg/zulip,kokoar/zulip,TigorC/zulip,cosmicAsymmetry/zulip,Galexrt/zulip,umkay/zulip,jackrzhang/zulip,shrikrishnaholla/zulip,brockwhittaker/zulip,dnmfarrell/zulip,dotcool/zulip,dawran6/zulip,pradiptad/zulip,so0k/zulip,mansilladev/zulip,armooo/zulip,jackrzhang/zulip,littledogboy/zulip,johnnygaddarr/zulip,ikasumiwt/zulip,yocome/zulip,swinghu/zulip,jonesgithub/zulip,codeKonami/zulip,RobotCaleb/zulip,brainwane/zulip,mohsenSy/zulip,showell/zulip,rht/zulip,thomasboyt/zulip,kou/zulip,rishig/zulip,vakila/zulip,Jianchun1/zulip,dawran6/zulip,cosmicAsymmetry/zulip,ApsOps/zulip,swinghu/zulip,avastu/zulip,arpitpanwar/zulip,MariaFaBella85/zulip,niftynei/zulip,brainwane/zulip,krtkmj/zulip,tbutter/zulip,krtkmj/zulip,bitemyapp/zulip,KingxBanana/zulip,brockwhittaker/zulip,amallia/zulip,saitodisse/zulip,bitemyapp/zulip,jeffcao/zulip,umkay/zulip,udxxabp/zulip,Diptanshu8/zulip,esander91/zulip,sonali0901/zulip,wangdeshui/zulip,dattatreya303/zulip,sup95/zulip,m1ssou/zulip,dhcrzf/zulip,shrikrishnaholla/zulip,aakash-cr7/zulip,proliming/zulip,Batterfii/zulip,karamcnair/zulip,jphilipsen05/zulip,samatdav/zulip,esander91/zulip,LeeRisk/zulip,luyifan/zulip,wdaher/zulip,bluesea/zulip,wavelets/zulip,DazWorrall/zulip,adnanh/zulip,huangkebo/zulip,kou/zulip,glovebx/zulip,rishig/zulip,firstblade/zulip,dxq-git/zulip,pradiptad/zulip,jessedhillon/zulip,babbage/zulip,proliming/zulip,dhcrzf/zulip,hafeez3000/zulip,JPJPJPOPOP/zulip,nicholasbs/zulip,m1ssou/zulip,cosmicAsymmetry/zulip,themass/zulip,zwily/zulip,EasonYi/zulip,itnihao/zulip,cosmicAsymmetry/zulip,verma-varsha/zulip,jphilipsen05/zulip,firstblade/zulip,gigawhitlocks/zulip,PaulPetring/zulip,qq1012803704/zulip,dhcrzf/zulip,wweiradio/zulip,eastlhu/zulip,thomasboyt/zulip,joshisa/zulip,zwily/zulip,tdr130/zulip,Juanvulcano/zulip,saitodisse/zulip,codeKonami/zulip,JPJPJPOPOP/zulip,jeffcao/zulip,zachallaun/zulip,he15his/zulip,eeshangarg/zulip,dawran6/zulip,voidException/zulip,nicholasbs/zulip,LAndreas/zulip,PaulPetring/zulip,xuanhan863/zulip,AZtheAsian/zulip,arpitpanwar/zulip,Vallher/zulip,kokoar/zulip,praveenaki/zulip,AZtheAsian/zulip,proliming/zulip,dnmfarrell/zulip,bowlofstew/zulip,shaunstanislaus/zulip,bitemyapp/zulip,DazWorrall/zulip,guiquanz/zulip,ipernet/zulip,tommyip/zulip,ashwinirudrappa/zulip,aakash-cr7/zulip,eeshangarg/zulip,amanharitsh123/zulip,easyfmxu/zulip,pradiptad/zulip,niftynei/zulip,Juanvulcano/zulip,JPJPJPOPOP/zulip,LAndreas/zulip,calvinleenyc/zulip,jainayush975/zulip,Batterfii/zulip,aliceriot/zulip,DazWorrall/zulip,levixie/zulip,joshisa/zulip,noroot/zulip,willingc/zulip,dhcrzf/zulip,Qgap/zulip,KingxBanana/zulip,shrikrishnaholla/zulip,sharmaeklavya2/zulip,Suninus/zulip,andersk/zulip,dwrpayne/zulip,johnnygaddarr/zulip,hustlzp/zulip,littledogboy/zulip,vakila/zulip,arpith/zulip,PhilSk/zulip,zorojean/zulip,kokoar/zulip,punchagan/zulip,Batterfii/zulip,xuanhan863/zulip,ApsOps/zulip,rht/zulip,wangdeshui/zulip,jessedhillon/zulip,themass/zulip,arpith/zulip,calvinleenyc/zulip,j831/zulip,blaze225/zulip,yuvipanda/zulip,Drooids/zulip,ufosky-server/zulip,dxq-git/zulip,jainayush975/zulip,glovebx/zulip,itnihao/zulip,willingc/zulip,hackerkid/zulip,Cheppers/zulip,zwily/zulip,amallia/zulip,dwrpayne/zulip,christi3k/zulip,ashwinirudrappa/zulip,Frouk/zulip,niftynei/zulip,littledogboy/zulip,synicalsyntax/zulip,adnanh/zulip,ufosky-server/zulip,jrowan/zulip,johnnygaddarr/zulip,Suninus/zulip
,huangkebo/zulip,atomic-labs/zulip,rishig/zulip,DazWorrall/zulip,dnmfarrell/zulip,isht3/zulip,joshisa/zulip,vaidap/zulip,amallia/zulip,vabs22/zulip,voidException/zulip,EasonYi/zulip,sharmaeklavya2/zulip,sup95/zulip,guiquanz/zulip,mansilladev/zulip,punchagan/zulip,souravbadami/zulip,swinghu/zulip,kokoar/zulip,seapasulli/zulip,amyliu345/zulip,pradiptad/zulip,Galexrt/zulip,shaunstanislaus/zulip,peiwei/zulip,moria/zulip,voidException/zulip,hafeez3000/zulip,mansilladev/zulip,cosmicAsymmetry/zulip,jerryge/zulip,sup95/zulip,saitodisse/zulip,zofuthan/zulip,jimmy54/zulip,jimmy54/zulip,PaulPetring/zulip,guiquanz/zulip,dwrpayne/zulip,susansls/zulip,ryanbackman/zulip,glovebx/zulip,LAndreas/zulip,MariaFaBella85/zulip,hj3938/zulip,alliejones/zulip,wavelets/zulip,udxxabp/zulip,esander91/zulip,KJin99/zulip,gkotian/zulip,zofuthan/zulip,levixie/zulip,babbage/zulip,luyifan/zulip,ashwinirudrappa/zulip,deer-hope/zulip,hengqujushi/zulip,jeffcao/zulip,wweiradio/zulip,jimmy54/zulip,PhilSk/zulip,gigawhitlocks/zulip,ryansnowboarder/zulip,Frouk/zulip,alliejones/zulip,brainwane/zulip,rishig/zulip,ufosky-server/zulip,dotcool/zulip,eastlhu/zulip,hackerkid/zulip,ipernet/zulip,wangdeshui/zulip,PaulPetring/zulip,zulip/zulip,PhilSk/zulip,mdavid/zulip,sharmaeklavya2/zulip,timabbott/zulip,noroot/zulip,vaidap/zulip,ipernet/zulip,Suninus/zulip,jainayush975/zulip,andersk/zulip,hj3938/zulip,bluesea/zulip,Diptanshu8/zulip,shrikrishnaholla/zulip,punchagan/zulip,amallia/zulip,Cheppers/zulip,jrowan/zulip,aps-sids/zulip,firstblade/zulip,hackerkid/zulip,jonesgithub/zulip,vabs22/zulip,rishig/zulip,hackerkid/zulip,levixie/zulip,wdaher/zulip,levixie/zulip,susansls/zulip,joshisa/zulip,jeffcao/zulip,amyliu345/zulip,amanharitsh123/zulip,amyliu345/zulip,developerfm/zulip,Vallher/zulip,mdavid/zulip,Galexrt/zulip,zwily/zulip,ahmadassaf/zulip,arpith/zulip,zofuthan/zulip,RobotCaleb/zulip,krtkmj/zulip,developerfm/zulip,bitemyapp/zulip,vaidap/zulip,krtkmj/zulip,timabbott/zulip,kaiyuanheshang/zulip,ericzhou2008/zulip,peiwei/zulip,verma-varsha/zulip,lfranchi/zulip,jeffcao/zulip,hj3938/zulip,Diptanshu8/zulip,jphilipsen05/zulip,dnmfarrell/zulip,lfranchi/zulip,codeKonami/zulip,schatt/zulip,LeeRisk/zulip,reyha/zulip,arpitpanwar/zulip,vikas-parashar/zulip,kokoar/zulip,TigorC/zulip,Cheppers/zulip,wangdeshui/zulip,deer-hope/zulip,punchagan/zulip,developerfm/zulip,wweiradio/zulip,ipernet/zulip,Drooids/zulip,willingc/zulip,jonesgithub/zulip,showell/zulip,wavelets/zulip,akuseru/zulip,so0k/zulip,punchagan/zulip,LAndreas/zulip,AZtheAsian/zulip,ApsOps/zulip,KJin99/zulip,JanzTam/zulip,arpith/zulip,umkay/zulip,isht3/zulip,wavelets/zulip,dnmfarrell/zulip,kokoar/zulip,JanzTam/zulip,johnny9/zulip,huangkebo/zulip,timabbott/zulip,RobotCaleb/zulip,ryansnowboarder/zulip,seapasulli/zulip,hengqujushi/zulip,easyfmxu/zulip,rht/zulip,udxxabp/zulip,ryanbackman/zulip,so0k/zulip,avastu/zulip,kaiyuanheshang/zulip,andersk/zulip,saitodisse/zulip,zorojean/zulip,bluesea/zulip,schatt/zulip,reyha/zulip,JanzTam/zulip,amyliu345/zulip,dawran6/zulip,jimmy54/zulip,calvinleenyc/zulip,amanharitsh123/zulip,easyfmxu/zulip,hayderimran7/zulip,samatdav/zulip,jerryge/zulip,tommyip/zulip,zofuthan/zulip,dwrpayne/zulip,tiansiyuan/zulip,karamcnair/zulip,jerryge/zulip,atomic-labs/zulip,zhaoweigg/zulip,dxq-git/zulip,shaunstanislaus/zulip,ikasumiwt/zulip,aps-sids/zulip,j831/zulip,tdr130/zulip,firstblade/zulip,bastianh/zulip,reyha/zulip,vaidap/zulip,lfranchi/zulip,avastu/zulip,adnanh/zulip,tdr130/zulip,moria/zulip,aps-sids/zulip,KingxBanana/zulip,voidException/zulip,samatdav/zulip,nicholasbs/zulip,peiwei/zu
lip,Juanvulcano/zulip,aliceriot/zulip,tbutter/zulip,adnanh/zulip,joyhchen/zulip,thomasboyt/zulip,EasonYi/zulip,jackrzhang/zulip,ashwinirudrappa/zulip,eeshangarg/zulip,praveenaki/zulip,blaze225/zulip,hengqujushi/zulip,qq1012803704/zulip,karamcnair/zulip,littledogboy/zulip,jessedhillon/zulip,shaunstanislaus/zulip,so0k/zulip,xuxiao/zulip,ipernet/zulip,andersk/zulip,shubhamdhama/zulip,tommyip/zulip,mahim97/zulip,bowlofstew/zulip,christi3k/zulip,kaiyuanheshang/zulip,jerryge/zulip,sonali0901/zulip,brainwane/zulip,wweiradio/zulip,shubhamdhama/zulip,jessedhillon/zulip,shaunstanislaus/zulip,ryansnowboarder/zulip,so0k/zulip,Frouk/zulip,swinghu/zulip,karamcnair/zulip,brainwane/zulip,akuseru/zulip,natanovia/zulip,jimmy54/zulip,rht/zulip,Gabriel0402/zulip,Gabriel0402/zulip,alliejones/zulip,fw1121/zulip,tbutter/zulip,gigawhitlocks/zulip,vikas-parashar/zulip,ryansnowboarder/zulip,saitodisse/zulip,blaze225/zulip,itnihao/zulip,hayderimran7/zulip,mahim97/zulip,brainwane/zulip,ikasumiwt/zulip,hayderimran7/zulip,mdavid/zulip,MayB/zulip,jessedhillon/zulip,shubhamdhama/zulip,KingxBanana/zulip,avastu/zulip,jonesgithub/zulip,ApsOps/zulip,SmartPeople/zulip,kaiyuanheshang/zulip,he15his/zulip,arpitpanwar/zulip,bssrdf/zulip,schatt/zulip,sonali0901/zulip,MayB/zulip,hafeez3000/zulip,umkay/zulip,ericzhou2008/zulip,deer-hope/zulip,hengqujushi/zulip,tbutter/zulip,johnnygaddarr/zulip,verma-varsha/zulip,zhaoweigg/zulip,souravbadami/zulip,wdaher/zulip,paxapy/zulip,johnny9/zulip,huangkebo/zulip,moria/zulip,EasonYi/zulip,j831/zulip,DazWorrall/zulip,Batterfii/zulip,xuanhan863/zulip,babbage/zulip,wweiradio/zulip,zachallaun/zulip,glovebx/zulip,sup95/zulip,firstblade/zulip,AZtheAsian/zulip,technicalpickles/zulip,umkay/zulip,shrikrishnaholla/zulip,shubhamdhama/zulip,zulip/zulip,Galexrt/zulip,technicalpickles/zulip,grave-w-grave/zulip,isht3/zulip,proliming/zulip,showell/zulip,zachallaun/zulip,Jianchun1/zulip,ryanbackman/zulip,jessedhillon/zulip,Gabriel0402/zulip,ApsOps/zulip,shrikrishnaholla/zulip,Juanvulcano/zulip,Vallher/zulip,ryansnowboarder/zulip,kaiyuanheshang/zulip,proliming/zulip,ericzhou2008/zulip,esander91/zulip,gigawhitlocks/zulip,amallia/zulip,paxapy/zulip,m1ssou/zulip,Jianchun1/zulip,suxinde2009/zulip,synicalsyntax/zulip,joshisa/zulip,MariaFaBella85/zulip,timabbott/zulip,ikasumiwt/zulip,yuvipanda/zulip,pradiptad/zulip,levixie/zulip,RobotCaleb/zulip,jainayush975/zulip,hackerkid/zulip,KingxBanana/zulip,dotcool/zulip,hayderimran7/zulip,alliejones/zulip,showell/zulip,hayderimran7/zulip,isht3/zulip,jphilipsen05/zulip,punchagan/zulip,TigorC/zulip,ahmadassaf/zulip,bowlofstew/zulip,akuseru/zulip,themass/zulip,saitodisse/zulip,kokoar/zulip,ufosky-server/zulip,shrikrishnaholla/zulip,luyifan/zulip,willingc/zulip,johnny9/zulip,rishig/zulip,grave-w-grave/zulip,MariaFaBella85/zulip,LAndreas/zulip,JPJPJPOPOP/zulip,jonesgithub/zulip,zorojean/zulip,wdaher/zulip,yuvipanda/zulip,developerfm/zulip,vabs22/zulip,bowlofstew/zulip,qq1012803704/zulip,nicholasbs/zulip,j831/zulip,gkotian/zulip,peiwei/zulip,gkotian/zulip,johnny9/zulip,shubhamdhama/zulip,wavelets/zulip,gigawhitlocks/zulip,wangdeshui/zulip,LAndreas/zulip,hengqujushi/zulip,bastianh/zulip,m1ssou/zulip,Galexrt/zulip,brockwhittaker/zulip,gigawhitlocks/zulip,qq1012803704/zulip,ufosky-server/zulip,Cheppers/zulip,Drooids/zulip,calvinleenyc/zulip,luyifan/zulip,zhaoweigg/zulip,bastianh/zulip,deer-hope/zulip,Vallher/zulip,Suninus/zulip,joyhchen/zulip,MayB/zulip,zofuthan/zulip,praveenaki/zulip,themass/zulip,tiansiyuan/zulip,brockwhittaker/zulip,Drooids/zulip,tdr130/zulip,yocome/zulip,aps-sids/zu
lip,aakash-cr7/zulip,dwrpayne/zulip,bitemyapp/zulip,vaidap/zulip,dattatreya303/zulip,eastlhu/zulip,zulip/zulip,codeKonami/zulip,mahim97/zulip,zwily/zulip,joshisa/zulip,arpitpanwar/zulip,swinghu/zulip,KJin99/zulip,ipernet/zulip,tommyip/zulip,kou/zulip,PaulPetring/zulip,jainayush975/zulip,tdr130/zulip,udxxabp/zulip,natanovia/zulip,developerfm/zulip,bastianh/zulip,Vallher/zulip,hayderimran7/zulip,rht/zulip,zachallaun/zulip,armooo/zulip,zulip/zulip,seapasulli/zulip,kaiyuanheshang/zulip,dotcool/zulip,gigawhitlocks/zulip,yuvipanda/zulip,hayderimran7/zulip,xuanhan863/zulip,ryanbackman/zulip,zachallaun/zulip,jrowan/zulip,umkay/zulip,susansls/zulip,jphilipsen05/zulip,lfranchi/zulip,yocome/zulip,sonali0901/zulip,Batterfii/zulip,mansilladev/zulip,jphilipsen05/zulip,eeshangarg/zulip,adnanh/zulip,souravbadami/zulip,guiquanz/zulip,stamhe/zulip,aliceriot/zulip,gkotian/zulip,zofuthan/zulip,sonali0901/zulip,Qgap/zulip,blaze225/zulip,kou/zulip,suxinde2009/zulip,ApsOps/zulip,zhaoweigg/zulip,peiwei/zulip,zorojean/zulip,dhcrzf/zulip,dwrpayne/zulip,zwily/zulip,bssrdf/zulip,pradiptad/zulip,esander91/zulip,tiansiyuan/zulip,jrowan/zulip,Drooids/zulip,xuanhan863/zulip,zacps/zulip,amanharitsh123/zulip,tommyip/zulip,ApsOps/zulip,mohsenSy/zulip,ikasumiwt/zulip,itnihao/zulip,nicholasbs/zulip,Qgap/zulip,codeKonami/zulip,TigorC/zulip,samatdav/zulip,noroot/zulip,KJin99/zulip,Galexrt/zulip,Jianchun1/zulip,mohsenSy/zulip,Cheppers/zulip,KJin99/zulip,akuseru/zulip,timabbott/zulip,technicalpickles/zulip,souravbadami/zulip,Diptanshu8/zulip,praveenaki/zulip,tbutter/zulip,armooo/zulip,schatt/zulip,gkotian/zulip,moria/zulip,brockwhittaker/zulip,LAndreas/zulip,dxq-git/zulip,LeeRisk/zulip,andersk/zulip,Frouk/zulip,wangdeshui/zulip,andersk/zulip,synicalsyntax/zulip,zacps/zulip,JanzTam/zulip,Qgap/zulip,guiquanz/zulip,suxinde2009/zulip,DazWorrall/zulip,luyifan/zulip,reyha/zulip,paxapy/zulip,saitodisse/zulip,Jianchun1/zulip,atomic-labs/zulip,dawran6/zulip,mdavid/zulip,easyfmxu/zulip,zofuthan/zulip,praveenaki/zulip,eastlhu/zulip,easyfmxu/zulip,xuxiao/zulip,PaulPetring/zulip,MariaFaBella85/zulip,fw1121/zulip,luyifan/zulip,willingc/zulip,deer-hope/zulip,suxinde2009/zulip,eastlhu/zulip,aps-sids/zulip
|
Add simple function for outputting statistics for use with munin
Eventually we will want to replace this with statsd.
(imported from commit 64246e9f2d13d72f53d009a5e3e456bc6be6296b)
|
import os
import logging
STATS_DIR = os.path.join(os.path.dirname(__file__), "..", "..", "stats")
def update_stat(name, value):
try:
os.mkdir(STATS_DIR)
except OSError:
pass
base_filename = os.path.join(STATS_DIR, name)
tmp_filename = base_filename + ".new"
try:
with file(tmp_filename, "w") as stat_file:
stat_file.write("%s\n" % (str(value),))
os.rename(tmp_filename, base_filename)
except (OSError, IOError) as e:
logging.info("Could not update statistic '%s': %s" % (name, e))
|
<commit_before><commit_msg>Add simple function for outputting statistics for use with munin
Eventually we will want to replace this with statsd.
(imported from commit 64246e9f2d13d72f53d009a5e3e456bc6be6296b)<commit_after>
|
import os
import logging
STATS_DIR = os.path.join(os.path.dirname(__file__), "..", "..", "stats")
def update_stat(name, value):
try:
os.mkdir(STATS_DIR)
except OSError:
pass
base_filename = os.path.join(STATS_DIR, name)
tmp_filename = base_filename + ".new"
try:
with file(tmp_filename, "w") as stat_file:
stat_file.write("%s\n" % (str(value),))
os.rename(tmp_filename, base_filename)
except (OSError, IOError) as e:
logging.info("Could not update statistic '%s': %s" % (name, e))
|
Add simple function for outputting statistics for use with munin
Eventually we will want to replace this with statsd.
(imported from commit 64246e9f2d13d72f53d009a5e3e456bc6be6296b)import os
import logging
STATS_DIR = os.path.join(os.path.dirname(__file__), "..", "..", "stats")
def update_stat(name, value):
try:
os.mkdir(STATS_DIR)
except OSError:
pass
base_filename = os.path.join(STATS_DIR, name)
tmp_filename = base_filename + ".new"
try:
with file(tmp_filename, "w") as stat_file:
stat_file.write("%s\n" % (str(value),))
os.rename(tmp_filename, base_filename)
except (OSError, IOError) as e:
logging.info("Could not update statistic '%s': %s" % (name, e))
|
<commit_before><commit_msg>Add simple function for outputting statistics for use with munin
Eventually we will want to replace this with statsd.
(imported from commit 64246e9f2d13d72f53d009a5e3e456bc6be6296b)<commit_after>import os
import logging
STATS_DIR = os.path.join(os.path.dirname(__file__), "..", "..", "stats")
def update_stat(name, value):
try:
os.mkdir(STATS_DIR)
except OSError:
pass
base_filename = os.path.join(STATS_DIR, name)
tmp_filename = base_filename + ".new"
try:
with file(tmp_filename, "w") as stat_file:
stat_file.write("%s\n" % (str(value),))
os.rename(tmp_filename, base_filename)
except (OSError, IOError) as e:
logging.info("Could not update statistic '%s': %s" % (name, e))
|
|
561fa74a6e2572df913140a0cf9f57857a9eaddd
|
leaderboard/contributors/migrations/0007_auto_20151113_2229.py
|
leaderboard/contributors/migrations/0007_auto_20151113_2229.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('contributors', '0006_auto_20151013_2148'),
]
operations = [
migrations.AlterModelOptions(
name='contributorrank',
options={'ordering': ('rank',)},
),
]
|
Set default ordering for ContributorRank
|
Set default ordering for ContributorRank
|
Python
|
mpl-2.0
|
JaredKerim-Mozilla/leaderboard-server,JaredKerim-Mozilla/leaderboard-server,mozilla-services/location-leaderboard,mozilla-services/location-leaderboard,mozilla-services/location-leaderboard,mozilla-services/location-leaderboard,JaredKerim-Mozilla/leaderboard-server,JaredKerim-Mozilla/leaderboard-server,mozilla-services/location-leaderboard,JaredKerim-Mozilla/leaderboard-server
|
Set default ordering for ContributorRank
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('contributors', '0006_auto_20151013_2148'),
]
operations = [
migrations.AlterModelOptions(
name='contributorrank',
options={'ordering': ('rank',)},
),
]
|
<commit_before><commit_msg>Set default ordering for ContributorRank<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('contributors', '0006_auto_20151013_2148'),
]
operations = [
migrations.AlterModelOptions(
name='contributorrank',
options={'ordering': ('rank',)},
),
]
|
Set default ordering for ContributorRank# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('contributors', '0006_auto_20151013_2148'),
]
operations = [
migrations.AlterModelOptions(
name='contributorrank',
options={'ordering': ('rank',)},
),
]
|
<commit_before><commit_msg>Set default ordering for ContributorRank<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('contributors', '0006_auto_20151013_2148'),
]
operations = [
migrations.AlterModelOptions(
name='contributorrank',
options={'ordering': ('rank',)},
),
]
|
|
2f0145b59be4a251b31223d44fdca7f3fb211fa8
|
svm_train.py
|
svm_train.py
|
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.externals import joblib
print "Grabbing data..."
training_text_collection_f = open("training_text_collection.pkl", "rb")
training_text_collection = joblib.load(training_text_collection_f)
training_text_collection_f.close()
training_targets_f = open("training_targets.pkl", "rb")
training_targets = joblib.load(training_targets_f)
training_targets_f.close()
print("Vectorizing data...")
vectorizer = TfidfVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = "english", \
max_features = 2500, \
min_df = 5, \
max_df = 0.5)
train_tfidf = vectorizer.fit_transform(training_text_collection)
save_vect = open("svm_tfidf_vect.pkl", "wb")
joblib.dump(vectorizer, save_vect)
save_vect.close()
clf = SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, n_iter=5, random_state=42).fit(train_tfidf, training_targets)
save_clf = open("svm_clf.pkl", "wb")
joblib.dump(clf, save_clf)
save_clf.close()
|
Create Support Vector Machine vectorizer and classifier pickles
|
Create Support Vector Machine vectorizer and classifier pickles
|
Python
|
mit
|
npentella/CuriousCorpus,npentella/CuriousCorpus,npentella/CuriousCorpus
|
Create Support Vector Machine vectorizer and classifier pickles
|
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.externals import joblib
print "Grabbing data..."
training_text_collection_f = open("training_text_collection.pkl", "rb")
training_text_collection = joblib.load(training_text_collection_f)
training_text_collection_f.close()
training_targets_f = open("training_targets.pkl", "rb")
training_targets = joblib.load(training_targets_f)
training_targets_f.close()
print("Vectorizing data...")
vectorizer = TfidfVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = "english", \
max_features = 2500, \
min_df = 5, \
max_df = 0.5)
train_tfidf = vectorizer.fit_transform(training_text_collection)
save_vect = open("svm_tfidf_vect.pkl", "wb")
joblib.dump(vectorizer, save_vect)
save_vect.close()
clf = SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, n_iter=5, random_state=42).fit(train_tfidf, training_targets)
save_clf = open("svm_clf.pkl", "wb")
joblib.dump(clf, save_clf)
save_clf.close()
|
<commit_before><commit_msg>Create Support Vector Machine vectorizer and classifier pickles<commit_after>
|
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.externals import joblib
print "Grabbing data..."
training_text_collection_f = open("training_text_collection.pkl", "rb")
training_text_collection = joblib.load(training_text_collection_f)
training_text_collection_f.close()
training_targets_f = open("training_targets.pkl", "rb")
training_targets = joblib.load(training_targets_f)
training_targets_f.close()
print("Vectorizing data...")
vectorizer = TfidfVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = "english", \
max_features = 2500, \
min_df = 5, \
max_df = 0.5)
train_tfidf = vectorizer.fit_transform(training_text_collection)
save_vect = open("svm_tfidf_vect.pkl", "wb")
joblib.dump(vectorizer, save_vect)
save_vect.close()
clf = SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, n_iter=5, random_state=42).fit(train_tfidf, training_targets)
save_clf = open("svm_clf.pkl", "wb")
joblib.dump(clf, save_clf)
save_clf.close()
|
Create Support Vector Machine vectorizer and classifier picklesfrom sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.externals import joblib
print "Grabbing data..."
training_text_collection_f = open("training_text_collection.pkl", "rb")
training_text_collection = joblib.load(training_text_collection_f)
training_text_collection_f.close()
training_targets_f = open("training_targets.pkl", "rb")
training_targets = joblib.load(training_targets_f)
training_targets_f.close()
print("Vectorizing data...")
vectorizer = TfidfVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = "english", \
max_features = 2500, \
min_df = 5, \
max_df = 0.5)
train_tfidf = vectorizer.fit_transform(training_text_collection)
save_vect = open("svm_tfidf_vect.pkl", "wb")
joblib.dump(vectorizer, save_vect)
save_vect.close()
clf = SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, n_iter=5, random_state=42).fit(train_tfidf, training_targets)
save_clf = open("svm_clf.pkl", "wb")
joblib.dump(clf, save_clf)
save_clf.close()
|
<commit_before><commit_msg>Create Support Vector Machine vectorizer and classifier pickles<commit_after>from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.externals import joblib
print "Grabbing data..."
training_text_collection_f = open("training_text_collection.pkl", "rb")
training_text_collection = joblib.load(training_text_collection_f)
training_text_collection_f.close()
training_targets_f = open("training_targets.pkl", "rb")
training_targets = joblib.load(training_targets_f)
training_targets_f.close()
print("Vectorizing data...")
vectorizer = TfidfVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = "english", \
max_features = 2500, \
min_df = 5, \
max_df = 0.5)
train_tfidf = vectorizer.fit_transform(training_text_collection)
save_vect = open("svm_tfidf_vect.pkl", "wb")
joblib.dump(vectorizer, save_vect)
save_vect.close()
clf = SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, n_iter=5, random_state=42).fit(train_tfidf, training_targets)
save_clf = open("svm_clf.pkl", "wb")
joblib.dump(clf, save_clf)
save_clf.close()
|
|
c0685e9257af79e9ea2393611e7ad00a00a3734e
|
pyweaving/tests/test_draft.py
|
pyweaving/tests/test_draft.py
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from unittest import TestCase
from .. import Draft, Color
class TestDraft(TestCase):
def test_basic_tabby(self):
draft = Draft(num_shafts=2, num_treadles=2)
black = Color((0, 0, 0))
draft.add_warp_thread(
color=black,
shaft=0,
)
draft.add_warp_thread(
color=black,
shaft=1,
)
draft.add_weft_thread(
color=black,
shafts=[0],
)
draft.add_weft_thread(
color=black,
shafts=[1],
)
|
Add a trivially basic test
|
Add a trivially basic test
|
Python
|
mit
|
storborg/pyweaving
|
Add a trivially basic test
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from unittest import TestCase
from .. import Draft, Color
class TestDraft(TestCase):
def test_basic_tabby(self):
draft = Draft(num_shafts=2, num_treadles=2)
black = Color((0, 0, 0))
draft.add_warp_thread(
color=black,
shaft=0,
)
draft.add_warp_thread(
color=black,
shaft=1,
)
draft.add_weft_thread(
color=black,
shafts=[0],
)
draft.add_weft_thread(
color=black,
shafts=[1],
)
|
<commit_before><commit_msg>Add a trivially basic test<commit_after>
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from unittest import TestCase
from .. import Draft, Color
class TestDraft(TestCase):
def test_basic_tabby(self):
draft = Draft(num_shafts=2, num_treadles=2)
black = Color((0, 0, 0))
draft.add_warp_thread(
color=black,
shaft=0,
)
draft.add_warp_thread(
color=black,
shaft=1,
)
draft.add_weft_thread(
color=black,
shafts=[0],
)
draft.add_weft_thread(
color=black,
shafts=[1],
)
|
Add a trivially basic testfrom __future__ import (absolute_import, division, print_function,
unicode_literals)
from unittest import TestCase
from .. import Draft, Color
class TestDraft(TestCase):
def test_basic_tabby(self):
draft = Draft(num_shafts=2, num_treadles=2)
black = Color((0, 0, 0))
draft.add_warp_thread(
color=black,
shaft=0,
)
draft.add_warp_thread(
color=black,
shaft=1,
)
draft.add_weft_thread(
color=black,
shafts=[0],
)
draft.add_weft_thread(
color=black,
shafts=[1],
)
|
<commit_before><commit_msg>Add a trivially basic test<commit_after>from __future__ import (absolute_import, division, print_function,
unicode_literals)
from unittest import TestCase
from .. import Draft, Color
class TestDraft(TestCase):
def test_basic_tabby(self):
draft = Draft(num_shafts=2, num_treadles=2)
black = Color((0, 0, 0))
draft.add_warp_thread(
color=black,
shaft=0,
)
draft.add_warp_thread(
color=black,
shaft=1,
)
draft.add_weft_thread(
color=black,
shafts=[0],
)
draft.add_weft_thread(
color=black,
shafts=[1],
)
|
|
41fcb60868f0551603cf25c8616f942fcfd008f4
|
scripts/restore_node.py
|
scripts/restore_node.py
|
"""Restores a deleted node.
NOTE: Only use this for nodes that have no addons except for OSFStorage and Wiki.
"""
import logging
import sys
from framework.transactions.context import TokuTransaction
from website.models import Node, NodeLog
from website.app import init_app
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def hide_deleted_logs(node):
for log in node.logs:
if log.action in {NodeLog.PROJECT_DELETED, NodeLog.NODE_REMOVED}:
logger.info('Hiding log {}'.format(log._id))
log.should_hide = True
log.save()
def restore_node(node):
logger.info('Restoring node {}'.format(node._id))
assert set([e.config.short_name for e in node.get_addons()]) == {'osfstorage', 'wiki'}
node.is_deleted = False
node.deleted_date = None
hide_deleted_logs(node)
node.save()
def main():
dry_run = False
if '--dry' in sys.argv:
dry_run = True
if not dry_run:
script_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
node_id = sys.argv[1]
node = Node.load(node_id)
if not node:
logger.error('Node "{}" not found'.format(node_id))
sys.exit(1)
with TokuTransaction():
for each in node.node_and_primary_descendants():
restore_node(each)
if dry_run:
raise Exception('Dry Run -- Aborting Transaction')
logger.info('Finished restoring node {}'.format(node_id))
if __name__ == '__main__':
main()
|
Add script to restore a deleted node
|
Add script to restore a deleted node
|
Python
|
apache-2.0
|
alexschiller/osf.io,cwisecarver/osf.io,erinspace/osf.io,CenterForOpenScience/osf.io,Nesiehr/osf.io,leb2dg/osf.io,emetsger/osf.io,chrisseto/osf.io,caneruguz/osf.io,binoculars/osf.io,alexschiller/osf.io,Johnetordoff/osf.io,mluo613/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,caneruguz/osf.io,laurenrevere/osf.io,sloria/osf.io,sloria/osf.io,felliott/osf.io,CenterForOpenScience/osf.io,caneruguz/osf.io,sloria/osf.io,aaxelb/osf.io,adlius/osf.io,caseyrollins/osf.io,Nesiehr/osf.io,binoculars/osf.io,acshi/osf.io,cwisecarver/osf.io,chrisseto/osf.io,baylee-d/osf.io,alexschiller/osf.io,erinspace/osf.io,monikagrabowska/osf.io,alexschiller/osf.io,hmoco/osf.io,HalcyonChimera/osf.io,felliott/osf.io,mluo613/osf.io,mattclark/osf.io,mfraezz/osf.io,alexschiller/osf.io,acshi/osf.io,hmoco/osf.io,Johnetordoff/osf.io,crcresearch/osf.io,mfraezz/osf.io,baylee-d/osf.io,brianjgeiger/osf.io,leb2dg/osf.io,emetsger/osf.io,mluo613/osf.io,chennan47/osf.io,icereval/osf.io,rdhyee/osf.io,CenterForOpenScience/osf.io,felliott/osf.io,monikagrabowska/osf.io,TomBaxter/osf.io,laurenrevere/osf.io,rdhyee/osf.io,pattisdr/osf.io,monikagrabowska/osf.io,caseyrollins/osf.io,aaxelb/osf.io,adlius/osf.io,aaxelb/osf.io,baylee-d/osf.io,monikagrabowska/osf.io,cwisecarver/osf.io,chennan47/osf.io,leb2dg/osf.io,rdhyee/osf.io,pattisdr/osf.io,caneruguz/osf.io,emetsger/osf.io,adlius/osf.io,Nesiehr/osf.io,chrisseto/osf.io,acshi/osf.io,binoculars/osf.io,mfraezz/osf.io,TomBaxter/osf.io,Johnetordoff/osf.io,mattclark/osf.io,HalcyonChimera/osf.io,emetsger/osf.io,mluo613/osf.io,CenterForOpenScience/osf.io,icereval/osf.io,erinspace/osf.io,hmoco/osf.io,saradbowman/osf.io,acshi/osf.io,rdhyee/osf.io,HalcyonChimera/osf.io,hmoco/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,saradbowman/osf.io,adlius/osf.io,monikagrabowska/osf.io,pattisdr/osf.io,cwisecarver/osf.io,crcresearch/osf.io,brianjgeiger/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,Nesiehr/osf.io,cslzchen/osf.io,laurenrevere/osf.io,acshi/osf.io,cslzchen/osf.io,crcresearch/osf.io,TomBaxter/osf.io,Johnetordoff/osf.io,felliott/osf.io,chennan47/osf.io,cslzchen/osf.io,caseyrollins/osf.io,icereval/osf.io,chrisseto/osf.io,mattclark/osf.io,mluo613/osf.io,leb2dg/osf.io
|
Add script to restore a deleted node
|
"""Restores a deleted node.
NOTE: Only use this for nodes that have no addons except for OSFStorage and Wiki.
"""
import logging
import sys
from framework.transactions.context import TokuTransaction
from website.models import Node, NodeLog
from website.app import init_app
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def hide_deleted_logs(node):
for log in node.logs:
if log.action in {NodeLog.PROJECT_DELETED, NodeLog.NODE_REMOVED}:
logger.info('Hiding log {}'.format(log._id))
log.should_hide = True
log.save()
def restore_node(node):
logger.info('Restoring node {}'.format(node._id))
assert set([e.config.short_name for e in node.get_addons()]) == {'osfstorage', 'wiki'}
node.is_deleted = False
node.deleted_date = None
hide_deleted_logs(node)
node.save()
def main():
dry_run = False
if '--dry' in sys.argv:
dry_run = True
if not dry_run:
script_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
node_id = sys.argv[1]
node = Node.load(node_id)
if not node:
logger.error('Node "{}" not found'.format(node_id))
sys.exit(1)
with TokuTransaction():
for each in node.node_and_primary_descendants():
restore_node(each)
if dry_run:
raise Exception('Dry Run -- Aborting Transaction')
logger.info('Finished restoring node {}'.format(node_id))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to restore a deleted node<commit_after>
|
"""Restores a deleted node.
NOTE: Only use this for nodes that have no addons except for OSFStorage and Wiki.
"""
import logging
import sys
from framework.transactions.context import TokuTransaction
from website.models import Node, NodeLog
from website.app import init_app
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def hide_deleted_logs(node):
for log in node.logs:
if log.action in {NodeLog.PROJECT_DELETED, NodeLog.NODE_REMOVED}:
logger.info('Hiding log {}'.format(log._id))
log.should_hide = True
log.save()
def restore_node(node):
logger.info('Restoring node {}'.format(node._id))
assert set([e.config.short_name for e in node.get_addons()]) == {'osfstorage', 'wiki'}
node.is_deleted = False
node.deleted_date = None
hide_deleted_logs(node)
node.save()
def main():
dry_run = False
if '--dry' in sys.argv:
dry_run = True
if not dry_run:
script_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
node_id = sys.argv[1]
node = Node.load(node_id)
if not node:
logger.error('Node "{}" not found'.format(node_id))
sys.exit(1)
with TokuTransaction():
for each in node.node_and_primary_descendants():
restore_node(each)
if dry_run:
raise Exception('Dry Run -- Aborting Transaction')
logger.info('Finished restoring node {}'.format(node_id))
if __name__ == '__main__':
main()
|
Add script to restore a deleted node"""Restores a deleted node.
NOTE: Only use this for nodes that have no addons except for OSFStorage and Wiki.
"""
import logging
import sys
from framework.transactions.context import TokuTransaction
from website.models import Node, NodeLog
from website.app import init_app
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def hide_deleted_logs(node):
for log in node.logs:
if log.action in {NodeLog.PROJECT_DELETED, NodeLog.NODE_REMOVED}:
logger.info('Hiding log {}'.format(log._id))
log.should_hide = True
log.save()
def restore_node(node):
logger.info('Restoring node {}'.format(node._id))
assert set([e.config.short_name for e in node.get_addons()]) == {'osfstorage', 'wiki'}
node.is_deleted = False
node.deleted_date = None
hide_deleted_logs(node)
node.save()
def main():
dry_run = False
if '--dry' in sys.argv:
dry_run = True
if not dry_run:
script_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
node_id = sys.argv[1]
node = Node.load(node_id)
if not node:
logger.error('Node "{}" not found'.format(node_id))
sys.exit(1)
with TokuTransaction():
for each in node.node_and_primary_descendants():
restore_node(each)
if dry_run:
raise Exception('Dry Run -- Aborting Transaction')
logger.info('Finished restoring node {}'.format(node_id))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to restore a deleted node<commit_after>"""Restores a deleted node.
NOTE: Only use this for nodes that have no addons except for OSFStorage and Wiki.
"""
import logging
import sys
from framework.transactions.context import TokuTransaction
from website.models import Node, NodeLog
from website.app import init_app
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def hide_deleted_logs(node):
for log in node.logs:
if log.action in {NodeLog.PROJECT_DELETED, NodeLog.NODE_REMOVED}:
logger.info('Hiding log {}'.format(log._id))
log.should_hide = True
log.save()
def restore_node(node):
logger.info('Restoring node {}'.format(node._id))
assert set([e.config.short_name for e in node.get_addons()]) == {'osfstorage', 'wiki'}
node.is_deleted = False
node.deleted_date = None
hide_deleted_logs(node)
node.save()
def main():
dry_run = False
if '--dry' in sys.argv:
dry_run = True
if not dry_run:
script_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
node_id = sys.argv[1]
node = Node.load(node_id)
if not node:
logger.error('Node "{}" not found'.format(node_id))
sys.exit(1)
with TokuTransaction():
for each in node.node_and_primary_descendants():
restore_node(each)
if dry_run:
raise Exception('Dry Run -- Aborting Transaction')
logger.info('Finished restoring node {}'.format(node_id))
if __name__ == '__main__':
main()
|
|
2e73497a0f1a417f1b047b3857bd72117c2a33d3
|
closure_default_parameter.py
|
closure_default_parameter.py
|
#!/usr/bin/env python3
def create_multipliers_late_binding():
""" https://www.toptal.com/python/top-10-mistakes-that-python-programmers-make create closure
with late binding effect. """
return [lambda x: i * x for i in range(5)]
def create_multipliers():
""" https://www.toptal.com/python/top-10-mistakes-that-python-programmers-make create closure
with default parameters(i = i) to prevent late binding effect. """
return [lambda x, i = i: i * x for i in range(5)]
if __name__ == "__main__":
for mult in create_multipliers_late_binding():
print(mult.__name__)
print(mult.__defaults__)
print(mult(2))
for mult in create_multipliers():
print(mult.__name__)
print(mult.__defaults__)
print(mult(2))
|
Use closure default parameter to set arguments correctly in list comprehension.
|
Use closure default parameter to set arguments correctly in list comprehension.
|
Python
|
apache-2.0
|
sjh/python
|
Use closure default parameter to set arguments correctly in list comprehension.
|
#!/usr/bin/env python3
def create_multipliers_late_binding():
""" https://www.toptal.com/python/top-10-mistakes-that-python-programmers-make create closure
with late binding effect. """
return [lambda x: i * x for i in range(5)]
def create_multipliers():
""" https://www.toptal.com/python/top-10-mistakes-that-python-programmers-make create closure
with default parameters(i = i) to prevent late binding effect. """
return [lambda x, i = i: i * x for i in range(5)]
if __name__ == "__main__":
for mult in create_multipliers_late_binding():
print(mult.__name__)
print(mult.__defaults__)
print(mult(2))
for mult in create_multipliers():
print(mult.__name__)
print(mult.__defaults__)
print(mult(2))
|
<commit_before><commit_msg>Use closure default parameter to set arguments correctly in list comprehension.<commit_after>
|
#!/usr/bin/env python3
def create_multipliers_late_binding():
""" https://www.toptal.com/python/top-10-mistakes-that-python-programmers-make create closure
with late binding effect. """
return [lambda x: i * x for i in range(5)]
def create_multipliers():
""" https://www.toptal.com/python/top-10-mistakes-that-python-programmers-make create closure
with default parameters(i = i) to prevent late binding effect. """
return [lambda x, i = i: i * x for i in range(5)]
if __name__ == "__main__":
for mult in create_multipliers_late_binding():
print(mult.__name__)
print(mult.__defaults__)
print(mult(2))
for mult in create_multipliers():
print(mult.__name__)
print(mult.__defaults__)
print(mult(2))
|
Use closure default parameter to set arguments correctly in list comprehension.#!/usr/bin/env python3
def create_multipliers_late_binding():
""" https://www.toptal.com/python/top-10-mistakes-that-python-programmers-make create closure
with late binding effect. """
return [lambda x: i * x for i in range(5)]
def create_multipliers():
""" https://www.toptal.com/python/top-10-mistakes-that-python-programmers-make create closure
with default parameters(i = i) to prevent late binding effect. """
return [lambda x, i = i: i * x for i in range(5)]
if __name__ == "__main__":
for mult in create_multipliers_late_binding():
print(mult.__name__)
print(mult.__defaults__)
print(mult(2))
for mult in create_multipliers():
print(mult.__name__)
print(mult.__defaults__)
print(mult(2))
|
<commit_before><commit_msg>Use closure default parameter to set arguments correctly in list comprehension.<commit_after>#!/usr/bin/env python3
def create_multipliers_late_binding():
""" https://www.toptal.com/python/top-10-mistakes-that-python-programmers-make create closure
with late binding effect. """
return [lambda x: i * x for i in range(5)]
def create_multipliers():
""" https://www.toptal.com/python/top-10-mistakes-that-python-programmers-make create closure
with default parameters(i = i) to prevent late binding effect. """
return [lambda x, i = i: i * x for i in range(5)]
if __name__ == "__main__":
for mult in create_multipliers_late_binding():
print(mult.__name__)
print(mult.__defaults__)
print(mult(2))
for mult in create_multipliers():
print(mult.__name__)
print(mult.__defaults__)
print(mult(2))
|
|
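Editor's note: a minimal sketch (not part of the commit above) condensing the two factories to show why the default-parameter idiom matters. With late binding every lambda closes over the same loop variable, which is 4 by the time the comprehension finishes; binding it as a default argument freezes the value at definition time.
# Hypothetical demonstration of the late-binding pitfall fixed above.
late = [lambda x: i * x for i in range(5)]
fixed = [lambda x, i=i: i * x for i in range(5)]
print([f(2) for f in late])   # [8, 8, 8, 8, 8] -- every closure sees i == 4
print([f(2) for f in fixed])  # [0, 2, 4, 6, 8] -- each default captured its own i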
dc26ebcd2437f4ade2a0692ebe79af0f4f664097
|
elpiwear/Rpi/gpio.py
|
elpiwear/Rpi/gpio.py
|
# The MIT License (MIT)
#
# Copyright (c) 2015 Frederic Jacob
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Simple proxy class for access of the GPIO bus on the Raspberry Pi.
#
import RPi.GPIO as GPIO
IN = GPIO.IN
OUT = GPIO.OUT
class gpio:
count = 0
def __init__(self, pin, direction):
gpio.count = gpio.count + 1
self.pin = pin
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin, direction)
def __del__(self):
gpio.count = gpio.count - 1
if gpio.count == 0:
GPIO.cleanup()
def input(self):
return GPIO.input(self.pin)
def output(self, value):
GPIO.output(self.pin, value)
def on(self):
GPIO.output(self.pin, 1)
def off(self):
GPIO.output(self.pin, 0)
|
Add the GPIO proxy class for the Raspberry Pi
|
Add the GPIO proxy class for the Raspberry Pi
|
Python
|
mit
|
fjacob21/pycon2015
|
Add the GPIO proxy class for the Raspberry Pi
|
# The MIT License (MIT)
#
# Copyright (c) 2015 Frederic Jacob
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Simple proxy class for access of the GPIO bus on the Raspberry Pi.
#
import RPi.GPIO as GPIO
IN = GPIO.IN
OUT = GPIO.OUT
class gpio:
count = 0
def __init__(self, pin, direction):
gpio.count = gpio.count + 1
self.pin = pin
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin, direction)
def __del__(self):
gpio.count = gpio.count - 1
if gpio.count == 0:
GPIO.cleanup()
def input(self):
return GPIO.input(self.pin)
def output(self, value):
GPIO.output(self.pin, value)
def on(self):
GPIO.output(self.pin, 1)
def off(self):
GPIO.output(self.pin, 0)
|
<commit_before><commit_msg>Add the GPIO proxy class for the Raspberry Pi<commit_after>
|
# The MIT License (MIT)
#
# Copyright (c) 2015 Frederic Jacob
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Simple proxy class for access of the GPIO bus on the Raspberry Pi.
#
import RPi.GPIO as GPIO
IN = GPIO.IN
OUT = GPIO.OUT
class gpio:
count = 0
def __init__(self, pin, direction):
gpio.count = gpio.count + 1
self.pin = pin
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin, direction)
def __del__(self):
gpio.count = gpio.count - 1
if gpio.count == 0:
GPIO.cleanup()
def input(self):
return GPIO.input(self.pin)
def output(self, value):
GPIO.output(self.pin, value)
def on(self):
GPIO.output(self.pin, 1)
def off(self):
GPIO.output(self.pin, 0)
|
Add the GPIO proxy class for the Raspberry Pi# The MIT License (MIT)
#
# Copyright (c) 2015 Frederic Jacob
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Simple proxy class for access of the GPIO bus on the Raspberry Pi.
#
import RPi.GPIO as GPIO
IN = GPIO.IN
OUT = GPIO.OUT
class gpio:
count = 0
def __init__(self, pin, direction):
gpio.count = gpio.count + 1
self.pin = pin
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin, direction)
def __del__(self):
gpio.count = gpio.count - 1
if gpio.count == 0:
GPIO.cleanup()
def input(self):
return GPIO.input(self.pin)
def output(self, value):
GPIO.output(self.pin, value)
def on(self):
GPIO.output(self.pin, 1)
def off(self):
GPIO.output(self.pin, 0)
|
<commit_before><commit_msg>Add the GPIO proxy class for the Raspberry Pi<commit_after># The MIT License (MIT)
#
# Copyright (c) 2015 Frederic Jacob
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Simple proxy class for access of the GPIO bus on the Raspberry Pi.
#
import RPi.GPIO as GPIO
IN = GPIO.IN
OUT = GPIO.OUT
class gpio:
count = 0
def __init__(self, pin, direction):
gpio.count = gpio.count + 1
self.pin = pin
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin, direction)
def __del__(self):
gpio.count = gpio.count - 1
if gpio.count == 0:
GPIO.cleanup()
def input(self):
return GPIO.input(self.pin)
def output(self, value):
GPIO.output(self.pin, value)
def on(self):
GPIO.output(self.pin, 1)
def off(self):
GPIO.output(self.pin, 0)
|
|
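Editor's note: a hedged usage sketch for the proxy class above; the BCM pin numbers, wiring and import path are assumptions, and it only runs on a Raspberry Pi with RPi.GPIO installed.
# Hypothetical example: blink an LED on BCM pin 18 and read a button on BCM pin 23.
import time
from gpio import gpio, IN, OUT  # assumes elpiwear/Rpi is on the import path

led = gpio(18, OUT)
button = gpio(23, IN)
for _ in range(5):
    led.on()          # drive the pin high
    time.sleep(0.5)
    led.off()         # drive the pin low
    time.sleep(0.5)
print('pressed' if button.input() else 'released')
del led, button       # destroying the last instance triggers GPIO.cleanup()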
aff608fcd354bad4a2637f6cad6a6f90b1971bf8
|
test_echo.py
|
test_echo.py
|
# -*- coding: utf-8 -*-
import subprocess
import pytest
def test_basic(string="This is a test."):
process = subprocess.Popen(['./echo_client.py', string],
stdout=subprocess.PIPE)
assert string == process.stdout.readline()
def test_unicode():
with pytest.raises(AssertionError):
inp = 'Testing «ταБЬℓσ»: 1<2 & 4+1>3, now 20 off!'
inp = inp.decode('utf-8')
process = subprocess.Popen(['./echo_client.py', inp],
stdout=subprocess.PIPE)
assert inp == process.stdout.readline()
def test_long():
test_basic("Running the server script in one terminal should allow you to run \
the client script in a separate terminal. The client script should\
take an argument which is the message to send. Upon completing, the\
response from the server should be printed to stdout.")
def test_exact():
test_basic("input me")
|
Write tests for socket: short, long and exact.
|
Write tests for socket: short, long and exact.
|
Python
|
mit
|
bm5w/network_tools
|
Write tests for socket: short, long and exact.
|
# -*- coding: utf-8 -*-
import subprocess
import pytest
def test_basic(string="This is a test."):
process = subprocess.Popen(['./echo_client.py', string],
stdout=subprocess.PIPE)
assert string == process.stdout.readline()
def test_unicode():
with pytest.raises(AssertionError):
inp = 'Testing «ταБЬℓσ»: 1<2 & 4+1>3, now 20 off!'
inp = inp.decode('utf-8')
process = subprocess.Popen(['./echo_client.py', inp],
stdout=subprocess.PIPE)
assert inp == process.stdout.readline()
def test_long():
test_basic("Running the server script in one terminal should allow you to run \
the client script in a separate terminal. The client script should\
take an argument which is the message to send. Upon completing, the\
response from the server should be printed to stdout.")
def test_exact():
test_basic("input me")
|
<commit_before><commit_msg>Write tests for socket: short, long and exact.<commit_after>
|
# -*- coding: utf-8 -*-
import subprocess
import pytest
def test_basic(string="This is a test."):
process = subprocess.Popen(['./echo_client.py', string],
stdout=subprocess.PIPE)
assert string == process.stdout.readline()
def test_unicode():
with pytest.raises(AssertionError):
inp = 'Testing «ταБЬℓσ»: 1<2 & 4+1>3, now 20 off!'
inp = inp.decode('utf-8')
process = subprocess.Popen(['./echo_client.py', inp],
stdout=subprocess.PIPE)
assert inp == process.stdout.readline()
def test_long():
test_basic("Running the server script in one terminal should allow you to run \
the client script in a separate terminal. The client script should\
take an argument which is the message to send. Upon completing, the\
response from the server should be printed to stdout.")
def test_exact():
test_basic("input me")
|
Write tests for socket: short, long and exact.# -*- coding: utf-8 -*-
import subprocess
import pytest
def test_basic(string="This is a test."):
process = subprocess.Popen(['./echo_client.py', string],
stdout=subprocess.PIPE)
assert string == process.stdout.readline()
def test_unicode():
with pytest.raises(AssertionError):
inp = 'Testing «ταБЬℓσ»: 1<2 & 4+1>3, now 20 off!'
inp = inp.decode('utf-8')
process = subprocess.Popen(['./echo_client.py', inp],
stdout=subprocess.PIPE)
assert inp == process.stdout.readline()
def test_long():
test_basic("Running the server script in one terminal should allow you to run \
the client script in a separate terminal. The client script should\
take an argument which is the message to send. Upon completing, the\
response from the server should be printed to stdout.")
def test_exact():
test_basic("input me")
|
<commit_before><commit_msg>Write tests for socket: short, long and exact.<commit_after># -*- coding: utf-8 -*-
import subprocess
import pytest
def test_basic(string="This is a test."):
process = subprocess.Popen(['./echo_client.py', string],
stdout=subprocess.PIPE)
assert string == process.stdout.readline()
def test_unicode():
with pytest.raises(AssertionError):
inp = 'Testing «ταБЬℓσ»: 1<2 & 4+1>3, now 20 off!'
inp = inp.decode('utf-8')
process = subprocess.Popen(['./echo_client.py', inp],
stdout=subprocess.PIPE)
assert inp == process.stdout.readline()
def test_long():
test_basic("Running the server script in one terminal should allow you to run \
the client script in a separate terminal. The client script should\
take an argument which is the message to send. Upon completing, the\
response from the server should be printed to stdout.")
def test_exact():
test_basic("input me")
|
|
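Editor's note: the tests above shell out to ./echo_client.py and assume an echo server is already listening; the repository's actual server and client are not shown here. A minimal server sketch they could run against (host, port and buffer size are assumptions):
# Hypothetical echo server: accepts one connection and echoes bytes until EOF.
import socket

HOST, PORT = '127.0.0.1', 50000

def serve_once():
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server.bind((HOST, PORT))
        server.listen(1)
        conn, _ = server.accept()
        with conn:
            while True:
                chunk = conn.recv(1024)
                if not chunk:
                    break
                conn.sendall(chunk)  # send the received bytes straight back

if __name__ == '__main__':
    serve_once()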
9bce03d89dad6b69a88632d95988fc42af19557a
|
st2common/st2common/util/versioning.py
|
st2common/st2common/util/versioning.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing various versioning utils.
"""
import semver
__all__ = [
'complex_semver_match'
]
def complex_semver_match(version, version_specifier):
"""
Custom semver match function which also supports complex semver specifiers
such as >=1.6, <2.0, etc.
:rtype: ``bool``
"""
split_version_specifier = version_specifier.split(',')
if len(split_version_specifier) == 1:
# No comma, we can do a simple comparison
return semver.match(version, version_specifier)
else:
# Compare part by part
for version_specifier_part in split_version_specifier:
version_specifier_part = version_specifier_part.strip()
if not semver.match(version, version_specifier_part):
return False
return True
|
Add custom utility function which knows how to compare and match complex semver version specifiers.
|
Add custom utility function which knows how to compare and match complex semver
version specifiers.
|
Python
|
apache-2.0
|
nzlosh/st2,StackStorm/st2,Plexxi/st2,nzlosh/st2,tonybaloney/st2,StackStorm/st2,Plexxi/st2,peak6/st2,Plexxi/st2,lakshmi-kannan/st2,lakshmi-kannan/st2,tonybaloney/st2,Plexxi/st2,peak6/st2,tonybaloney/st2,peak6/st2,StackStorm/st2,lakshmi-kannan/st2,nzlosh/st2,StackStorm/st2,nzlosh/st2
|
Add custom utility function which knows how to compare and match complex semver
version specifiers.
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing various versioning utils.
"""
import semver
__all__ = [
'complex_semver_match'
]
def complex_semver_match(version, version_specifier):
"""
Custom semver match function which also supports complex semver specifiers
such as >=1.6, <2.0, etc.
:rtype: ``bool``
"""
split_version_specifier = version_specifier.split(',')
if len(split_version_specifier) == 1:
# No comma, we can do a simple comparison
return semver.match(version, version_specifier)
else:
# Compare part by part
for version_specifier_part in split_version_specifier:
version_specifier_part = version_specifier_part.strip()
if not semver.match(version, version_specifier_part):
return False
return True
|
<commit_before><commit_msg>Add custom utility function which knows how to compare and match complex semver
version specifiers.<commit_after>
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing various versioning utils.
"""
import semver
__all__ = [
'complex_semver_match'
]
def complex_semver_match(version, version_specifier):
"""
Custom semver match function which also supports complex semver specifiers
such as >=1.6, <2.0, etc.
:rtype: ``bool``
"""
split_version_specifier = version_specifier.split(',')
if len(split_version_specifier) == 1:
# No comma, we can do a simple comparison
return semver.match(version, version_specifier)
else:
# Compare part by part
for version_specifier_part in split_version_specifier:
version_specifier_part = version_specifier_part.strip()
if not semver.match(version, version_specifier_part):
return False
return True
|
Add custom utility function which knows how to compare and match complex semver
version specifiers.# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing various versioning utils.
"""
import semver
__all__ = [
'complex_semver_match'
]
def complex_semver_match(version, version_specifier):
"""
Custom semver match function which also supports complex semver specifiers
such as >=1.6, <2.0, etc.
:rtype: ``bool``
"""
split_version_specifier = version_specifier.split(',')
if len(split_version_specifier) == 1:
# No comma, we can do a simple comparison
return semver.match(version, version_specifier)
else:
# Compare part by part
for version_specifier_part in split_version_specifier:
version_specifier_part = version_specifier_part.strip()
if not semver.match(version, version_specifier_part):
return False
return True
|
<commit_before><commit_msg>Add custom utility function which knows how to compare and match complex semver
version specifiers.<commit_after># Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing various versioning utils.
"""
import semver
__all__ = [
'complex_semver_match'
]
def complex_semver_match(version, version_specifier):
"""
Custom semver match function which also supports complex semver specifiers
such as >=1.6, <2.0, etc.
:rtype: ``bool``
"""
split_version_specifier = version_specifier.split(',')
if len(split_version_specifier) == 1:
# No comma, we can do a simple comparison
return semver.match(version, version_specifier)
else:
# Compare part by part
for version_specifier_part in split_version_specifier:
version_specifier_part = version_specifier_part.strip()
if not semver.match(version, version_specifier_part):
return False
return True
|
|
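Editor's note: illustrative calls for complex_semver_match (the version strings are invented; assumes the semver package st2 depends on is installed):
from st2common.util.versioning import complex_semver_match

print(complex_semver_match('1.6.0', '>=1.6.0,<2.0.0'))  # True  -- both parts match
print(complex_semver_match('2.1.0', '>=1.6.0,<2.0.0'))  # False -- fails the <2.0.0 part
print(complex_semver_match('1.6.1', '>=1.6.0'))         # True  -- single specifier goes straight to semver.match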
7903336bf1190814c04f73ade3e19f0ed5fadef4
|
icekit/management/commands/truncate_postgres_dbdump.py
|
icekit/management/commands/truncate_postgres_dbdump.py
|
import sys # Needed to output unicode see https://code.djangoproject.com/ticket/21933
import re
from django.core.management.base import BaseCommand, CommandError
COPY_TABLENAME_RE = re.compile(r'^COPY ([^ ]+) .* FROM stdin;$')
class Command(BaseCommand):
help = ("Truncate unwanted table data in PostgreSQL DB dump files."
" Output is written to STDOUT")
args = '<pg_dump.sql> <table_to_truncate> [<table2_to_truncate>...]'
def handle(self, *args, **options):
if len(args) < 2:
raise CommandError(
"The <pg_dump.sql> and at least one <table_to_truncate>"
" arguments are required")
filepath = args[0]
tables_to_truncate = args[1:]
truncating = False
with open(filepath, 'rb') as infile:
for line in infile:
# If we are truncating a COPY block, stop doing so when we
# reach the end-of-COPY marker '\.'
if truncating:
if line.startswith('\.'):
truncating = False
else:
continue
# If we encounter a COPY block...
if line.startswith('COPY '):
# ...and the table name matches one we wish to truncate...
match = COPY_TABLENAME_RE.match(line)
tablename = match and match.group(1)
# ...print a comment and start truncating lines
if tablename in tables_to_truncate:
truncating = True
sys.stdout.write("--- TRUNCATED DATA ---\n")
# Echo most file lines to output, unless they are skipped
# during truncation above.
sys.stdout.write(line)
|
Add command to truncate table data in PostgreSQL db dump files
|
Add command to truncate table data in PostgreSQL db dump files
Example usage:
manage.py truncate_postgres_dbdump \
big.sql big_table_name1 big_table_name2 \
> small.sql
|
Python
|
mit
|
ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/django-icekit
|
Add command to truncate table data in PostgreSQL db dump files
Example usage:
manage.py truncate_postgres_dbdump \
big.sql big_table_name1 big_table_name2 \
> small.sql
|
import sys # Needed to output unicode see https://code.djangoproject.com/ticket/21933
import re
from django.core.management.base import BaseCommand, CommandError
COPY_TABLENAME_RE = re.compile(r'^COPY ([^ ]+) .* FROM stdin;$')
class Command(BaseCommand):
help = ("Truncate unwanted table data in PostgreSQL DB dump files."
" Output is written to STDOUT")
args = '<pg_dump.sql> <table_to_truncate> [<table2_to_truncate>...]'
def handle(self, *args, **options):
if len(args) < 2:
raise CommandError(
"The <pg_dump.sql> and at least one <table_to_truncate>"
" arguments are required")
filepath = args[0]
tables_to_truncate = args[1:]
truncating = False
with open(filepath, 'rb') as infile:
for line in infile:
# If we are truncating a COPY block, stop doing so when we
# reach the end-of-COPY marker '\.'
if truncating:
if line.startswith('\.'):
truncating = False
else:
continue
# If we encounter a COPY block...
if line.startswith('COPY '):
# ...and the table name matches one we wish to truncate...
match = COPY_TABLENAME_RE.match(line)
tablename = match and match.group(1)
# ...print a comment and start truncating lines
if tablename in tables_to_truncate:
truncating = True
sys.stdout.write("--- TRUNCATED DATA ---\n")
# Echo most file lines to output, unless they are skipped
# during truncation above.
sys.stdout.write(line)
|
<commit_before><commit_msg>Add command to truncate table data in PostgreSQL db dump files
Example usage:
manage.py truncate_postgres_dbdump \
big.sql big_table_name1 big_table_name2 \
> small.sql<commit_after>
|
import sys # Needed to output unicode see https://code.djangoproject.com/ticket/21933
import re
from django.core.management.base import BaseCommand, CommandError
COPY_TABLENAME_RE = re.compile(r'^COPY ([^ ]+) .* FROM stdin;$')
class Command(BaseCommand):
help = ("Truncate unwanted table data in PostgreSQL DB dump files."
" Output is written to STDOUT")
args = '<pg_dump.sql> <table_to_truncate> [<table2_to_truncate>...]'
def handle(self, *args, **options):
if len(args) < 2:
raise CommandError(
"The <pg_dump.sql> and at least one <table_to_truncate>"
" arguments are required")
filepath = args[0]
tables_to_truncate = args[1:]
truncating = False
with open(filepath, 'rb') as infile:
for line in infile:
# If we are truncating a COPY block, stop doing so when we
# reach the end-of-COPY marker '\.'
if truncating:
if line.startswith('\.'):
truncating = False
else:
continue
# If we encounter a COPY block...
if line.startswith('COPY '):
# ...and the table name matches one we wish to truncate...
match = COPY_TABLENAME_RE.match(line)
tablename = match and match.group(1)
# ...print a comment and start truncating lines
if tablename in tables_to_truncate:
truncating = True
sys.stdout.write("--- TRUNCATED DATA ---\n")
# Echo most file lines to output, unless they are skipped
# during truncation above.
sys.stdout.write(line)
|
Add command to truncate table data in PostgreSQL db dump files
Example usage:
manage.py truncate_postgres_dbdump \
big.sql big_table_name1 big_table_name2 \
> small.sqlimport sys # Needed to output unicode see https://code.djangoproject.com/ticket/21933
import re
from django.core.management.base import BaseCommand, CommandError
COPY_TABLENAME_RE = re.compile(r'^COPY ([^ ]+) .* FROM stdin;$')
class Command(BaseCommand):
help = ("Truncate unwanted table data in PostgreSQL DB dump files."
" Output is written to STDOUT")
args = '<pg_dump.sql> <table_to_truncate> [<table2_to_truncate>...]'
def handle(self, *args, **options):
if len(args) < 2:
raise CommandError(
"The <pg_dump.sql> and at least one <table_to_truncate>"
" arguments are required")
filepath = args[0]
tables_to_truncate = args[1:]
truncating = False
with open(filepath, 'rb') as infile:
for line in infile:
# If we are truncating a COPY block, stop doing so when we
# reach the end-of-COPY marker '\.'
if truncating:
if line.startswith('\.'):
truncating = False
else:
continue
# If we encounter a COPY block...
if line.startswith('COPY '):
# ...and the table name matches one we wish to truncate...
match = COPY_TABLENAME_RE.match(line)
tablename = match and match.group(1)
# ...print a comment and start truncating lines
if tablename in tables_to_truncate:
truncating = True
sys.stdout.write("--- TRUNCATED DATA ---\n")
# Echo most file lines to output, unless they are skipped
# during truncation above.
sys.stdout.write(line)
|
<commit_before><commit_msg>Add command to truncate table data in PostgreSQL db dump files
Example usage:
manage.py truncate_postgres_dbdump \
big.sql big_table_name1 big_table_name2 \
> small.sql<commit_after>import sys # Needed to output unicode see https://code.djangoproject.com/ticket/21933
import re
from django.core.management.base import BaseCommand, CommandError
COPY_TABLENAME_RE = re.compile(r'^COPY ([^ ]+) .* FROM stdin;$')
class Command(BaseCommand):
help = ("Truncate unwanted table data in PostgreSQL DB dump files."
" Output is written to STDOUT")
args = '<pg_dump.sql> <table_to_truncate> [<table2_to_truncate>...]'
def handle(self, *args, **options):
if len(args) < 2:
raise CommandError(
"The <pg_dump.sql> and at least one <table_to_truncate>"
" arguments are required")
filepath = args[0]
tables_to_truncate = args[1:]
truncating = False
with open(filepath, 'rb') as infile:
for line in infile:
# If we are truncating a COPY block, stop doing so when we
# reach the end-of-COPY marker '\.'
if truncating:
if line.startswith('\.'):
truncating = False
else:
continue
# If we encounter a COPY block...
if line.startswith('COPY '):
# ...and the table name matches one we wish to truncate...
match = COPY_TABLENAME_RE.match(line)
tablename = match and match.group(1)
# ...print a comment and start truncating lines
if tablename in tables_to_truncate:
truncating = True
sys.stdout.write("--- TRUNCATED DATA ---\n")
# Echo most file lines to output, unless they are skipped
# during truncation above.
sys.stdout.write(line)
|
|
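Editor's note: a hypothetical before/after to make the truncation behaviour concrete (table names and rows are invented). Given this fragment of a pg_dump file:
COPY big_table_name1 (id, payload) FROM stdin;
1	first row
2	second row
\.
COPY small_table (id) FROM stdin;
1
\.
running manage.py truncate_postgres_dbdump dump.sql big_table_name1 prints the marker just before echoing the matching COPY header, skips that table's data rows, and passes everything else through unchanged:
--- TRUNCATED DATA ---
COPY big_table_name1 (id, payload) FROM stdin;
\.
COPY small_table (id) FROM stdin;
1
\.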
fce66218e682d80076ef744b9206b9cc042891b2
|
corehq/apps/commtrack/management/commands/populate_site_code.py
|
corehq/apps/commtrack/management/commands/populate_site_code.py
|
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import Location
class Command(BaseCommand):
help = 'Generate missing site codes for locations'
def handle(self, *args, **options):
self.stdout.write("Populating site codes...\n")
relevant_ids = set([r['id'] for r in Location.get_db().view(
'locations/by_type',
reduce=False,
).all()])
for loc_id in relevant_ids:
loc = Location.get(loc_id)
if not loc.site_code:
# triggering the save will cause this to get populated
self.stdout.write("Updating location %s\n" % loc.name)
loc.save()
|
Add management command to populate site codes
|
Add management command to populate site codes
|
Python
|
bsd-3-clause
|
SEL-Columbia/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq
|
Add management command to populate site codes
|
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import Location
class Command(BaseCommand):
help = 'Generate missing site codes for locations'
def handle(self, *args, **options):
self.stdout.write("Populating site codes...\n")
relevant_ids = set([r['id'] for r in Location.get_db().view(
'locations/by_type',
reduce=False,
).all()])
for loc_id in relevant_ids:
loc = Location.get(loc_id)
if not loc.site_code:
# triggering the save will cause this to get populated
self.stdout.write("Updating location %s\n" % loc.name)
loc.save()
|
<commit_before><commit_msg>Add management command to populate site codes<commit_after>
|
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import Location
class Command(BaseCommand):
help = 'Generate missing site codes for locations'
def handle(self, *args, **options):
self.stdout.write("Populating site codes...\n")
relevant_ids = set([r['id'] for r in Location.get_db().view(
'locations/by_type',
reduce=False,
).all()])
for loc_id in relevant_ids:
loc = Location.get(loc_id)
if not loc.site_code:
# triggering the save will cause this to get populated
self.stdout.write("Updating location %s\n" % loc.name)
loc.save()
|
Add management command to populate site codesfrom django.core.management.base import BaseCommand
from corehq.apps.locations.models import Location
class Command(BaseCommand):
help = 'Generate missing site codes for locations'
def handle(self, *args, **options):
self.stdout.write("Populating site codes...\n")
relevant_ids = set([r['id'] for r in Location.get_db().view(
'locations/by_type',
reduce=False,
).all()])
for loc_id in relevant_ids:
loc = Location.get(loc_id)
if not loc.site_code:
# triggering the save will cause this to get populated
self.stdout.write("Updating location %s\n" % loc.name)
loc.save()
|
<commit_before><commit_msg>Add management command to populate site codes<commit_after>from django.core.management.base import BaseCommand
from corehq.apps.locations.models import Location
class Command(BaseCommand):
help = 'Generate missing site codes for locations'
def handle(self, *args, **options):
self.stdout.write("Populating site codes...\n")
relevant_ids = set([r['id'] for r in Location.get_db().view(
'locations/by_type',
reduce=False,
).all()])
for loc_id in relevant_ids:
loc = Location.get(loc_id)
if not loc.site_code:
# triggering the save will cause this to get populated
self.stdout.write("Updating location %s\n" % loc.name)
loc.save()
|
|
a5712ad442de5eb9e68e111436049a50fa72505d
|
tests/conftest.py
|
tests/conftest.py
|
"""Global test configuration"""
import os
import betamax
# Ensure cassette dir
CASSETE_DIR = 'tests/cassetes/'
if not os.path.exists(CASSETE_DIR):
os.makedirs(CASSETE_DIR)
with betamax.Betamax.configure() as config:
config.cassette_library_dir = CASSETE_DIR
|
Configure betamax for pytest usage
|
Configure betamax for pytest usage
|
Python
|
agpl-3.0
|
khardix/mccurse
|
Configure betamax for pytest usage
|
"""Global test configuration"""
import os
import betamax
# Ensure cassette dir
CASSETE_DIR = 'tests/cassetes/'
if not os.path.exists(CASSETE_DIR):
os.makedirs(CASSETE_DIR)
with betamax.Betamax.configure() as config:
config.cassette_library_dir = CASSETE_DIR
|
<commit_before><commit_msg>Configure betamax for pytest usage<commit_after>
|
"""Global test configuration"""
import os
import betamax
# Ensure cassette dir
CASSETE_DIR = 'tests/cassetes/'
if not os.path.exists(CASSETE_DIR):
os.makedirs(CASSETE_DIR)
with betamax.Betamax.configure() as config:
config.cassette_library_dir = CASSETE_DIR
|
Configure betamax for pytest usage"""Global test configuration"""
import os
import betamax
# Ensure cassette dir
CASSETE_DIR = 'tests/cassetes/'
if not os.path.exists(CASSETE_DIR):
os.makedirs(CASSETE_DIR)
with betamax.Betamax.configure() as config:
config.cassette_library_dir = CASSETE_DIR
|
<commit_before><commit_msg>Configure betamax for pytest usage<commit_after>"""Global test configuration"""
import os
import betamax
# Ensure cassette dir
CASSETE_DIR = 'tests/cassetes/'
if not os.path.exists(CASSETE_DIR):
os.makedirs(CASSETE_DIR)
with betamax.Betamax.configure() as config:
config.cassette_library_dir = CASSETE_DIR
|
|
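Editor's note: a hedged example of a test that would use this configuration; the URL and cassette name are invented, and the first run records a real HTTP call into tests/cassetes/.
# Hypothetical test module relying on the conftest.py above.
import betamax
import requests

def test_homepage_is_recorded():
    session = requests.Session()
    recorder = betamax.Betamax(session)  # picks up cassette_library_dir from Betamax.configure()
    with recorder.use_cassette('homepage'):
        response = session.get('https://example.com/')
    assert response.status_code == 200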
f44805d8673bdf1382a48962e46ba4e2a7ec7e79
|
delete_node.py
|
delete_node.py
|
"""
Write a function to delete a node (except the tail)
in a singly linked list, given only access to that node.
Supposed the linked list is 1 -> 2 -> 3 -> 4 and
you are given the third node with value 3,
the linked list should become 1 -> 2 -> 4 after calling your function.
"""
def delete_node(node):
if node is None or node.next is None:
raise ValueError
node.val = node.next.val
node.next = node.next.next
|
Add check for None values in node or node.next
|
Add check for None values in node or node.next
We raise a ValueError if the rules of the API are broken (passing a null node or the tail node)
|
Python
|
mit
|
keon/algorithms,amaozhao/algorithms
|
Add check for None values in node or node.next
We raise a ValueError if the rules of the API are broken (passing a null node or the tail node)
|
"""
Write a function to delete a node (except the tail)
in a singly linked list, given only access to that node.
Supposed the linked list is 1 -> 2 -> 3 -> 4 and
you are given the third node with value 3,
the linked list should become 1 -> 2 -> 4 after calling your function.
"""
def delete_node(node):
if node is None or node.next is None:
raise ValueError
node.val = node.next.val
node.next = node.next.next
|
<commit_before><commit_msg>Add check for None values in node or node.next
We raise a ValueError if the rules of the API are broken (passing a null node or the tail node)<commit_after>
|
"""
Write a function to delete a node (except the tail)
in a singly linked list, given only access to that node.
Supposed the linked list is 1 -> 2 -> 3 -> 4 and
you are given the third node with value 3,
the linked list should become 1 -> 2 -> 4 after calling your function.
"""
def delete_node(node):
if node is None or node.next is None:
raise ValueError
node.val = node.next.val
node.next = node.next.next
|
Add check for None values in node or node.next
We raise a ValueError if the rules of the API are broken (passing a null node or the tail node)"""
Write a function to delete a node (except the tail)
in a singly linked list, given only access to that node.
Supposed the linked list is 1 -> 2 -> 3 -> 4 and
you are given the third node with value 3,
the linked list should become 1 -> 2 -> 4 after calling your function.
"""
def delete_node(node):
if node is None or node.next is None:
raise ValueError
node.val = node.next.val
node.next = node.next.next
|
<commit_before><commit_msg>Add check for None values in node or node.next
We raise a ValueError if the rules of the API are broken (passing a null node or the tail node)<commit_after>"""
Write a function to delete a node (except the tail)
in a singly linked list, given only access to that node.
Supposed the linked list is 1 -> 2 -> 3 -> 4 and
you are given the third node with value 3,
the linked list should become 1 -> 2 -> 4 after calling your function.
"""
def delete_node(node):
if node is None or node.next is None:
raise ValueError
node.val = node.next.val
node.next = node.next.next
|
|
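Editor's note: a minimal usage sketch for delete_node; ListNode is a stand-in class, not part of the commit above.
class ListNode(object):
    def __init__(self, val):
        self.val = val
        self.next = None

head = ListNode(1)
head.next = ListNode(2)
third = head.next.next = ListNode(3)
head.next.next.next = ListNode(4)

delete_node(third)            # copies 4 into the third node and unlinks the old tail
node, values = head, []
while node:
    values.append(node.val)
    node = node.next
print(values)                 # [1, 2, 4]

try:
    delete_node(head.next.next)   # now the tail, so the API forbids it
except ValueError:
    print('cannot delete the tail this way')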
16654e7c02831e1d8daede1a3d83cbcfa0d5f92b
|
tests/test_stacks_file.py
|
tests/test_stacks_file.py
|
import json
from dmaws.stacks import Stack
from dmaws.context import Context
def is_true(x):
assert x
def is_in(a, b):
assert a in b
def valid_stack_json(stack):
text = stack.build('stage', 'env', {}).template_body
template = json.loads(text)
assert 'Parameters' in template
assert set(template['Parameters']) == set(stack.parameters)
assert 'Resources' in template
def test_stack_definitions():
ctx = Context()
ctx.load_stacks('stacks.yml')
yield('Found stacks in the stacks.yml',
is_true, any(isinstance(s, Stack) for s in ctx.stacks.values()))
yield('Found groups in stacks.yml',
is_true, any(isinstance(s, list) for s in ctx.stacks.values()))
for name, stack in ctx.stacks.items():
if isinstance(stack, list):
for s in stack:
yield('Stack "%s" in group %s is defined' % (s, name),
is_in, s, ctx.stacks)
else:
for s in stack.dependencies:
yield('%s dependency "%s" is defined' % (name, s),
is_in, s, ctx.stacks)
yield('Stack "%s" template_body is valid JSON' % name,
valid_stack_json, stack)
|
Add tests for the stacks.yml file
|
Add tests for the stacks.yml file
|
Python
|
mit
|
alphagov/digitalmarketplace-aws,alphagov/digitalmarketplace-aws,alphagov/digitalmarketplace-aws
|
Add tests for the stacks.yml file
|
import json
from dmaws.stacks import Stack
from dmaws.context import Context
def is_true(x):
assert x
def is_in(a, b):
assert a in b
def valid_stack_json(stack):
text = stack.build('stage', 'env', {}).template_body
template = json.loads(text)
assert 'Parameters' in template
assert set(template['Parameters']) == set(stack.parameters)
assert 'Resources' in template
def test_stack_definitions():
ctx = Context()
ctx.load_stacks('stacks.yml')
yield('Found stacks in the stacks.yml',
is_true, any(isinstance(s, Stack) for s in ctx.stacks.values()))
yield('Found groups in stacks.yml',
is_true, any(isinstance(s, list) for s in ctx.stacks.values()))
for name, stack in ctx.stacks.items():
if isinstance(stack, list):
for s in stack:
yield('Stack "%s" in group %s is defined' % (s, name),
is_in, s, ctx.stacks)
else:
for s in stack.dependencies:
yield('%s dependency "%s" is defined' % (name, s),
is_in, s, ctx.stacks)
yield('Stack "%s" template_body is valid JSON' % name,
valid_stack_json, stack)
|
<commit_before><commit_msg>Add tests for the stacks.yml file<commit_after>
|
import json
from dmaws.stacks import Stack
from dmaws.context import Context
def is_true(x):
assert x
def is_in(a, b):
assert a in b
def valid_stack_json(stack):
text = stack.build('stage', 'env', {}).template_body
template = json.loads(text)
assert 'Parameters' in template
assert set(template['Parameters']) == set(stack.parameters)
assert 'Resources' in template
def test_stack_definitions():
ctx = Context()
ctx.load_stacks('stacks.yml')
yield('Found stacks in the stacks.yml',
is_true, any(isinstance(s, Stack) for s in ctx.stacks.values()))
yield('Found groups in stacks.yml',
is_true, any(isinstance(s, list) for s in ctx.stacks.values()))
for name, stack in ctx.stacks.items():
if isinstance(stack, list):
for s in stack:
yield('Stack "%s" in group %s is defined' % (s, name),
is_in, s, ctx.stacks)
else:
for s in stack.dependencies:
yield('%s dependency "%s" is defined' % (name, s),
is_in, s, ctx.stacks)
yield('Stack "%s" template_body is valid JSON' % name,
valid_stack_json, stack)
|
Add tests for the stacks.yml fileimport json
from dmaws.stacks import Stack
from dmaws.context import Context
def is_true(x):
assert x
def is_in(a, b):
assert a in b
def valid_stack_json(stack):
text = stack.build('stage', 'env', {}).template_body
template = json.loads(text)
assert 'Parameters' in template
assert set(template['Parameters']) == set(stack.parameters)
assert 'Resources' in template
def test_stack_definitions():
ctx = Context()
ctx.load_stacks('stacks.yml')
yield('Found stacks in the stacks.yml',
is_true, any(isinstance(s, Stack) for s in ctx.stacks.values()))
yield('Found groups in stacks.yml',
is_true, any(isinstance(s, list) for s in ctx.stacks.values()))
for name, stack in ctx.stacks.items():
if isinstance(stack, list):
for s in stack:
yield('Stack "%s" in group %s is defined' % (s, name),
is_in, s, ctx.stacks)
else:
for s in stack.dependencies:
yield('%s dependency "%s" is defined' % (name, s),
is_in, s, ctx.stacks)
yield('Stack "%s" template_body is valid JSON' % name,
valid_stack_json, stack)
|
<commit_before><commit_msg>Add tests for the stacks.yml file<commit_after>import json
from dmaws.stacks import Stack
from dmaws.context import Context
def is_true(x):
assert x
def is_in(a, b):
assert a in b
def valid_stack_json(stack):
text = stack.build('stage', 'env', {}).template_body
template = json.loads(text)
assert 'Parameters' in template
assert set(template['Parameters']) == set(stack.parameters)
assert 'Resources' in template
def test_stack_definitions():
ctx = Context()
ctx.load_stacks('stacks.yml')
yield('Found stacks in the stacks.yml',
is_true, any(isinstance(s, Stack) for s in ctx.stacks.values()))
yield('Found groups in stacks.yml',
is_true, any(isinstance(s, list) for s in ctx.stacks.values()))
for name, stack in ctx.stacks.items():
if isinstance(stack, list):
for s in stack:
yield('Stack "%s" in group %s is defined' % (s, name),
is_in, s, ctx.stacks)
else:
for s in stack.dependencies:
yield('%s dependency "%s" is defined' % (name, s),
is_in, s, ctx.stacks)
yield('Stack "%s" template_body is valid JSON' % name,
valid_stack_json, stack)
|
|
f3f94fc218220e91197e639b0b3e5514741623f0
|
lib/awx_cli/commands/JobLaunchCommand.py
|
lib/awx_cli/commands/JobLaunchCommand.py
|
# Copyright 2013, AnsibleWorks Inc.
# Michael DeHaan <michael@ansibleworks.com>
# Chris Church <cchurch@ansibleworks.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import BaseCommand
import awx_cli
import awx_cli.common as common
import datetime
import getpass
class JobLaunchCommand(BaseCommand.BaseCommand):
""" shows AWX version information """
def __init__(self, toplevel):
super(JobLaunchCommand, self).__init__(toplevel)
self.name = "joblaunch"
def run(self, args):
# parse arguments and form connection
parser = common.get_parser()
parser.add_option('-t', '--template', dest='template',
default=None, type='int')
(options, args) = parser.parse_args()
if options.template is None:
raise common.BaseException("--template is required")
# test API connection
handle = common.connect(options)
# get the job template
jt_url = "/api/v1/job_templates/%d/" % options.template
data = handle.get(jt_url)
id = data.pop('id')
# add some more info needed to start the job
# NOTE: a URL to launch job templates directly
# may be added later, but this is basically a copy of the job template
# data to the jobs resource, which is also fine.
now = str(datetime.datetime.now())
data.update(dict(
name = 'cli job invocation started at %s' % now,
verbosity = 0,
))
# post a new job
jt_jobs_url = "%sjobs/" % jt_url
job_result = handle.post(jt_jobs_url, data)
# get the parameters needed to start the job (if any)
# prompt for values unless given on command line (FIXME)
print "URL=%s" % jt_jobs_url
job_id = job_result['id']
job_start_url = "/jobs/%d/start/" % job_id
job_start_info = handle.get(job_start_url)
start_data = {}
for password in job_start_info.get('passwords_needed_to_start', []):
value = getpass.getpass('%s: ' % password)
start_data[password] = value
# start the job
job_start_result = handle.post(job_start_url, start_data)
print common.dump(job_start_result)
# TODO: optional status polling (FIXME)
return 0
|
Add joblaunch command to source control
|
Add joblaunch command to source control
|
Python
|
apache-2.0
|
jangsutsr/tower-cli,tomfotherby/tower-cli,ansible/tower-cli,AlanCoding/tower-cli,docschick/tower-cli,ansible/tower-cli,cedub/tower-cli,nitzmahone/tower-cli,AlanCoding/tower-cli,chrismeyersfsu/tower-cli
|
Add joblaunch command to source control
|
# Copyright 2013, AnsibleWorks Inc.
# Michael DeHaan <michael@ansibleworks.com>
# Chris Church <cchurch@ansibleworks.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import BaseCommand
import awx_cli
import awx_cli.common as common
import datetime
import getpass
class JobLaunchCommand(BaseCommand.BaseCommand):
""" shows AWX version information """
def __init__(self, toplevel):
super(JobLaunchCommand, self).__init__(toplevel)
self.name = "joblaunch"
def run(self, args):
# parse arguments and form connection
parser = common.get_parser()
parser.add_option('-t', '--template', dest='template',
default=None, type='int')
(options, args) = parser.parse_args()
if options.template is None:
raise common.BaseException("--template is required")
# test API connection
handle = common.connect(options)
# get the job template
jt_url = "/api/v1/job_templates/%d/" % options.template
data = handle.get(jt_url)
id = data.pop('id')
# add some more info needed to start the job
# NOTE: a URL to launch job templates directly
# may be added later, but this is basically a copy of the job template
# data to the jobs resource, which is also fine.
now = str(datetime.datetime.now())
data.update(dict(
name = 'cli job invocation started at %s' % now,
verbosity = 0,
))
# post a new job
jt_jobs_url = "%sjobs/" % jt_url
job_result = handle.post(jt_jobs_url, data)
# get the parameters needed to start the job (if any)
# prompt for values unless given on command line (FIXME)
print "URL=%s" % jt_jobs_url
job_id = job_result['id']
job_start_url = "/jobs/%d/start/" % job_id
job_start_info = handle.get(job_start_url)
start_data = {}
for password in job_start_info.get('passwords_needed_to_start', []):
value = getpass.getpass('%s: ' % password)
start_data[password] = value
# start the job
job_start_result = handle.post(job_start_url, start_data)
print common.dump(job_start_result)
# TODO: optional status polling (FIXME)
return 0
|
<commit_before><commit_msg>Add joblaunch command to source control<commit_after>
|
# Copyright 2013, AnsibleWorks Inc.
# Michael DeHaan <michael@ansibleworks.com>
# Chris Church <cchurch@ansibleworks.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import BaseCommand
import awx_cli
import awx_cli.common as common
import datetime
import getpass
class JobLaunchCommand(BaseCommand.BaseCommand):
""" shows AWX version information """
def __init__(self, toplevel):
super(JobLaunchCommand, self).__init__(toplevel)
self.name = "joblaunch"
def run(self, args):
# parse arguments and form connection
parser = common.get_parser()
parser.add_option('-t', '--template', dest='template',
default=None, type='int')
(options, args) = parser.parse_args()
if options.template is None:
raise common.BaseException("--template is required")
# test API connection
handle = common.connect(options)
# get the job template
jt_url = "/api/v1/job_templates/%d/" % options.template
data = handle.get(jt_url)
id = data.pop('id')
# add some more info needed to start the job
# NOTE: a URL to launch job templates directly
# may be added later, but this is basically a copy of the job template
# data to the jobs resource, which is also fine.
now = str(datetime.datetime.now())
data.update(dict(
name = 'cli job invocation started at %s' % now,
verbosity = 0,
))
# post a new job
jt_jobs_url = "%sjobs/" % jt_url
job_result = handle.post(jt_jobs_url, data)
# get the parameters needed to start the job (if any)
# prompt for values unless given on command line (FIXME)
print "URL=%s" % jt_jobs_url
job_id = job_result['id']
job_start_url = "/jobs/%d/start/" % job_id
job_start_info = handle.get(job_start_url)
start_data = {}
for password in job_start_info.get('passwords_needed_to_start', []):
value = getpass.getpass('%s: ' % password)
start_data[password] = value
# start the job
job_start_result = handle.post(job_start_url, start_data)
print common.dump(job_start_result)
# TODO: optional status polling (FIXME)
return 0
|
Add joblaunch command to source control# Copyright 2013, AnsibleWorks Inc.
# Michael DeHaan <michael@ansibleworks.com>
# Chris Church <cchurch@ansibleworks.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import BaseCommand
import awx_cli
import awx_cli.common as common
import datetime
import getpass
class JobLaunchCommand(BaseCommand.BaseCommand):
""" shows AWX version information """
def __init__(self, toplevel):
super(JobLaunchCommand, self).__init__(toplevel)
self.name = "joblaunch"
def run(self, args):
# parse arguments and form connection
parser = common.get_parser()
parser.add_option('-t', '--template', dest='template',
default=None, type='int')
(options, args) = parser.parse_args()
if options.template is None:
raise common.BaseException("--template is required")
# test API connection
handle = common.connect(options)
# get the job template
jt_url = "/api/v1/job_templates/%d/" % options.template
data = handle.get(jt_url)
id = data.pop('id')
# add some more info needed to start the job
# NOTE: a URL to launch job templates directly
# may be added later, but this is basically a copy of the job template
# data to the jobs resource, which is also fine.
now = str(datetime.datetime.now())
data.update(dict(
name = 'cli job invocation started at %s' % now,
verbosity = 0,
))
# post a new job
jt_jobs_url = "%sjobs/" % jt_url
job_result = handle.post(jt_jobs_url, data)
# get the parameters needed to start the job (if any)
# prompt for values unless given on command line (FIXME)
print "URL=%s" % jt_jobs_url
job_id = job_result['id']
job_start_url = "/jobs/%d/start/" % job_id
job_start_info = handle.get(job_start_url)
start_data = {}
for password in job_start_info.get('passwords_needed_to_start', []):
value = getpass.getpass('%s: ' % password)
start_data[password] = value
# start the job
job_start_result = handle.post(job_start_url, start_data)
print common.dump(job_start_result)
# TODO: optional status polling (FIXME)
return 0
|
<commit_before><commit_msg>Add joblaunch command to source control<commit_after># Copyright 2013, AnsibleWorks Inc.
# Michael DeHaan <michael@ansibleworks.com>
# Chris Church <cchurch@ansibleworks.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import BaseCommand
import awx_cli
import awx_cli.common as common
import datetime
import getpass
class JobLaunchCommand(BaseCommand.BaseCommand):
""" shows AWX version information """
def __init__(self, toplevel):
super(JobLaunchCommand, self).__init__(toplevel)
self.name = "joblaunch"
def run(self, args):
# parse arguments and form connection
parser = common.get_parser()
parser.add_option('-t', '--template', dest='template',
default=None, type='int')
(options, args) = parser.parse_args()
if options.template is None:
raise common.BaseException("--template is required")
# test API connection
handle = common.connect(options)
# get the job template
jt_url = "/api/v1/job_templates/%d/" % options.template
data = handle.get(jt_url)
id = data.pop('id')
# add some more info needed to start the job
# NOTE: a URL to launch job templates directly
# may be added later, but this is basically a copy of the job template
# data to the jobs resource, which is also fine.
now = str(datetime.datetime.now())
data.update(dict(
name = 'cli job invocation started at %s' % now,
verbosity = 0,
))
# post a new job
jt_jobs_url = "%sjobs/" % jt_url
job_result = handle.post(jt_jobs_url, data)
# get the parameters needed to start the job (if any)
# prompt for values unless given on command line (FIXME)
print "URL=%s" % jt_jobs_url
job_id = job_result['id']
job_start_url = "/jobs/%d/start/" % job_id
job_start_info = handle.get(job_start_url)
start_data = {}
for password in job_start_info.get('passwords_needed_to_start', []):
value = getpass.getpass('%s: ' % password)
start_data[password] = value
# start the job
job_start_result = handle.post(job_start_url, start_data)
print common.dump(job_start_result)
# TODO: optional status polling (FIXME)
return 0
|
|
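For orientation, the joblaunch command above reduces to three REST calls against the AWX API: copy the job template into its jobs/ collection, ask which passwords are needed to start, then start the job. The sketch below replays that flow with the requests library so the sequence is easier to follow; the base_url/auth handling, the json= payload encoding, and prefixing the start URL with base_url are assumptions, since the real command routes everything through awx_cli.common and its shared connection handle.
import getpass
import requests
def launch_job(base_url, auth, template_id):
    # Sketch only: mirrors the command's flow, not the actual awx_cli code path.
    jt_url = "%s/api/v1/job_templates/%d/" % (base_url, template_id)
    data = requests.get(jt_url, auth=auth).json()                        # fetch the job template
    data.pop('id')
    job = requests.post(jt_url + "jobs/", json=data, auth=auth).json()   # copy it into jobs/
    start_url = "%s/jobs/%d/start/" % (base_url, job['id'])
    start_info = requests.get(start_url, auth=auth).json()
    secrets = {p: getpass.getpass('%s: ' % p)
               for p in start_info.get('passwords_needed_to_start', [])}
    return requests.post(start_url, json=secrets, auth=auth)             # start the job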
dcecd21f8237f9e455fa2aaf4862530470e7ffa8
|
migrations/versions/0114_drop_monthly_billing_cols.py
|
migrations/versions/0114_drop_monthly_billing_cols.py
|
"""
Revision ID: 0014_drop_monthly_billing_cols
Revises: 0113_job_created_by_nullable
Create Date: 2017-07-27 13:36:37.304344
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0014_drop_monthly_billing_cols'
down_revision = '0113_job_created_by_nullable'
def upgrade():
op.drop_index('uix_monthly_billing', table_name='monthly_billing')
op.create_unique_constraint(
'uix_monthly_billing', 'monthly_billing', ['service_id', 'start_date', 'notification_type']
)
op.drop_column('monthly_billing', 'year')
op.drop_column('monthly_billing', 'month')
def downgrade():
op.add_column('monthly_billing', sa.Column('month', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column(
'monthly_billing',
sa.Column('year', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True)
)
op.drop_constraint('uix_monthly_billing', 'monthly_billing', type_='unique')
op.create_index(
'uix_monthly_billing', 'monthly_billing', ['service_id', 'start_date', 'notification_type'], unique=True
)
|
Drop unused month and year columns from monthlybilling
|
Drop unused month and year columns from monthlybilling
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Drop unused month and year columns from monthlybilling
|
"""
Revision ID: 0014_drop_monthly_billing_cols
Revises: 0113_job_created_by_nullable
Create Date: 2017-07-27 13:36:37.304344
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0014_drop_monthly_billing_cols'
down_revision = '0113_job_created_by_nullable'
def upgrade():
op.drop_index('uix_monthly_billing', table_name='monthly_billing')
op.create_unique_constraint(
'uix_monthly_billing', 'monthly_billing', ['service_id', 'start_date', 'notification_type']
)
op.drop_column('monthly_billing', 'year')
op.drop_column('monthly_billing', 'month')
def downgrade():
op.add_column('monthly_billing', sa.Column('month', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column(
'monthly_billing',
sa.Column('year', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True)
)
op.drop_constraint('uix_monthly_billing', 'monthly_billing', type_='unique')
op.create_index(
'uix_monthly_billing', 'monthly_billing', ['service_id', 'start_date', 'notification_type'], unique=True
)
|
<commit_before><commit_msg>Drop unused month and year columns from monthlybilling<commit_after>
|
"""
Revision ID: 0014_drop_monthly_billing_cols
Revises: 0113_job_created_by_nullable
Create Date: 2017-07-27 13:36:37.304344
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0014_drop_monthly_billing_cols'
down_revision = '0113_job_created_by_nullable'
def upgrade():
op.drop_index('uix_monthly_billing', table_name='monthly_billing')
op.create_unique_constraint(
'uix_monthly_billing', 'monthly_billing', ['service_id', 'start_date', 'notification_type']
)
op.drop_column('monthly_billing', 'year')
op.drop_column('monthly_billing', 'month')
def downgrade():
op.add_column('monthly_billing', sa.Column('month', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column(
'monthly_billing',
sa.Column('year', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True)
)
op.drop_constraint('uix_monthly_billing', 'monthly_billing', type_='unique')
op.create_index(
'uix_monthly_billing', 'monthly_billing', ['service_id', 'start_date', 'notification_type'], unique=True
)
|
Drop unused month and year columns from monthlybilling"""
Revision ID: 0014_drop_monthly_billing_cols
Revises: 0113_job_created_by_nullable
Create Date: 2017-07-27 13:36:37.304344
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0014_drop_monthly_billing_cols'
down_revision = '0113_job_created_by_nullable'
def upgrade():
op.drop_index('uix_monthly_billing', table_name='monthly_billing')
op.create_unique_constraint(
'uix_monthly_billing', 'monthly_billing', ['service_id', 'start_date', 'notification_type']
)
op.drop_column('monthly_billing', 'year')
op.drop_column('monthly_billing', 'month')
def downgrade():
op.add_column('monthly_billing', sa.Column('month', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column(
'monthly_billing',
sa.Column('year', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True)
)
op.drop_constraint('uix_monthly_billing', 'monthly_billing', type_='unique')
op.create_index(
'uix_monthly_billing', 'monthly_billing', ['service_id', 'start_date', 'notification_type'], unique=True
)
|
<commit_before><commit_msg>Drop unused month and year columns from monthlybilling<commit_after>"""
Revision ID: 0014_drop_monthly_billing_cols
Revises: 0113_job_created_by_nullable
Create Date: 2017-07-27 13:36:37.304344
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0014_drop_monthly_billing_cols'
down_revision = '0113_job_created_by_nullable'
def upgrade():
op.drop_index('uix_monthly_billing', table_name='monthly_billing')
op.create_unique_constraint(
'uix_monthly_billing', 'monthly_billing', ['service_id', 'start_date', 'notification_type']
)
op.drop_column('monthly_billing', 'year')
op.drop_column('monthly_billing', 'month')
def downgrade():
op.add_column('monthly_billing', sa.Column('month', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column(
'monthly_billing',
sa.Column('year', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True)
)
op.drop_constraint('uix_monthly_billing', 'monthly_billing', type_='unique')
op.create_index(
'uix_monthly_billing', 'monthly_billing', ['service_id', 'start_date', 'notification_type'], unique=True
)
|
|
924de7e93a8d73932253b10699ab0f481761586e
|
seacat/spdy/vle.py
|
seacat/spdy/vle.py
|
import struct
def spdy_add_vle_string(frame, string):
buf = bytes(string, 'utf-8')
buf_len = len(buf)
if buf_len >= 0xFA:
struct.pack_into('!BH', frame.data, frame.position, 0xFF, buf_len)
frame.position += struct.calcsize('!BH')
else:
struct.pack_into('!B', frame.data, frame.position, buf_len)
frame.position += struct.calcsize('!B')
struct.pack_into('!{}s'.format(buf_len), frame.data, frame.position, buf)
frame.position += buf_len
def spdy_read_vle_string(frame):
len, = struct.unpack_from("!B", frame.data, frame.position)
frame.position += struct.calcsize('!B')
if (len == 0xFF):
len, = struct.unpack_from("!H", frame.data, frame.position)
frame.position += struct.calcsize('!H')
data, = struct.unpack_from("!{}s".format(len), frame.data, frame.position)
frame.position += struct.calcsize("!{}s".format(len))
return data
|
Add functions to build and parse variable-length (VLE) strings
|
Add functions to build and parse variable-length (VLE) strings
|
Python
|
bsd-3-clause
|
TeskaLabs/SeaCat-Client-Python3
|
Add functions to build and parse variable-length (VLE) strings
|
import struct
def spdy_add_vle_string(frame, string):
buf = bytes(string, 'utf-8')
buf_len = len(buf)
if buf_len >= 0xFA:
struct.pack_into('!BH', frame.data, frame.position, 0xFF, buf_len)
frame.position += struct.calcsize('!BH')
else:
struct.pack_into('!B', frame.data, frame.position, buf_len)
frame.position += struct.calcsize('!B')
struct.pack_into('!{}s'.format(buf_len), frame.data, frame.position, buf)
frame.position += buf_len
def spdy_read_vle_string(frame):
len, = struct.unpack_from("!B", frame.data, frame.position)
frame.position += struct.calcsize('!B')
if (len == 0xFF):
len, = struct.unpack_from("!H", frame.data, frame.position)
frame.position += struct.calcsize('!H')
data, = struct.unpack_from("!{}s".format(len), frame.data, frame.position)
frame.position += struct.calcsize("!{}s".format(len))
return data
|
<commit_before><commit_msg>Add functions to build and parse variable-length (VLE) strings<commit_after>
|
import struct
def spdy_add_vle_string(frame, string):
buf = bytes(string, 'utf-8')
buf_len = len(buf)
if buf_len >= 0xFA:
struct.pack_into('!BH', frame.data, frame.position, 0xFF, buf_len)
frame.position += struct.calcsize('!BH')
else:
struct.pack_into('!B', frame.data, frame.position, buf_len)
frame.position += struct.calcsize('!B')
struct.pack_into('!{}s'.format(buf_len), frame.data, frame.position, buf)
frame.position += buf_len
def spdy_read_vle_string(frame):
len, = struct.unpack_from("!B", frame.data, frame.position)
frame.position += struct.calcsize('!B')
if (len == 0xFF):
len, = struct.unpack_from("!H", frame.data, frame.position)
frame.position += struct.calcsize('!H')
data, = struct.unpack_from("!{}s".format(len), frame.data, frame.position)
frame.position += struct.calcsize("!{}s".format(len))
return data
|
Add functions to build and parse variable-length (VLE) strings
import struct
def spdy_add_vle_string(frame, string):
buf = bytes(string, 'utf-8')
buf_len = len(buf)
if buf_len >= 0xFA:
struct.pack_into('!BH', frame.data, frame.position, 0xFF, buf_len)
frame.position += struct.calcsize('!BH')
else:
struct.pack_into('!B', frame.data, frame.position, buf_len)
frame.position += struct.calcsize('!B')
struct.pack_into('!{}s'.format(buf_len), frame.data, frame.position, buf)
frame.position += buf_len
def spdy_read_vle_string(frame):
len, = struct.unpack_from("!B", frame.data, frame.position)
frame.position += struct.calcsize('!B')
if (len == 0xFF):
len, = struct.unpack_from("!H", frame.data, frame.position)
frame.position += struct.calcsize('!H')
data, = struct.unpack_from("!{}s".format(len), frame.data, frame.position)
frame.position += struct.calcsize("!{}s".format(len))
return data
|
<commit_before><commit_msg>Add functions to build and parse variable-length (VLE) strings<commit_after>import struct
def spdy_add_vle_string(frame, string):
buf = bytes(string, 'utf-8')
buf_len = len(buf)
if buf_len >= 0xFA:
struct.pack_into('!BH', frame.data, frame.position, 0xFF, buf_len)
frame.position += struct.calcsize('!BH')
else:
struct.pack_into('!B', frame.data, frame.position, buf_len)
frame.position += struct.calcsize('!B')
struct.pack_into('!{}s'.format(buf_len), frame.data, frame.position, buf)
frame.position += buf_len
def spdy_read_vle_string(frame):
len, = struct.unpack_from("!B", frame.data, frame.position)
frame.position += struct.calcsize('!B')
if (len == 0xFF):
len, = struct.unpack_from("!H", frame.data, frame.position)
frame.position += struct.calcsize('!H')
data, = struct.unpack_from("!{}s".format(len), frame.data, frame.position)
frame.position += struct.calcsize("!{}s".format(len))
return data
|
|
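The VLE helpers above implement a simple length-prefixed string: payloads shorter than 0xFA (250) bytes are written with a single length byte, longer ones use the 0xFF escape byte followed by a big-endian 16-bit length, and the UTF-8 bytes follow either prefix. A minimal round-trip check, assuming the two functions are importable and using a throwaway stand-in for the real SPDY frame object (only its data/position attributes matter here; the buffer size is arbitrary):
class _Frame(object):                      # stand-in for the real SPDY frame object
    def __init__(self, size=64):
        self.data = bytearray(size)        # writable buffer for struct.pack_into
        self.position = 0
frame = _Frame()
spdy_add_vle_string(frame, "hello")
frame.position = 0                         # rewind before reading back
assert spdy_read_vle_string(frame) == b"hello"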
13cb9144047b0aa24260ff4b2bdbc5d2fcd8b82b
|
util/ublox_pkt.py
|
util/ublox_pkt.py
|
#
# Copyright (c) Michael Tharp <gxti@partiallystapled.com>
#
# This file is distributed under the terms of the MIT License.
# See the LICENSE file at the top of this tree, or if it is missing a copy can
# be found at http://opensource.org/licenses/MIT
#
import struct
import sys
msg_cls = int(sys.argv[1], 16)
msg_id = int(sys.argv[2], 16)
data = [int(x, 16) for x in sys.argv[3:]]
len_words = struct.unpack('BB', struct.pack('<H', len(data)))
words = [msg_cls, msg_id] + list(len_words) + data
ck1 = ck2 = 0
for word in words:
ck1 = (ck1 + word) & 0xFF
ck2 = (ck2 + ck1) & 0xFF
words = [0xB5, 0x62] + words + [ck1, ck2]
while words:
chunk, words = words[:8], words[8:]
print ' '.join('0x%02X,' % x for x in chunk)
|
Add tool for generating ublox config packets
|
Add tool for generating ublox config packets
|
Python
|
mit
|
mtharp/laureline-firmware,mtharp/laureline-firmware,mtharp/laureline-firmware,mtharp/laureline-firmware
|
Add tool for generating ublox config packets
|
#
# Copyright (c) Michael Tharp <gxti@partiallystapled.com>
#
# This file is distributed under the terms of the MIT License.
# See the LICENSE file at the top of this tree, or if it is missing a copy can
# be found at http://opensource.org/licenses/MIT
#
import struct
import sys
msg_cls = int(sys.argv[1], 16)
msg_id = int(sys.argv[2], 16)
data = [int(x, 16) for x in sys.argv[3:]]
len_words = struct.unpack('BB', struct.pack('<H', len(data)))
words = [msg_cls, msg_id] + list(len_words) + data
ck1 = ck2 = 0
for word in words:
ck1 = (ck1 + word) & 0xFF
ck2 = (ck2 + ck1) & 0xFF
words = [0xB5, 0x62] + words + [ck1, ck2]
while words:
chunk, words = words[:8], words[8:]
print ' '.join('0x%02X,' % x for x in chunk)
|
<commit_before><commit_msg>Add tool for generating ublox config packets<commit_after>
|
#
# Copyright (c) Michael Tharp <gxti@partiallystapled.com>
#
# This file is distributed under the terms of the MIT License.
# See the LICENSE file at the top of this tree, or if it is missing a copy can
# be found at http://opensource.org/licenses/MIT
#
import struct
import sys
msg_cls = int(sys.argv[1], 16)
msg_id = int(sys.argv[2], 16)
data = [int(x, 16) for x in sys.argv[3:]]
len_words = struct.unpack('BB', struct.pack('<H', len(data)))
words = [msg_cls, msg_id] + list(len_words) + data
ck1 = ck2 = 0
for word in words:
ck1 = (ck1 + word) & 0xFF
ck2 = (ck2 + ck1) & 0xFF
words = [0xB5, 0x62] + words + [ck1, ck2]
while words:
chunk, words = words[:8], words[8:]
print ' '.join('0x%02X,' % x for x in chunk)
|
Add tool for generating ublox config packets
#
# Copyright (c) Michael Tharp <gxti@partiallystapled.com>
#
# This file is distributed under the terms of the MIT License.
# See the LICENSE file at the top of this tree, or if it is missing a copy can
# be found at http://opensource.org/licenses/MIT
#
import struct
import sys
msg_cls = int(sys.argv[1], 16)
msg_id = int(sys.argv[2], 16)
data = [int(x, 16) for x in sys.argv[3:]]
len_words = struct.unpack('BB', struct.pack('<H', len(data)))
words = [msg_cls, msg_id] + list(len_words) + data
ck1 = ck2 = 0
for word in words:
ck1 = (ck1 + word) & 0xFF
ck2 = (ck2 + ck1) & 0xFF
words = [0xB5, 0x62] + words + [ck1, ck2]
while words:
chunk, words = words[:8], words[8:]
print ' '.join('0x%02X,' % x for x in chunk)
|
<commit_before><commit_msg>Add tool for generating ublox config packets<commit_after>#
# Copyright (c) Michael Tharp <gxti@partiallystapled.com>
#
# This file is distributed under the terms of the MIT License.
# See the LICENSE file at the top of this tree, or if it is missing a copy can
# be found at http://opensource.org/licenses/MIT
#
import struct
import sys
msg_cls = int(sys.argv[1], 16)
msg_id = int(sys.argv[2], 16)
data = [int(x, 16) for x in sys.argv[3:]]
len_words = struct.unpack('BB', struct.pack('<H', len(data)))
words = [msg_cls, msg_id] + list(len_words) + data
ck1 = ck2 = 0
for word in words:
ck1 = (ck1 + word) & 0xFF
ck2 = (ck2 + ck1) & 0xFF
words = [0xB5, 0x62] + words + [ck1, ck2]
while words:
chunk, words = words[:8], words[8:]
print ' '.join('0x%02X,' % x for x in chunk)
|
|
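The packet builder above takes the message class, message ID, and payload bytes as hex arguments on the command line (for instance, a hypothetical python ublox_pkt.py 06 08 E8 03 01 00 01 00 frames a six-byte payload for class 0x06, ID 0x08; the meaning of that class/ID pair is an assumption and irrelevant to the framing). It prepends the little-endian length, runs the two 8-bit checksum accumulators over class, ID, length and payload, and prints the packet behind the 0xB5 0x62 sync bytes as C initializer lines. The same checksum loop, pulled out as a reusable sketch:
def ubx_checksum(body):
    # body is the byte sequence [msg_cls, msg_id, len_lo, len_hi] + payload
    ck1 = ck2 = 0
    for byte in body:
        ck1 = (ck1 + byte) & 0xFF
        ck2 = (ck2 + ck1) & 0xFF
    return ck1, ck2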
cb815258413d8ff22a677663be57238b8f8f3654
|
fpr/migrations/0019_fix_gs_command.py
|
fpr/migrations/0019_fix_gs_command.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def data_migration(apps, schema_editor):
"""Fix the Ghostscript normalization command "Command Transcoding to pdfa
with Ghostscript" so that it documents its true output format as PDF/A 1b
(fmt/354) and not PDF/A 1a (fmt/95).
"""
FPCommand = apps.get_model('fpr', 'FPCommand')
FormatVersion = apps.get_model('fpr', 'FormatVersion')
true_format_version = FormatVersion.objects.get(pronom_id='fmt/354')
FPCommand.objects.filter(
uuid='d6a33093-85d5-4088-83e1-b7a774a826bd').update(
output_format=true_format_version)
class Migration(migrations.Migration):
dependencies = [
('fpr', '0018_slug_unique'),
]
operations = [
migrations.RunPython(data_migration),
]
|
Fix the Ghostscript normalization output format
|
Fix the Ghostscript normalization output format
Adds a migration to fix the Ghostscript normalization command "Command
Transcoding to pdfa with Ghostscript" so that it documents its true output
format as PDF/A 1b (fmt/354) and not PDF/A 1a (fmt/95).
Contributes to fixing https://github.com/artefactual/archivematica/issues/1158
|
Python
|
agpl-3.0
|
artefactual/archivematica-fpr-admin,artefactual/archivematica-fpr-admin,artefactual/archivematica-fpr-admin,artefactual/archivematica-fpr-admin
|
Fix the Ghostscript normalization output format
Adds a migration to fix the Ghostscript normalization command "Command
Transcoding to pdfa with Ghostscript" so that it documents its true output
format as PDF/A 1b (fmt/354) and not PDF/A 1a (fmt/95).
Contributes to fixing https://github.com/artefactual/archivematica/issues/1158
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def data_migration(apps, schema_editor):
"""Fix the Ghostscript normalization command "Command Transcoding to pdfa
with Ghostscript" so that it documents its true output format as PDF/A 1b
(fmt/354) and not PDF/A 1a (fmt/95).
"""
FPCommand = apps.get_model('fpr', 'FPCommand')
FormatVersion = apps.get_model('fpr', 'FormatVersion')
true_format_version = FormatVersion.objects.get(pronom_id='fmt/354')
FPCommand.objects.filter(
uuid='d6a33093-85d5-4088-83e1-b7a774a826bd').update(
output_format=true_format_version)
class Migration(migrations.Migration):
dependencies = [
('fpr', '0018_slug_unique'),
]
operations = [
migrations.RunPython(data_migration),
]
|
<commit_before><commit_msg>Fix the Ghostscript normalization output format
Adds a migration to fix the Ghostscript normalization command "Command
Transcoding to pdfa with Ghostscript" so that it documents its true output
format as PDF/A 1b (fmt/354) and not PDF/A 1a (fmt/95).
Contributes to fixing https://github.com/artefactual/archivematica/issues/1158<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def data_migration(apps, schema_editor):
"""Fix the Ghostscript normalization command "Command Transcoding to pdfa
with Ghostscript" so that it documents its true output format as PDF/A 1b
(fmt/354) and not PDF/A 1a (fmt/95).
"""
FPCommand = apps.get_model('fpr', 'FPCommand')
FormatVersion = apps.get_model('fpr', 'FormatVersion')
true_format_version = FormatVersion.objects.get(pronom_id='fmt/354')
FPCommand.objects.filter(
uuid='d6a33093-85d5-4088-83e1-b7a774a826bd').update(
output_format=true_format_version)
class Migration(migrations.Migration):
dependencies = [
('fpr', '0018_slug_unique'),
]
operations = [
migrations.RunPython(data_migration),
]
|
Fix the Ghostscript normalization output format
Adds a migration to fix the Ghostscript normalization command "Command
Transcoding to pdfa with Ghostscript" so that it documents its true output
format as PDF/A 1b (fmt/354) and not PDF/A 1a (fmt/95).
Contributes to fixing https://github.com/artefactual/archivematica/issues/1158
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def data_migration(apps, schema_editor):
"""Fix the Ghostscript normalization command "Command Transcoding to pdfa
with Ghostscript" so that it documents its true output format as PDF/A 1b
(fmt/354) and not PDF/A 1a (fmt/95).
"""
FPCommand = apps.get_model('fpr', 'FPCommand')
FormatVersion = apps.get_model('fpr', 'FormatVersion')
true_format_version = FormatVersion.objects.get(pronom_id='fmt/354')
FPCommand.objects.filter(
uuid='d6a33093-85d5-4088-83e1-b7a774a826bd').update(
output_format=true_format_version)
class Migration(migrations.Migration):
dependencies = [
('fpr', '0018_slug_unique'),
]
operations = [
migrations.RunPython(data_migration),
]
|
<commit_before><commit_msg>Fix the Ghostscript normalization output format
Adds a migration to fix the Ghostscript normalization command "Command
Transcoding to pdfa with Ghostscript" so that it documents its true output
format as PDF/A 1b (fmt/354) and not PDF/A 1a (fmt/95).
Contributes to fixing https://github.com/artefactual/archivematica/issues/1158<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def data_migration(apps, schema_editor):
"""Fix the Ghostscript normalization command "Command Transcoding to pdfa
with Ghostscript" so that it documents its true output format as PDF/A 1b
(fmt/354) and not PDF/A 1a (fmt/95).
"""
FPCommand = apps.get_model('fpr', 'FPCommand')
FormatVersion = apps.get_model('fpr', 'FormatVersion')
true_format_version = FormatVersion.objects.get(pronom_id='fmt/354')
FPCommand.objects.filter(
uuid='d6a33093-85d5-4088-83e1-b7a774a826bd').update(
output_format=true_format_version)
class Migration(migrations.Migration):
dependencies = [
('fpr', '0018_slug_unique'),
]
operations = [
migrations.RunPython(data_migration),
]
|
|
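Because the data migration above touches a single FPCommand row, it is easy to spot-check after deployment. The snippet below is a hedged example for a Django shell; the fpr.models import path is inferred from the app label passed to apps.get_model and may not match the project's actual layout.
from fpr.models import FPCommand                                    # import path is an assumption
cmd = FPCommand.objects.get(uuid='d6a33093-85d5-4088-83e1-b7a774a826bd')
print(cmd.output_format.pronom_id)                                   # expected: fmt/354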
bb3573ce7b0eb93e202a7dba13f2c1eedcca1275
|
moderation_queue/migrations/0003_auto_20150301_2035.py
|
moderation_queue/migrations/0003_auto_20150301_2035.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('moderation_queue', '0002_auto_20150213_0838'),
]
operations = [
migrations.AlterField(
model_name='queuedimage',
name='justification_for_use',
field=models.TextField(blank=True),
preserve_default=True,
),
]
|
Add a forgotten migration for QueuedImage
|
Add a forgotten migration for QueuedImage
This should have been generated when blank=True was added to the
justification_for_use field, but I forgot.
|
Python
|
agpl-3.0
|
neavouli/yournextrepresentative,YoQuieroSaber/yournextrepresentative,neavouli/yournextrepresentative,datamade/yournextmp-popit,DemocracyClub/yournextrepresentative,YoQuieroSaber/yournextrepresentative,mysociety/yournextrepresentative,neavouli/yournextrepresentative,YoQuieroSaber/yournextrepresentative,datamade/yournextmp-popit,openstate/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextrepresentative,mysociety/yournextmp-popit,openstate/yournextrepresentative,datamade/yournextmp-popit,YoQuieroSaber/yournextrepresentative,DemocracyClub/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextrepresentative,openstate/yournextrepresentative,neavouli/yournextrepresentative,mysociety/yournextmp-popit,neavouli/yournextrepresentative,DemocracyClub/yournextrepresentative,openstate/yournextrepresentative,mysociety/yournextrepresentative,openstate/yournextrepresentative,mysociety/yournextrepresentative,datamade/yournextmp-popit,datamade/yournextmp-popit,mysociety/yournextmp-popit,YoQuieroSaber/yournextrepresentative
|
Add a forgotten migration for QueuedImage
This should have been generated when blank=True was added to the
justification_for_use field, but I forgot.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('moderation_queue', '0002_auto_20150213_0838'),
]
operations = [
migrations.AlterField(
model_name='queuedimage',
name='justification_for_use',
field=models.TextField(blank=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add a forgotten migration for QueuedImage
This should have been generated when blank=True was added to the
justification_for_use field, but I forgot.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('moderation_queue', '0002_auto_20150213_0838'),
]
operations = [
migrations.AlterField(
model_name='queuedimage',
name='justification_for_use',
field=models.TextField(blank=True),
preserve_default=True,
),
]
|
Add a forgotten migration for QueuedImage
This should have been generated when blank=True was added to the
justification_for_use field, but I forgot.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('moderation_queue', '0002_auto_20150213_0838'),
]
operations = [
migrations.AlterField(
model_name='queuedimage',
name='justification_for_use',
field=models.TextField(blank=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add a forgotten migration for QueuedImage
This should have been generated when blank=True was added to the
justification_for_use field, but I forgot.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('moderation_queue', '0002_auto_20150213_0838'),
]
operations = [
migrations.AlterField(
model_name='queuedimage',
name='justification_for_use',
field=models.TextField(blank=True),
preserve_default=True,
),
]
|
|
d8b396db59199f9221a3bf7ac9d75f1edfd07d4c
|
spec/image_spec.py
|
spec/image_spec.py
|
from spec.helper import *
from pygame import Surface
from pygametemplate import Image
with description("pygametemplate.Image"):
with it("should initialise correctly, without loading a Surface object"):
self.image = Image("test")
expect(self.image.name).to(equal("test"))
expect(self.image.image).to(be(None))
with it("should be able to load its image Surface into RAM"):
self.image.load()
expect(self.image.image).to(be_a(Surface))
with it("should be able to unload its image Surface from RAM"):
self.image.unload()
expect(self.image.image).to(be(None))
with it("should load its Surface if .display() is called on it"):
self.image.display(game.screen, (0, 0))
expect(self.image.image).to(be_a(Surface))
|
Add unit tests for upcoming pygametemplate.Image class
|
Add unit tests for upcoming pygametemplate.Image class
|
Python
|
mit
|
AndyDeany/pygame-template
|
Add unit tests for upcoming pygametemplate.Image class
|
from spec.helper import *
from pygame import Surface
from pygametemplate import Image
with description("pygametemplate.Image"):
with it("should initialise correctly, without loading a Surface object"):
self.image = Image("test")
expect(self.image.name).to(equal("test"))
expect(self.image.image).to(be(None))
with it("should be able to load its image Surface into RAM"):
self.image.load()
expect(self.image.image).to(be_a(Surface))
with it("should be able to unload its image Surface from RAM"):
self.image.unload()
expect(self.image.image).to(be(None))
with it("should load its Surface if .display() is called on it"):
self.image.display(game.screen, (0, 0))
expect(self.image.image).to(be_a(Surface))
|
<commit_before><commit_msg>Add unit tests for upcoming pygametemplate.Image class<commit_after>
|
from spec.helper import *
from pygame import Surface
from pygametemplate import Image
with description("pygametemplate.Image"):
with it("should initialise correctly, without loading a Surface object"):
self.image = Image("test")
expect(self.image.name).to(equal("test"))
expect(self.image.image).to(be(None))
with it("should be able to load its image Surface into RAM"):
self.image.load()
expect(self.image.image).to(be_a(Surface))
with it("should be able to unload its image Surface from RAM"):
self.image.unload()
expect(self.image.image).to(be(None))
with it("should load its Surface if .display() is called on it"):
self.image.display(game.screen, (0, 0))
expect(self.image.image).to(be_a(Surface))
|
Add unit tests for upcoming pygametemplate.Image class
from spec.helper import *
from pygame import Surface
from pygametemplate import Image
with description("pygametemplate.Image"):
with it("should initialise correctly, without loading a Surface object"):
self.image = Image("test")
expect(self.image.name).to(equal("test"))
expect(self.image.image).to(be(None))
with it("should be able to load its image Surface into RAM"):
self.image.load()
expect(self.image.image).to(be_a(Surface))
with it("should be able to unload its image Surface from RAM"):
self.image.unload()
expect(self.image.image).to(be(None))
with it("should load its Surface if .display() is called on it"):
self.image.display(game.screen, (0, 0))
expect(self.image.image).to(be_a(Surface))
|
<commit_before><commit_msg>Add unit tests for upcoming pygametemplate.Image class<commit_after>from spec.helper import *
from pygame import Surface
from pygametemplate import Image
with description("pygametemplate.Image"):
with it("should initialise correctly, without loading a Surface object"):
self.image = Image("test")
expect(self.image.name).to(equal("test"))
expect(self.image.image).to(be(None))
with it("should be able to load its image Surface into RAM"):
self.image.load()
expect(self.image.image).to(be_a(Surface))
with it("should be able to unload its image Surface from RAM"):
self.image.unload()
expect(self.image.image).to(be(None))
with it("should load its Surface if .display() is called on it"):
self.image.display(game.screen, (0, 0))
expect(self.image.image).to(be_a(Surface))
|
|
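The spec above pins down a small lazy-loading contract for Image: a fresh instance holds no Surface, load() and unload() toggle it, and display() loads on demand before drawing. A minimal class satisfying that contract might look like the sketch below; the record does not show the real pygametemplate implementation, so the image path convention and anything beyond the attribute and method names used in the spec are assumptions.
import pygame
class Image(object):
    def __init__(self, name):
        self.name = name
        self.image = None                              # no Surface loaded yet
    def load(self):
        if self.image is None:                         # load lazily, at most once
            self.image = pygame.image.load("images/%s.png" % self.name)   # path is a guess
    def unload(self):
        self.image = None                              # drop the Surface from RAM
    def display(self, screen, coordinates):
        self.load()                                    # display() implies load()
        screen.blit(self.image, coordinates)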
6fca9da8cbf5318a2f7cdee16d4136b7b54088a5
|
src/cron/migrations/0004_auto_20210831_1159.py
|
src/cron/migrations/0004_auto_20210831_1159.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-08-31 11:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cron', '0003_auto_20171121_1115'),
]
operations = [
migrations.AlterField(
model_name='reminder',
name='template_name',
field=models.CharField(help_text="The name of the email template, if it doesn't exist you will be asked to create it. Should have no spaces.", max_length=100),
),
migrations.AlterField(
model_name='reminder',
name='type',
field=models.CharField(choices=[('review', 'Review (Invited)'), ('accepted-review', 'Review (Accepted)'), ('revisions', 'Revision')], max_length=100),
),
migrations.AlterField(
model_name='sentreminder',
name='type',
field=models.CharField(choices=[('review', 'Review (Invited)'), ('accepted-review', 'Review (Accepted)'), ('revisions', 'Revision')], max_length=100),
),
]
|
Add migration for reminder update
|
Add migration for reminder update
|
Python
|
agpl-3.0
|
BirkbeckCTP/janeway,BirkbeckCTP/janeway,BirkbeckCTP/janeway,BirkbeckCTP/janeway
|
Add migration for reminder update
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-08-31 11:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cron', '0003_auto_20171121_1115'),
]
operations = [
migrations.AlterField(
model_name='reminder',
name='template_name',
field=models.CharField(help_text="The name of the email template, if it doesn't exist you will be asked to create it. Should have no spaces.", max_length=100),
),
migrations.AlterField(
model_name='reminder',
name='type',
field=models.CharField(choices=[('review', 'Review (Invited)'), ('accepted-review', 'Review (Accepted)'), ('revisions', 'Revision')], max_length=100),
),
migrations.AlterField(
model_name='sentreminder',
name='type',
field=models.CharField(choices=[('review', 'Review (Invited)'), ('accepted-review', 'Review (Accepted)'), ('revisions', 'Revision')], max_length=100),
),
]
|
<commit_before><commit_msg>Add migration for reminder update<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-08-31 11:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cron', '0003_auto_20171121_1115'),
]
operations = [
migrations.AlterField(
model_name='reminder',
name='template_name',
field=models.CharField(help_text="The name of the email template, if it doesn't exist you will be asked to create it. Should have no spaces.", max_length=100),
),
migrations.AlterField(
model_name='reminder',
name='type',
field=models.CharField(choices=[('review', 'Review (Invited)'), ('accepted-review', 'Review (Accepted)'), ('revisions', 'Revision')], max_length=100),
),
migrations.AlterField(
model_name='sentreminder',
name='type',
field=models.CharField(choices=[('review', 'Review (Invited)'), ('accepted-review', 'Review (Accepted)'), ('revisions', 'Revision')], max_length=100),
),
]
|
Add migration for reminder update
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-08-31 11:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cron', '0003_auto_20171121_1115'),
]
operations = [
migrations.AlterField(
model_name='reminder',
name='template_name',
field=models.CharField(help_text="The name of the email template, if it doesn't exist you will be asked to create it. Should have no spaces.", max_length=100),
),
migrations.AlterField(
model_name='reminder',
name='type',
field=models.CharField(choices=[('review', 'Review (Invited)'), ('accepted-review', 'Review (Accepted)'), ('revisions', 'Revision')], max_length=100),
),
migrations.AlterField(
model_name='sentreminder',
name='type',
field=models.CharField(choices=[('review', 'Review (Invited)'), ('accepted-review', 'Review (Accepted)'), ('revisions', 'Revision')], max_length=100),
),
]
|
<commit_before><commit_msg>Add migration for reminder update<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-08-31 11:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cron', '0003_auto_20171121_1115'),
]
operations = [
migrations.AlterField(
model_name='reminder',
name='template_name',
field=models.CharField(help_text="The name of the email template, if it doesn't exist you will be asked to create it. Should have no spaces.", max_length=100),
),
migrations.AlterField(
model_name='reminder',
name='type',
field=models.CharField(choices=[('review', 'Review (Invited)'), ('accepted-review', 'Review (Accepted)'), ('revisions', 'Revision')], max_length=100),
),
migrations.AlterField(
model_name='sentreminder',
name='type',
field=models.CharField(choices=[('review', 'Review (Invited)'), ('accepted-review', 'Review (Accepted)'), ('revisions', 'Revision')], max_length=100),
),
]
|
|
a6c3622ffbe6f02bab8a9be2c8949771d8729d95
|
fibonacci_message_encoding/main.py
|
fibonacci_message_encoding/main.py
|
import string
# Pseudocode
## Encode
def encode(secret_string, wordlist=[]):
# TODO: wordlist is not intended for production
message = []
# Generate fibo values V_m_n using m_0
# TODO: add possibility of calculating closed-form fibo numbers using starting position
fibonacci_values = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]
for i, char in enumerate(secret_string):
suitable_words = filter(lambda word: word[(fibonacci_values[i] - 1) % len(word)] is char,
wordlist)
# TODO: Catch StopIteration (meaning that there are no suitable words)
message.append(next(suitable_words))
return message
## Decode
def decode(message_list):
secret = []
# Generate fibo values V_m_n using m_0
# TODO: add possibility of calculating closed-form fibo numbers using starting position
fibonacci_values = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]
for i, word in enumerate(message_list):
# Find char s_n at position p where
# p = (V_m_n - 1) mod length(w_n)
#position = divmod((fibonacci_values[i] - 1), len(word))
position = (fibonacci_values[i] - 1) % len(word)
secret_character = word[position]
secret.append(secret_character)
return secret
def main():
print("Encoding \"william\"")
print(encode("william", wordlist=["why", "ignites", "sleep", "bold", "heroic", "aura", "mail"]))
print("Decoding the encode: ", end='')
decoded = decode(["why", "ignites", "sleep", "bold", "heroic", "aura", "mail"])
print("".join(decoded))
if __name__ == "__main__":
main()
|
Add basic proof of concept: encode and decode
|
Add basic proof of concept: encode and decode
|
Python
|
mit
|
Telkkar/fibonacci_message_encoding
|
Add basic proof of concept: encode and decode
|
import string
# Pseudocode
## Encode
def encode(secret_string, wordlist=[]):
# TODO: wordlist is not intended for production
message = []
# Generate fibo values V_m_n using m_0
# TODO: add possibility of calculating closed-form fibo numbers using starting position
fibonacci_values = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]
for i, char in enumerate(secret_string):
suitable_words = filter(lambda word: word[(fibonacci_values[i] - 1) % len(word)] is char,
wordlist)
# TODO: Catch StopIteration (meaning that there are no suitable words)
message.append(next(suitable_words))
return message
## Decode
def decode(message_list):
secret = []
# Generate fibo values V_m_n using m_0
# TODO: add possibility of calculating closed-form fibo numbers using starting position
fibonacci_values = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]
for i, word in enumerate(message_list):
# Find char s_n at position p where
# p = (V_m_n - 1) mod length(w_n)
#position = divmod((fibonacci_values[i] - 1), len(word))
position = (fibonacci_values[i] - 1) % len(word)
secret_character = word[position]
secret.append(secret_character)
return secret
def main():
print("Encoding \"william\"")
print(encode("william", wordlist=["why", "ignites", "sleep", "bold", "heroic", "aura", "mail"]))
print("Decoding the encode: ", end='')
decoded = decode(["why", "ignites", "sleep", "bold", "heroic", "aura", "mail"])
print("".join(decoded))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add basic proof of concept: encode and decode<commit_after>
|
import string
# Pseudocode
## Encode
def encode(secret_string, wordlist=[]):
# TODO: wordlist is not intended for production
message = []
# Generate fibo values V_m_n using m_0
# TODO: add possibility of calculating closed-form fibo numbers using starting position
fibonacci_values = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]
for i, char in enumerate(secret_string):
suitable_words = filter(lambda word: word[(fibonacci_values[i] - 1) % len(word)] is char,
wordlist)
# TODO: Catch StopIteration (meaning that there are no suitable words)
message.append(next(suitable_words))
return message
## Decode
def decode(message_list):
secret = []
# Generate fibo values V_m_n using m_0
# TODO: add possibility of calculating closed-form fibo numbers using starting position
fibonacci_values = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]
for i, word in enumerate(message_list):
# Find char s_n at position p where
# p = (V_m_n - 1) mod length(w_n)
#position = divmod((fibonacci_values[i] - 1), len(word))
position = (fibonacci_values[i] - 1) % len(word)
secret_character = word[position]
secret.append(secret_character)
return secret
def main():
print("Encoding \"william\"")
print(encode("william", wordlist=["why", "ignites", "sleep", "bold", "heroic", "aura", "mail"]))
print("Decoding the encode: ", end='')
decoded = decode(["why", "ignites", "sleep", "bold", "heroic", "aura", "mail"])
print("".join(decoded))
if __name__ == "__main__":
main()
|
Add basic proof of concept: encode and decode
import string
# Pseudocode
## Encode
def encode(secret_string, wordlist=[]):
# TODO: wordlist is not intended for production
message = []
# Generate fibo values V_m_n using m_0
# TODO: add possibility of calculating closed-form fibo numbers using starting position
fibonacci_values = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]
for i, char in enumerate(secret_string):
suitable_words = filter(lambda word: word[(fibonacci_values[i] - 1) % len(word)] is char,
wordlist)
# TODO: Catch StopIteration (meaning that there are no suitable words)
message.append(next(suitable_words))
return message
## Decode
def decode(message_list):
secret = []
# Generate fibo values V_m_n using m_0
# TODO: add possibility of calculating closed-form fibo numbers using starting position
fibonacci_values = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]
for i, word in enumerate(message_list):
# Find char s_n at position p where
# p = (V_m_n - 1) mod length(w_n)
#position = divmod((fibonacci_values[i] - 1), len(word))
position = (fibonacci_values[i] - 1) % len(word)
secret_character = word[position]
secret.append(secret_character)
return secret
def main():
print("Encoding \"william\"")
print(encode("william", wordlist=["why", "ignites", "sleep", "bold", "heroic", "aura", "mail"]))
print("Decoding the encode: ", end='')
decoded = decode(["why", "ignites", "sleep", "bold", "heroic", "aura", "mail"])
print("".join(decoded))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add basic proof of concept: encode and decode<commit_after>import string
# Pseudocode
## Encode
def encode(secret_string, wordlist=[]):
# TODO: wordlist is not intended for production
message = []
# Generate fibo values V_m_n using m_0
# TODO: add possibility of calculating closed-form fibo numbers using starting position
fibonacci_values = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]
for i, char in enumerate(secret_string):
suitable_words = filter(lambda word: word[(fibonacci_values[i] - 1) % len(word)] is char,
wordlist)
# TODO: Catch StopIteration (meaning that there are no suitable words)
message.append(next(suitable_words))
return message
## Decode
def decode(message_list):
secret = []
# Generate fibo values V_m_n using m_0
# TODO: add possibility of calculating closed-form fibo numbers using starting position
fibonacci_values = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]
for i, word in enumerate(message_list):
# Find char s_n at position p where
# p = (V_m_n - 1) mod length(w_n)
#position = divmod((fibonacci_values[i] - 1), len(word))
position = (fibonacci_values[i] - 1) % len(word)
secret_character = word[position]
secret.append(secret_character)
return secret
def main():
print("Encoding \"william\"")
print(encode("william", wordlist=["why", "ignites", "sleep", "bold", "heroic", "aura", "mail"]))
print("Decoding the encode: ", end='')
decoded = decode(["why", "ignites", "sleep", "bold", "heroic", "aura", "mail"])
print("".join(decoded))
if __name__ == "__main__":
main()
|
|
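The demo in main() round-trips because each word w_n contributes the character at index (V_n - 1) mod len(w_n), where V_n is the n-th value of the hard-coded Fibonacci list. Walking the decode of the example message by hand (the assert assumes decode is importable from the module above):
# (fib - 1) % len(word) -> recovered character
# "why":     (1 - 1) % 3 = 0 -> 'w'
# "ignites": (1 - 1) % 7 = 0 -> 'i'
# "sleep":   (2 - 1) % 5 = 1 -> 'l'
# "bold":    (3 - 1) % 4 = 2 -> 'l'
# "heroic":  (5 - 1) % 6 = 4 -> 'i'
# "aura":    (8 - 1) % 4 = 3 -> 'a'
# "mail":    (13 - 1) % 4 = 0 -> 'm'
assert "".join(decode(["why", "ignites", "sleep", "bold", "heroic", "aura", "mail"])) == "william"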
bc293b675b067574f8c9da0e18d5d958e4ca568b
|
stationspinner/sde/migrations/0008_auto_20150715_1544.py
|
stationspinner/sde/migrations/0008_auto_20150715_1544.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sde', '0007_remove_invtype_chanceofduplicating'),
]
operations = [
migrations.RemoveField(
model_name='invcategory',
name='description',
),
migrations.RemoveField(
model_name='invgroup',
name='allowManufacture',
),
migrations.RemoveField(
model_name='invgroup',
name='allowRecycler',
),
migrations.RemoveField(
model_name='invgroup',
name='description',
),
]
|
Migrate SDE to remove redundant fields
|
Migrate SDE to remove redundant fields
|
Python
|
agpl-3.0
|
kriberg/stationspinner,kriberg/stationspinner
|
Migrate SDE to remove redundant fields
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sde', '0007_remove_invtype_chanceofduplicating'),
]
operations = [
migrations.RemoveField(
model_name='invcategory',
name='description',
),
migrations.RemoveField(
model_name='invgroup',
name='allowManufacture',
),
migrations.RemoveField(
model_name='invgroup',
name='allowRecycler',
),
migrations.RemoveField(
model_name='invgroup',
name='description',
),
]
|
<commit_before><commit_msg>Migrate SDE to remove redundant fields<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sde', '0007_remove_invtype_chanceofduplicating'),
]
operations = [
migrations.RemoveField(
model_name='invcategory',
name='description',
),
migrations.RemoveField(
model_name='invgroup',
name='allowManufacture',
),
migrations.RemoveField(
model_name='invgroup',
name='allowRecycler',
),
migrations.RemoveField(
model_name='invgroup',
name='description',
),
]
|
Migrate SDE to remove redundant fields
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sde', '0007_remove_invtype_chanceofduplicating'),
]
operations = [
migrations.RemoveField(
model_name='invcategory',
name='description',
),
migrations.RemoveField(
model_name='invgroup',
name='allowManufacture',
),
migrations.RemoveField(
model_name='invgroup',
name='allowRecycler',
),
migrations.RemoveField(
model_name='invgroup',
name='description',
),
]
|
<commit_before><commit_msg>Migrate SDE to remove redundant fields<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sde', '0007_remove_invtype_chanceofduplicating'),
]
operations = [
migrations.RemoveField(
model_name='invcategory',
name='description',
),
migrations.RemoveField(
model_name='invgroup',
name='allowManufacture',
),
migrations.RemoveField(
model_name='invgroup',
name='allowRecycler',
),
migrations.RemoveField(
model_name='invgroup',
name='description',
),
]
|
|
0c1a0a70154ddf107a6174d49793e369d28f1beb
|
openstack_dashboard/views.py
|
openstack_dashboard/views.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import shortcuts
from django.views.decorators import vary
import horizon
from openstack_auth import views
def get_user_home(user):
if user.is_superuser:
return horizon.get_dashboard('admin').get_absolute_url()
return horizon.get_dashboard('project').get_absolute_url()
@vary.vary_on_cookie
def splash(request):
if request.user.is_authenticated():
return shortcuts.redirect(horizon.get_user_home(request.user))
form = views.Login(request)
request.session.clear()
request.session.set_test_cookie()
return shortcuts.render(request, 'splash.html', {'form': form})
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import shortcuts
from django.views.decorators import vary
import horizon
from horizon import base
from openstack_auth import views
def get_user_home(user):
dashboard = None
if user.is_superuser:
try:
dashboard = horizon.get_dashboard('admin')
except base.NotRegistered:
pass
if dashboard is None:
dashboard = horizon.get_default_dashboard()
return dashboard.get_absolute_url()
@vary.vary_on_cookie
def splash(request):
if request.user.is_authenticated():
return shortcuts.redirect(horizon.get_user_home(request.user))
form = views.Login(request)
request.session.clear()
request.session.set_test_cookie()
return shortcuts.render(request, 'splash.html', {'form': form})
|
Fix default get_user_home with dynamic dashboards
|
Fix default get_user_home with dynamic dashboards
The existing get_user_home implementation expects both the 'admin'
and 'project' dashboards to exist and throws an exception if they
are missing. With the inclusion of configurable dashboard loading,
we can no longer count on certain dashboards being loaded.
Closes-Bug: #1293727
Change-Id: I4ee0b7b313f4e1b27c0daea829c8b38282fa78d9
|
Python
|
apache-2.0
|
bigswitch/horizon,tsufiev/horizon,froyobin/horizon,NeCTAR-RC/horizon,kfox1111/horizon,NeCTAR-RC/horizon,yjxtogo/horizon,RudoCris/horizon,watonyweng/horizon,philoniare/horizon,philoniare/horizon,noironetworks/horizon,eayunstack/horizon,CiscoSystems/avos,Dark-Hacker/horizon,zouyapeng/horizon,nvoron23/avos,mrunge/horizon,xinwu/horizon,agileblaze/OpenStackTwoFactorAuthentication,VaneCloud/horizon,mdavid/horizon,gerrive/horizon,mrunge/openstack_horizon,Daniex/horizon,openstack/horizon,orbitfp7/horizon,yeming233/horizon,kfox1111/horizon,Solinea/horizon,xinwu/horizon,damien-dg/horizon,tqtran7/horizon,mrunge/openstack_horizon,flochaz/horizon,tanglei528/horizon,doug-fish/horizon,Daniex/horizon,wolverineav/horizon,dan1/horizon-x509,saydulk/horizon,ChameleonCloud/horizon,Solinea/horizon,yeming233/horizon,anthonydillon/horizon,tellesnobrega/horizon,Tesora/tesora-horizon,wolverineav/horizon,VaneCloud/horizon,Mirantis/mos-horizon,henaras/horizon,pranavtendolkr/horizon,NCI-Cloud/horizon,eayunstack/horizon,blueboxgroup/horizon,yjxtogo/horizon,takeshineshiro/horizon,tellesnobrega/horizon,pranavtendolkr/horizon,endorphinl/horizon,tanglei528/horizon,davidcusatis/horizon,endorphinl/horizon,yjxtogo/horizon,endorphinl/horizon,anthonydillon/horizon,CiscoSystems/horizon,saydulk/horizon,Metaswitch/horizon,mrunge/openstack_horizon,mdavid/horizon,idjaw/horizon,watonyweng/horizon,sandvine/horizon,maestro-hybrid-cloud/horizon,django-leonardo/horizon,pranavtendolkr/horizon,VaneCloud/horizon,izadorozhna/dashboard_integration_tests,henaras/horizon,karthik-suresh/horizon,kfox1111/horizon,CiscoSystems/avos,saydulk/horizon,coreycb/horizon,eayunstack/horizon,dan1/horizon-x509,j4/horizon,maestro-hybrid-cloud/horizon,FNST-OpenStack/horizon,aaronorosen/horizon-congress,xinwu/horizon,VaneCloud/horizon,Hodorable/0602,mandeepdhami/horizon,newrocknj/horizon,NCI-Cloud/horizon,aaronorosen/horizon-congress,kfox1111/horizon,endorphinl/horizon-fork,redhat-cip/horizon,CiscoSystems/avos,j4/horizon,watonyweng/horizon,FNST-OpenStack/horizon,bac/horizon,NCI-Cloud/horizon,endorphinl/horizon,RudoCris/horizon,orbitfp7/horizon,coreycb/horizon,saydulk/horizon,luhanhan/horizon,dan1/horizon-proto,henaras/horizon,luhanhan/horizon,tqtran7/horizon,tellesnobrega/horizon,doug-fish/horizon,ging/horizon,mrunge/horizon_lib,vladryk/horizon,Dark-Hacker/horizon,Metaswitch/horizon,CiscoSystems/horizon,icloudrnd/automation_tools,sandvine/horizon,luhanhan/horizon,idjaw/horizon,flochaz/horizon,NCI-Cloud/horizon,JioCloud/horizon,damien-dg/horizon,django-leonardo/horizon,luhanhan/horizon,flochaz/horizon,wolverineav/horizon,FNST-OpenStack/horizon,takeshineshiro/horizon,Tesora/tesora-horizon,CiscoSystems/horizon,promptworks/horizon,xme1226/horizon,zouyapeng/horizon,django-leonardo/horizon,nvoron23/avos,Dark-Hacker/horizon,endorphinl/horizon-fork,BiznetGIO/horizon,mandeepdhami/horizon,Metaswitch/horizon,redhat-cip/horizon,coreycb/horizon,noironetworks/horizon,Daniex/horizon,Dark-Hacker/horizon,ChameleonCloud/horizon,mdavid/horizon,tsufiev/horizon,yeming233/horizon,CiscoSystems/horizon,JioCloud/horizon,xme1226/horizon,idjaw/horizon,bigswitch/horizon,ging/horizon,Metaswitch/horizon,BiznetGIO/horizon,karthik-suresh/horizon,mdavid/horizon,Daniex/horizon,philoniare/horizon,vladryk/horizon,froyobin/horizon,django-leonardo/horizon,agileblaze/OpenStackTwoFactorAuthentication,Solinea/horizon,mandeepdhami/horizon,newrocknj/horizon,wangxiangyu/horizon,philoniare/horizon,anthonydillon/horizon,gerrive/horizon,openstack/horizon,redhat-openstack/horizon,doug-fish/horizon,Mirantis/mos
-horizon,nvoron23/avos,vladryk/horizon,endorphinl/horizon-fork,liyitest/rr,zouyapeng/horizon,openstack/horizon,icloudrnd/automation_tools,liyitest/rr,tqtran7/horizon,Mirantis/mos-horizon,j4/horizon,yjxtogo/horizon,wolverineav/horizon,ging/horizon,zouyapeng/horizon,henaras/horizon,promptworks/horizon,promptworks/horizon,endorphinl/horizon-fork,promptworks/horizon,blueboxgroup/horizon,blueboxgroup/horizon,tsufiev/horizon,blueboxgroup/horizon,BiznetGIO/horizon,ChameleonCloud/horizon,yeming233/horizon,Mirantis/mos-horizon,noironetworks/horizon,tqtran7/horizon,davidcusatis/horizon,bac/horizon,redhat-cip/horizon,froyobin/horizon,pranavtendolkr/horizon,liyitest/rr,newrocknj/horizon,redhat-openstack/horizon,tsufiev/horizon,Tesora/tesora-horizon,anthonydillon/horizon,wangxiangyu/horizon,dan1/horizon-x509,davidcusatis/horizon,bigswitch/horizon,redhat-openstack/horizon,dan1/horizon-proto,karthik-suresh/horizon,wangxiangyu/horizon,doug-fish/horizon,ging/horizon,JioCloud/horizon,watonyweng/horizon,takeshineshiro/horizon,sandvine/horizon,damien-dg/horizon,NeCTAR-RC/horizon,davidcusatis/horizon,mandeepdhami/horizon,bigswitch/horizon,aaronorosen/horizon-congress,karthik-suresh/horizon,idjaw/horizon,izadorozhna/dashboard_integration_tests,mrunge/horizon_lib,vladryk/horizon,Hodorable/0602,agileblaze/OpenStackTwoFactorAuthentication,xme1226/horizon,orbitfp7/horizon,BiznetGIO/horizon,mrunge/horizon_lib,noironetworks/horizon,gerrive/horizon,Hodorable/0602,agileblaze/OpenStackTwoFactorAuthentication,takeshineshiro/horizon,dan1/horizon-proto,maestro-hybrid-cloud/horizon,CiscoSystems/avos,damien-dg/horizon,sandvine/horizon,mrunge/horizon,FNST-OpenStack/horizon,wangxiangyu/horizon,Solinea/horizon,newrocknj/horizon,dan1/horizon-x509,j4/horizon,RudoCris/horizon,redhat-openstack/horizon,coreycb/horizon,orbitfp7/horizon,gerrive/horizon,mrunge/horizon,RudoCris/horizon,dan1/horizon-proto,tanglei528/horizon,openstack/horizon,icloudrnd/automation_tools,flochaz/horizon,nvoron23/avos,icloudrnd/automation_tools,NeCTAR-RC/horizon,bac/horizon,ChameleonCloud/horizon,redhat-cip/horizon,Hodorable/0602,maestro-hybrid-cloud/horizon,liyitest/rr,bac/horizon,Tesora/tesora-horizon,tellesnobrega/horizon,xinwu/horizon
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import shortcuts
from django.views.decorators import vary
import horizon
from openstack_auth import views
def get_user_home(user):
if user.is_superuser:
return horizon.get_dashboard('admin').get_absolute_url()
return horizon.get_dashboard('project').get_absolute_url()
@vary.vary_on_cookie
def splash(request):
if request.user.is_authenticated():
return shortcuts.redirect(horizon.get_user_home(request.user))
form = views.Login(request)
request.session.clear()
request.session.set_test_cookie()
return shortcuts.render(request, 'splash.html', {'form': form})
Fix default get_user_home with dynamic dashboards
The existing get_user_home implementation expects both the 'admin'
and 'project' dashboards to exist and throws an exception if they
are missing. With the inclusion of configurable dashboard loading,
we can no longer count on certain dashboards being loaded.
Closes-Bug: #1293727
Change-Id: I4ee0b7b313f4e1b27c0daea829c8b38282fa78d9
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import shortcuts
from django.views.decorators import vary
import horizon
from horizon import base
from openstack_auth import views
def get_user_home(user):
dashboard = None
if user.is_superuser:
try:
dashboard = horizon.get_dashboard('admin')
except base.NotRegistered:
pass
if dashboard is None:
dashboard = horizon.get_default_dashboard()
return dashboard.get_absolute_url()
@vary.vary_on_cookie
def splash(request):
if request.user.is_authenticated():
return shortcuts.redirect(horizon.get_user_home(request.user))
form = views.Login(request)
request.session.clear()
request.session.set_test_cookie()
return shortcuts.render(request, 'splash.html', {'form': form})
|
<commit_before># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import shortcuts
from django.views.decorators import vary
import horizon
from openstack_auth import views
def get_user_home(user):
if user.is_superuser:
return horizon.get_dashboard('admin').get_absolute_url()
return horizon.get_dashboard('project').get_absolute_url()
@vary.vary_on_cookie
def splash(request):
if request.user.is_authenticated():
return shortcuts.redirect(horizon.get_user_home(request.user))
form = views.Login(request)
request.session.clear()
request.session.set_test_cookie()
return shortcuts.render(request, 'splash.html', {'form': form})
<commit_msg>Fix default get_user_home with dynamic dashboards
The existing get_user_home implementation expects both the 'admin'
and 'project' dashboards to exist and throws an exception if they
are missing. With the inclusion of configurable dashboard loading,
we can no longer count on certain dashboards being loaded.
Closes-Bug: #1293727
Change-Id: I4ee0b7b313f4e1b27c0daea829c8b38282fa78d9<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import shortcuts
from django.views.decorators import vary
import horizon
from horizon import base
from openstack_auth import views
def get_user_home(user):
dashboard = None
if user.is_superuser:
try:
dashboard = horizon.get_dashboard('admin')
except base.NotRegistered:
pass
if dashboard is None:
dashboard = horizon.get_default_dashboard()
return dashboard.get_absolute_url()
@vary.vary_on_cookie
def splash(request):
if request.user.is_authenticated():
return shortcuts.redirect(horizon.get_user_home(request.user))
form = views.Login(request)
request.session.clear()
request.session.set_test_cookie()
return shortcuts.render(request, 'splash.html', {'form': form})
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import shortcuts
from django.views.decorators import vary
import horizon
from openstack_auth import views
def get_user_home(user):
if user.is_superuser:
return horizon.get_dashboard('admin').get_absolute_url()
return horizon.get_dashboard('project').get_absolute_url()
@vary.vary_on_cookie
def splash(request):
if request.user.is_authenticated():
return shortcuts.redirect(horizon.get_user_home(request.user))
form = views.Login(request)
request.session.clear()
request.session.set_test_cookie()
return shortcuts.render(request, 'splash.html', {'form': form})
Fix default get_user_home with dynamic dashboards
The existing get_user_home implementation expects both the 'admin'
and 'project' dashboards to exist and throws an exception if they
are missing. With the inclusion of configurable dashboard loading,
we can no longer count on certain dashboards being loaded.
Closes-Bug: #1293727
Change-Id: I4ee0b7b313f4e1b27c0daea829c8b38282fa78d9# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import shortcuts
from django.views.decorators import vary
import horizon
from horizon import base
from openstack_auth import views
def get_user_home(user):
dashboard = None
if user.is_superuser:
try:
dashboard = horizon.get_dashboard('admin')
except base.NotRegistered:
pass
if dashboard is None:
dashboard = horizon.get_default_dashboard()
return dashboard.get_absolute_url()
@vary.vary_on_cookie
def splash(request):
if request.user.is_authenticated():
return shortcuts.redirect(horizon.get_user_home(request.user))
form = views.Login(request)
request.session.clear()
request.session.set_test_cookie()
return shortcuts.render(request, 'splash.html', {'form': form})
|
<commit_before># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import shortcuts
from django.views.decorators import vary
import horizon
from openstack_auth import views
def get_user_home(user):
if user.is_superuser:
return horizon.get_dashboard('admin').get_absolute_url()
return horizon.get_dashboard('project').get_absolute_url()
@vary.vary_on_cookie
def splash(request):
if request.user.is_authenticated():
return shortcuts.redirect(horizon.get_user_home(request.user))
form = views.Login(request)
request.session.clear()
request.session.set_test_cookie()
return shortcuts.render(request, 'splash.html', {'form': form})
<commit_msg>Fix default get_user_home with dynamic dashboards
The existing get_user_home implementation expects both the 'admin'
and 'project' dashboards to exist and throws an exception if they
are missing. With the inclusion of configurable dashboard loading,
we can no longer count on certain dashboards being loaded.
Closes-Bug: #1293727
Change-Id: I4ee0b7b313f4e1b27c0daea829c8b38282fa78d9<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import shortcuts
from django.views.decorators import vary
import horizon
from horizon import base
from openstack_auth import views
def get_user_home(user):
dashboard = None
if user.is_superuser:
try:
dashboard = horizon.get_dashboard('admin')
except base.NotRegistered:
pass
if dashboard is None:
dashboard = horizon.get_default_dashboard()
return dashboard.get_absolute_url()
@vary.vary_on_cookie
def splash(request):
if request.user.is_authenticated():
return shortcuts.redirect(horizon.get_user_home(request.user))
form = views.Login(request)
request.session.clear()
request.session.set_test_cookie()
return shortcuts.render(request, 'splash.html', {'form': form})
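As an aside, the fallback pattern in the patched get_user_home above generalizes to any preference-ordered dashboard lookup. The following is a minimal sketch using only the Horizon calls already shown in this record (horizon.get_dashboard, base.NotRegistered, horizon.get_default_dashboard); the helper name and the preference tuple are illustrative assumptions, not part of Horizon's API.
import horizon
from horizon import base

def first_available_dashboard(slugs=('admin', 'project')):
    """Return the first registered dashboard from a preference-ordered list.

    Falls back to Horizon's configured default when none of the preferred
    slugs are registered.
    """
    for slug in slugs:
        try:
            return horizon.get_dashboard(slug)
        except base.NotRegistered:
            continue
    return horizon.get_default_dashboard()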
|
085d280f450b6516d377bd7f7dbdbbd5004cd963
|
oscar/apps/payment/utils.py
|
oscar/apps/payment/utils.py
|
import warnings
from . import models
def Bankcard(card_number, expiry_date, name=None,
cvv=None, start_date=None, issue_number=None):
# This odd looking thing is to handle backwards compatibility with Oscar
# 0.5 where the Bankcard class wasn't a model and lived in this utils
# module. As of 0.6, the Bankcard class is a model.
#
# We pretend to be a class here (hence the capitalisation), remap the
# constructor args and return an instance of the new class.
warnings.warn("The Bankcard class has moved to oscar.apps.payment.models",
DeprecationWarning)
kwargs = {
'number': card_number,
'expiry_date': expiry_date,
'name': name,
'ccv': cvv,
'start_date': start_date,
'issue_number': issue_number
}
return models.Bankcard(**kwargs)
|
Add backwards-compatible Bankcard class factory
|
Add backwards-compatible Bankcard class factory
This provides a way for extensions that need to work with Oscar 0.5 and
0.6 to consistently load the Bankcard class (even though it was moved
and had its API changed between versions).
|
Python
|
bsd-3-clause
|
QLGu/django-oscar,kapt/django-oscar,Idematica/django-oscar,anentropic/django-oscar,Bogh/django-oscar,nickpack/django-oscar,saadatqadri/django-oscar,jmt4/django-oscar,michaelkuty/django-oscar,bnprk/django-oscar,sonofatailor/django-oscar,thechampanurag/django-oscar,kapari/django-oscar,nfletton/django-oscar,nickpack/django-oscar,sonofatailor/django-oscar,lijoantony/django-oscar,nfletton/django-oscar,eddiep1101/django-oscar,jinnykoo/wuyisj,machtfit/django-oscar,ka7eh/django-oscar,manevant/django-oscar,pdonadeo/django-oscar,anentropic/django-oscar,ahmetdaglarbas/e-commerce,thechampanurag/django-oscar,jlmadurga/django-oscar,jinnykoo/christmas,solarissmoke/django-oscar,eddiep1101/django-oscar,okfish/django-oscar,WillisXChen/django-oscar,bnprk/django-oscar,bschuon/django-oscar,michaelkuty/django-oscar,pdonadeo/django-oscar,taedori81/django-oscar,Idematica/django-oscar,QLGu/django-oscar,marcoantoniooliveira/labweb,ahmetdaglarbas/e-commerce,john-parton/django-oscar,sasha0/django-oscar,elliotthill/django-oscar,QLGu/django-oscar,itbabu/django-oscar,itbabu/django-oscar,spartonia/django-oscar,Jannes123/django-oscar,saadatqadri/django-oscar,taedori81/django-oscar,itbabu/django-oscar,okfish/django-oscar,manevant/django-oscar,WadeYuChen/django-oscar,okfish/django-oscar,django-oscar/django-oscar,ademuk/django-oscar,pasqualguerrero/django-oscar,bschuon/django-oscar,Idematica/django-oscar,jmt4/django-oscar,jinnykoo/wuyisj,jinnykoo/christmas,sasha0/django-oscar,Jannes123/django-oscar,nfletton/django-oscar,kapt/django-oscar,vovanbo/django-oscar,pdonadeo/django-oscar,kapari/django-oscar,pasqualguerrero/django-oscar,kapari/django-oscar,WadeYuChen/django-oscar,Jannes123/django-oscar,kapt/django-oscar,lijoantony/django-oscar,django-oscar/django-oscar,bnprk/django-oscar,ademuk/django-oscar,machtfit/django-oscar,anentropic/django-oscar,lijoantony/django-oscar,solarissmoke/django-oscar,MatthewWilkes/django-oscar,sonofatailor/django-oscar,eddiep1101/django-oscar,binarydud/django-oscar,mexeniz/django-oscar,ademuk/django-oscar,jinnykoo/christmas,monikasulik/django-oscar,ahmetdaglarbas/e-commerce,bschuon/django-oscar,pdonadeo/django-oscar,dongguangming/django-oscar,Jannes123/django-oscar,elliotthill/django-oscar,jmt4/django-oscar,marcoantoniooliveira/labweb,DrOctogon/unwash_ecom,anentropic/django-oscar,taedori81/django-oscar,monikasulik/django-oscar,adamend/django-oscar,eddiep1101/django-oscar,dongguangming/django-oscar,MatthewWilkes/django-oscar,rocopartners/django-oscar,spartonia/django-oscar,MatthewWilkes/django-oscar,elliotthill/django-oscar,WillisXChen/django-oscar,jlmadurga/django-oscar,lijoantony/django-oscar,mexeniz/django-oscar,amirrpp/django-oscar,mexeniz/django-oscar,WadeYuChen/django-oscar,amirrpp/django-oscar,jinnykoo/wuyisj,adamend/django-oscar,rocopartners/django-oscar,manevant/django-oscar,michaelkuty/django-oscar,ka7eh/django-oscar,saadatqadri/django-oscar,faratro/django-oscar,binarydud/django-oscar,vovanbo/django-oscar,vovanbo/django-oscar,amirrpp/django-oscar,ka7eh/django-oscar,michaelkuty/django-oscar,faratro/django-oscar,thechampanurag/django-oscar,jinnykoo/wuyisj.com,josesanch/django-oscar,rocopartners/django-oscar,WillisXChen/django-oscar,WillisXChen/django-oscar,bnprk/django-oscar,DrOctogon/unwash_ecom,mexeniz/django-oscar,binarydud/django-oscar,jinnykoo/wuyisj.com,solarissmoke/django-oscar,vovanbo/django-oscar,jinnykoo/wuyisj,django-oscar/django-oscar,okfish/django-oscar,jinnykoo/wuyisj.com,faratro/django-oscar,marcoantoniooliveira/labweb,adamend/django-oscar,spartonia/django-oscar,monikasulik/djan
go-oscar,binarydud/django-oscar,sonofatailor/django-oscar,kapari/django-oscar,jinnykoo/wuyisj.com,Bogh/django-oscar,faratro/django-oscar,sasha0/django-oscar,ahmetdaglarbas/e-commerce,WillisXChen/django-oscar,thechampanurag/django-oscar,sasha0/django-oscar,DrOctogon/unwash_ecom,amirrpp/django-oscar,ka7eh/django-oscar,pasqualguerrero/django-oscar,josesanch/django-oscar,nfletton/django-oscar,jlmadurga/django-oscar,jlmadurga/django-oscar,john-parton/django-oscar,WadeYuChen/django-oscar,Bogh/django-oscar,Bogh/django-oscar,nickpack/django-oscar,rocopartners/django-oscar,QLGu/django-oscar,ademuk/django-oscar,nickpack/django-oscar,marcoantoniooliveira/labweb,adamend/django-oscar,django-oscar/django-oscar,machtfit/django-oscar,john-parton/django-oscar,josesanch/django-oscar,WillisXChen/django-oscar,monikasulik/django-oscar,MatthewWilkes/django-oscar,john-parton/django-oscar,dongguangming/django-oscar,jmt4/django-oscar,saadatqadri/django-oscar,dongguangming/django-oscar,spartonia/django-oscar,manevant/django-oscar,bschuon/django-oscar,taedori81/django-oscar,itbabu/django-oscar,pasqualguerrero/django-oscar,solarissmoke/django-oscar
|
Add backwards-compatible Bankcard class factory
This provides a way for extensions that need to work with Oscar 0.5 and
0.6 to consistently load the Bankcard class (even though it was moved
and had its API changed between versions).
|
import warnings
from . import models
def Bankcard(card_number, expiry_date, name=None,
cvv=None, start_date=None, issue_number=None):
# This odd looking thing is to handle backwards compatibility with Oscar
# 0.5 where the Bankcard class wasn't a model and lived in this utils
# module. As of 0.6, the Bankcard class is a model.
#
# We pretend to be a class here (hence the capitalisation), remap the
# constructor args and return an instance of the new class.
warnings.warn("The Bankcard class has moved to oscar.apps.payment.models",
DeprecationWarning)
kwargs = {
'number': card_number,
'expiry_date': expiry_date,
'name': name,
'ccv': cvv,
'start_date': start_date,
'issue_number': issue_number
}
return models.Bankcard(**kwargs)
|
<commit_before><commit_msg>Add backwards-compatible Bankcard class factory
This provides a way for extensions that need to work with Oscar 0.5 and
0.6 to consistently load the Bankcard class (even though it was moved
and had its API changed between versions).<commit_after>
|
import warnings
from . import models
def Bankcard(card_number, expiry_date, name=None,
cvv=None, start_date=None, issue_number=None):
# This odd looking thing is to handle backwards compatibility with Oscar
# 0.5 where the Bankcard class wasn't a model and lived in this utils
# module. As of 0.6, the Bankcard class is a model.
#
# We pretend to be a class here (hence the capitalisation), remap the
# constructor args and return an instance of the new class.
warnings.warn("The Bankcard class has moved to oscar.apps.payment.models",
DeprecationWarning)
kwargs = {
'number': card_number,
'expiry_date': expiry_date,
'name': name,
'ccv': cvv,
'start_date': start_date,
'issue_number': issue_number
}
return models.Bankcard(**kwargs)
|
Add backwards-compatible Bankcard class factory
This provides a way for extensions that need to work with Oscar 0.5 and
0.6 to consistently load the Bankcard class (even though it was moved
and had its API changed between versions).import warnings
from . import models
def Bankcard(card_number, expiry_date, name=None,
cvv=None, start_date=None, issue_number=None):
# This odd looking thing is to handle backwards compatibility with Oscar
# 0.5 where the Bankcard class wasn't a model and lived in this utils
# module. As of 0.6, the Bankcard class is a model.
#
# We pretend to be a class here (hence the capitalisation), remap the
# constructor args and return an instance of the new class.
warnings.warn("The Bankcard class has moved to oscar.apps.payment.models",
DeprecationWarning)
kwargs = {
'number': card_number,
'expiry_date': expiry_date,
'name': name,
'ccv': cvv,
'start_date': start_date,
'issue_number': issue_number
}
return models.Bankcard(**kwargs)
|
<commit_before><commit_msg>Add backwards-compatible Bankcard class factory
This provides a way for extensions that need to work with Oscar 0.5 and
0.6 to consistently load the Bankcard class (even though it was moved
and had its API changed between versions).<commit_after>import warnings
from . import models
def Bankcard(card_number, expiry_date, name=None,
cvv=None, start_date=None, issue_number=None):
# This odd looking thing is to handle backwards compatibility with Oscar
# 0.5 where the Bankcard class wasn't a model and lived in this utils
# module. As of 0.6, the Bankcard class is a model.
#
# We pretend to be a class here (hence the capitalisation), remap the
# constructor args and return an instance of the new class.
warnings.warn("The Bankcard class has moved to oscar.apps.payment.models",
DeprecationWarning)
kwargs = {
'number': card_number,
'expiry_date': expiry_date,
'name': name,
'ccv': cvv,
'start_date': start_date,
'issue_number': issue_number
}
return models.Bankcard(**kwargs)
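A hedged usage sketch of the shim above, as an extension written against Oscar 0.5 might call it; the card number, dates, and names are placeholders only, and the instance returned is unsaved.
from datetime import date

from oscar.apps.payment.utils import Bankcard

# Old-style (Oscar 0.5) call: a DeprecationWarning is emitted and an
# unsaved oscar.apps.payment.models.Bankcard instance is returned.
bankcard = Bankcard(
    card_number='4111111111111111',   # placeholder test number
    expiry_date=date(2018, 6, 30),
    name='Example Cardholder',
    cvv='123',
)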
|
|
a1e5ebded1a44480dd71bdb267087683cf330a67
|
webvtt/generic.py
|
webvtt/generic.py
|
class Caption(object):
def __init__(self, start, end, lines=None):
self.start = start
self.end = end
self.lines = lines or []
def add_line(self, line):
self.lines.append(line)
|
Add class to store caption data
|
Add class to store caption data
|
Python
|
mit
|
glut23/webvtt-py,sampattuzzi/webvtt-py
|
Add class to store caption data
|
class Caption(object):
def __init__(self, start, end, lines=None):
self.start = start
self.end = end
self.lines = lines or []
def add_line(self, line):
self.lines.append(line)
|
<commit_before><commit_msg>Add class to store caption data<commit_after>
|
class Caption(object):
def __init__(self, start, end, lines=None):
self.start = start
self.end = end
self.lines = lines or []
def add_line(self, line):
self.lines.append(line)
|
Add class to store caption data
class Caption(object):
def __init__(self, start, end, lines=None):
self.start = start
self.end = end
self.lines = lines or []
def add_line(self, line):
self.lines.append(line)
|
<commit_before><commit_msg>Add class to store caption data<commit_after>
class Caption(object):
def __init__(self, start, end, lines=None):
self.start = start
self.end = end
self.lines = lines or []
def add_line(self, line):
self.lines.append(line)
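A minimal usage sketch of the Caption class above, assuming the module path shown in this record (webvtt/generic.py); the timestamps are plain strings because the class stores start and end untouched.
from webvtt.generic import Caption

# The constructor keeps whatever is passed for start/end, so strings work here.
caption = Caption('00:00:00.500', '00:00:02.000')
caption.add_line('Hello world')
caption.add_line('Second line of the cue')
print(caption.lines)  # ['Hello world', 'Second line of the cue']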
|
|
aea4f35e51f244ad191de37ca0f634b4917ae78d
|
07/test_address.py
|
07/test_address.py
|
import unittest
from address import has_reflection, is_compatible
class TestAddress(unittest.TestCase):
def test_has_reflection(self):
assert has_reflection(['mnop']) == False
assert has_reflection(['abba', 'qrst']) == True
def test_is_compatible(self):
assert is_compatible('abba[mnop]qrst') == True
assert is_compatible('abcd[bddb]xyyx') == False
assert is_compatible('aaaa[qwer]tyui') == False
assert is_compatible('ioxxoj[asdfgh]zxcvbn') == True
|
Test part 1 of day 7.
|
Test part 1 of day 7.
|
Python
|
mit
|
machinelearningdeveloper/aoc_2016
|
Test part 1 of day 7.
|
import unittest
from address import has_reflection, is_compatible
class TestAddress(unittest.TestCase):
def test_has_reflection(self):
assert has_reflection(['mnop']) == False
assert has_reflection(['abba', 'qrst']) == True
def test_is_compatible(self):
assert is_compatible('abba[mnop]qrst') == True
assert is_compatible('abcd[bddb]xyyx') == False
assert is_compatible('aaaa[qwer]tyui') == False
assert is_compatible('ioxxoj[asdfgh]zxcvbn') == True
|
<commit_before><commit_msg>Test part 1 of day 7.<commit_after>
|
import unittest
from address import has_reflection, is_compatible
class TestAddress(unittest.TestCase):
def test_has_reflection(self):
assert has_reflection(['mnop']) == False
assert has_reflection(['abba', 'qrst']) == True
def test_is_compatible(self):
assert is_compatible('abba[mnop]qrst') == True
assert is_compatible('abcd[bddb]xyyx') == False
assert is_compatible('aaaa[qwer]tyui') == False
assert is_compatible('ioxxoj[asdfgh]zxcvbn') == True
|
Test part 1 of day 7.import unittest
from address import has_reflection, is_compatible
class TestAddress(unittest.TestCase):
def test_has_reflection(self):
assert has_reflection(['mnop']) == False
assert has_reflection(['abba', 'qrst']) == True
def test_is_compatible(self):
assert is_compatible('abba[mnop]qrst') == True
assert is_compatible('abcd[bddb]xyyx') == False
assert is_compatible('aaaa[qwer]tyui') == False
assert is_compatible('ioxxoj[asdfgh]zxcvbn') == True
|
<commit_before><commit_msg>Test part 1 of day 7.<commit_after>import unittest
from address import has_reflection, is_compatible
class TestAddress(unittest.TestCase):
def test_has_reflection(self):
assert has_reflection(['mnop']) == False
assert has_reflection(['abba', 'qrst']) == True
def test_is_compatible(self):
assert is_compatible('abba[mnop]qrst') == True
assert is_compatible('abcd[bddb]xyyx') == False
assert is_compatible('aaaa[qwer]tyui') == False
assert is_compatible('ioxxoj[asdfgh]zxcvbn') == True
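The address module under test is not shown in this record; the sketch below is one assumed implementation of has_reflection and is_compatible that satisfies the asserts above (ABBA detection for Advent of Code 2016 day 7, part 1), not necessarily the author's code.
import re

def has_reflection(sequences):
    """True if any sequence contains an ABBA: a pair of different characters
    followed by the same pair reversed (e.g. 'abba', but not 'aaaa')."""
    for seq in sequences:
        for i in range(len(seq) - 3):
            a, b, c, d = seq[i:i + 4]
            if a == d and b == c and a != b:
                return True
    return False

def is_compatible(address):
    """True if the parts outside square brackets contain an ABBA and the
    bracketed (hypernet) parts do not."""
    hypernets = re.findall(r'\[([^\]]*)\]', address)
    supernets = re.split(r'\[[^\]]*\]', address)
    return has_reflection(supernets) and not has_reflection(hypernets)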
|
|
329c3d31b76b22c481278a7c0994c9a7ba3f2852
|
migrations/versions/1020_add_buyer_email_domain_table.py
|
migrations/versions/1020_add_buyer_email_domain_table.py
|
"""Add buyer email domain table
Revision ID: 1020
Revises: 1010
Create Date: 2017-10-10 15:18:22.683693
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1020'
down_revision = '1010'
def upgrade():
op.create_table(
'buyer_email_domains',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('domain_name', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id', name=op.f('buyer_email_domains_pkey'))
)
op.create_unique_constraint(
op.f('uq_buyer_email_domains_domain_name'), 'buyer_email_domains', ['domain_name']
)
def downgrade():
op.drop_constraint(
op.f('uq_buyer_email_domains_domain_name'), 'buyer_email_domains', type_='unique'
)
op.drop_table('buyer_email_domains')
|
Add new buyer email domain table
|
Add new buyer email domain table
A simple table to store the email domain (currently held in a text
file).
The domain_name field has a uniqueness constraint.
|
Python
|
mit
|
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
|
Add new buyer email domain table
A simple table to store the email domain (currently held in a text
file).
The domain_name field has a uniqueness constraint.
|
"""Add buyer email domain table
Revision ID: 1020
Revises: 1010
Create Date: 2017-10-10 15:18:22.683693
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1020'
down_revision = '1010'
def upgrade():
op.create_table(
'buyer_email_domains',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('domain_name', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id', name=op.f('buyer_email_domains_pkey'))
)
op.create_unique_constraint(
op.f('uq_buyer_email_domains_domain_name'), 'buyer_email_domains', ['domain_name']
)
def downgrade():
op.drop_constraint(
op.f('uq_buyer_email_domains_domain_name'), 'buyer_email_domains', type_='unique'
)
op.drop_table('buyer_email_domains')
|
<commit_before><commit_msg>Add new buyer email domain table
A simple table to store the email domain (currently held in a text
file).
The domain_name field has a uniqueness constraint.<commit_after>
|
"""Add buyer email domain table
Revision ID: 1020
Revises: 1010
Create Date: 2017-10-10 15:18:22.683693
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1020'
down_revision = '1010'
def upgrade():
op.create_table(
'buyer_email_domains',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('domain_name', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id', name=op.f('buyer_email_domains_pkey'))
)
op.create_unique_constraint(
op.f('uq_buyer_email_domains_domain_name'), 'buyer_email_domains', ['domain_name']
)
def downgrade():
op.drop_constraint(
op.f('uq_buyer_email_domains_domain_name'), 'buyer_email_domains', type_='unique'
)
op.drop_table('buyer_email_domains')
|
Add new buyer email domain table
A simple table to store the email domain (currently held in a text
file).
The domain_name field has a uniqueness constraint."""Add buyer email domain table
Revision ID: 1020
Revises: 1010
Create Date: 2017-10-10 15:18:22.683693
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1020'
down_revision = '1010'
def upgrade():
op.create_table(
'buyer_email_domains',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('domain_name', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id', name=op.f('buyer_email_domains_pkey'))
)
op.create_unique_constraint(
op.f('uq_buyer_email_domains_domain_name'), 'buyer_email_domains', ['domain_name']
)
def downgrade():
op.drop_constraint(
op.f('uq_buyer_email_domains_domain_name'), 'buyer_email_domains', type_='unique'
)
op.drop_table('buyer_email_domains')
|
<commit_before><commit_msg>Add new buyer email domain table
A simple table to store the email domain (currently held in a text
file).
The domain_name field has a uniqueness constraint.<commit_after>"""Add buyer email domain table
Revision ID: 1020
Revises: 1010
Create Date: 2017-10-10 15:18:22.683693
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1020'
down_revision = '1010'
def upgrade():
op.create_table(
'buyer_email_domains',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('domain_name', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id', name=op.f('buyer_email_domains_pkey'))
)
op.create_unique_constraint(
op.f('uq_buyer_email_domains_domain_name'), 'buyer_email_domains', ['domain_name']
)
def downgrade():
op.drop_constraint(
op.f('uq_buyer_email_domains_domain_name'), 'buyer_email_domains', type_='unique'
)
op.drop_table('buyer_email_domains')
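For orientation, a SQLAlchemy model lining up with the table this migration creates might look like the sketch below; the class name and declarative base are assumptions, since the API's actual model definition is not part of this record.
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class BuyerEmailDomain(Base):
    __tablename__ = 'buyer_email_domains'

    # Mirrors the migration: integer primary key plus a unique, non-null domain.
    id = Column(Integer, primary_key=True)
    domain_name = Column(String, nullable=False, unique=True)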
|
|
a812d3054e25768c19a470bf090b298e11786ed3
|
openstack/common/sslutils.py
|
openstack/common/sslutils.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ssl
from openstack.common import cfg
from openstack.common.gettextutils import _
ssl_opts = [
cfg.StrOpt('ca_file',
default=None,
help="CA certificate file to use to verify "
"connecting clients"),
cfg.StrOpt('cert_file',
default=None,
help="Certificate file to use when starting "
"the server securely"),
cfg.StrOpt('key_file',
default=None,
help="Private key file to use when starting "
"the server securely"),
]
CONF = cfg.CONF
CONF.register_opts(ssl_opts, "ssl")
def is_enabled():
cert_file = CONF.ssl.cert_file
key_file = CONF.ssl.key_file
ca_file = CONF.ssl.ca_file
use_ssl = cert_file or key_file
if cert_file and not os.path.exists(cert_file):
raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)
if ca_file and not os.path.exists(ca_file):
raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)
if key_file and not os.path.exists(key_file):
raise RuntimeError(_("Unable to find key_file : %s") % key_file)
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
return use_ssl
def wrap(sock):
ssl_kwargs = {
'server_side': True,
'certfile': CONF.ssl.cert_file,
'keyfile': CONF.ssl.key_file,
'cert_reqs': ssl.CERT_NONE,
}
if CONF.ssl.ca_file:
ssl_kwargs['ca_certs'] = CONF.ssl.ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
|
Support for SSL in wsgi.Service
|
Support for SSL in wsgi.Service
Enable support for SSL as well using code from glance. We
have some new options for configuring the SSL support.
test_app starts wsgi.Service with a test app, then creates
a url to make sure the http requests are actually served
properly
test_app_using_router adds wsgi.Router and Mapper() to the
mix along with using the wsgify annotation for serving
the http requests
Fixes LP# 979488 (partial)
Fixes LP# 869884 (partial)
DocImpact
Change-Id: Iae47b13b50e00c102c8c36f4a3e73b24fa4e6303
|
Python
|
apache-2.0
|
poznyakandrey/oslo.service,eezhova/oslo.service,openstack/oslo.service,citrix-openstack-build/oslo.service
|
Support for SSL in wsgi.Service
Enable support for SSL as well using code from glance. We
have some new options for configuring the SSL support.
test_app starts wsgi.Service with a test app, then creates
a url to make sure the http requests are actually served
properly
test_app_using_router adds wsgi.Router and Mapper() to the
mix along with using the wsgify annotation for serving
the http requests
Fixes LP# 979488 (partial)
Fixes LP# 869884 (partial)
DocImpact
Change-Id: Iae47b13b50e00c102c8c36f4a3e73b24fa4e6303
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ssl
from openstack.common import cfg
from openstack.common.gettextutils import _
ssl_opts = [
cfg.StrOpt('ca_file',
default=None,
help="CA certificate file to use to verify "
"connecting clients"),
cfg.StrOpt('cert_file',
default=None,
help="Certificate file to use when starting "
"the server securely"),
cfg.StrOpt('key_file',
default=None,
help="Private key file to use when starting "
"the server securely"),
]
CONF = cfg.CONF
CONF.register_opts(ssl_opts, "ssl")
def is_enabled():
cert_file = CONF.ssl.cert_file
key_file = CONF.ssl.key_file
ca_file = CONF.ssl.ca_file
use_ssl = cert_file or key_file
if cert_file and not os.path.exists(cert_file):
raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)
if ca_file and not os.path.exists(ca_file):
raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)
if key_file and not os.path.exists(key_file):
raise RuntimeError(_("Unable to find key_file : %s") % key_file)
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
return use_ssl
def wrap(sock):
ssl_kwargs = {
'server_side': True,
'certfile': CONF.ssl.cert_file,
'keyfile': CONF.ssl.key_file,
'cert_reqs': ssl.CERT_NONE,
}
if CONF.ssl.ca_file:
ssl_kwargs['ca_certs'] = CONF.ssl.ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
|
<commit_before><commit_msg>Support for SSL in wsgi.Service
Enable support for SSL as well using code from glance. We
have some new options for configuring the SSL support.
test_app starts wsgi.Service with a test app, then creates
a url to make sure the http requests are actually served
properly
test_app_using_router adds wsgi.Router and Mapper() to the
mix along with using the wsgify annotation for serving
the http requests
Fixes LP# 979488 (partial)
Fixes LP# 869884 (partial)
DocImpact
Change-Id: Iae47b13b50e00c102c8c36f4a3e73b24fa4e6303<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ssl
from openstack.common import cfg
from openstack.common.gettextutils import _
ssl_opts = [
cfg.StrOpt('ca_file',
default=None,
help="CA certificate file to use to verify "
"connecting clients"),
cfg.StrOpt('cert_file',
default=None,
help="Certificate file to use when starting "
"the server securely"),
cfg.StrOpt('key_file',
default=None,
help="Private key file to use when starting "
"the server securely"),
]
CONF = cfg.CONF
CONF.register_opts(ssl_opts, "ssl")
def is_enabled():
cert_file = CONF.ssl.cert_file
key_file = CONF.ssl.key_file
ca_file = CONF.ssl.ca_file
use_ssl = cert_file or key_file
if cert_file and not os.path.exists(cert_file):
raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)
if ca_file and not os.path.exists(ca_file):
raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)
if key_file and not os.path.exists(key_file):
raise RuntimeError(_("Unable to find key_file : %s") % key_file)
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
return use_ssl
def wrap(sock):
ssl_kwargs = {
'server_side': True,
'certfile': CONF.ssl.cert_file,
'keyfile': CONF.ssl.key_file,
'cert_reqs': ssl.CERT_NONE,
}
if CONF.ssl.ca_file:
ssl_kwargs['ca_certs'] = CONF.ssl.ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
|
Support for SSL in wsgi.Service
Enable support for SSL as well using code from glance. We
have some new options for configuring the SSL support.
test_app starts wsgi.Service with a test app, then creates
a url to make sure the http requests are actually served
properly
test_app_using_router adds wsgi.Router and Mapper() to the
mix along with using the wsgify annotation for serving
the http requests
Fixes LP# 979488 (partial)
Fixes LP# 869884 (partial)
DocImpact
Change-Id: Iae47b13b50e00c102c8c36f4a3e73b24fa4e6303# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ssl
from openstack.common import cfg
from openstack.common.gettextutils import _
ssl_opts = [
cfg.StrOpt('ca_file',
default=None,
help="CA certificate file to use to verify "
"connecting clients"),
cfg.StrOpt('cert_file',
default=None,
help="Certificate file to use when starting "
"the server securely"),
cfg.StrOpt('key_file',
default=None,
help="Private key file to use when starting "
"the server securely"),
]
CONF = cfg.CONF
CONF.register_opts(ssl_opts, "ssl")
def is_enabled():
cert_file = CONF.ssl.cert_file
key_file = CONF.ssl.key_file
ca_file = CONF.ssl.ca_file
use_ssl = cert_file or key_file
if cert_file and not os.path.exists(cert_file):
raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)
if ca_file and not os.path.exists(ca_file):
raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)
if key_file and not os.path.exists(key_file):
raise RuntimeError(_("Unable to find key_file : %s") % key_file)
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
return use_ssl
def wrap(sock):
ssl_kwargs = {
'server_side': True,
'certfile': CONF.ssl.cert_file,
'keyfile': CONF.ssl.key_file,
'cert_reqs': ssl.CERT_NONE,
}
if CONF.ssl.ca_file:
ssl_kwargs['ca_certs'] = CONF.ssl.ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
|
<commit_before><commit_msg>Support for SSL in wsgi.Service
Enable support for SSL as well using code from glance. We
have some new options for configuring the SSL support.
test_app starts wsgi.Service with a test app, then creates
a url to make sure the http requests are actually served
properly
test_app_using_router adds wsgi.Router and Mapper() to the
mix along with using the wsgify annotation for serving
the http requests
Fixes LP# 979488 (partial)
Fixes LP# 869884 (partial)
DocImpact
Change-Id: Iae47b13b50e00c102c8c36f4a3e73b24fa4e6303<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ssl
from openstack.common import cfg
from openstack.common.gettextutils import _
ssl_opts = [
cfg.StrOpt('ca_file',
default=None,
help="CA certificate file to use to verify "
"connecting clients"),
cfg.StrOpt('cert_file',
default=None,
help="Certificate file to use when starting "
"the server securely"),
cfg.StrOpt('key_file',
default=None,
help="Private key file to use when starting "
"the server securely"),
]
CONF = cfg.CONF
CONF.register_opts(ssl_opts, "ssl")
def is_enabled():
cert_file = CONF.ssl.cert_file
key_file = CONF.ssl.key_file
ca_file = CONF.ssl.ca_file
use_ssl = cert_file or key_file
if cert_file and not os.path.exists(cert_file):
raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)
if ca_file and not os.path.exists(ca_file):
raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)
if key_file and not os.path.exists(key_file):
raise RuntimeError(_("Unable to find key_file : %s") % key_file)
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
return use_ssl
def wrap(sock):
ssl_kwargs = {
'server_side': True,
'certfile': CONF.ssl.cert_file,
'keyfile': CONF.ssl.key_file,
'cert_reqs': ssl.CERT_NONE,
}
if CONF.ssl.ca_file:
ssl_kwargs['ca_certs'] = CONF.ssl.ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
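A rough sketch of how a server might drive the two helpers above; the bind address and port are placeholders, and the cert/key/ca options are assumed to be set in the [ssl] config group as registered in this module.
import socket

from openstack.common import sslutils

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('0.0.0.0', 9999))
sock.listen(128)

# Only wrap the listening socket when cert/key options are configured;
# sockets returned by accept() will then speak SSL.
if sslutils.is_enabled():
    sock = sslutils.wrap(sock)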
|
|
da466b391470333492a56395569812653ed6658f
|
compose/cli/__init__.py
|
compose/cli/__init__.py
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import subprocess
import sys
# Attempt to detect https://github.com/docker/compose/issues/4344
try:
# We don't try importing pip because it messes with package imports
# on some Linux distros (Ubuntu, Fedora)
# https://github.com/docker/compose/issues/4425
# https://github.com/docker/compose/issues/4481
# https://github.com/pypa/pip/blob/master/pip/_vendor/__init__.py
s_cmd = subprocess.Popen(
['pip', 'freeze'], stderr=subprocess.PIPE, stdout=subprocess.PIPE
)
packages = s_cmd.communicate()[0].splitlines()
dockerpy_installed = len(
list(filter(lambda p: p.startswith(b'docker-py=='), packages))
) > 0
if dockerpy_installed:
from .colors import red
print(
red('ERROR:'),
"Dependency conflict: an older version of the 'docker-py' package "
"is polluting the namespace. "
"Run the following command to remedy the issue:\n"
"pip uninstall docker docker-py; pip install docker",
file=sys.stderr
)
sys.exit(1)
except OSError:
# pip command is not available, which indicates it's probably the binary
# distribution of Compose which is not affected
pass
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import subprocess
import sys
# Attempt to detect https://github.com/docker/compose/issues/4344
try:
# We don't try importing pip because it messes with package imports
# on some Linux distros (Ubuntu, Fedora)
# https://github.com/docker/compose/issues/4425
# https://github.com/docker/compose/issues/4481
# https://github.com/pypa/pip/blob/master/pip/_vendor/__init__.py
s_cmd = subprocess.Popen(
['pip', 'freeze'], stderr=subprocess.PIPE, stdout=subprocess.PIPE
)
packages = s_cmd.communicate()[0].splitlines()
dockerpy_installed = len(
list(filter(lambda p: p.startswith(b'docker-py=='), packages))
) > 0
if dockerpy_installed:
from .colors import yellow
print(
yellow('WARNING:'),
"Dependency conflict: an older version of the 'docker-py' package "
"may be polluting the namespace. "
"If you're experiencing crashes, run the following command to remedy the issue:\n"
"pip uninstall docker-py; pip uninstall docker; pip install docker",
file=sys.stderr
)
except OSError:
# pip command is not available, which indicates it's probably the binary
# distribution of Compose which is not affected
pass
|
Change docker-py dependency error to a warning, update fix command
|
Change docker-py dependency error to a warning, update fix command
Signed-off-by: Joffrey F <2e95f49799afcec0080c0aeb8813776d949e0768@docker.com>
|
Python
|
apache-2.0
|
thaJeztah/compose,shin-/compose,vdemeester/compose,sdurrheimer/compose,sdurrheimer/compose,schmunk42/compose,hoogenm/compose,jrabbit/compose,dnephin/compose,dnephin/compose,schmunk42/compose,swoopla/compose,funkyfuture/docker-compose,shin-/compose,thaJeztah/compose,hoogenm/compose,funkyfuture/docker-compose,jrabbit/compose,swoopla/compose,vdemeester/compose
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import subprocess
import sys
# Attempt to detect https://github.com/docker/compose/issues/4344
try:
# We don't try importing pip because it messes with package imports
# on some Linux distros (Ubuntu, Fedora)
# https://github.com/docker/compose/issues/4425
# https://github.com/docker/compose/issues/4481
# https://github.com/pypa/pip/blob/master/pip/_vendor/__init__.py
s_cmd = subprocess.Popen(
['pip', 'freeze'], stderr=subprocess.PIPE, stdout=subprocess.PIPE
)
packages = s_cmd.communicate()[0].splitlines()
dockerpy_installed = len(
list(filter(lambda p: p.startswith(b'docker-py=='), packages))
) > 0
if dockerpy_installed:
from .colors import red
print(
red('ERROR:'),
"Dependency conflict: an older version of the 'docker-py' package "
"is polluting the namespace. "
"Run the following command to remedy the issue:\n"
"pip uninstall docker docker-py; pip install docker",
file=sys.stderr
)
sys.exit(1)
except OSError:
# pip command is not available, which indicates it's probably the binary
# distribution of Compose which is not affected
pass
Change docker-py dependency error to a warning, update fix command
Signed-off-by: Joffrey F <2e95f49799afcec0080c0aeb8813776d949e0768@docker.com>
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import subprocess
import sys
# Attempt to detect https://github.com/docker/compose/issues/4344
try:
# We don't try importing pip because it messes with package imports
# on some Linux distros (Ubuntu, Fedora)
# https://github.com/docker/compose/issues/4425
# https://github.com/docker/compose/issues/4481
# https://github.com/pypa/pip/blob/master/pip/_vendor/__init__.py
s_cmd = subprocess.Popen(
['pip', 'freeze'], stderr=subprocess.PIPE, stdout=subprocess.PIPE
)
packages = s_cmd.communicate()[0].splitlines()
dockerpy_installed = len(
list(filter(lambda p: p.startswith(b'docker-py=='), packages))
) > 0
if dockerpy_installed:
from .colors import yellow
print(
yellow('WARNING:'),
"Dependency conflict: an older version of the 'docker-py' package "
"may be polluting the namespace. "
"If you're experiencing crashes, run the following command to remedy the issue:\n"
"pip uninstall docker-py; pip uninstall docker; pip install docker",
file=sys.stderr
)
except OSError:
# pip command is not available, which indicates it's probably the binary
# distribution of Compose which is not affected
pass
|
<commit_before>from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import subprocess
import sys
# Attempt to detect https://github.com/docker/compose/issues/4344
try:
# We don't try importing pip because it messes with package imports
# on some Linux distros (Ubuntu, Fedora)
# https://github.com/docker/compose/issues/4425
# https://github.com/docker/compose/issues/4481
# https://github.com/pypa/pip/blob/master/pip/_vendor/__init__.py
s_cmd = subprocess.Popen(
['pip', 'freeze'], stderr=subprocess.PIPE, stdout=subprocess.PIPE
)
packages = s_cmd.communicate()[0].splitlines()
dockerpy_installed = len(
list(filter(lambda p: p.startswith(b'docker-py=='), packages))
) > 0
if dockerpy_installed:
from .colors import red
print(
red('ERROR:'),
"Dependency conflict: an older version of the 'docker-py' package "
"is polluting the namespace. "
"Run the following command to remedy the issue:\n"
"pip uninstall docker docker-py; pip install docker",
file=sys.stderr
)
sys.exit(1)
except OSError:
# pip command is not available, which indicates it's probably the binary
# distribution of Compose which is not affected
pass
<commit_msg>Change docker-py dependency error to a warning, update fix command
Signed-off-by: Joffrey F <2e95f49799afcec0080c0aeb8813776d949e0768@docker.com><commit_after>
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import subprocess
import sys
# Attempt to detect https://github.com/docker/compose/issues/4344
try:
# We don't try importing pip because it messes with package imports
# on some Linux distros (Ubuntu, Fedora)
# https://github.com/docker/compose/issues/4425
# https://github.com/docker/compose/issues/4481
# https://github.com/pypa/pip/blob/master/pip/_vendor/__init__.py
s_cmd = subprocess.Popen(
['pip', 'freeze'], stderr=subprocess.PIPE, stdout=subprocess.PIPE
)
packages = s_cmd.communicate()[0].splitlines()
dockerpy_installed = len(
list(filter(lambda p: p.startswith(b'docker-py=='), packages))
) > 0
if dockerpy_installed:
from .colors import yellow
print(
yellow('WARNING:'),
"Dependency conflict: an older version of the 'docker-py' package "
"may be polluting the namespace. "
"If you're experiencing crashes, run the following command to remedy the issue:\n"
"pip uninstall docker-py; pip uninstall docker; pip install docker",
file=sys.stderr
)
except OSError:
# pip command is not available, which indicates it's probably the binary
# distribution of Compose which is not affected
pass
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import subprocess
import sys
# Attempt to detect https://github.com/docker/compose/issues/4344
try:
# We don't try importing pip because it messes with package imports
# on some Linux distros (Ubuntu, Fedora)
# https://github.com/docker/compose/issues/4425
# https://github.com/docker/compose/issues/4481
# https://github.com/pypa/pip/blob/master/pip/_vendor/__init__.py
s_cmd = subprocess.Popen(
['pip', 'freeze'], stderr=subprocess.PIPE, stdout=subprocess.PIPE
)
packages = s_cmd.communicate()[0].splitlines()
dockerpy_installed = len(
list(filter(lambda p: p.startswith(b'docker-py=='), packages))
) > 0
if dockerpy_installed:
from .colors import red
print(
red('ERROR:'),
"Dependency conflict: an older version of the 'docker-py' package "
"is polluting the namespace. "
"Run the following command to remedy the issue:\n"
"pip uninstall docker docker-py; pip install docker",
file=sys.stderr
)
sys.exit(1)
except OSError:
# pip command is not available, which indicates it's probably the binary
# distribution of Compose which is not affected
pass
Change docker-py dependency error to a warning, update fix command
Signed-off-by: Joffrey F <2e95f49799afcec0080c0aeb8813776d949e0768@docker.com>from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import subprocess
import sys
# Attempt to detect https://github.com/docker/compose/issues/4344
try:
# We don't try importing pip because it messes with package imports
# on some Linux distros (Ubuntu, Fedora)
# https://github.com/docker/compose/issues/4425
# https://github.com/docker/compose/issues/4481
# https://github.com/pypa/pip/blob/master/pip/_vendor/__init__.py
s_cmd = subprocess.Popen(
['pip', 'freeze'], stderr=subprocess.PIPE, stdout=subprocess.PIPE
)
packages = s_cmd.communicate()[0].splitlines()
dockerpy_installed = len(
list(filter(lambda p: p.startswith(b'docker-py=='), packages))
) > 0
if dockerpy_installed:
from .colors import yellow
print(
yellow('WARNING:'),
"Dependency conflict: an older version of the 'docker-py' package "
"may be polluting the namespace. "
"If you're experiencing crashes, run the following command to remedy the issue:\n"
"pip uninstall docker-py; pip uninstall docker; pip install docker",
file=sys.stderr
)
except OSError:
# pip command is not available, which indicates it's probably the binary
# distribution of Compose which is not affected
pass
|
<commit_before>from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import subprocess
import sys
# Attempt to detect https://github.com/docker/compose/issues/4344
try:
# We don't try importing pip because it messes with package imports
# on some Linux distros (Ubuntu, Fedora)
# https://github.com/docker/compose/issues/4425
# https://github.com/docker/compose/issues/4481
# https://github.com/pypa/pip/blob/master/pip/_vendor/__init__.py
s_cmd = subprocess.Popen(
['pip', 'freeze'], stderr=subprocess.PIPE, stdout=subprocess.PIPE
)
packages = s_cmd.communicate()[0].splitlines()
dockerpy_installed = len(
list(filter(lambda p: p.startswith(b'docker-py=='), packages))
) > 0
if dockerpy_installed:
from .colors import red
print(
red('ERROR:'),
"Dependency conflict: an older version of the 'docker-py' package "
"is polluting the namespace. "
"Run the following command to remedy the issue:\n"
"pip uninstall docker docker-py; pip install docker",
file=sys.stderr
)
sys.exit(1)
except OSError:
# pip command is not available, which indicates it's probably the binary
# distribution of Compose which is not affected
pass
<commit_msg>Change docker-py dependency error to a warning, update fix command
Signed-off-by: Joffrey F <2e95f49799afcec0080c0aeb8813776d949e0768@docker.com><commit_after>from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import subprocess
import sys
# Attempt to detect https://github.com/docker/compose/issues/4344
try:
# We don't try importing pip because it messes with package imports
# on some Linux distros (Ubuntu, Fedora)
# https://github.com/docker/compose/issues/4425
# https://github.com/docker/compose/issues/4481
# https://github.com/pypa/pip/blob/master/pip/_vendor/__init__.py
s_cmd = subprocess.Popen(
['pip', 'freeze'], stderr=subprocess.PIPE, stdout=subprocess.PIPE
)
packages = s_cmd.communicate()[0].splitlines()
dockerpy_installed = len(
list(filter(lambda p: p.startswith(b'docker-py=='), packages))
) > 0
if dockerpy_installed:
from .colors import yellow
print(
yellow('WARNING:'),
"Dependency conflict: an older version of the 'docker-py' package "
"may be polluting the namespace. "
"If you're experiencing crashes, run the following command to remedy the issue:\n"
"pip uninstall docker-py; pip uninstall docker; pip install docker",
file=sys.stderr
)
except OSError:
# pip command is not available, which indicates it's probably the binary
# distribution of Compose which is not affected
pass
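For comparison only: an alternative way to run the same check without shelling out to pip would be to inspect the installed distributions via pkg_resources, as sketched below. This is not what Compose does above, just an illustration; it assumes setuptools is importable in the environment.
try:
    import pkg_resources
    dockerpy_installed = any(
        dist.project_name == 'docker-py'
        for dist in pkg_resources.working_set
    )
except ImportError:
    # setuptools not available; assume no conflicting package.
    dockerpy_installed = False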
|
004e5411e94ed3a2f372f8a66b4683eebb5da11a
|
python_version_compat_test.py
|
python_version_compat_test.py
|
#!/usr/bin/env python
import subprocess
import unittest
class TestPy2Py3Compat(unittest.TestCase):
"""We need to be compatible with both python2 and 3.
Test that we can at least import runlint.py under both.
"""
def test_python2_compat(self):
subprocess.check_call(['python2', '-c', 'import runlint'])
def test_python3_compat(self):
subprocess.check_call(['python3', '-c', 'import runlint'])
|
Add a test that imports runlint under both python2 and python3
|
Add a test that imports runlint under both python2 and python3
Summary:
khan-linter now runs under both python2 and python3, but it's going to be easy
to break that. This commit adds a test that checks that runlint.py can be
imported in both python2 and python3. This won't catch everything, but will
prevent a few common issues, like print statements without parens and octal
literals for permissions.
Test Plan:
- `python -m unittest python_version_compat_test`
- verify it passes
- add a `print 'broken'` to runlint.py
- repeat, and verify the python3 test fails
Reviewers: csilvers
Reviewed By: csilvers
Subscribers: tom, kevinb
Differential Revision: https://phabricator.khanacademy.org/D35087
|
Python
|
apache-2.0
|
Khan/khan-linter,Khan/khan-linter,Khan/khan-linter,Khan/khan-linter
|
Add a test that imports runlint under both python2 and python3
Summary:
khan-linter now runs under both python2 and python3, but it's going to be easy
to break that. This commit adds a test that checks that runlint.py can be
imported in both python2 and python3. This won't catch everything, but will
prevent a few common issues, like print statements without parens and octal
literals for permissions.
Test Plan:
- `python -m unittest python_version_compat_test`
- verify it passes
- add a `print 'broken'` to runlint.py
- repeat, and verify the python3 test fails
Reviewers: csilvers
Reviewed By: csilvers
Subscribers: tom, kevinb
Differential Revision: https://phabricator.khanacademy.org/D35087
|
#!/usr/bin/env python
import subprocess
import unittest
class TestPy2Py3Compat(unittest.TestCase):
"""We need to be compatible with both python2 and 3.
Test that we can at least import runlint.py under both.
"""
def test_python2_compat(self):
subprocess.check_call(['python2', '-c', 'import runlint'])
def test_python3_compat(self):
subprocess.check_call(['python3', '-c', 'import runlint'])
|
<commit_before><commit_msg>Add a test that imports runlint under both python2 and python3
Summary:
khan-linter now runs under both python2 and python3, but it's going to be easy
to break that. This commit adds a test that checks that runlint.py can be
imported in both python2 and python3. This won't catch everything, but will
prevent a few common issues, like print statements without parens and octal
literals for permissions.
Test Plan:
- `python -m unittest python_version_compat_test`
- verify it passes
- add a `print 'broken'` to runlint.py
- repeat, and verify the python3 test fails
Reviewers: csilvers
Reviewed By: csilvers
Subscribers: tom, kevinb
Differential Revision: https://phabricator.khanacademy.org/D35087<commit_after>
|
#!/usr/bin/env python
import subprocess
import unittest
class TestPy2Py3Compat(unittest.TestCase):
"""We need to be compatible with both python2 and 3.
Test that we can at least import runlint.py under both.
"""
def test_python2_compat(self):
subprocess.check_call(['python2', '-c', 'import runlint'])
def test_python3_compat(self):
subprocess.check_call(['python3', '-c', 'import runlint'])
|
Add a test that imports runlint under both python2 and python3
Summary:
khan-linter now runs under both python2 and python3, but it's going to be easy
to break that. This commit adds a test that checks that runlint.py can be
imported in both python2 and python3. This won't catch everything, but will
prevent a few common issues, like print statements without parens and octal
literals for permissions.
Test Plan:
- `python -m unittest python_version_compat_test`
- verify it passes
- add a `print 'broken'` to runlint.py
- repeat, and verify the python3 test fails
Reviewers: csilvers
Reviewed By: csilvers
Subscribers: tom, kevinb
Differential Revision: https://phabricator.khanacademy.org/D35087#!/usr/bin/env python
import subprocess
import unittest
class TestPy2Py3Compat(unittest.TestCase):
"""We need to be compatible with both python2 and 3.
Test that we can at least import runlint.py under both.
"""
def test_python2_compat(self):
subprocess.check_call(['python2', '-c', 'import runlint'])
def test_python3_compat(self):
subprocess.check_call(['python3', '-c', 'import runlint'])
|
<commit_before><commit_msg>Add a test that imports runlint under both python2 and python3
Summary:
khan-linter now runs under both python2 and python3, but it's going to be easy
to break that. This commit adds a test that checks that runlint.py can be
imported in both python2 and python3. This won't catch everything, but will
prevent a few common issues, like print statements without parens and octal
literals for permissions.
Test Plan:
- `python -m unittest python_version_compat_test`
- verify it passes
- add a `print 'broken'` to runlint.py
- repeat, and verify the python3 test fails
Reviewers: csilvers
Reviewed By: csilvers
Subscribers: tom, kevinb
Differential Revision: https://phabricator.khanacademy.org/D35087<commit_after>#!/usr/bin/env python
import subprocess
import unittest
class TestPy2Py3Compat(unittest.TestCase):
"""We need to be compatible with both python2 and 3.
Test that we can at least import runlint.py under both.
"""
def test_python2_compat(self):
subprocess.check_call(['python2', '-c', 'import runlint'])
def test_python3_compat(self):
subprocess.check_call(['python3', '-c', 'import runlint'])
|
|
09c48a690958502568b76dc10bce751614a63f38
|
udpSender.py
|
udpSender.py
|
import socket
UDP_IP = "192.168.1.1"
UDP_PORT = 1234
MESSAGE = "Hello, World!"
print "UDP target IP:", UDP_IP
print "UDP target port:", UDP_PORT
print "message:", MESSAGE
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.sendto(MESSAGE, (UDP_IP, UDP_PORT))
|
Add udp sender for debug purpose
|
Add udp sender for debug purpose
|
Python
|
mit
|
baptistelabat/orgie
|
Add udp sender for debug purpose
|
import socket
UDP_IP = "192.168.1.1"
UDP_PORT = 1234
MESSAGE = "Hello, World!"
print "UDP target IP:", UDP_IP
print "UDP target port:", UDP_PORT
print "message:", MESSAGE
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.sendto(MESSAGE, (UDP_IP, UDP_PORT))
|
<commit_before><commit_msg>Add udp sender for debug purpose<commit_after>
|
import socket
UDP_IP = "192.168.1.1"
UDP_PORT = 1234
MESSAGE = "Hello, World!"
print "UDP target IP:", UDP_IP
print "UDP target port:", UDP_PORT
print "message:", MESSAGE
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.sendto(MESSAGE, (UDP_IP, UDP_PORT))
|
Add udp sender for debug purposeimport socket
UDP_IP = "192.168.1.1"
UDP_PORT = 1234
MESSAGE = "Hello, World!"
print "UDP target IP:", UDP_IP
print "UDP target port:", UDP_PORT
print "message:", MESSAGE
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.sendto(MESSAGE, (UDP_IP, UDP_PORT))
|
<commit_before><commit_msg>Add udp sender for debug purpose<commit_after>import socket
UDP_IP = "192.168.1.1"
UDP_PORT = 1234
MESSAGE = "Hello, World!"
print "UDP target IP:", UDP_IP
print "UDP target port:", UDP_PORT
print "message:", MESSAGE
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.sendto(MESSAGE, (UDP_IP, UDP_PORT))
|